
I finished a POC of Spark Structured Streaming with Kinesis as a source. It was working fine with Spark 2.4.5, which I had installed locally at the time.

Then at some point I had to upgrade to Spark 3.0.0 (I also tested with 3.0.1), and that is when it started failing with this error:

Continuous processing does not support StreamingRelation operations.;;
kinesis
org.apache.spark.sql.AnalysisException: Continuous processing does not support StreamingRelation operations.;;
kinesis
    at org.apache.spark.sql.catalyst.analysis.UnsupportedOperationChecker$.throwError(UnsupportedOperationChecker.scala:431)
    at org.apache.spark.sql.catalyst.analysis.UnsupportedOperationChecker$.$anonfun$checkForContinuous$1(UnsupportedOperationChecker.scala:408)
    at org.apache.spark.sql.catalyst.analysis.UnsupportedOperationChecker$.$anonfun$checkForContinuous$1$adapted(UnsupportedOperationChecker.scala:390)
    at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:177)
    at org.apache.spark.sql.catalyst.analysis.UnsupportedOperationChecker$.checkForContinuous(UnsupportedOperationChecker.scala:390)
    at org.apache.spark.sql.streaming.StreamingQueryManager.createQuery(StreamingQueryManager.scala:290)
    at org.apache.spark.sql.streaming.StreamingQueryManager.startQuery(StreamingQueryManager.scala:359)
    at org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)
    at com.niceic.dl.kinesispuller.RunnerErr$.main(RunnerErrorSpark3.scala:50)
    at com.niceic.dl.kinesispuller.Spark3ErrorTest.$anonfun$new$2(test.scala:32)
    at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
    at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
    at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
    at org.scalatest.Transformer.apply(Transformer.scala:22)
    at org.scalatest.Transformer.apply(Transformer.scala:20)
    at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:190)
    at org.scalatest.TestSuite.withFixture(TestSuite.scala:196)
    at org.scalatest.TestSuite.withFixture$(TestSuite.scala:195)
    at org.scalatest.funsuite.AnyFunSuite.withFixture(AnyFunSuite.scala:1563)
    at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:188)
    at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:200)
    at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
    at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:200)
    at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:182)
    at org.scalatest.funsuite.AnyFunSuite.runTest(AnyFunSuite.scala:1563)
    at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:233)
    at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
    at scala.collection.immutable.List.foreach(List.scala:431)
    at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
    at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
    at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
    at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:233)
    at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:232)
    at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1563)
    at org.scalatest.Suite.run(Suite.scala:1112)
    at org.scalatest.Suite.run$(Suite.scala:1094)
    at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1563)
    at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:237)
    at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
    at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:237)
    at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:236)
    at com.niceic.dl.kinesispuller.Spark3ErrorTest.org$scalatest$BeforeAndAfterAllConfigMap$$super$run(test.scala:24)
    at org.scalatest.BeforeAndAfterAllConfigMap.liftedTree1$1(BeforeAndAfterAllConfigMap.scala:248)
    at org.scalatest.BeforeAndAfterAllConfigMap.run(BeforeAndAfterAllConfigMap.scala:245)
    at org.scalatest.BeforeAndAfterAllConfigMap.run$(BeforeAndAfterAllConfigMap.scala:242)
    at com.niceic.dl.kinesispuller.Spark3ErrorTest.run(test.scala:24)
    at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
    at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1320)
    at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1314)
    at scala.collection.immutable.List.foreach(List.scala:431)
    at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1314)
    at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:993)
    at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:971)
    at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1480)
    at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:971)
    at org.scalatest.tools.Runner$.run(Runner.scala:798)
    at org.scalatest.tools.Runner.run(Runner.scala)
    at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.runScalaTest2or3(ScalaTestRunner.java:41)
    at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.main(ScalaTestRunner.java:28)

I run my code in IntelliJ (Spark installed locally with Homebrew):

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger

object RunnerErr {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .appName("KinesisSpark3Error")
      .master("local[*]")
      .getOrCreate()

    // Options for the Qubole spark-sql-kinesis source (credentials redacted).
    val kinesisSqlOptions = Map(
      "streamName" -> "shards2",
      "endpointUrl" -> "https://kinesis.us-west-2.amazonaws.com",
      "awsAccessKeyId" -> "<awsAccessKeyId>",
      "awsSecretKey" -> "<awsSecretKey>",
      "startingPosition" -> "latest"
    )

    // Source: read the Kinesis stream as a streaming DataFrame.
    val df = spark.readStream
      .format("kinesis")
      .options(kinesisSqlOptions)
      .load()

    // Sink: dump to console. The continuous trigger is what breaks on Spark 3.
    df.writeStream
      .outputMode("append")
      .format("console")
      .trigger(Trigger.Continuous("2 seconds")) // <- works on 2.4.5 only
//      .trigger(Trigger.ProcessingTime("2 seconds")) // <- works on both
      .start()
      .awaitTermination()
  }
}

I don't see any changes related to the continuous trigger in the Spark documentation between versions 2.4 and 3.0. The error comes from UnsupportedOperationChecker.scala:

  def checkForContinuous(plan: LogicalPlan, outputMode: OutputMode): Unit = {
    checkForStreaming(plan, outputMode)

    plan.foreachUp { implicit subPlan =>
      subPlan match {
        case (_: Project | _: Filter | _: MapElements | _: MapPartitions |
              _: DeserializeToObject | _: SerializeFromObject | _: SubqueryAlias |
              _: TypedFilter) =>
        case node if node.nodeName == "StreamingRelationV2" =>
        case Repartition(1, false, _) =>
        case node: Aggregate =>
          val aboveSinglePartitionCoalesce = node.find {
            case Repartition(1, false, _) => true
            case _ => false
          }.isDefined

          if (!aboveSinglePartitionCoalesce) {
            throwError(s"In continuous processing mode, coalesce(1) must be called before " +
              s"aggregate operation ${node.nodeName}.")
          }
        case node =>
          throwError(s"Continuous processing does not support ${node.nodeName} operations.")
      }
    }
  }

On the last "case node" of the snippet the debugger shows node.sourceName = "kinesis". Again: it works on Spark 2.4.5 and fails on 3.0.0 and 3.0.1.

Kinesis Spark connector used (see the build.sbt sketch below):

  • for Spark 2.4.5: "com.qubole.spark" %% "spark-sql-kinesis" % "1.2.0_spark-2.4"
  • for Spark 3+: "com.qubole.spark" %% "spark-sql-kinesis" % "1.2.0_spark-3.0"
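
For context, a minimal build.sbt sketch of the Spark 3 setup. Only the spark-sql-kinesis coordinate is confirmed above; the spark-sql coordinate, version, and scope are my assumptions about a typical setup:

// Hypothetical build.sbt excerpt; the connector line is from the list above,
// the Spark dependency is an assumption.
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql" % "3.0.1" % "provided",
  "com.qubole.spark" %% "spark-sql-kinesis" % "1.2.0_spark-3.0"
)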

At this point I believe it's a bug. What do you think? Any input would be greatly appreciated - thanks!

Update 1: I found in the Spark source code that the DataStreamReader.load method changed: for the kinesis source it returns a StreamingRelation in Spark 3, whereas in Spark 2.4.5 it returned a StreamingRelationV2. The checkForContinuous method, called later on the DataFrame's plan, accepts StreamingRelationV2 but throws on StreamingRelation.
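
You can see this yourself with a quick sketch (assuming the same spark session and kinesisSqlOptions as in the repro above) that prints the logical plan of the streaming DataFrame:

// Sketch: print the logical plan to see which relation node the kinesis
// source produces (spark and kinesisSqlOptions are assumed from above).
val df = spark.readStream
  .format("kinesis")
  .options(kinesisSqlOptions)
  .load()

// On Spark 2.4.5 the root node prints as StreamingRelationV2; on 3.0.x it
// prints as StreamingRelation, which checkForContinuous rejects.
println(df.queryExecution.logical.treeString)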

Update 2: Qubole answered: "Spark 3 has significant changes in Data source V2 APIs which is required for continuous streaming. We had to remove the code for continuous streaming to support the connector in Spark 3." (#92)
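
Given that answer, the only option I see on Spark 3 is to fall back to micro-batch execution; a minimal sketch of the same writer with the processing-time trigger from the commented-out line above:

// Workaround sketch: the Spark 3 connector dropped continuous processing,
// so use a micro-batch trigger instead of Trigger.Continuous.
df.writeStream
  .outputMode("append")
  .format("console")
  .trigger(Trigger.ProcessingTime("2 seconds"))
  .start()
  .awaitTermination()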

  • Why not use the official integration? http://spark.apache.org/docs/latest/streaming-kinesis-integration.html – zhang-yuan Jan 08 '21 at 03:23
  • That one is for DStreams, and I need to use Spark Structured Streaming. This is the only Kinesis connector that I know of that can do it. – prestone Jan 09 '21 at 07:34
  • I'd say you may want to contact Qubole or file an issue in the Qubole repository. https://github.com/qubole/kinesis-sql – Jungtaek Lim Jan 11 '21 at 00:58
