Here is my Spark Structured Streaming code:

  import org.apache.spark.sql.SparkSession
  import org.apache.spark.sql.streaming.OutputMode

  val spark = SparkSession
    .builder
    .appName("StructuredWatermarkingWC")
    .master("local[*]")
    .config("spark.sql.streaming.checkpointLocation", "/tmp/")
    .getOrCreate()
  spark.sparkContext.setLogLevel("DEBUG")

  // Read the Kafka topic as a streaming DataFrame
  val streamingInputDF = spark.readStream.format("kafka")
    .option("kafka.bootstrap.servers", "127.0.0.1:9092")
    .option("subscribe", "streaming_test")
    .load()

  // Dump each micro-batch to the console
  val query = streamingInputDF
    .writeStream
    .format("console")
    .option("truncate", "false")
    .outputMode(OutputMode.Update())
    .start()

  query.awaitTermination()

I am getting the following exception when I run this code:

    20/03/18 11:59:15 ERROR MicroBatchExecution: Query [id = f9d5e4f8-1b03-4cc9-8fef-3c5640f5723c, runId = cb7e6d13-442b-4226-b14d-a1baa4ca7aef] terminated with error
java.lang.AbstractMethodError
    at org.apache.spark.internal.Logging$class.initializeLogIfNecessary(Logging.scala:99)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.initializeLogIfNecessary(KafkaSourceProvider.scala:369)
    at org.apache.spark.internal.Logging$class.log(Logging.scala:46)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.log(KafkaSourceProvider.scala:369)
    at org.apache.spark.internal.Logging$class.logDebug(Logging.scala:58)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.logDebug(KafkaSourceProvider.scala:369)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$ConfigUpdater.set(KafkaSourceProvider.scala:439)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.kafkaParamsForDriver(KafkaSourceProvider.scala:394)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider.createSource(KafkaSourceProvider.scala:90)
    at org.apache.spark.sql.execution.datasources.DataSource.createSource(DataSource.scala:277)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1$$anonfun$applyOrElse$1.apply(MicroBatchExecution.scala:80)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1$$anonfun$applyOrElse$1.apply(MicroBatchExecution.scala:77)
    at scala.collection.mutable.MapLike$class.getOrElseUpdate(MapLike.scala:194)
    at scala.collection.mutable.AbstractMap.getOrElseUpdate(Map.scala:80)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1.applyOrElse(MicroBatchExecution.scala:77)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1.applyOrElse(MicroBatchExecution.scala:75)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
    at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:266)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transform(TreeNode.scala:256)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.logicalPlan$lzycompute(MicroBatchExecution.scala:75)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.logicalPlan(MicroBatchExecution.scala:61)
    at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:265)
    at org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:189)
20/03/18 11:59:15 DEBUG StateStoreCoordinator: Deactivating instances related to checkpoint location cb7e6d13-442b-4226-b14d-a1baa4ca7aef: 
Exception in thread "stream execution thread for [id = f9d5e4f8-1b03-4cc9-8fef-3c5640f5723c, runId = cb7e6d13-442b-4226-b14d-a1baa4ca7aef]" Exception in thread "main" java.lang.AbstractMethodError
    at org.apache.spark.internal.Logging$class.initializeLogIfNecessary(Logging.scala:99)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.initializeLogIfNecessary(KafkaSourceProvider.scala:369)
    at org.apache.spark.internal.Logging$class.log(Logging.scala:46)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.log(KafkaSourceProvider.scala:369)
    at org.apache.spark.internal.Logging$class.logDebug(Logging.scala:58)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.logDebug(KafkaSourceProvider.scala:369)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$ConfigUpdater.set(KafkaSourceProvider.scala:439)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.kafkaParamsForDriver(KafkaSourceProvider.scala:394)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider.createSource(KafkaSourceProvider.scala:90)
    at org.apache.spark.sql.execution.datasources.DataSource.createSource(DataSource.scala:277)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1$$anonfun$applyOrElse$1.apply(MicroBatchExecution.scala:80)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1$$anonfun$applyOrElse$1.apply(MicroBatchExecution.scala:77)
    at scala.collection.mutable.MapLike$class.getOrElseUpdate(MapLike.scala:194)
    at scala.collection.mutable.AbstractMap.getOrElseUpdate(Map.scala:80)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1.applyOrElse(MicroBatchExecution.scala:77)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1.applyOrElse(MicroBatchExecution.scala:75)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
    at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:266)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transform(TreeNode.scala:256)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.logicalPlan$lzycompute(MicroBatchExecution.scala:75)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.logicalPlan(MicroBatchExecution.scala:61)
    at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:265)
    at org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:189)
org.apache.spark.sql.streaming.StreamingQueryException: null
=== Streaming Query ===
Identifier: [id = f9d5e4f8-1b03-4cc9-8fef-3c5640f5723c, runId = cb7e6d13-442b-4226-b14d-a1baa4ca7aef]
Current Committed Offsets: {}
Current Available Offsets: {}

Current State: INITIALIZING
Thread State: RUNNABLE
    at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:295)
    at org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:189)
Caused by: java.lang.AbstractMethodError
    at org.apache.spark.internal.Logging$class.initializeLogIfNecessary(Logging.scala:99)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.initializeLogIfNecessary(KafkaSourceProvider.scala:369)
    at org.apache.spark.internal.Logging$class.log(Logging.scala:46)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.log(KafkaSourceProvider.scala:369)
    at org.apache.spark.internal.Logging$class.logDebug(Logging.scala:58)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.logDebug(KafkaSourceProvider.scala:369)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$ConfigUpdater.set(KafkaSourceProvider.scala:439)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider$.kafkaParamsForDriver(KafkaSourceProvider.scala:394)
    at org.apache.spark.sql.kafka010.KafkaSourceProvider.createSource(KafkaSourceProvider.scala:90)
    at org.apache.spark.sql.execution.datasources.DataSource.createSource(DataSource.scala:277)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1$$anonfun$applyOrElse$1.apply(MicroBatchExecution.scala:80)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1$$anonfun$applyOrElse$1.apply(MicroBatchExecution.scala:77)
    at scala.collection.mutable.MapLike$class.getOrElseUpdate(MapLike.scala:194)
    at scala.collection.mutable.AbstractMap.getOrElseUpdate(Map.scala:80)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1.applyOrElse(MicroBatchExecution.scala:77)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$1.applyOrElse(MicroBatchExecution.scala:75)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
    at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:266)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transform(TreeNode.scala:256)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.logicalPlan$lzycompute(MicroBatchExecution.scala:75)
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.logicalPlan(MicroBatchExecution.scala:61)
    at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:265)
    ... 1 more
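
The trace shows the failure happening while KafkaSourceProvider initializes Spark's internal Logging trait, which is typically a symptom of binary-incompatible Spark jars on the runtime classpath. A quick diagnostic sketch (standard JVM API only, nothing Spark-specific assumed) to check which jars the two classes are actually loaded from:

    // Diagnostic sketch: print the jar each class was loaded from.
    // Both URLs should point at jars from the same Spark version.
    // Note: getCodeSource can be null for bootstrap-classloader classes.
    println(classOf[org.apache.spark.SparkContext]
      .getProtectionDomain.getCodeSource.getLocation)
    println(Class.forName("org.apache.spark.sql.kafka010.KafkaSourceProvider")
      .getProtectionDomain.getCodeSource.getLocation)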
  • @mazaneicha - Apologies, I corrected them. – Srinivas Mar 18 '20 at 13:09
  • Do you have log4j-xxx.jar in your classpath? – mazaneicha Mar 18 '20 at 13:42
  • No, I did not. I ran it via IntelliJ. Let me run it via spark-shell with log4j on the classpath – Srinivas Mar 18 '20 at 13:47
  • IntelliJ should set up the classpath correctly. What Scala versions are you using? Please show your dependencies – OneCricketeer Mar 19 '20 at 15:31
  • Seq(
      "org.apache.spark" %% "spark-core" % "2.3.0",
      "org.apache.spark" % "spark-sql_2.11" % "2.3.0",
      "org.apache.spark" % "spark-streaming_2.11" % "2.3.0",
      "org.apache.spark" %% "spark-hive" % "2.3.0",
      "org.scalatest" %% "scalatest" % "2.2.1",
      "org.apache.spark" %% "spark-streaming-twitter" % "1.6.3",
      "org.apache.bahir" % "spark-streaming-twitter_2.11" % "2.2.0",
      "org.apache.spark" %% "spark-streaming-kafka" % "1.6.3",
      "org.apache.spark" %% "spark-sql-kafka-0-10" % "2.2.0"
    ) – Srinivas Mar 20 '20 at 16:22
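
The dependency list above mixes Spark 2.3.0, 2.2.0, and 1.6.3 artifacts; in particular spark-sql-kafka-0-10 is at 2.2.0 while spark-core is at 2.3.0, which is a plausible cause of the AbstractMethodError. A minimal sbt sketch with the versions aligned, assuming Spark 2.3.0 on Scala 2.11 (the Twitter and old DStream Kafka artifacts are omitted here since this code only uses Structured Streaming):

    // build.sbt — a sketch, not verified against this project; assumes
    // Spark 2.3.0 on Scala 2.11. All Spark artifacts must share one
    // version so internal traits like org.apache.spark.internal.Logging
    // have matching binary signatures at runtime.
    scalaVersion := "2.11.12"

    libraryDependencies ++= Seq(
      "org.apache.spark" %% "spark-core"           % "2.3.0",
      "org.apache.spark" %% "spark-sql"            % "2.3.0",
      "org.apache.spark" %% "spark-sql-kafka-0-10" % "2.3.0", // was 2.2.0 above
      "org.scalatest"    %% "scalatest"            % "2.2.1" % Test
    )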
