
I'm running a PySpark job on Google Cloud Dataproc that uses Structured Streaming with a 'once' trigger. The job reads Parquet data from a raw layer (a GCS bucket), applies business rules, and writes the result in Delta format to a trusted layer (another GCS bucket). I'm using the Dataproc 2.1-debian11 image with Spark 3.3.0 and Delta 2.3.0.
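For context, here's a simplified sketch of what the job does (bucket paths, the schema, the checkpoint location, and the business-rule logic below are placeholders, not the real values):

from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.types import StructType, StructField, StringType, LongType

spark = SparkSession.builder.appName("promo-raw-to-trusted").getOrCreate()

# Illustrative schema -- the real job uses the actual schema of the raw Parquet files
raw_schema = StructType([
    StructField("promo_id", LongType()),
    StructField("status", StringType()),
])

# Stream-read Parquet files from the raw layer (GCS bucket); path is a placeholder
raw_df = (
    spark.readStream
    .format("parquet")
    .schema(raw_schema)
    .load("gs://raw-bucket/NAPSE/PROMOCIONES/PROMO_ENCABEZADO/")
)

# Apply business rules (placeholder transformation)
trusted_df = raw_df.filter(F.col("status") == "ACTIVE")

# Write to the trusted layer in Delta format; the 'once' trigger processes all
# available data in a single micro-batch and then stops the query.
query = (
    trusted_df.writeStream
    .format("delta")
    .outputMode("append")
    .option("checkpointLocation", "gs://trusted-bucket/PROMO_ENCABEZADO/_checkpoint")
    .trigger(once=True)
    .start("gs://trusted-bucket/PROMO_ENCABEZADO/")
)

query.awaitTermination()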

With this setup I'm encountering an odd issue: the job logs a java.io.IOException, but the error doesn't stop execution and the job completes successfully.

Here's the error message I'm getting:

ERROR MicroBatchExecution: Query [id = a7859181-5d6e-4cb4-a4bc-93bbba0de6bf, runId = bd43ed9f-7300-4df5-b806-85badb86db5f] terminated with error
java.io.IOException: Failed to write 941418 bytes in 'gs://promo-bucket-data/bk-promo-co-grupoexito-datalake-dev/NAPSE/PROMOCIONES/PROMO_ENCABEZADO/_checkpoint/sources/0/.0.0eaeaaaa-014a-4d64-b7d8-1609c46c0c75.tmp'
    at com.google.cloud.hadoop.repackaged.gcs.com.google.cloud.hadoop.util.BaseAbstractGoogleAsyncWriteChannel.write(BaseAbstractGoogleAsyncWriteChannel.java:136) ~[gcs-connector-hadoop3-2.2.16.jar:?]
    at java.nio.channels.Channels.writeFullyImpl(Channels.java:74) ~[?:?]
    at java.nio.channels.Channels.writeFully(Channels.java:97) ~[?:?]
    at java.nio.channels.Channels$1.write(Channels.java:172) ~[?:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.FilterOutputStream.close(FilterOutputStream.java:182) ~[?:?]
    at com.google.cloud.hadoop.fs.gcs.GoogleHadoopOutputStream.lambda$close$2(GoogleHadoopOutputStream.java:144) ~[gcs-connector-hadoop3-2.2.16.jar:?]
    at com.google.cloud.hadoop.fs.gcs.GhfsStorageStatistics.trackDuration(GhfsStorageStatistics.java:77) ~[gcs-connector-hadoop3-2.2.16.jar:?]
    at com.google.cloud.hadoop.fs.gcs.GoogleHadoopOutputStream.close(GoogleHadoopOutputStream.java:136) ~[gcs-connector-hadoop3-2.2.16.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-client-api-3.3.3.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-client-api-3.3.3.jar:?]
    at org.apache.spark.sql.execution.streaming.CheckpointFileManager$RenameBasedFSDataOutputStream.close(CheckpointFileManager.scala:152) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.HDFSMetadataLog.$anonfun$addNewBatchByStream$2(HDFSMetadataLog.scala:176) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23) ~[scala-library-2.12.14.jar:?]
    at scala.Option.getOrElse(Option.scala:189) ~[scala-library-2.12.14.jar:?]
    at org.apache.spark.sql.execution.streaming.HDFSMetadataLog.addNewBatchByStream(HDFSMetadataLog.scala:171) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.HDFSMetadataLog.add(HDFSMetadataLog.scala:116) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.CompactibleFileStreamLog.add(CompactibleFileStreamLog.scala:168) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.FileStreamSourceLog.add(FileStreamSourceLog.scala:66) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.FileStreamSource.fetchMaxOffset(FileStreamSource.scala:198) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.FileStreamSource.latestOffset(FileStreamSource.scala:340) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$constructNextBatch$4(MicroBatchExecution.scala:448) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:375) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:373) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:68) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$constructNextBatch$2(MicroBatchExecution.scala:447) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286) ~[scala-library-2.12.14.jar:?]
    at scala.collection.Iterator.foreach(Iterator.scala:943) ~[scala-library-2.12.14.jar:?]
    at scala.collection.Iterator.foreach$(Iterator.scala:943) ~[scala-library-2.12.14.jar:?]
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1431) ~[scala-library-2.12.14.jar:?]
    at scala.collection.IterableLike.foreach(IterableLike.scala:74) ~[scala-library-2.12.14.jar:?]
    at scala.collection.IterableLike.foreach$(IterableLike.scala:73) ~[scala-library-2.12.14.jar:?]
    at scala.collection.AbstractIterable.foreach(Iterable.scala:56) ~[scala-library-2.12.14.jar:?]
    at scala.collection.TraversableLike.map(TraversableLike.scala:286) ~[scala-library-2.12.14.jar:?]
    at scala.collection.TraversableLike.map$(TraversableLike.scala:279) ~[scala-library-2.12.14.jar:?]
    at scala.collection.AbstractTraversable.map(Traversable.scala:108) ~[scala-library-2.12.14.jar:?]
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$constructNextBatch$1(MicroBatchExecution.scala:436) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23) ~[scala-library-2.12.14.jar:?]
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.withProgressLocked(MicroBatchExecution.scala:687) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.constructNextBatch(MicroBatchExecution.scala:432) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$2(MicroBatchExecution.scala:237) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23) ~[scala-library-2.12.14.jar:?]
    at org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:375) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:373) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:68) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$1(MicroBatchExecution.scala:218) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.SingleBatchExecutor.execute(TriggerExecutor.scala:39) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:212) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.StreamExecution.$anonfun$runStream$1(StreamExecution.scala:307) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23) ~[scala-library-2.12.14.jar:?]
    at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:285) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
    at org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:208) ~[spark-sql_2.12-3.3.0.jar:3.3.0]
Caused by: java.nio.channels.ClosedByInterruptException
    at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:199) ~[?:?]
    at java.nio.channels.Channels$WritableByteChannelImpl.write(Channels.java:466) ~[?:?]
    at com.google.cloud.hadoop.repackaged.gcs.com.google.cloud.hadoop.util.BaseAbstractGoogleAsyncWriteChannel.write(BaseAbstractGoogleAsyncWriteChannel.java:133) ~[gcs-connector-hadoop3-2.2.16.jar:?]
    ... 53 more

As mentioned, the job doesn't stop and finishes correctly despite this error, but I'd like to understand what causes this error message and whether there's a way to fix or prevent it.

Any insights or suggestions would be greatly appreciated.

Thank you!

