I am not able to capture the newly inserted rows in a DataFrame. I have researched this and only found examples in SQL, nothing in Python.
# implementing Auto Loader
autoloader_df1 = (spark.readStream.format("cloudFiles")
    .options(**cloudFilesConf)
    .option("cloudFiles.format", "csv")
    .option("header", True)
    .option("readChangeFeed", "true")
    .option("enableChangeDataFeed", "true")
    .schema(dataset_schema)
    .load("wasbs://landing@pocbatabricks1.blob.core.windows.net/NYC_Taxi")  # path where the files are uploaded
)
main_table = (spark.read.format("csv")
    .options(**cloudFilesConf)
    .option("header", True)
    .option("readChangeFeed", "true")
    .schema(dataset_schema)
    .load("wasbs://landing@pocbatabricks1.blob.core.windows.net/NYC_Taxi")
)
main_table.write.format("delta").mode("overwrite").save("/mnt/TaxiMount/bronze/cdc/main-table")
main_table.write.format("delta").mode("overwrite").save("/mnt/TaxiMount/bronze/cdc/update-table")
main_df = spark.read.format("delta").option("ignoreChanges","true").load("/mnt/TaxiMount/bronze/cdc/main-table")
update_df = spark.read.format("delta").load("/mnt/TaxiMount/bronze/cdc/update-table")
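From what I have read, getting only the newly inserted rows seems to require Delta Change Data Feed, and every example I found is in SQL (table_changes). My understanding is that the PySpark version would look roughly like the sketch below (untested; it assumes CDF can be enabled on my main table as a table property):

# Untested sketch: Change Data Feed is enabled as a *table property*, not a read option
from delta.tables import DeltaTable

main_path = "/mnt/TaxiMount/bronze/cdc/main-table"

spark.sql(f"ALTER TABLE delta.`{main_path}` SET TBLPROPERTIES (delta.enableChangeDataFeed = true)")

# CDF only records changes made after it is enabled, so start reading from that version
cdf_start_version = DeltaTable.forPath(spark, main_path).history(1).select("version").first()[0]

changes_df = (spark.read.format("delta")
    .option("readChangeFeed", "true")
    .option("startingVersion", cdf_start_version)
    .load(main_path))

# _change_type distinguishes insert / update_preimage / update_postimage / delete
new_rows_df = changes_df.filter("_change_type = 'insert'")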
from pyspark.sql import SparkSession
from pyspark.sql.types import *
import json
jschema1 = '{"fields":[{"metadata":{},"name":"id","nullable":false,"type":"string"},{"metadata":{},"name":"vendor_id","nullable":true,"type":"integer"},{"metadata":{},"name":"pickup_datetime","nullable":true,"type":"timestamp"},{"metadata":{},"name":"dropoff_datetime","nullable":true,"type":"timestamp"},{"metadata":{},"name":"passenger_count","nullable":true,"type":"integer"},{"metadata":{},"name":"pickup_longitude","nullable":true,"type":"float"},{"metadata":{},"name":"pickup_latitude","nullable":true,"type":"float"},{"metadata":{},"name":"dropoff_longitude","nullable":true,"type":"float"},{"metadata":{},"name":"dropoff_latitude","nullable":true,"type":"float"},{"metadata":{},"name":"store_and_fwd_flag","nullable":true,"type":"string"},{"metadata":{},"name":"trip_duration","nullable":true,"type":"integer"},{"metadata":{},"name":"batchId","nullable":true,"type":"long"}],"type":"struct"}'
changeLog_schema = StructType.fromJson(json.loads(jschema1))
emp_RDD = spark.sparkContext.emptyRDD()
changeLogDelta = sqlContext.createDataFrame(data=emp_RDD, schema=changeLog_schema)
changeLogDelta.write.format("delta").mode("overwrite").save("/mnt/TaxiMount/bronze/cdc/change_log3")
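(For reference, I think the empty change-log DataFrame could also be created directly from the schema, without the empty RDD; untested:)

# Untested alternative: build the empty change-log DataFrame straight from the schema
changeLogDelta = spark.createDataFrame([], schema=changeLog_schema)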
%scala
//to write into main delta and change log
import org.apache.spark.sql._
import io.delta.tables._
import org.apache.spark.sql.functions.{col, lit, when}
val mainDelta = DeltaTable.forPath("/mnt/TaxiMount/bronze/cdc/main-table")
val changeLog = DeltaTable.forPath("/mnt/TaxiMount/bronze/cdc/change_log3")
def upsertToDeltaCaptureCDC(microBatchOutputDF: DataFrame, batchId: Long): Unit = {
  val batchDF = microBatchOutputDF.withColumn("batchId", lit(batchId)).dropDuplicates(Seq("id", "batchId"))

  // update matching rows in the main table
  mainDelta.as("m")
    .merge(batchDF.as("b"), "m.id = b.id")
    .whenMatched()
    .updateExpr(Map("vendor_id" -> "b.vendor_id", "pickup_datetime" -> "b.pickup_datetime", "dropoff_datetime" -> "b.dropoff_datetime", "passenger_count" -> "b.passenger_count", "pickup_longitude" -> "b.pickup_longitude", "pickup_latitude" -> "b.pickup_latitude", "dropoff_longitude" -> "b.dropoff_longitude", "store_and_fwd_flag" -> "b.store_and_fwd_flag", "trip_duration" -> "b.trip_duration"))
    .execute()

  // insert new rows into the change log
  changeLog.as("c")
    .merge(batchDF.as("b"), "c.id = b.id AND c.batchId = b.batchId")
    .whenNotMatched()
    .insertExpr(Map("id" -> "b.id", "vendor_id" -> "b.vendor_id", "pickup_datetime" -> "b.pickup_datetime", "dropoff_datetime" -> "b.dropoff_datetime", "passenger_count" -> "b.passenger_count", "pickup_longitude" -> "b.pickup_longitude", "pickup_latitude" -> "b.pickup_latitude", "dropoff_longitude" -> "b.dropoff_longitude", "store_and_fwd_flag" -> "b.store_and_fwd_flag", "trip_duration" -> "b.trip_duration", "batchId" -> "b.batchId"))
    .execute()
}
%scala
import org.apache.spark.sql.streaming.Trigger
val streamingUpserts = spark.readStream.format("delta").load("/mnt/TaxiMount/bronze/cdc/update-table")

streamingUpserts.writeStream
  .format("delta")
  .foreachBatch(upsertToDeltaCaptureCDC _)
  .trigger(Trigger.ProcessingTime("2 seconds"))
  .option("ignoreChanges", "true")
  .option("checkpointLocation", "/mnt/TaxiMount/bronze/cdc/update-table-checkpoint3/")
  .start()
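For completeness, this is roughly how I believe the same merge logic would look in PySpark (untested sketch, translated directly from the Scala above; I am not running this version):

# Untested PySpark translation of the Scala foreachBatch upsert above
from delta.tables import DeltaTable
from pyspark.sql.functions import lit

main_delta = DeltaTable.forPath(spark, "/mnt/TaxiMount/bronze/cdc/main-table")
change_log = DeltaTable.forPath(spark, "/mnt/TaxiMount/bronze/cdc/change_log3")

def upsert_to_delta_capture_cdc(micro_batch_df, batch_id):
    batch_df = (micro_batch_df
                .withColumn("batchId", lit(batch_id))
                .dropDuplicates(["id", "batchId"]))

    # update existing rows in the main table (all non-key columns)
    update_cols = {c: f"b.{c}" for c in batch_df.columns if c not in ("id", "batchId")}
    (main_delta.alias("m")
        .merge(batch_df.alias("b"), "m.id = b.id")
        .whenMatchedUpdate(set=update_cols)
        .execute())

    # insert new rows into the change log
    insert_cols = {c: f"b.{c}" for c in batch_df.columns}
    (change_log.alias("c")
        .merge(batch_df.alias("b"), "c.id = b.id AND c.batchId = b.batchId")
        .whenNotMatchedInsert(values=insert_cols)
        .execute())

(spark.readStream.format("delta")
    .load("/mnt/TaxiMount/bronze/cdc/update-table")
    .writeStream
    .foreachBatch(upsert_to_delta_capture_cdc)
    .trigger(processingTime="2 seconds")
    .option("ignoreChanges", "true")
    .option("checkpointLocation", "/mnt/TaxiMount/bronze/cdc/update-table-checkpoint3/")
    .start())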
Now, when running the Scala streaming code above I get the following error:

java.lang.UnsupportedOperationException: Detected a data update (for example part-00000-dfcab3cb-472d-4151-827d-4a366fc821d0-c000.snappy.parquet) in the source table at version 4. This is currently not supported. If you'd like to ignore updates, set the option 'ignoreChanges' to 'true'. If you would like the data update to be reflected, please restart this query with a fresh checkpoint directory.
I have tried changing the checkpoint path, but I still get the same error.