
I need to parse data from Kafka that includes one timestamp column. Unfortunately, my code returns null for the timestamp column.

Here is a sample timestamp, 2023-06-18T14:49:11.8545562+03:30, which is stored in the CreationAt column, and my entire JSON sample is as follows:

{"CustomerId":606811,"Latitude":35.834896,"Longitude":50.019657,"Response":{"Stores":[{"Id":771,"LegacyStoreId":5497,"LegacyStoreTypeId":1,"PartnerId":3,"StoreName":"test","StoreDisplayName":"test","PartnerName":"test","ServiceRadius":3,"Longitude":56.009797,"Latitude":35.829067,"StatusCode":1,"CityId":200,"CityName":null,"Description":"test","IsOk24":false,"RouteDistanceInMeter":0,"IsRouteDistanceValid":false,"IsOutOfOrders":false,"AirDistanceInMeter":1100,"IsAirDistanceInValid":true,"IsDeliveryCoverage":true,"IsNonCoverageArea":false,"Rate":3.9,"Reviews":560,"IsHighPriorityStore":false,"StoreScore":0,"PartnerRank":1,"DeliveryCost":"80000","FirstDeliveryTime":"test","Labels":[],"MinCartTotalPrice":500000},{"Id":463,"LegacyStoreId":4765,"LegacyStoreTypeId":1,"PartnerId":3,"StoreName":"test","StoreDisplayName":"test","PartnerName":"test","ServiceRadius":3,"Longitude":56.995281,"Latitude":35.82251,"StatusCode":1,"CityId":200,"CityName":null,"Description":"test","IsOk24":false,"RouteDistanceInMeter":0,"IsRouteDistanceValid":false,"IsOutOfOrders":false,"AirDistanceInMeter":2593,"IsAirDistanceInValid":true,"IsDeliveryCoverage":true,"IsNonCoverageArea":false,"Rate":3.8,"Reviews":532,"IsHighPriorityStore":false,"StoreScore":0,"PartnerRank":1,"DeliveryCost":"80000","FirstDeliveryTime":"test","Labels":[],"MinCartTotalPrice":500000}]},"Id":"f2655da4-c236-4f86-9ca0-8063a4c77da8","CreationAt":"2023-06-18T14:49:11.8545562+03:30"}

And here is my code:

from pyspark.sql import SparkSession, SQLContext
from pyspark.sql.functions import col, from_json, explode_outer
from pyspark.sql.types import (StructType, StructField, StringType, LongType,
                               IntegerType, DoubleType, TimestampType, ArrayType)

spark = SparkSession \
    .builder \
    .appName("striming") \
    .config("spark.jars.packages",
            "*****************") \
    .config('spark.driver.extraClassPath', '/usr/local/spark/resources/jars/sqljdbc42.jar') \
    .config('spark.executor.extraClassPath', '/usr/local/spark/resources/jars/sqljdbc42.jar') \
    .config("spark.cores.max", "1") \
    .config("spark.executor.memory", "1g") \
    .config("spark.executor.cores", "1") \
    .config("spark.dynamicAllocation.initialExecutors", "1") \
    .master("local[1]") \
    .getOrCreate()

sc = spark.sparkContext

sqlContext = SQLContext(sc)

kafka_df = spark.readStream \
    .format("kafka") \
    .option("kafka.bootstrap.servers", "My Kafka Servers") \
    .option("subscribe", "Event") \
    .option("startingOffsets", "earliest") \
    .option("failOnDataLoss", "false") \
    .option("multiline", "True") \
    .load()

schema = StructType([
    StructField("CreationAt", TimestampType(), True),
    StructField("CustomerId", LongType(), True),
    StructField("Id", StringType(), True),
    StructField("Latitude", DoubleType(), True),
    StructField("Longitude", DoubleType(), True),
    StructField("Response",
        StructType([
            StructField("Stores",
                ArrayType(StructType([
                    StructField("Id", IntegerType(), True),
                    StructField("LegacyStoreId", IntegerType(), True),
                    StructField("LegacyStoreTypeId", IntegerType(), True),
                    StructField("PartnerId", IntegerType(), True),
                    StructField("StoreName", StringType(), True),
                    StructField("StoreDisplayName", StringType(), True),
                    StructField("PartnerName", StringType(), True)
                ])), True),
        ]), True),
])

value_df = kafka_df.select(from_json(col("value").cast("string"), schema).alias("StoreSelection"))

value_df.printSchema()

sites_flat = value_df.selectExpr("StoreSelection")\
                   .select("StoreSelection.CustomerId", "StoreSelection.Latitude", "StoreSelection.Longitude", "StoreSelection.CreationAt", explode_outer("StoreSelection.Response.Stores").alias("Stores"))\
                   .select( "CustomerId","Latitude","Longitude", "CreationAt","Stores.LegacyStoreId", "Stores.StoreName", "Stores.PartnerName")\
                   .select( "CustomerId","Latitude","Longitude", "CreationAt","LegacyStoreId", "StoreName", "PartnerName")

sites_flat.printSchema()

def foreach_batch_function(df, epoch_id):
    # For now just show each micro-batch; the actual write is not wired up yet.
    df.show()

invoiceWriterQuery = sites_flat.writeStream \
    .foreachBatch(foreach_batch_function) \
    .outputMode("update") \
    .option("checkpointLocation", "/usr/local/airflow/dags/otime/log_file") \
    .trigger(processingTime="1 minute") \
    .start()

invoiceWriterQuery.awaitTermination()
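
In case it helps with narrowing this down, a sanity check I could add is selecting the raw CreationAt string (via get_json_object) next to the parsed value from the same Kafka value column; this is only a sketch:

    from pyspark.sql.functions import get_json_object

    # Raw JSON string vs. parsed timestamp, side by side, from the same Kafka value column.
    check_df = kafka_df.select(
        get_json_object(col("value").cast("string"), "$.CreationAt").alias("CreationAtRaw"),
        from_json(col("value").cast("string"), schema).alias("StoreSelection")
    ).select("CreationAtRaw", "StoreSelection.CreationAt")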

Thank you very much in advance
