This worked for me. The code below is written against Scala 2.13.3.
package com.spark.structured.stream.multisink
import org.apache.spark.sql.SparkSession
import java.text.SimpleDateFormat
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.functions.expr
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.types.TimestampType
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.streaming.Trigger.ProcessingTime
import org.apache.spark.sql.types.StructType
object MultipleStreamingSink extends App {

  // Local Spark session using all available cores.
  val spark = SparkSession
    .builder()
    .master("local[*]")
    .getOrCreate()

  import spark.implicits._

  // Explicit schema for the incoming CSV files. `date` arrives as a string
  // and is cast to a proper timestamp below for event-time processing.
  val csvSchema = new StructType()
    .add("name", "string")
    .add("age", "integer")
    .add("num", "integer")
    .add("date", "string")

  // NOTE: the original also passed "inferSchema" -> "true"; that option is
  // ignored when an explicit schema is supplied via .schema(...), so it has
  // been removed to avoid confusion.
  val sample = spark.readStream
    .schema(csvSchema)
    .format("csv")
    .options(Map("delimiter" -> ",", "header" -> "true"))
    .load("path/to/input/dir")

  // Convert the string date column into an event-time timestamp column.
  val sample1 = sample
    .withColumn("datetime", col("date").cast(TimestampType))
    .drop("date")

  // Aggregation 1: count of `age` per (5-minute tumbling window, name),
  // with a 10-minute watermark on the event-time column.
  val sampleAgg1 = sample1
    .withWatermark("datetime", "10 minutes")
    .groupBy(window($"datetime", "5 minutes", "5 minutes"), col("name"))
    .agg(count("age").alias("age_count"))

  // Aggregation 2: count of `name` per (5-minute tumbling window, age).
  val sampleAgg2 = sample1
    .withWatermark("datetime", "10 minutes")
    .groupBy(window($"datetime", "5 minutes", "5 minutes"), col("age"))
    .agg(count("name").alias("name_count"))

  // I have used console to stream the output, use your sinks accordingly.
  // Each query must have its OWN checkpoint location — sharing one between
  // queries corrupts offset tracking.
  val sink1 = sampleAgg1
    .withColumn("window_start_time", col("window.start"))
    .withColumn("window_end_time", col("window.end"))
    .drop("window")
    .writeStream
    .queryName("count by name")
    .option("checkpointLocation", "/tmp/1")
    .outputMode(OutputMode.Update())
    .trigger(Trigger.ProcessingTime("60 seconds"))
    .format("console")
    .option("numRows", 100)
    .option("truncate", false)
    .start()

  val sink2 = sampleAgg2
    .withColumn("window_start_time", col("window.start"))
    .withColumn("window_end_time", col("window.end"))
    .drop("window")
    .writeStream
    .option("checkpointLocation", "/tmp/2")
    .queryName("count by age")
    .outputMode(OutputMode.Update())
    .trigger(Trigger.ProcessingTime("60 seconds"))
    .format("console")
    .option("numRows", 100)
    .option("truncate", false)
    .start()

  // Block until both queries terminate. If you prefer to fail fast as soon
  // as either query stops or errors, use spark.streams.awaitAnyTermination()
  // instead of awaiting each query in sequence.
  sink1.awaitTermination()
  sink2.awaitTermination()
}
This is my sample CSV file's content:
name,age,num,date
abc,28,123,2021-06-01T07:15:00
def,27,124,2021-06-01T08:16:00
abc,28,125,2021-06-01T07:15:00
ghi,28,126,2021-06-01T07:17:00