I am trying to build an application with the Spark Job Server API (for Spark 2.2.0), but I found that there seems to be no support for named objects with SparkSession. My code looks like this:
import com.typesafe.config.Config
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel
import org.scalactic._
import spark.jobserver.{NamedDataFrame, NamedObjectSupport, SparkSessionJob}
import spark.jobserver.api.{JobEnvironment, SingleProblem, ValidationProblem}

import scala.util.Try

object word1 extends SparkSessionJob with NamedObjectSupport {
  type JobData = Seq[String]
  type JobOutput = String

  def runJob(sparkSession: SparkSession, runtime: JobEnvironment, data: JobData): JobOutput = {
    import sparkSession.implicits._
    // parallelize alone yields an RDD[String]; convert it to a DataFrame
    // so it can be wrapped in a NamedDataFrame
    val df = sparkSession.sparkContext.parallelize(data).toDF("word")
    val ndf = NamedDataFrame(df, true, StorageLevel.MEMORY_ONLY)
    this.namedObjects.update("df1", ndf) // <-- error here
    this.namedObjects.getNames().toString
  }

  def validate(sparkSession: SparkSession, runtime: JobEnvironment, config: Config):
      JobData Or Every[ValidationProblem] = {
    Try(config.getString("input.string").split(" ").toSeq)
      .map(words => Good(words))
      .getOrElse(Bad(One(SingleProblem("No input.string param"))))
  }
}
but there is a compile error at the line this.namedObjects.update(...). It looks like named objects are not supported here, while the same code compiles when the job extends SparkJob:
object word1 extends SparkJob with NamedObjectSupport
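For reference, the legacy-API version that compiles for me has roughly the following shape (signatures as I understand them from the old spark.jobserver API; the body is trimmed to the relevant calls):

import com.typesafe.config.Config
import org.apache.spark.SparkContext
import spark.jobserver.{NamedObjectSupport, SparkJob, SparkJobValid, SparkJobValidation}

object word1 extends SparkJob with NamedObjectSupport {
  // Legacy API: jobs receive a SparkContext, and NamedObjectSupport
  // exposes the namedObjects manager directly on the job instance.
  def runJob(sc: SparkContext, jobConfig: Config): Any = {
    this.namedObjects.getNames().toString // compiles fine here
  }

  def validate(sc: SparkContext, config: Config): SparkJobValidation = SparkJobValid
}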
Is there support for namedObjects with SparkSessionJob? If not, what is the workaround to persist a DataFrame/Dataset between jobs?
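The workaround I am currently considering relies on the fact that all jobs submitted to the same context share one SparkSession: persist the DataFrame and register it as a temp view in one job, then look it up by name in a later job. A minimal sketch (the view name df1 and column name word are my own choices; this runs inside runJob from the job above, with import sparkSession.implicits._ in scope):

// Job 1: cache the DataFrame and publish it under a name
val df = sparkSession.sparkContext.parallelize(data).toDF("word")
df.persist(StorageLevel.MEMORY_ONLY)
df.createOrReplaceTempView("df1")

// Job 2, run later in the same (shared) context:
val df1 = sparkSession.table("df1")

I also noticed that JobEnvironment in spark.jobserver.api appears to declare a namedObjects member, so perhaps runtime.namedObjects.update("df1", ndf) is the intended route in the new API, but I have not been able to confirm this.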