
I'm trying to run K-means on Apache Spark with Scala. When I use the example from the Spark website https://spark.apache.org/docs/2.3.0/ml-clustering.html everything works fine, but when I try to use a CSV file I get this issue:

scala> val censocsv = spark.read.format("csv").option("sep",",").option("inferSchema","true").option("header", "true").load("censodiscapacidad.csv")
2018-10-01 21:58:31 WARN  SizeEstimator:66 - Failed to check whether UseCompressedOops is set; assuming yes
2018-10-01 21:58:49 WARN  ObjectStore:568 - Failed to get database global_temp, returning NoSuchObjectException
censocsv: org.apache.spark.sql.DataFrame = [ANIO: int, DELEGACION: double ... 123 more fields]

scala> val kmeans = new KMeans().setK(2).setSeed(1L)
kmeans: org.apache.spark.ml.clustering.KMeans = kmeans_860c02e56190

scala> val model = kmeans.fit(censocsv)
java.lang.IllegalArgumentException: Field "features" does not exist.
  at org.apache.spark.sql.types.StructType$$anonfun$apply$1.apply(StructType.scala:267)
  at org.apache.spark.sql.types.StructType$$anonfun$apply$1.apply(StructType.scala:267)
  at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
  at scala.collection.AbstractMap.getOrElse(Map.scala:59)
  at org.apache.spark.sql.types.StructType.apply(StructType.scala:266)
  at org.apache.spark.ml.util.SchemaUtils$.checkColumnType(SchemaUtils.scala:40)
  at org.apache.spark.ml.clustering.KMeansParams$class.validateAndTransformSchema(KMeans.scala:93)
  at org.apache.spark.ml.clustering.KMeans.validateAndTransformSchema(KMeans.scala:254)
  at org.apache.spark.ml.clustering.KMeans.transformSchema(KMeans.scala:340)
  at org.apache.spark.ml.PipelineStage.transformSchema(Pipeline.scala:74)
  at org.apache.spark.ml.clustering.KMeans.fit(KMeans.scala:305)
  ... 51 elided

scala> val predictions = model.transform(censocsv)
<console>:31: error: not found: value model
       val predictions = model.transform(censocsv)
                         ^

scala> 

1 Answer


This looks like a duplicate of Field "features" does not exist. SparkML.
KMeans.fit expects the input DataFrame to contain a Vector column (named "features" by default) holding the values to cluster on. Reading a raw CSV only gives you scalar columns, so you need to assemble the feature columns into a single Vector column before fitting.
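For example, here is a minimal sketch using VectorAssembler. The column names in featureCols are placeholders; substitute the numeric columns from censodiscapacidad.csv that you actually want to cluster on.

import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.ml.feature.VectorAssembler

// Placeholder feature columns -- replace these with the numeric
// columns from your CSV that you want to cluster on.
val featureCols = Array("ANIO", "DELEGACION")

// Assemble the chosen columns into a single Vector column named
// "features", which is the column KMeans looks for by default.
val assembler = new VectorAssembler()
  .setInputCols(featureCols)
  .setOutputCol("features")

val assembled = assembler.transform(censocsv)

val kmeans = new KMeans().setK(2).setSeed(1L)
val model = kmeans.fit(assembled)
val predictions = model.transform(assembled)

Note that VectorAssembler only accepts numeric, boolean, and vector input columns; any string columns would need to be indexed or encoded first.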
