5

When I run the code below, I get the error java.lang.AssertionError: assertion failed: Found duplicate rewrite attributes. Prior to updating our Databricks runtime, this ran smoothly.

  1. top10_df is a dataframe of data with unique keys in the list groups.

  2. res_df is an aggregation of the unique keys in top10_df with min and max dates.

  3. once res_df is created and persisted it is joined back into the top10_df on the unique keys in groups.

# Unique-key columns shared by both DataFrames; used for grouping, joining,
# and window partitioning below.
groups = ['col1','col2','col3','col4']
# Aggregate expressions — `fn` is presumably pyspark.sql.functions (TODO confirm import).
min_date_created = fn.min('date_created').alias('min_date_created')
max_date_created = fn.max('date_created').alias('max_date_created')

# Per-key min/max of date_created over top10_df.
res_df = (top10_df
            .groupBy(groups)
            .agg(min_date_created
            ,max_date_created
            )
         )
res_df.persist()
# count() forces materialization so persist() actually caches before the join.
print(res_df.count())

# Window expressions — `w` is presumably pyspark.sql.Window (TODO confirm import).
# score_rank: rank rows within each key group by descending score.
score_rank = fn.row_number().over(w.partitionBy(groups).orderBy(fn.desc('score')))
# unique_issue_id: global row number over all rows ordered by the key columns
# (no partitionBy — this pulls everything into a single partition).
unique_issue_id = fn.row_number().over(w.orderBy(groups))

# Join the aggregates back, keep only rows at the latest date_created,
# then keep the single top-scoring row per key.
# NOTE(review): this is the pipeline that raises
# "AssertionError: Found duplicate rewrite attributes" on the newer runtime —
# both sides of the join carry columns originating from top10_df, which the
# optimizer can no longer disambiguate.
out_df = (top10_df.alias('t10')
                    .join(res_df.alias('res'),groups,'left')
                    .where(fn.col('t10.date_created')==fn.col('res.max_date_created'))
                    .drop(fn.col('t10.date_created'))
                    .drop(fn.col('t10.date_updated'))
                    .withColumn('score_rank',score_rank)
                    .where(fn.col('score_rank')==1)
                    .drop('score_rank'
                          ,'latest_revision_complete_hash'
                          ,'latest_revision_durable_hash'
                         )
                    .withColumn('unique_issue_id',unique_issue_id)
                   .withColumnRenamed('res.id','resource_id')
                  )

out_df.persist()
print(out_df.count())
mdl003
  • 153
  • 2
  • 10

1 Answer

0

Instead of:

 out_df = (top10_df.alias('t10')
                .join(res_df.alias('res'),groups,'left')

inside the join, select and alias all columns in your right-hand-side df (i.e. project it before joining) to disambiguate the duplicate attributes:

# Disambiguate the duplicate attributes by projecting only the needed columns
# from the right-hand side (with explicit aliases) before joining.
out_df = (
    top10_df.alias('t10')
    .join(
        # `groups` is a LIST of column names, so it must be expanded —
        # fn.col('groups') would look for a single column literally named
        # "groups" and fail with AnalysisException.
        res_df.alias('res').select(
            *[fn.col(c).alias(c) for c in groups],
            fn.col('min_date_created').alias('min_date_created'),
            fn.col('max_date_created').alias('max_date_created')
        ),
        groups,
        'left'
    )
)
ZygD
  • 22,092
  • 39
  • 79
  • 102