print(f"Error processing record with doi {doi}: {str(e)}")
return []
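
# The pipeline below uses oalex_affro as a column expression, which requires it
# to be registered as a Spark UDF. A minimal sketch of that registration,
# assuming the matcher is a plain Python function returning a list of
# string-to-string dicts; the stub body and the return schema are illustrative
# assumptions, not the project's actual API.
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, MapType, StringType

def _affro_stub(doi, aff_string):
    # hypothetical stand-in for the real affiliation-matching logic
    return [{"doi": doi, "aff_string": aff_string}]

oalex_affro = udf(_affro_stub, ArrayType(MapType(StringType(), StringType())))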
from pyspark.sql.functions import col, explode, first, collect_list

result = (
    spark.read.json(folder_path)
    .filter(col("doi").isNotNull())  # keep only records that have a DOI
    .select(
        col("doi").alias("DOI"),
        col("rors").alias("OAlex"),
        # one row per raw affiliation string, so matching parallelizes better
        explode(col("raw_aff_string")).alias("aff_string")
    )
    # "Matchings" holds the oalex_affro result for each (doi, aff_string) pair
    .withColumn("Matchings", oalex_affro(col("doi"), col("aff_string")))
    .groupBy("DOI")  # collapse the exploded rows back to one row per DOI
    .agg(
        first("OAlex").alias("OAlex"),  # OAlex is identical within a DOI
        collect_list("Matchings").alias("Matchings")  # gather all matchings
    )
)
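
# Illustrative follow-up: inspect the schema and persist the per-DOI matches.
# The output path below is an assumption, not the project's real location.
result.printSchema()
result.write.mode("overwrite").json("affro_matches_output")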