Oalex #13
@@ -30,10 +30,11 @@ spark.read.json(folder_path)
     explode(col("raw_aff_string")).alias("aff_string")  # explode raw_aff_string so each affiliation string gets its own row, which parallelizes the work better
 )
 .withColumn("Matchings", oalex_affro(col("doi"), col("aff_string")))  # create a new column named Matchings holding the result of oalex_affro applied to the doi and aff_string columns
-.select("DOI", "OAlex", "Matchings")
+.drop(col("aff_string"))
+.select(col("DOI"), col("OAlex"), explode("Matchings").alias("match"))
 .groupBy("DOI")  # group by DOI so there is exactly one row per DOI
 .agg(first("OAlex").alias("OAlex"),  # OAlex is identical for every row of a DOI, so first() suffices; collect_list aggregates the matches
-     collect_list("Matchings").alias("Matchings"))
+     collect_list("match").alias("Matchings"))
 .write
 .mode("overwrite")  # do not fail if the output folder already exists on Hadoop
 .option("compression", "gzip")  # gzip the output to reduce storage space
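For context, here is a minimal self-contained sketch of how the whole pipeline reads after this change. The input and output paths, the columns selected before the hunk starts, and the body of oalex_affro (the real matcher lives elsewhere in this repo) are placeholders for illustration, not part of the diff.

from pyspark.sql import SparkSession
from pyspark.sql.functions import col, explode, first, collect_list, udf
from pyspark.sql.types import ArrayType, StringType

spark = SparkSession.builder.appName("oalex-affro").getOrCreate()

folder_path = "hdfs:///path/to/oalex/input"   # placeholder path
output_path = "hdfs:///path/to/oalex/output"  # placeholder path

@udf(returnType=ArrayType(StringType()))
def oalex_affro(doi, aff_string):
    # Stand-in for the real affiliation matcher: returns a list of matches
    # for one (doi, affiliation string) pair.
    return [f"{doi}|{aff_string}"]

(spark.read.json(folder_path)
    .select(col("DOI"),
            col("OAlex"),
            explode(col("raw_aff_string")).alias("aff_string"))  # one row per affiliation string
    .withColumn("Matchings", oalex_affro(col("DOI"), col("aff_string")))
    .drop(col("aff_string"))
    .select(col("DOI"), col("OAlex"), explode("Matchings").alias("match"))  # one row per single match
    .groupBy("DOI")
    .agg(first("OAlex").alias("OAlex"),
         collect_list("match").alias("Matchings"))  # fold back to one row per DOI
    .write
    .mode("overwrite")
    .option("compression", "gzip")
    .json(output_path))

The point of the new explode/collect_list pair is flattening: each affiliation string produces its own list of matches, and exploding those lists before re-aggregating with collect_list yields a single flat list of matches per DOI instead of a list of lists. Note that plain explode drops rows whose match list is empty; explode_outer would keep those DOIs if that matters.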