affRo/strings.py

import json
from affro_cluster import *
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, explode, first, collect_list, udf
from pyspark.sql.types import ArrayType, MapType, StringType
import sys
spark = SparkSession.builder.appName("JSONProcessing").getOrCreate()
folder_path = sys.argv[1]       # input folder with the JSON records
hdfs_output_path = sys.argv[2]  # output path on HDFS
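
# Example invocation (the paths below are made up and shown only to illustrate the two arguments):
#   spark-submit strings.py /path/to/input_json_folder /path/to/hdfs_output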
# Applies affro to a single raw affiliation string and returns just the list of matchings
def oalex_affro(doi, aff_string):
    try:
        matchings = affro(aff_string)
        if not isinstance(matchings, list):
            matchings = [matchings]
        return matchings
    except Exception as e:
        print(f"Error processing record with doi {doi}: {str(e)}")
        return []
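
# Minimal sketch of a direct call (the DOI and affiliation string below are made up;
# the exact shape of each matching depends on what affro returns):
#   oalex_affro("10.1234/example", "Dept. of Physics, University of Oslo, Norway")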

# Wrap the matcher in a Spark UDF so it can be applied column-wise; the element
# schema (a map from string to string) is an assumption about affro's output
# and may need adjusting to the real matching structure.
oalex_affro_udf = udf(oalex_affro, ArrayType(MapType(StringType(), StringType())))

(spark.read.json(folder_path)
    .filter(col("doi").isNotNull())
    .select(
        col("doi").alias("DOI"),
        col("rors").alias("OAlex"),
        explode(col("raw_aff_string")).alias("aff_string")  # split raw_aff_string into one row per string so the work parallelizes better
    )
    .withColumn("Matchings", oalex_affro_udf(col("DOI"), col("aff_string")))  # new column "Matchings" holding the result of the UDF
    .drop(col("aff_string"))
    .select(col("DOI"), col("OAlex"), explode("Matchings").alias("match"))
    .groupBy("DOI")  # group by doi to have just one row per doi
    .agg(first("OAlex").alias("OAlex"),  # OAlex is identical for every row of a doi, so take the first; collect_list re-aggregates the matchings
         collect_list("match").alias("Matchings"))
    .write
    .mode("overwrite")  # do not fail if the output folder already exists on Hadoop
    .option("compression", "gzip")  # gzip to reduce the space on disk
    .json(hdfs_output_path))