From 2806511e0289da6a6163dde07eee9b010b6eaa0a Mon Sep 17 00:00:00 2001
From: Miriam Baglioni
Date: Thu, 5 Dec 2024 21:26:08 +0100
Subject: [PATCH] [oalex] change collect_list to collect_set so that the same
 match will be there just one time

---
 strings.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/strings.py b/strings.py
index dea9c43..2622f56 100644
--- a/strings.py
+++ b/strings.py
@@ -5,7 +5,7 @@ from pyspark.sql.types import StringType, ArrayType, StructType, StructField, Do
 
 from affro_cluster import *
 from pyspark.sql import SparkSession
-from pyspark.sql.functions import col, explode, first, collect_list, udf
+from pyspark.sql.functions import col, explode, first, collect_set, udf
 import sys
 
 spark = SparkSession.builder.appName("JSONProcessing").getOrCreate()
@@ -81,7 +81,7 @@ affs.join(exploded, on="affiliation") \
     ) \
     .groupBy("DOI") \
     .agg(first("OAlex").alias("OAlex"), #for each DOI it says what are the other columns Since OALEX is equal for each doi just select the first, while use the collect_list function to aggregate the Matchings
-        collect_list("match").alias("Matchings") #each exploded match is collected again
+        collect_set("match").alias("Matchings") #each exploded match is collected again
     ) \
     .write \
     .mode("overwrite") \
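
For context, a minimal standalone sketch (not part of strings.py; the toy DOI and
match values below are invented for illustration) of why this change deduplicates
the aggregated matches: collect_list keeps every value it sees per group, while
collect_set keeps only distinct values.

    from pyspark.sql import SparkSession
    from pyspark.sql.functions import collect_list, collect_set

    spark = SparkSession.builder.appName("CollectSetDemo").getOrCreate()

    # Toy data: the same (DOI, match) pair appears twice, as it can after explode.
    df = spark.createDataFrame(
        [("10.1234/x", "m1"), ("10.1234/x", "m1"), ("10.1234/x", "m2")],
        ["DOI", "match"],
    )

    df.groupBy("DOI").agg(
        collect_list("match").alias("as_list"),  # ['m1', 'm1', 'm2'] -- duplicates kept
        collect_set("match").alias("as_set"),    # ['m1', 'm2'] -- duplicates removed, order not guaranteed
    ).show(truncate=False)

Note that collect_set does not guarantee element order, so the Matchings array may
come back in a different order than with collect_list.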