From 548ba915ac644d75b0604485927705ab045a68a6 Mon Sep 17 00:00:00 2001
From: "miriam.baglioni"
Date: Thu, 16 Apr 2020 15:58:42 +0200
Subject: [PATCH] first phase of data preparation. For each result type (parallel) it produces the possible updates

---
 .../PrepareResultOrcidAssociationStep1.java   | 43 ++++++++++++++++---
 1 file changed, 37 insertions(+), 6 deletions(-)

diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/PrepareResultOrcidAssociationStep1.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/PrepareResultOrcidAssociationStep1.java
index c8a7b90ce..4af652ef0 100644
--- a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/PrepareResultOrcidAssociationStep1.java
+++ b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/PrepareResultOrcidAssociationStep1.java
@@ -10,19 +10,19 @@ import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.SaveMode;
 import org.apache.spark.sql.SparkSession;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.Arrays;
 import java.util.List;
-import java.util.Optional;
 
 import static eu.dnetlib.dhp.PropagationConstant.*;
 import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;
 
-public class PrepareResultOrcidAssociation {
-    private static final Logger log = LoggerFactory.getLogger(PrepareResultOrcidAssociation.class);
+public class PrepareResultOrcidAssociationStep1 {
+    private static final Logger log = LoggerFactory.getLogger(PrepareResultOrcidAssociationStep1.class);
 
     private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
 
@@ -64,13 +64,14 @@ public class PrepareResultOrcidAssociation {
             if (isTest(parser)) {
                 removeOutputDir(spark, outputPath);
             }
-            prepareInfo(spark, inputPath, outputPath, resultClazz, resultType);
+            prepareInfo(spark, inputPath, outputPath, resultClazz, resultType, allowedsemrel);
         });
     }
 
     private static void prepareInfo(SparkSession spark, String inputPath,
                                     String outputPath, Class resultClazz,
-                                    String resultType) {
+                                    String resultType,
+                                    List allowedsemrel) {
 
         //read the relation table and the table related to the result it is using
         final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
@@ -81,8 +82,38 @@ public class PrepareResultOrcidAssociation {
 
         log.info("Reading Graph table from: {}", inputPath + "/" + resultType);
         Dataset result = readPathEntity(spark, inputPath + "/" + resultType, resultClazz);
+        result.createOrReplaceTempView("result");
 
-
+        getPossibleResultOrcidAssociation(spark, allowedsemrel, outputPath);
     }
 
+
+    private static void getPossibleResultOrcidAssociation(SparkSession spark, List allowedsemrel, String outputPath){
+        String query = " select target resultId, author authorList" +
+                " from (select id, collect_set(named_struct('name', name, 'surname', surname, 'fullname', fullname, 'orcid', orcid)) author " +
+                " from ( " +
+                " select id, MyT.fullname, MyT.name, MyT.surname, MyP.value orcid " +
+                " from result " +
+                " lateral view explode (author) a as MyT " +
+                " lateral view explode (MyT.pid) p as MyP " +
+                " where MyP.qualifier.classid = 'ORCID') tmp " +
+                " group by id) r_t " +
+                " join (" +
+                " select source, target " +
+                " from relation " +
+                " where datainfo.deletedbyinference = false " +
+                getConstraintList(" relclass = '" ,allowedsemrel) + ") rel_rel " +
+                " on source = id";
+
+        spark.sql(query)
+                .as(Encoders.bean(ResultOrcidList.class))
+                .toJSON()
+                .write()
+                .mode(SaveMode.Append)
+                .option("compression","gzip")
+                .text(outputPath)
+                ;
+    }
+
+
 }