diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/countrypropagation/SparkCountryPropagationJob.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/countrypropagation/SparkCountryPropagationJob.java
deleted file mode 100644
index cf80649b6..000000000
--- a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/countrypropagation/SparkCountryPropagationJob.java
+++ /dev/null
@@ -1,333 +0,0 @@
-package eu.dnetlib.dhp.countrypropagation;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import eu.dnetlib.dhp.PropagationConstant;
-import eu.dnetlib.dhp.TypedRow;
-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.schema.oaf.*;
-import net.sf.saxon.expr.ContextMappingFunction;
-import net.sf.saxon.expr.flwor.Tuple;
-import net.sf.saxon.om.Item;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.sql.*;
-import org.apache.spark.sql.Dataset;
-import org.codehaus.janino.Java;
-import scala.Tuple2;
-
-import javax.sql.DataSource;
-import java.beans.Encoder;
-import java.io.File;
-import java.io.IOException;
-import java.util.*;
-
-import static eu.dnetlib.dhp.PropagationConstant.*;
-
-public class SparkCountryPropagationJob {
-
-    public static void main(String[] args) throws Exception {
-
-        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkCountryPropagationJob.class.getResourceAsStream("/eu/dnetlib/dhp/countrypropagation/input_countrypropagation_parameters.json")));
-        parser.parseArgument(args);
-        SparkConf conf = new SparkConf();
-        conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
-        final SparkSession spark = SparkSession
-                .builder()
-                .appName(SparkCountryPropagationJob.class.getSimpleName())
-                .master(parser.get("master"))
-                .config(conf)
-                .enableHiveSupport()
-                .getOrCreate();
-
-        final String inputPath = parser.get("sourcePath");
-        final String outputPath = "/tmp/provision/propagation/countrytoresultfrominstitutionalrepositories";
-
-        createOutputDirs(outputPath, FileSystem.get(spark.sparkContext().hadoopConfiguration()));
-
-        List<String> whitelist = Arrays.asList(parser.get("whitelist").split(";"));
-        List<String> allowedtypes = Arrays.asList(parser.get("allowedtypes").split(";"));
-        boolean writeUpdates = TRUE.equals(parser.get("writeUpdate"));
-        boolean saveGraph = TRUE.equals(parser.get("saveGraph"));
-
-//        datasource(spark, whitelist, outputPath, inputPath, "true".equals(parser.get("writeUpdate")),
-//                "true".equals(parser.get("saveGraph")), allowedtypes);
-//
-//    }
-//
-//
-//    private static void datasource(SparkSession spark, List<String> whitelist, String outputPath, String inputPath,
-//                                   boolean writeUpdates, boolean saveGraph, List<String> allowedtypes){
-
-        String whitelisted = "";
-        for (String i : whitelist){
-            whitelisted += " OR id = '" + i + "'";
-        }
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        Dataset<Datasource> datasource = spark.createDataset(sc.textFile(inputPath + "/datasource")
-                .map(item -> new ObjectMapper().readValue(item, Datasource.class)).rdd(), Encoders.bean(Datasource.class));
-
-        Dataset<Relation> relation = spark.createDataset(sc.textFile(inputPath + "/relation")
-                .map(item -> new ObjectMapper().readValue(item, Relation.class)).rdd(),
-                Encoders.bean(Relation.class));
-
-        Dataset<Organization> organization = spark.createDataset(sc.textFile(inputPath + "/organization")
-                .map(item -> new ObjectMapper().readValue(item, Organization.class)).rdd(), Encoders.bean(Organization.class));
-
-        Dataset<eu.dnetlib.dhp.schema.oaf.Dataset> dataset = spark.createDataset(sc.textFile(inputPath + "/dataset")
-                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.Dataset.class)).rdd(),
-                Encoders.bean(eu.dnetlib.dhp.schema.oaf.Dataset.class));
-
-        Dataset<OtherResearchProduct> other = spark.createDataset(sc.textFile(inputPath + "/otherresearchproduct")
-                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.OtherResearchProduct.class)).rdd(),
-                Encoders.bean(eu.dnetlib.dhp.schema.oaf.OtherResearchProduct.class));
-
-        Dataset<Software> software = spark.createDataset(sc.textFile(inputPath + "/software")
-                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.Software.class)).rdd(),
-                Encoders.bean(eu.dnetlib.dhp.schema.oaf.Software.class));
-
-        Dataset<Publication> publication = spark.createDataset(sc.textFile(inputPath + "/publication")
-                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.Publication.class)).rdd(),
-                Encoders.bean(eu.dnetlib.dhp.schema.oaf.Publication.class));
-
-        datasource.createOrReplaceTempView("datasource");
-        relation.createOrReplaceTempView("relation");
-        organization.createOrReplaceTempView("organization");
-//todo add length(country.classid)>0
-        String query = "SELECT source ds, target org, country.classid country " +
-                "FROM ( SELECT id " +
-                "FROM datasource " +
-                "WHERE (datainfo.deletedbyinference = false " + whitelisted + ") " +
-                getConstraintList("datasourcetype.classid = '", allowedtypes) +
-                //"datasourcetype.classid = 'pubsrepository::institutional' " +
-                //"AND (datainfo.deletedbyinference = false " + whitelisted + ") ) d " +
-                "JOIN ( SELECT source, target " +
-                "FROM relation " +
-                "WHERE relclass = 'provides' " +
-                "AND datainfo.deletedbyinference = false ) rel " +
-                "ON d.id = rel.source " +
-                "JOIN (SELECT id, country " +
-                "FROM organization " +
-                "WHERE datainfo.deletedbyinference = false ) o " +
-                "ON o.id = rel.target";
-
-        //todo broadcast
-        Dataset<Row> rels = spark.sql(query);
-        rels.createOrReplaceTempView("rels");
-
-        software.createOrReplaceTempView("software");
-        final JavaRDD<Row> toupdateresultsoftware = propagateOnResult(spark, "software");
-
-        dataset.createOrReplaceTempView("dataset");
-        final JavaRDD<Row> toupdateresultdataset = propagateOnResult(spark, "dataset");
-
-        other.createOrReplaceTempView("other");
-        final JavaRDD<Row> toupdateresultother = propagateOnResult(spark, "other");
-
-        publication.createOrReplaceTempView("publication");
-        final JavaRDD<Row> toupdateresultpublication = propagateOnResult(spark, "publication");
-
-        if(writeUpdates){
-            writeUpdates(toupdateresultsoftware, toupdateresultdataset, toupdateresultother, toupdateresultpublication, outputPath);
-        }
-
-        if(saveGraph){
-            createUpdateForSoftwareDataset(toupdateresultsoftware, inputPath, spark)
-                    .map(s -> new ObjectMapper().writeValueAsString(s))
-                    .saveAsTextFile(outputPath + "/software");
-
-            createUpdateForDatasetDataset(toupdateresultdataset, inputPath, spark)
-                    .map(d -> new ObjectMapper().writeValueAsString(d))
-                    .saveAsTextFile(outputPath + "/dataset");
-
-            createUpdateForOtherDataset(toupdateresultother, inputPath, spark)
-                    .map(o -> new ObjectMapper().writeValueAsString(o))
-                    .saveAsTextFile(outputPath + "/otherresearchproduct");
-
-            createUpdateForPublicationDataset(toupdateresultpublication, inputPath, spark)
-                    .map(p -> new ObjectMapper().writeValueAsString(p))
-                    .saveAsTextFile(outputPath + "/publication");
-        }
-
-    }
-
-    private static void writeUpdates(JavaRDD<Row> software, JavaRDD<Row> dataset, JavaRDD<Row> other, JavaRDD<Row> publication, String outputPath){
-        createUpdateForResultDatasetWrite(software, outputPath, "update_software");
-        createUpdateForResultDatasetWrite(dataset, outputPath, "update_dataset");
-        createUpdateForResultDatasetWrite(other, outputPath, "update_other");
-        createUpdateForResultDatasetWrite(publication, outputPath, "update_publication");
-    }
-
-    private static JavaRDD<OtherResearchProduct> createUpdateForOtherDataset(JavaRDD<Row> toupdateresult, String inputPath, SparkSession spark) {
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        return sc.textFile(inputPath + "/otherresearchproduct")
-                .map(item -> new ObjectMapper().readValue(item, OtherResearchProduct.class))
-                .mapToPair(s -> new Tuple2<>(s.getId(), s)).leftOuterJoin(getStringResultJavaPairRDD(toupdateresult))
-                .map(c -> {
-                    OtherResearchProduct oaf = c._2()._1();
-                    List<Country> countryList = oaf.getCountry();
-                    if (c._2()._2().isPresent()) {
-                        HashSet<String> countries = new HashSet<>();
-                        for (Qualifier country : countryList) {
-                            countries.add(country.getClassid());
-                        }
-                        Result r = c._2()._2().get();
-                        for (Country country : r.getCountry()) {
-                            if (!countries.contains(country.getClassid())) {
-                                countryList.add(country);
-                            }
-                        }
-                        oaf.setCountry(countryList);
-                    }
-                    return oaf;
-                });
-    }
-
-    private static JavaRDD<Publication> createUpdateForPublicationDataset(JavaRDD<Row> toupdateresult, String inputPath, SparkSession spark) {
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        return sc.textFile(inputPath + "/publication")
-                .map(item -> new ObjectMapper().readValue(item, Publication.class))
-                .mapToPair(s -> new Tuple2<>(s.getId(), s)).leftOuterJoin(getStringResultJavaPairRDD(toupdateresult))
-                .map(c -> {
-                    Publication oaf = c._2()._1();
-                    List<Country> countryList = oaf.getCountry();
-                    if (c._2()._2().isPresent()) {
-                        HashSet<String> countries = new HashSet<>();
-                        for (Qualifier country : countryList) {
-                            countries.add(country.getClassid());
-                        }
-                        Result r = c._2()._2().get();
-                        for (Country country : r.getCountry()) {
-                            if (!countries.contains(country.getClassid())) {
-                                countryList.add(country);
-                            }
-                        }
-                        oaf.setCountry(countryList);
-                    }
-                    return oaf;
-                });
-    }
-
-    private static JavaRDD<Software> createUpdateForSoftwareDataset(JavaRDD<Row> toupdateresult, String inputPath, SparkSession spark) {
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        return sc.textFile(inputPath + "/software")
-                .map(item -> new ObjectMapper().readValue(item, Software.class))
-                .mapToPair(s -> new Tuple2<>(s.getId(), s)).leftOuterJoin(getStringResultJavaPairRDD(toupdateresult))
-                .map(c -> {
-                    Software oaf = c._2()._1();
-                    List<Country> countryList = oaf.getCountry();
-                    if (c._2()._2().isPresent()) {
-                        HashSet<String> countries = new HashSet<>();
-                        for (Qualifier country : countryList) {
-                            countries.add(country.getClassid());
-                        }
-                        Result r = c._2()._2().get();
-                        for (Country country : r.getCountry()) {
-                            if (!countries.contains(country.getClassid())) {
-                                countryList.add(country);
-                            }
-                        }
-                        oaf.setCountry(countryList);
-                    }
-                    return oaf;
-                });
-    }
-
-    private static JavaRDD<eu.dnetlib.dhp.schema.oaf.Dataset> createUpdateForDatasetDataset(JavaRDD<Row> toupdateresult, String inputPath, SparkSession spark) {
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        return sc.textFile(inputPath + "/dataset")
-                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.Dataset.class))
-                .mapToPair(d -> new Tuple2<>(d.getId(), d)).leftOuterJoin(getStringResultJavaPairRDD(toupdateresult))
-                .map(c -> {
-                    eu.dnetlib.dhp.schema.oaf.Dataset oaf = c._2()._1();
-                    List<Country> countryList = oaf.getCountry();
-                    if (c._2()._2().isPresent()) {
-                        HashSet<String> countries = new HashSet<>();
-                        for (Qualifier country : countryList) {
-                            countries.add(country.getClassid());
-                        }
-                        Result r = c._2()._2().get();
-                        for (Country country : r.getCountry()) {
-                            if (!countries.contains(country.getClassid())) {
-                                countryList.add(country);
-                            }
-                        }
-                        oaf.setCountry(countryList);
-                    }
-                    return oaf;
-                });
-    }
-
-    private static JavaRDD<Row> propagateOnResult(SparkSession spark, String result_type) {
-        String query;
-        query = "SELECT id, inst.collectedfrom.key cf , inst.hostedby.key hb " +
-                "FROM ( SELECT id, instance " +
-                "FROM " + result_type +
-                " WHERE datainfo.deletedbyinference = false) ds " +
-                "LATERAL VIEW EXPLODE(instance) i AS inst";
-        Dataset<Row> cfhb = spark.sql(query);
-        cfhb.createOrReplaceTempView("cfhb");
-
-        return countryPropagationAssoc(spark, "cfhb").toJavaRDD();
-
-    }
-
-    private static Dataset<Row> countryPropagationAssoc(SparkSession spark, String cfhbTable){
-        String query = "SELECT id, collect_set(country) country " +
-                "FROM ( SELECT id, country " +
-                "FROM rels " +
-                "JOIN " + cfhbTable +
-                " ON cf = ds " +
-                "UNION ALL " +
-                "SELECT id , country " +
-                "FROM rels " +
-                "JOIN " + cfhbTable +
-                " ON hb = ds ) tmp " +
-                "GROUP BY id";
-        return spark.sql(query);
-    }
-
-    private static JavaPairRDD<String, Result> getStringResultJavaPairRDD(JavaRDD<Row> toupdateresult) {
-        return toupdateresult.map(c -> {
-            List<Country> countryList = new ArrayList<>();
-            List<String> tmp = c.getList(1);
-            for (String country : tmp) {
-                countryList.add(getCountry(country));
-            }
-            Result r = new Result();
-            r.setId(c.getString(0));
-            r.setCountry(countryList);
-            return r;
-        }).mapToPair(r -> new Tuple2<>(r.getId(), r));
-    }
-
-    private static void createUpdateForResultDatasetWrite(JavaRDD<Row> toupdateresult, String outputPath, String type){
-        toupdateresult.map(c -> {
-            List<Country> countryList = new ArrayList<>();
-            List<String> tmp = c.getList(1);
-            for (String country : tmp) {
-                countryList.add(getCountry(country));
-            }
-            Result r = new Result();
-            r.setId(c.getString(0));
-            r.setCountry(countryList);
-            return r;
-
-        }).map(r -> new ObjectMapper().writeValueAsString(r))
-                .saveAsTextFile(outputPath + "/" + type);
-    }
-
-
-
-}
\ No newline at end of file
diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob.java
deleted file mode 100644
index 1798aa67a..000000000
--- a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob.java
+++ /dev/null
@@ -1,118 +0,0 @@
-package eu.dnetlib.dhp.projecttoresult;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import eu.dnetlib.dhp.TypedRow;
-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.schema.oaf.*;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.io.Text;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.sql.SparkSession;
-import scala.Tuple2;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-
-import static eu.dnetlib.dhp.PropagationConstant.*;
-import static eu.dnetlib.dhp.PropagationConstant.toPair;
-
-public class SparkResultToProjectThroughSemRelJob {
-    public static void main(String[] args) throws Exception {
-
-        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkResultToProjectThroughSemRelJob.class.getResourceAsStream("/eu/dnetlib/dhp/projecttoresult/input_projecttoresult_parameters.json")));
-        parser.parseArgument(args);
-        final SparkSession spark = SparkSession
-                .builder()
-                .appName(SparkResultToProjectThroughSemRelJob.class.getSimpleName())
-                .master(parser.get("master"))
-                .enableHiveSupport()
-                .getOrCreate();
-
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-        final String inputPath = parser.get("sourcePath");
-        final String outputPath = "/tmp/provision/propagation/projecttoresult";
-
-        final List<String> allowedsemrel = Arrays.asList(parser.get("allowedsemrels").split(";"));
-
-        File directory = new File(outputPath);
-
-        if (!directory.exists()) {
-            directory.mkdirs();
-        }
-
-        JavaRDD<Relation> relations = sc.sequenceFile(inputPath + "/relation", Text.class, Text.class)
-                .map(item -> new ObjectMapper().readValue(item._2().toString(), Relation.class)).cache();
-
-        JavaPairRDD<String, TypedRow> result_result = getResultResultSemRel(allowedsemrel, relations);
-
-        JavaPairRDD<String, TypedRow> result_project = relations
-                .filter(r -> !r.getDataInfo().getDeletedbyinference())
-                .filter(r -> RELATION_RESULT_PROJECT_REL_CLASS.equals(r.getRelClass()) && RELATION_RESULTPROJECT_REL_TYPE.equals(r.getRelType()))
-                .map(r -> new TypedRow().setSourceId(r.getSource()).setTargetId(r.getTarget()))
-                .mapToPair(toPair());
-
-        //relationships from project to result. One pair for each relationship for results having allowed semantics relation with another result
-        JavaPairRDD<String, TypedRow> project_result = result_project.join(result_result)
-                .map(c -> {
-                    String projectId = c._2()._1().getTargetId();
-                    String resultId = c._2()._2().getTargetId();
-                    return new TypedRow().setSourceId(projectId).setTargetId(resultId);
-                })
-                .mapToPair(toPair());
-
-        //relationships from project to result. One Pair for each project => project id list of results related to the project
-        JavaPairRDD<String, TypedRow> project_results = relations
-                .filter(r -> !r.getDataInfo().getDeletedbyinference())
-                .filter(r -> RELATION_PROJECT_RESULT_REL_CLASS.equals(r.getRelClass()) && RELATION_RESULTPROJECT_REL_TYPE.equals(r.getRelType()))
-                .map(r -> new TypedRow().setSourceId(r.getSource()).setTargetId(r.getTarget()))
-                .mapToPair(toPair())
-                .reduceByKey((a, b) -> {
-                    if (a == null) {
-                        return b;
-                    }
-                    if (b == null) {
-                        return a;
-                    }
-                    a.addAll(b.getAccumulator());
-                    return a;
-                });
-
-
-
-        JavaRDD<Relation> newRels = project_result.join(project_results)
-                .flatMap(c -> {
-                    String resId = c._2()._1().getTargetId();
-
-                    if (c._2()._2().getAccumulator().contains(resId)) {
-                        return null;
-                    }
-                    String progId = c._2()._1().getSourceId();
-                    List<Relation> rels = new ArrayList<>();
-
-                    rels.add(getRelation(progId, resId, RELATION_PROJECT_RESULT_REL_CLASS,
-                            RELATION_RESULTPROJECT_REL_TYPE, RELATION_RESULTPROJECT_SUBREL_TYPE, PROPAGATION_DATA_INFO_TYPE,
-                            PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_ID, PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_NAME));
-                    rels.add(getRelation(resId, progId, RELATION_RESULT_PROJECT_REL_CLASS,
-                            RELATION_RESULTPROJECT_REL_TYPE, RELATION_RESULTPROJECT_SUBREL_TYPE, PROPAGATION_DATA_INFO_TYPE,
-                            PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_ID, PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_NAME));
-                    return rels.iterator();
-                })
-                .cache();
-
-        newRels.map(p -> new ObjectMapper().writeValueAsString(p))
-                .saveAsTextFile(outputPath + "/relation_new");
-
-        newRels.union(relations).map(p -> new ObjectMapper().writeValueAsString(p))
-                .saveAsTextFile(outputPath + "/relation");
-
-    }
-
-
-
-
-}
diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob2.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob2.java
deleted file mode 100644
index 9dbbf140b..000000000
--- a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob2.java
+++ /dev/null
@@ -1,222 +0,0 @@
-package eu.dnetlib.dhp.projecttoresult;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import eu.dnetlib.dhp.TypedRow;
-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.schema.oaf.Relation;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.sql.Dataset;
-import org.apache.spark.sql.Encoders;
-import org.apache.spark.sql.Row;
-import org.apache.spark.sql.SparkSession;
-import scala.Tuple2;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-
-import static eu.dnetlib.dhp.PropagationConstant.*;
-
-public class SparkResultToProjectThroughSemRelJob2 {
-    public static void main(String[] args) throws Exception {
-
-        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
-                IOUtils.toString(SparkResultToProjectThroughSemRelJob2.class
-                        .getResourceAsStream("/eu/dnetlib/dhp/projecttoresult/input_projecttoresult_parameters.json")));
-        parser.parseArgument(args);
-
-        parser.getObjectMap().keySet().stream().forEach(k -> System.out.println(k + " = " + parser.getObjectMap().get(k)));
-        SparkConf conf = new SparkConf();
conf.set("hive.metastore.uris", parser.get("hive_metastore_uris")); - final SparkSession spark = SparkSession - .builder() - .appName(SparkResultToProjectThroughSemRelJob2.class.getSimpleName()) - .master(parser.get("master")) - .config(conf) - .enableHiveSupport() - .getOrCreate(); - - - final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext()); - final String inputPath = parser.get("sourcePath"); - final String outputPath = "/tmp/provision/propagation/projecttoresult"; - boolean writeUpdates = "true".equals(parser.get("writeUpdate")); - boolean saveGraph = "true".equals(parser.get("saveGraph")); - - final List allowedsemrel = Arrays.asList(parser.get("allowedsemrels").split(";")); - - createOutputDirs(outputPath, FileSystem.get(spark.sparkContext().hadoopConfiguration())); - - Dataset relation = spark.createDataset(sc.textFile(inputPath + "/relation") - .map(item -> new ObjectMapper().readValue(item, Relation.class)).rdd(), Encoders.bean(Relation.class)); - - relation.createOrReplaceTempView("relation"); - - String query = "Select source, target " + - "from relation " + - "where datainfo.deletedbyinference = false and relClass = '" + RELATION_RESULT_PROJECT_REL_CLASS + "'"; - - Dataset resproj_relation = spark.sql(query); - - query = "Select source, target " + - "from relation " + - "where datainfo.deletedbyinference = false " + getConstraintList(" relClass = '", allowedsemrel ); - - Dataset resres_relation = spark.sql(query); - resres_relation.createOrReplaceTempView("resres_relation"); - resproj_relation.createOrReplaceTempView("resproj_relation"); - - query ="SELECT proj, collect_set(r1target) result_set " + - "FROM (" + - " SELECT r1.source as source, r1.target as r1target, r2.target as proj " + - " FROM resres_relation r1 " + - " JOIN resproj_relation r2 " + - " ON r1.source = r2.source " + - " ) tmp " + - "GROUP BY proj "; - - Dataset toaddrelations = spark.sql(query); - - query = "select target, collect_set(source) result_list from " + - "resproj_relation " + - "group by target"; - - Dataset project_resultlist = spark.sql(query); - - //if (writeUpdaes){ - toaddrelations.toJavaRDD().map(r->new ObjectMapper().writeValueAsString(r)) - .saveAsTextFile(outputPath + "/toupdaterelations"); - //} - - if(saveGraph){ - JavaRDD new_relations = toaddrelations.toJavaRDD().mapToPair(r -> new Tuple2<>(r.getString(0), r.getList(1))) - .leftOuterJoin(project_resultlist.toJavaRDD().mapToPair(pr -> new Tuple2<>(pr.getString(0), pr.getList(1)))) - .flatMap(c -> { - List toAddRel = new ArrayList<>(); - toAddRel.addAll(c._2()._1()); - if (c._2()._2().isPresent()) { - List originalRels = c._2()._2().get(); - for (Object o : originalRels) { - if (toAddRel.contains(o)) { - toAddRel.remove(o); - } - } - } - List relationList = new ArrayList<>(); - String projId = c._1(); - for (Object r : toAddRel) { - String rId = (String) r; - relationList.add(getRelation(rId, projId, RELATION_RESULT_PROJECT_REL_CLASS, RELATION_RESULTPROJECT_REL_TYPE, - RELATION_RESULTPROJECT_SUBREL_TYPE, PROPAGATION_DATA_INFO_TYPE, - PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_ID, - PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_NAME)); - relationList.add(getRelation(projId, rId, RELATION_PROJECT_RESULT_REL_CLASS, RELATION_RESULTPROJECT_REL_TYPE, - RELATION_RESULTPROJECT_SUBREL_TYPE, PROPAGATION_DATA_INFO_TYPE, - PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_ID, - PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_NAME)); - - } - if(relationList.size()==0){ - return null; - } - return relationList.iterator(); - 
-                    }).filter(r -> !(r == null));
-
-
-            new_relations.map(r -> new ObjectMapper().writeValueAsString(r))
-                    .saveAsTextFile(outputPath + "/new_relations" );
-
-            sc.textFile(inputPath + "/relation")
-                    .map(item -> new ObjectMapper().readValue(item, Relation.class))
-                    .union(new_relations)
-                    .map(r -> new ObjectMapper().writeValueAsString(r))
-                    .saveAsTextFile(outputPath + "/relation");
-
-        }
-
-
-        //JavaPairRDD<String, TypedRow> result_result = getResultResultSemRel(allowedsemrel, relations);
-
-//        JavaPairRDD<String, TypedRow> result_project = relations
-//                .filter(r -> !r.getDataInfo().getDeletedbyinference())
-//                .filter(r -> RELATION_RESULT_PROJECT_REL_CLASS.equals(r.getRelClass())
-//                        && RELATION_RESULTPROJECT_REL_TYPE.equals(r.getRelType()))
-//                .map(rel -> {
-//
-//                    TypedRow tr = new TypedRow();
-//                    tr.setSourceId(rel.getSource());
-//                    tr.setTargetId(rel.getTarget());
-//                    return tr;
-//                })
-//                .mapToPair(toPair());
-//
-//        //relationships from project to result. One pair for each relationship for results having allowed semantics relation with another result
-//        JavaPairRDD<String, TypedRow> project_result = result_project.join(result_result)
-//                .map(c -> {
-//                    String projectId = c._2()._1().getTargetId();
-//                    String resultId = c._2()._2().getTargetId();
-//                    TypedRow tr = new TypedRow(); tr.setSourceId(projectId); tr.setTargetId(resultId);
-//                    return tr;
-//                })
-//                .mapToPair(toPair());
-//
-//        //relationships from project to result. One Pair for each project => project id list of results related to the project
-//        JavaPairRDD<String, TypedRow> project_results = relations
-//                .filter(r -> !r.getDataInfo().getDeletedbyinference())
-//                .filter(r -> RELATION_PROJECT_RESULT_REL_CLASS.equals(r.getRelClass()) && RELATION_RESULTPROJECT_REL_TYPE.equals(r.getRelType()))
-//                .map(r -> {
-//                    TypedRow tr = new TypedRow(); tr.setSourceId(r.getSource()); tr.setTargetId(r.getTarget());
-//                    return tr;
-//                })
-//                .mapToPair(toPair())
-//                .reduceByKey((a, b) -> {
-//                    if (a == null) {
-//                        return b;
-//                    }
-//                    if (b == null) {
-//                        return a;
-//                    }
-//                    a.addAll(b.getAccumulator());
-//                    return a;
-//                });
-//
-//
-//
-//        JavaRDD<Relation> newRels = project_result.join(project_results)
-//                .flatMap(c -> {
-//                    String resId = c._2()._1().getTargetId();
-//
-//                    if (c._2()._2().getAccumulator().contains(resId)) {
-//                        return null;
-//                    }
-//                    String progId = c._2()._1().getSourceId();
-//                    List<Relation> rels = new ArrayList<>();
-//
-//                    rels.add(getRelation(progId, resId, RELATION_PROJECT_RESULT_REL_CLASS,
-//                            RELATION_RESULTPROJECT_REL_TYPE, RELATION_RESULTPROJECT_SUBREL_TYPE, PROPAGATION_DATA_INFO_TYPE,
-//                            PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_ID, PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_NAME));
-//                    rels.add(getRelation(resId, progId, RELATION_RESULT_PROJECT_REL_CLASS,
-//                            RELATION_RESULTPROJECT_REL_TYPE, RELATION_RESULTPROJECT_SUBREL_TYPE, PROPAGATION_DATA_INFO_TYPE,
-//                            PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_ID, PROPAGATION_RELATION_RESULT_PROJECT_SEM_REL_CLASS_NAME));
-//                    return rels.iterator();
-//                })
-//                .cache();
-//
-//        newRels.map(p -> new ObjectMapper().writeValueAsString(p))
-//                .saveAsTextFile(outputPath + "/relation_new");
-//
-//        newRels.union(relations).map(p -> new ObjectMapper().writeValueAsString(p))
-//                .saveAsTextFile(outputPath + "/relation");
-
-    }
-
-
-
-
-}