forked from D-Net/dnet-hadoop

This commit is contained in:
parent c33a593381
commit 5e72a51f11
@@ -4,31 +4,26 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.TypedRow;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.schema.oaf.*;
+import eu.dnetlib.dhp.schema.oaf.Author;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.spark.api.java.JavaPairRDD;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.api.java.function.Function;
-import org.apache.spark.api.java.function.PairFunction;
 import org.apache.spark.sql.SparkSession;
 import scala.Tuple2;

-import java.io.File;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Set;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;

 import static eu.dnetlib.dhp.PropagationConstant.*;

 public class SparkOrcidToResultFromSemRelJob {
     public static void main(String[] args) throws Exception {

-        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkOrcidToResultFromSemRelJob.class.getResourceAsStream("/eu/dnetlib/dhp/orcidtoresultfromsemrel/input_orcid" +
-                "toresult_parameters.json")));
+        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkOrcidToResultFromSemRelJob.class.getResourceAsStream("/eu/dnetlib/dhp/orcidtoresultfromsemrel/input_orcidtoresult_parameters.json")));
         parser.parseArgument(args);
         final SparkSession spark = SparkSession
                 .builder()
@@ -42,26 +37,24 @@ public class SparkOrcidToResultFromSemRelJob {
         final String outputPath = "/tmp/provision/propagation/orcidtoresult";

         final List<String> allowedsemrel = Arrays.asList(parser.get("allowedsemrels").split(";"));
+        boolean writeUpdate = TRUE.equals(parser.get("writeUpdate"));
+        boolean saveGraph = TRUE.equals(parser.get("saveGraph"));

-        File directory = new File(outputPath);
-
-        if (!directory.exists()) {
-            directory.mkdirs();
-        }
-
-        JavaRDD<Relation> relations = sc.sequenceFile(inputPath + "/relation", Text.class, Text.class)
-                .map(item -> new ObjectMapper().readValue(item._2().toString(), Relation.class)).cache();
+        createOutputDirs(outputPath, FileSystem.get(spark.sparkContext().hadoopConfiguration()));
+
+        JavaRDD<Relation> relations = sc.textFile(inputPath + "/relation")
+                .map(item -> new ObjectMapper().readValue(item, Relation.class)).cache();

         JavaPairRDD<String, TypedRow> result_result = getResultResultSemRel(allowedsemrel, relations);

-        JavaRDD<Publication> publications = sc.sequenceFile(inputPath + "/publication", Text.class, Text.class)
-                .map(item -> new ObjectMapper().readValue(item._2().toString(), Publication.class));
-        JavaRDD<Dataset> datasets = sc.sequenceFile(inputPath + "/dataset", Text.class, Text.class)
-                .map(item -> new ObjectMapper().readValue(item._2().toString(), Dataset.class));
-        JavaRDD<Software> software = sc.sequenceFile(inputPath + "/software", Text.class, Text.class)
-                .map(item -> new ObjectMapper().readValue(item._2().toString(), Software.class));
-        JavaRDD<OtherResearchProduct> other = sc.sequenceFile(inputPath + "/otherresearchproduct", Text.class, Text.class)
-                .map(item -> new ObjectMapper().readValue(item._2().toString(), OtherResearchProduct.class));
+        JavaRDD<Publication> publications = sc.textFile(inputPath + "/publication")
+                .map(item -> new ObjectMapper().readValue(item, Publication.class));
+        JavaRDD<Dataset> datasets = sc.textFile(inputPath + "/dataset")
+                .map(item -> new ObjectMapper().readValue(item, Dataset.class));
+        JavaRDD<Software> software = sc.textFile(inputPath + "/software")
+                .map(item -> new ObjectMapper().readValue(item, Software.class));
+        JavaRDD<OtherResearchProduct> other = sc.textFile(inputPath + "/otherresearchproduct")
+                .map(item -> new ObjectMapper().readValue(item, OtherResearchProduct.class));

         //get the results having at least one author pid we are interested in
         JavaPairRDD<String, TypedRow> resultswithorcid = publications.map(p -> getTypedRow(p))
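The switch from sequenceFile to textFile in this hunk assumes each entity dump is stored as newline-delimited JSON, one record per line. A minimal sketch of the same parsing step, reusing one Jackson ObjectMapper per partition instead of allocating one per record (an optimization not in the commit; the ArrayList import is assumed):

```java
// Sketch: parse a newline-delimited JSON dump into a typed RDD,
// instantiating the ObjectMapper once per partition.
JavaRDD<Publication> publications = sc.textFile(inputPath + "/publication")
        .mapPartitions(items -> {
            ObjectMapper mapper = new ObjectMapper();
            List<Publication> parsed = new ArrayList<>();
            while (items.hasNext()) {
                parsed.add(mapper.readValue(items.next(), Publication.class));
            }
            return parsed.iterator();
        });
```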
@@ -87,15 +80,25 @@ public class SparkOrcidToResultFromSemRelJob {
         JavaPairRDD<String, Result> sfw = software.mapToPair(p -> new Tuple2<>(p.getId(),p));
         JavaPairRDD<String, Result> orp = other.mapToPair(p -> new Tuple2<>(p.getId(),p));

-        updateResult(pubs, to_add_orcid_to_result, outputPath, "publication");
-        updateResult(dss, to_add_orcid_to_result, outputPath, "dataset");
-        updateResult(sfw, to_add_orcid_to_result, outputPath, "software");
-        updateResult(orp, to_add_orcid_to_result, outputPath, "otherresearchproduct");
+        if(writeUpdate){
+            writeResult(pubs, to_add_orcid_to_result, outputPath, "publication");
+            writeResult(dss, to_add_orcid_to_result, outputPath, "dataset");
+            writeResult(sfw, to_add_orcid_to_result, outputPath, "software");
+            writeResult(orp, to_add_orcid_to_result, outputPath, "otherresearchproduct");
+        }
+
+        if (saveGraph){
+            updateResult(pubs, to_add_orcid_to_result, outputPath, "publication");
+            updateResult(dss, to_add_orcid_to_result, outputPath, "dataset");
+            updateResult(sfw, to_add_orcid_to_result, outputPath, "software");
+            updateResult(orp, to_add_orcid_to_result, outputPath, "otherresearchproduct");
+        }


     }


-    private static Author enrichAutor(Author autoritative_author, Author author) {
+    public static eu.dnetlib.dhp.schema.oaf.Author enrichAutor(eu.dnetlib.dhp.schema.oaf.Author autoritative_author, eu.dnetlib.dhp.schema.oaf.Author author) {
         boolean toaddpid = false;

         if (StringUtils.isNoneEmpty(autoritative_author.getSurname())) {
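The two branches above are driven by the writeUpdate and saveGraph flags introduced in the previous hunk, parsed as strings and compared against a constant. A minimal sketch of that pattern, assuming TRUE is the string constant "true" from PropagationConstant (an assumption, since its definition is not part of this commit):

```java
// Sketch: null-safe flag parsing; parser.get(...) may return null,
// so the constant goes first in equals().
boolean writeUpdate = "true".equals(parser.get("writeUpdate"));
boolean saveGraph = "true".equals(parser.get("saveGraph"));
```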
@@ -137,32 +140,91 @@ public class SparkOrcidToResultFromSemRelJob {
         }


+    private static List<eu.dnetlib.dhp.schema.oaf.Author> enrichAutors(List<eu.dnetlib.dhp.schema.oaf.Author> autoritative_authors,
+                                                                       List<eu.dnetlib.dhp.schema.oaf.Author> to_enrich_authors, boolean filter){
+//        List<Author> autoritative_authors = p._2()._2().get().getAuthors();
+//        List<Author> to_enrich_authors = r.getAuthor();
+
+        return to_enrich_authors
+                .stream()
+                .map(a -> {
+                    if (filter) {
+                        if (containsAllowedPid(a)) {
+                            return a;
+                        }
+                    }
+
+                    List<eu.dnetlib.dhp.schema.oaf.Author> lst = autoritative_authors.stream()
+                            .map(aa -> enrichAutor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
+                    if (lst.size() == 0) {
+                        return a;
+                    }
+                    return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
+
+                }).collect(Collectors.toList());
+    }
+
+    private static void writeResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult,
+                                    String outputPath, String type) {
+
+        results.join(toupdateresult)
+                .map(p -> {
+                    Result r = p._2()._1();
+
+                    List<eu.dnetlib.dhp.schema.oaf.Author> autoritative_authors = p._2()._2().getAuthors();
+                    List<eu.dnetlib.dhp.schema.oaf.Author> to_enrich_authors = r.getAuthor();
+
+                    r.setAuthor(enrichAutors(autoritative_authors, to_enrich_authors, false));
+//                            .stream()
+//                            .map(a -> {
+//                                if(filter) {
+//                                    if (containsAllowedPid(a)) {
+//                                        return a;
+//                                    }
+//                                }
+//
+//                                List<Author> lst = autoritative_authors.stream()
+//                                        .map(aa -> enrichAutor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
+//                                if(lst.size() == 0){
+//                                    return a;
+//                                }
+//                                return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
+//
+//                            }).collect(Collectors.toList()));
+
+                    return r;
+                })
+                .map(p -> new ObjectMapper().writeValueAsString(p))
+                .saveAsTextFile(outputPath + "/" + type + "_update");
+    }
+
+
-    private static void updateResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult, String outputPath, String type) {
+    private static void updateResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult,
+                                     String outputPath, String type) {
         results.leftOuterJoin(toupdateresult)
                 .map(p -> {
                     Result r = p._2()._1();
                     if (p._2()._2().isPresent()){
-                        List<Author> autoritative_authors = p._2()._2().get().getAuthors();
-                        List<Author> to_enrich_authors = r.getAuthor();
-                        //.stream().filter(a -> !containsAllowedPid(a))
-                        //.collect(Collectors.toList());
+                        List<eu.dnetlib.dhp.schema.oaf.Author> autoritative_authors = p._2()._2().get().getAuthors();
+                        List<eu.dnetlib.dhp.schema.oaf.Author> to_enrich_authors = r.getAuthor();

-                        r.setAuthor(to_enrich_authors
-                                .stream()
-                                .map(a -> {
-                                    if (containsAllowedPid(a)) {
-                                        return a;
-                                    }
-
-                                    List<Author> lst = autoritative_authors.stream()
-                                            .map(aa -> enrichAutor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
-                                    if(lst.size() == 0){
-                                        return a;
-                                    }
-                                    return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
-
-                                }).collect(Collectors.toList()));
+                        r.setAuthor(enrichAutors(autoritative_authors, to_enrich_authors, true));
+//                                .stream()
+//                                .map(a -> {
+//                                    if(filter) {
+//                                        if (containsAllowedPid(a)) {
+//                                            return a;
+//                                        }
+//                                    }
+//
+//                                    List<Author> lst = autoritative_authors.stream()
+//                                            .map(aa -> enrichAutor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
+//                                    if(lst.size() == 0){
+//                                        return a;
+//                                    }
+//                                    return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
+//
+//                                }).collect(Collectors.toList()));
                     }
                     return r;
                 })
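The stream in enrichAutors keeps at most one enriched variant per author and falls back to the original when no authoritative author matches. The same rule can be factored with findFirst; a sketch under those assumptions (enrichOnce is an illustrative name, and the java.util.Objects import is assumed):

```java
// Sketch: each author can be enriched at most once; enrichAutor returns
// null when the authoritative author does not match the candidate.
private static Author enrichOnce(List<Author> autoritative_authors, Author candidate) {
    return autoritative_authors.stream()
            .map(aa -> enrichAutor(aa, candidate))
            .filter(Objects::nonNull)
            .findFirst()
            .orElse(candidate);
}
```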
@@ -195,7 +257,7 @@ public class SparkOrcidToResultFromSemRelJob {

     }

-    private static boolean containsAllowedPid(Author a){
+    private static boolean containsAllowedPid(eu.dnetlib.dhp.schema.oaf.Author a){


         return (a.getPid().stream().map(pid -> {
@@ -1,40 +1,35 @@
 package eu.dnetlib.dhp.orcidtoresultfromsemrel;

 import com.fasterxml.jackson.databind.ObjectMapper;
-import eu.dnetlib.dhp.TypedRow;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.schema.oaf.*;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Text;
+import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.api.java.function.Function;
-import org.apache.spark.api.java.function.PairFunction;
+import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SparkSession;
 import scala.Tuple2;

-import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
+import java.util.*;

 import static eu.dnetlib.dhp.PropagationConstant.*;

-public class SparkOrcidToResultFromSemRelJob {
+public class SparkOrcidToResultFromSemRelJob2 {
     public static void main(String[] args) throws Exception {

-        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkOrcidToResultFromSemRelJob.class.getResourceAsStream("/eu/dnetlib/dhp/orcidtoresultfromremrel/input_orcidtoresult_parameters.json")));
+        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkOrcidToResultFromSemRelJob2.class.getResourceAsStream("/eu/dnetlib/dhp/orcidtoresultfromsemrel/input_orcidtoresult_parameters.json")));
         parser.parseArgument(args);
+        SparkConf conf = new SparkConf();
+        conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
         final SparkSession spark = SparkSession
                 .builder()
-                .appName(SparkOrcidToResultFromSemRelJob.class.getSimpleName())
+                .appName(SparkOrcidToResultFromSemRelJob2.class.getSimpleName())
                 .master(parser.get("master"))
+                .config(conf)
                 .enableHiveSupport()
                 .getOrCreate();

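Passing the metastore URI through SparkConf before enableHiveSupport() is what lets the spark.sql() calls later in this class resolve the registered views. A minimal standalone sketch of that wiring; the app name, master, and the thrift URI are illustrative values, not from the commit:

```java
// Sketch: build a Hive-enabled session; the metastore URI is an assumed example.
SparkConf conf = new SparkConf();
conf.set("hive.metastore.uris", "thrift://metastore-host:9083");

SparkSession spark = SparkSession
        .builder()
        .appName("orcid-propagation")
        .master("yarn")
        .config(conf)
        .enableHiveSupport()
        .getOrCreate();
```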
@@ -48,63 +43,176 @@ public class SparkOrcidToResultFromSemRelJob {

         createOutputDirs(outputPath, FileSystem.get(spark.sparkContext().hadoopConfiguration()));

-        JavaRDD<Relation> relations = sc.textFile(inputPath + "/relation")
-                .map(item -> new ObjectMapper().readValue(item, Relation.class)).cache();
-
-        JavaPairRDD<String, TypedRow> result_result = getResultResultSemRel(allowedsemrel, relations);
-
-        JavaRDD<Publication> publications = sc.textFile(inputPath + "/publication")
-                .map(item -> new ObjectMapper().readValue(item, Publication.class));
-        JavaRDD<Dataset> datasets = sc.textFile(inputPath + "/dataset")
-                .map(item -> new ObjectMapper().readValue(item, Dataset.class));
-        JavaRDD<Software> software = sc.textFile(inputPath + "/software")
-                .map(item -> new ObjectMapper().readValue(item, Software.class));
-        JavaRDD<OtherResearchProduct> other = sc.textFile(inputPath + "/otherresearchproduct")
-                .map(item -> new ObjectMapper().readValue(item, OtherResearchProduct.class));
+        org.apache.spark.sql.Dataset<Relation> relation = spark.createDataset(sc.textFile(inputPath + "/relation")
+                .map(item -> new ObjectMapper().readValue(item, Relation.class)).rdd(), Encoders.bean(Relation.class));
+
+        org.apache.spark.sql.Dataset<Dataset> dataset = spark.createDataset(sc.textFile(inputPath + "/dataset")
+                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.Dataset.class)).rdd(),
+                Encoders.bean(eu.dnetlib.dhp.schema.oaf.Dataset.class));
+
+        org.apache.spark.sql.Dataset<OtherResearchProduct> other = spark.createDataset(sc.textFile(inputPath + "/otherresearchproduct")
+                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.OtherResearchProduct.class)).rdd(),
+                Encoders.bean(eu.dnetlib.dhp.schema.oaf.OtherResearchProduct.class));
+
+        org.apache.spark.sql.Dataset<Software> software = spark.createDataset(sc.textFile(inputPath + "/software")
+                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.Software.class)).rdd(),
+                Encoders.bean(eu.dnetlib.dhp.schema.oaf.Software.class));
+
+        org.apache.spark.sql.Dataset<Publication> publication = spark.createDataset(sc.textFile(inputPath + "/publication")
+                .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.Publication.class)).rdd(),
+                Encoders.bean(eu.dnetlib.dhp.schema.oaf.Publication.class));
+
+
+        relation.createOrReplaceTempView("relation");
+        String query = "Select source, target " +
+                "from relation " +
+                "where datainfo.deletedbyinference = false " + getConstraintList(" relclass = '" , allowedsemrel);
+
+        org.apache.spark.sql.Dataset<Row> result_result = spark.sql(query);
+
+        publication.createOrReplaceTempView("publication");
+        org.apache.spark.sql.Dataset<ResultOrcidList> pubs_with_orcid = getResultWithOrcid("publication", spark)
+                .as(Encoders.bean(ResultOrcidList.class));
+
+        dataset.createOrReplaceTempView("dataset");
+        org.apache.spark.sql.Dataset<ResultOrcidList> dats_with_orcid = getResultWithOrcid("dataset", spark)
+                .as(Encoders.bean(ResultOrcidList.class));
+
+        other.createOrReplaceTempView("orp");
+        org.apache.spark.sql.Dataset<ResultOrcidList> orp_with_orcid = getResultWithOrcid("orp", spark)
+                .as(Encoders.bean(ResultOrcidList.class));
+
+        dataset.createOrReplaceTempView("software");
+        org.apache.spark.sql.Dataset<ResultOrcidList> software_with_orcid = getResultWithOrcid("software", spark)
+                .as(Encoders.bean(ResultOrcidList.class));
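Each JSON dump is lifted into a typed Dataset through Encoders.bean and then registered as a temp view for the SQL step. Note that the last registration above, as committed, registers the dataset Dataset under the view name "software". A minimal sketch of the registration pattern as presumably intended, using the software entity (an assumption about intent, not the committed code):

```java
// Sketch: lift a JavaRDD<T> to Dataset<T> and expose it to Spark SQL.
org.apache.spark.sql.Dataset<Software> software = spark.createDataset(
        sc.textFile(inputPath + "/software")
                .map(item -> new ObjectMapper().readValue(item, Software.class))
                .rdd(),
        Encoders.bean(Software.class));

software.createOrReplaceTempView("software"); // register the software Dataset itself
```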
         //get the results having at least one author pid we are interested in
-        JavaPairRDD<String, TypedRow> resultswithorcid = publications.map(p -> getTypedRow(p))
-                .filter(p -> !(p == null))
-                .mapToPair(toPair())
-                .union(datasets.map(p -> getTypedRow(p))
-                        .filter(p -> !(p == null))
-                        .mapToPair(toPair()))
-                .union(software.map(p -> getTypedRow(p))
-                        .filter(p -> !(p == null))
-                        .mapToPair(toPair()))
-                .union(other.map(p -> getTypedRow(p))
-                        .filter(p -> !(p == null))
-                        .mapToPair(toPair()));
-
-        JavaPairRDD<String, TypedRow> to_add_orcid_to_result = resultswithorcid.join(result_result)
-                .map(p -> p._2()._1().setSourceId(p._2()._2().getTargetId())) //associate the pid of the result (target) which should get the orcid to the typed row containing the authors with the orcid from the result(source)
-                .mapToPair(toPair());
-
-        JavaPairRDD<String, Result> pubs = publications.mapToPair(p -> new Tuple2<>(p.getId(),p));
-        JavaPairRDD<String, Result> dss = datasets.mapToPair(p -> new Tuple2<>(p.getId(),p));
-        JavaPairRDD<String, Result> sfw = software.mapToPair(p -> new Tuple2<>(p.getId(),p));
-        JavaPairRDD<String, Result> orp = other.mapToPair(p -> new Tuple2<>(p.getId(),p));
+        //target of the relation from at least one source with orcid.
+        //the set of authors contains all those that have orcid and are related to target
+        //from any source with allowed semantic relationship
+        JavaPairRDD<String, List<AutoritativeAuthor>> target_authorlist_from_pubs = getTargetAutoritativeAuthorList(pubs_with_orcid);
+
+        JavaPairRDD<String, List<AutoritativeAuthor>> target_authorlist_from_dats = getTargetAutoritativeAuthorList(dats_with_orcid);
+
+        JavaPairRDD<String, List<AutoritativeAuthor>> target_authorlist_from_orp = getTargetAutoritativeAuthorList(orp_with_orcid);
+
+        JavaPairRDD<String, List<AutoritativeAuthor>> target_authorlist_from_sw = getTargetAutoritativeAuthorList(software_with_orcid);

         if(writeUpdate){
-            writeResult(pubs, to_add_orcid_to_result, outputPath, "publication");
-            writeResult(dss, to_add_orcid_to_result, outputPath, "dataset");
-            writeResult(sfw, to_add_orcid_to_result, outputPath, "software");
-            writeResult(orp, to_add_orcid_to_result, outputPath, "otherresearchproduct");
+            target_authorlist_from_dats.map(r -> new ObjectMapper().writeValueAsString(r))
+                    .saveAsTextFile(outputPath + "/" + "update_dats");
+            target_authorlist_from_pubs.map(r -> new ObjectMapper().writeValueAsString(r))
+                    .saveAsTextFile(outputPath + "/" + "update_pubs");
+            target_authorlist_from_orp.map(r -> new ObjectMapper().writeValueAsString(r))
+                    .saveAsTextFile(outputPath + "/" + "update_orp");
+            target_authorlist_from_sw.map(r -> new ObjectMapper().writeValueAsString(r))
+                    .saveAsTextFile(outputPath + "/" + "update_sw");
+        }
+
+        if(saveGraph){
+            sc.textFile(inputPath + "/publication")
+                    .map(item -> new ObjectMapper().readValue(item, eu.dnetlib.dhp.schema.oaf.Publication.class))
+                    .mapToPair(p -> new Tuple2<>(p.getId(),p))
+                    .leftOuterJoin(target_authorlist_from_pubs)
+                    .map(c -> {
+                        Result r = c._2()._1();
+                        if(!c._2()._2().isPresent()){
+                            return r;
+                        }
+                        List<eu.dnetlib.dhp.schema.oaf.Author> toenrich_author = r.getAuthor();
+                        List<AutoritativeAuthor> autoritativeAuthors = c._2()._2().get();
+                        for(eu.dnetlib.dhp.schema.oaf.Author author: toenrich_author){
+                            if (!containsAllowedPid(author)){
+                                enrichAuthor(author, autoritativeAuthors);
+                            }
+                        }
+                        return r;
+                    });
+
         }
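As committed, the saveGraph branch ends at the map(...) transformation; Spark transformations are lazy, so nothing is materialized until an action runs on the result. A sketch of the presumably intended final step (enrichedPublications is an illustrative name for the mapped RDD, not a variable from the commit):

```java
// Sketch (assumption): serialize and persist the enriched publications;
// without an action such as saveAsTextFile the lazy map(...) never executes.
enrichedPublications
        .map(p -> new ObjectMapper().writeValueAsString(p))
        .saveAsTextFile(outputPath + "/publication");
```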

-        if (saveGraph){
-            updateResult(pubs, to_add_orcid_to_result, outputPath, "publication");
-            updateResult(dss, to_add_orcid_to_result, outputPath, "dataset");
-            updateResult(sfw, to_add_orcid_to_result, outputPath, "software");
-            updateResult(orp, to_add_orcid_to_result, outputPath, "otherresearchproduct");
-        }
+    }
+
+    private static void enrichAuthor(eu.dnetlib.dhp.schema.oaf.Author a, List<AutoritativeAuthor> au){
+        for (AutoritativeAuthor aa: au){
+            if(enrichAuthor(aa, a)){
+                return;
+            }
+        }
         }

     }

+//    private static JavaPairRDD<String, List<AutoritativeAuthor>> getTargetAutoritativeAuthorList(org.apache.spark.sql.Dataset<Row> result_result, org.apache.spark.sql.Dataset<ResultOrcidList> pubs_with_orcid) {
+//        return pubs_with_orcid
+//                .toJavaRDD()
+//                .mapToPair(p -> new Tuple2<>(p.getId(), p.getAuthorList()))
+//                .join(result_result.toJavaRDD().mapToPair(rel -> new Tuple2<>(rel.getString(0), rel.getString(1))))
+//                .mapToPair(c -> new Tuple2<>(c._2._2(), c._2()._1()))
+//                .reduceByKey((a, b) -> {
+//                    if(a == null){
+//                        return b;
+//                    }
+//                    if(b==null){
+//                        return a;
+//                    }
+//
+//                    Set<String> authSet = new HashSet<>();
+//                    a.stream().forEach(au -> authSet.add(au.getOrcid()));
+//
+//                    b.stream().forEach(au -> {
+//                        if (!authSet.contains(au.getOrcid())) {
+//                            a.add(au);
+//                        }
+//                    }
+//                    );
+//                    return a;
+//                });
+//    }
-    private static Author enrichAutor(Author autoritative_author, Author author) {
+    private static JavaPairRDD<String, List<AutoritativeAuthor>> getTargetAutoritativeAuthorList( org.apache.spark.sql.Dataset<ResultOrcidList> pubs_with_orcid) {
+        return pubs_with_orcid
+                .toJavaRDD()
+                .mapToPair(p -> new Tuple2<>(p.getResultId(), p.getAuthorList()))
+                .reduceByKey((a, b) -> {
+                    if(a == null){
+                        return b;
+                    }
+                    if(b==null){
+                        return a;
+                    }
+                    Set<String> authSet = new HashSet<>();
+                    a.stream().forEach(au -> authSet.add(au.getOrcid()));
+
+                    b.stream().forEach(au -> {
+                        if (!authSet.contains(au.getOrcid())) {
+                            a.add(au);
+                        }
+                    }
+                    );
+                    return a;
+                });
+    }
+
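The reduceByKey combiner above collects one de-duplicated author list per relation target, keyed by ORCID. The same merge, factored as a standalone function for clarity (a sketch; the mergeByOrcid name and the Set/Collectors imports are assumptions):

```java
// Sketch: merge two author lists, de-duplicating by ORCID,
// mirroring the reduceByKey combiner in getTargetAutoritativeAuthorList.
private static List<AutoritativeAuthor> mergeByOrcid(List<AutoritativeAuthor> a, List<AutoritativeAuthor> b) {
    if (a == null) return b;
    if (b == null) return a;
    Set<String> seen = a.stream().map(AutoritativeAuthor::getOrcid).collect(Collectors.toSet());
    for (AutoritativeAuthor au : b) {
        if (!seen.contains(au.getOrcid())) {
            a.add(au);
        }
    }
    return a;
}
```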
+    private static org.apache.spark.sql.Dataset<Row> getResultWithOrcid(String table, SparkSession spark){
+        String query = " select target, author " +
+                " from (select id, collect_set(named_struct('name', name, 'surname', surname, 'fullname', fullname, 'orcid', orcid)) author " +
+                " from ( " +
+                " select id, MyT.fullname, MyT.name, MyT.surname, MyP.value orcid " +
+                " from " + table +
+                " lateral view explode (author) a as MyT " +
+                " lateral view explode (MyT.pid) p as MyP " +
+                " where MyP.qualifier.classid = 'ORCID') tmp " +
+                " group by id) r_t " +
+                " join (" +
+                " select source, target " +
+                " from relation " +
+                " where datainfo.deletedbyinference = false and (relclass = 'isSupplementedBy' or relclass = 'isSupplementTo') rel_rel " +
+                " on source = id";
+
+        return spark.sql(query);
+    }
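The double lateral view explode unnests each result's author array and each author's pid array so the ORCID values can be grouped back per result id, which is then joined to the supplement relations. Note that, as committed, the parenthesis opened after join is never closed: the one after 'isSupplementTo' closes the relclass group, leaving the rel_rel alias dangling. A sketch of the balanced, formatted query string (adds only the missing closing parenthesis; tables and columns are taken from the commit):

```java
// Sketch: same query with balanced parentheses and readable layout.
String query =
        "SELECT target, author " +
        "FROM ( " +
        "  SELECT id, collect_set(named_struct('name', name, 'surname', surname, 'fullname', fullname, 'orcid', orcid)) author " +
        "  FROM ( " +
        "    SELECT id, MyT.fullname, MyT.name, MyT.surname, MyP.value orcid " +
        "    FROM " + table + " " +
        "    LATERAL VIEW explode(author) a AS MyT " +
        "    LATERAL VIEW explode(MyT.pid) p AS MyP " +
        "    WHERE MyP.qualifier.classid = 'ORCID') tmp " +
        "  GROUP BY id) r_t " +
        "JOIN ( " +
        "  SELECT source, target " +
        "  FROM relation " +
        "  WHERE datainfo.deletedbyinference = false " +
        "    AND (relclass = 'isSupplementedBy' OR relclass = 'isSupplementTo')) rel_rel " + // balanced here
        "ON source = id";
```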
+
+    private static boolean enrichAuthor(AutoritativeAuthor autoritative_author, eu.dnetlib.dhp.schema.oaf.Author author) {
         boolean toaddpid = false;

         if (StringUtils.isNoneEmpty(autoritative_author.getSurname())) {
@@ -128,149 +236,122 @@ public class SparkOrcidToResultFromSemRelJob {
         }
         if (toaddpid){
             StructuredProperty pid = new StructuredProperty();
-            for(StructuredProperty sp : autoritative_author.getPid()){
-                if (PROPAGATION_AUTHOR_PID.equals(sp.getQualifier().getClassid())){
-                    pid.setValue(sp.getValue());
-                    pid.setQualifier(getQualifier(sp.getQualifier().getClassid(),sp.getQualifier().getClassname() ));
-                    pid.setDataInfo(getDataInfo(PROPAGATION_DATA_INFO_TYPE, PROPAGATION_ORCID_TO_RESULT_FROM_SEM_REL_CLASS_ID, PROPAGATION_ORCID_TO_RESULT_FROM_SEM_REL_CLASS_NAME));
-                    if(author.getPid() == null){
-                        author.setPid(Arrays.asList(pid));
-                    }else{
-                        author.getPid().add(pid);
-                    }
-                }
-            }
-            return author;
+            String aa_pid = autoritative_author.getOrcid();
+            pid.setValue(aa_pid);
+            pid.setQualifier(getQualifier(PROPAGATION_AUTHOR_PID, PROPAGATION_AUTHOR_PID ));
+            pid.setDataInfo(getDataInfo(PROPAGATION_DATA_INFO_TYPE, PROPAGATION_ORCID_TO_RESULT_FROM_SEM_REL_CLASS_ID, PROPAGATION_ORCID_TO_RESULT_FROM_SEM_REL_CLASS_NAME));
+            if(author.getPid() == null){
+                author.setPid(Arrays.asList(pid));
+            }else{
+                author.getPid().add(pid);
+            }
         }
-        return null;
+        return toaddpid;

     }

-    private static List<Author> enrichAutors(List<Author> autoritative_authors, List<Author> to_enrich_authors, boolean filter){
-//        List<Author> autoritative_authors = p._2()._2().get().getAuthors();
-//        List<Author> to_enrich_authors = r.getAuthor();
-
-        return to_enrich_authors
-                .stream()
-                .map(a -> {
-                    if (filter) {
-                        if (containsAllowedPid(a)) {
-                            return a;
-                        }
-                    }
-
-                    List<Author> lst = autoritative_authors.stream()
-                            .map(aa -> enrichAutor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
-                    if (lst.size() == 0) {
-                        return a;
-                    }
-                    return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
-
-                }).collect(Collectors.toList());
-    }
-
-    private static void writeResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult,
-                                    String outputPath, String type) {
-
-        results.join(toupdateresult)
-                .map(p -> {
-                    Result r = p._2()._1();
-
-                    List<Author> autoritative_authors = p._2()._2().getAuthors();
-                    List<Author> to_enrich_authors = r.getAuthor();
-
-                    r.setAuthor(enrichAutors(autoritative_authors, to_enrich_authors, false));
-//                            .stream()
-//                            .map(a -> {
-//                                if(filter) {
-//                                    if (containsAllowedPid(a)) {
-//                                        return a;
-//                                    }
-//                                }
-//
-//                                List<Author> lst = autoritative_authors.stream()
-//                                        .map(aa -> enrichAutor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
-//                                if(lst.size() == 0){
-//                                    return a;
-//                                }
-//                                return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
-//
-//                            }).collect(Collectors.toList()));
-
-                    return r;
-                })
-                .map(p -> new ObjectMapper().writeValueAsString(p))
-                .saveAsTextFile(outputPath + "/" + type + "_update");
-    }
+//    private static List<Author> enrichAuthors(List<Author> autoritative_authors, List<Author> to_enrich_authors, boolean filter){
+////        List<Author> autoritative_authors = p._2()._2().get().getAuthors();
+////        List<Author> to_enrich_authors = r.getAuthor();
+//
+//        return to_enrich_authors
+//                .stream()
+//                .map(a -> {
+//                    if (filter) {
+//                        if (containsAllowedPid(a)) {
+//                            return a;
+//                        }
+//                    }
+//
+//                    List<Author> lst = autoritative_authors.stream()
+//                            .map(aa -> enrichAuthor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
+//                    if (lst.size() == 0) {
+//                        return a;
+//                    }
+//                    return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
+//
+//                }).collect(Collectors.toList());
+//    }

-    private static void updateResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult,
-                                     String outputPath, String type) {
-        results.leftOuterJoin(toupdateresult)
-                .map(p -> {
-                    Result r = p._2()._1();
-                    if (p._2()._2().isPresent()){
-                        List<Author> autoritative_authors = p._2()._2().get().getAuthors();
-                        List<Author> to_enrich_authors = r.getAuthor();
-
-                        r.setAuthor(enrichAutors(autoritative_authors, to_enrich_authors, true));
-//                                .stream()
-//                                .map(a -> {
-//                                    if(filter) {
-//                                        if (containsAllowedPid(a)) {
-//                                            return a;
-//                                        }
-//                                    }
-//
-//                                    List<Author> lst = autoritative_authors.stream()
-//                                            .map(aa -> enrichAutor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
-//                                    if(lst.size() == 0){
-//                                        return a;
-//                                    }
-//                                    return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
-//
-//                                }).collect(Collectors.toList()));
-                    }
-                    return r;
-                })
-                .map(p -> new ObjectMapper().writeValueAsString(p))
-                .saveAsTextFile(outputPath+"/"+type);
-    }
-
-    private static TypedRow getTypedRow(Result p) {
-        TypedRow tp = new TypedRow();
-        tp.setSourceId(p.getId());
-        List<Author> authorList = p.getAuthor()
-                .stream()
-                .map(a -> {
-                    if (a.getPid().stream().map(pid -> {
-                        if (PROPAGATION_AUTHOR_PID.equals(pid.getQualifier().getClassid())) {
-                            return a;
-                        }
-                        return null;
-                    }).filter(aut -> !(aut == null)).collect(Collectors.toList()).size() > 0){
-                        return a;
-                    }
-                    return null;
-                }).filter(a -> !(a == null)).collect(Collectors.toList());
-        tp.setAuthors(authorList);
-        if(authorList.size() > 0){
-            return tp;
-        }
-        return null;
+//    private static void writeResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult,
+//                                    String outputPath, String type) {
+//
+//        results.join(toupdateresult)
+//                .map(p -> {
+//                    Result r = p._2()._1();
+//
+//                    List<Author> autoritative_authors = p._2()._2().getAuthors();
+//                    List<eu.dnetlib.dhp.schema.oaf.Author> to_enrich_authors = r.getAuthor();
+//
+//                    r.setAuthor(enrichAutors(autoritative_authors, to_enrich_authors, false));
+////                            .stream()
+////                            .map(a -> {
+////                                if(filter) {
+////                                    if (containsAllowedPid(a)) {
+////                                        return a;
+////                                    }
+////                                }
+////
+////                                List<Author> lst = autoritative_authors.stream()
+////                                        .map(aa -> enrichAuthor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
+////                                if(lst.size() == 0){
+////                                    return a;
+////                                }
+////                                return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
+////
+////                            }).collect(Collectors.toList()));
+//
+//                    return r;
+//                })
+//                .map(p -> new ObjectMapper().writeValueAsString(p))
+//                .saveAsTextFile(outputPath + "/" + type + "_update");
+//    }

-    }
+//    private static void updateResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult,
+//                                     String outputPath, String type) {
+//        results.leftOuterJoin(toupdateresult)
+//                .map(p -> {
+//                    Result r = p._2()._1();
+//                    if (p._2()._2().isPresent()){
+//                        List<AutoritativeAuthor> autoritative_authors = p._2()._2().get().getAuthors();
+//                        List<eu.dnetlib.dhp.schema.oaf.Author> to_enrich_authors = r.getAuthor();
+//
+//                        r.setAuthor(enrichAutors(autoritative_authors, to_enrich_authors, true));
+////                                .stream()
+////                                .map(a -> {
+////                                    if(filter) {
+////                                        if (containsAllowedPid(a)) {
+////                                            return a;
+////                                        }
+////                                    }
+////
+////                                    List<Author> lst = autoritative_authors.stream()
+////                                            .map(aa -> enrichAuthor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
+////                                    if(lst.size() == 0){
+////                                        return a;
+////                                    }
+////                                    return lst.get(0);//Each author can be enriched at most once. It cannot be the same as many different people
+////
+////                                    }).collect(Collectors.toList()));
+//                    }
+//                    return r;
+//                })
+//                .map(p -> new ObjectMapper().writeValueAsString(p))
+//                .saveAsTextFile(outputPath+"/"+type);
+//    }

-    private static boolean containsAllowedPid(Author a){
-        return (a.getPid().stream().map(pid -> {
+    private static boolean containsAllowedPid(eu.dnetlib.dhp.schema.oaf.Author a) {
+        for (StructuredProperty pid : a.getPid()) {
             if (PROPAGATION_AUTHOR_PID.equals(pid.getQualifier().getClassid())) {
                 return true;
             }
-            return false;
-        }).filter(aut -> (aut == true)).collect(Collectors.toList()).size()) > 0;
+        }
+        return false;
     }

 }
Reference in New Issue