dnet-hadoop/dhp-workflows/dhp-enrichment/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/PrepareResultOrcidAssociati...

package eu.dnetlib.dhp.orcidtoresultfromsemrel;
import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.MapGroupsFunction;
import org.apache.spark.sql.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import scala.Tuple2;
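
/**
 * Second preparation step of the ORCID-to-result propagation: merges the per-result ORCID author
 * associations produced for each result type (publication, dataset, otherresearchproduct, software).
 * Records are grouped by result identifier and their author lists are unified, keeping each ORCID
 * at most once per result; the merged {@link ResultOrcidList} records are written as gzipped JSON.
 */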
public class PrepareResultOrcidAssociationStep2 {

    private static final Logger log = LoggerFactory.getLogger(PrepareResultOrcidAssociationStep2.class);

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    public static void main(String[] args) throws Exception {
        String jsonConfiguration = IOUtils
            .toString(
                PrepareResultOrcidAssociationStep2.class
                    .getResourceAsStream(
                        "/eu/dnetlib/dhp/orcidtoresultfromsemrel/input_prepareorcidtoresult_parameters2.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
        parser.parseArgument(args);

        Boolean isSparkSessionManaged = isSparkSessionManaged(parser);
        log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

        String inputPath = parser.get("sourcePath");
        log.info("inputPath: {}", inputPath);

        final String outputPath = parser.get("outputPath");
        log.info("outputPath: {}", outputPath);
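
        // configure Spark and run the merge step within a Spark session, whose lifecycle
        // is delegated to runWithSparkSession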
        SparkConf conf = new SparkConf();

        runWithSparkSession(
            conf,
            isSparkSessionManaged,
            spark -> {
                // removeOutputDir(spark, outputPath);
                mergeInfo(spark, inputPath, outputPath);
            });
    }
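
    /**
     * Reads the ResultOrcidList datasets prepared under the four result type sub-paths, groups them by
     * result id and merges the author lists of records referring to the same result, adding an author
     * only when its ORCID has not been collected yet. The merged records overwrite the output path.
     */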
    private static void mergeInfo(SparkSession spark, String inputPath, String outputPath) {

        Dataset<ResultOrcidList> resultOrcidAssoc = readPath(spark, inputPath + "/publication", ResultOrcidList.class)
            .union(readPath(spark, inputPath + "/dataset", ResultOrcidList.class))
            .union(readPath(spark, inputPath + "/otherresearchproduct", ResultOrcidList.class))
            .union(readPath(spark, inputPath + "/software", ResultOrcidList.class));

        resultOrcidAssoc
            .groupByKey((MapFunction<ResultOrcidList, String>) rol -> rol.getResultId(), Encoders.STRING())
            .mapGroups((MapGroupsFunction<String, ResultOrcidList, ResultOrcidList>) (k, it) -> {
                ResultOrcidList resultOrcidList = it.next();
                if (it.hasNext()) {
                    Set<String> orcid_set = new HashSet<>();
                    resultOrcidList.getAuthorList().stream().forEach(aa -> orcid_set.add(aa.getOrcid()));
                    it
                        .forEachRemaining(
                            val -> val
                                .getAuthorList()
                                .stream()
                                .forEach(
                                    aa -> {
                                        if (!orcid_set.contains(aa.getOrcid())) {
                                            resultOrcidList.getAuthorList().add(aa);
                                            orcid_set.add(aa.getOrcid());
                                        }
                                    }));
                }
                return resultOrcidList;
            }, Encoders.bean(ResultOrcidList.class))
            .write()
            .mode(SaveMode.Overwrite)
            .option("compression", "gzip")
            .json(outputPath);
    }
}