[Dump Funders] new code for the dump of products related to funders

Miriam Baglioni 2022-03-31 18:23:25 +02:00
parent 5331dea71b
commit f738acb85a
5 changed files with 118 additions and 90 deletions

View File

@@ -1,17 +1,7 @@
 package eu.dnetlib.dhp.oa.graph.dump.funderresults;

-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.oa.graph.dump.Utils;
-import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
-import eu.dnetlib.dhp.schema.dump.oaf.community.Project;
-import org.apache.commons.io.IOUtils;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.function.FlatMapFunction;
-import org.apache.spark.api.java.function.MapFunction;
-import org.apache.spark.sql.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

 import java.io.Serializable;
 import java.util.List;
@@ -19,7 +9,21 @@ import java.util.Objects;
 import java.util.Optional;
 import java.util.stream.Collectors;

-import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
+import org.apache.commons.io.IOUtils;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.function.FlatMapFunction;
+import org.apache.spark.api.java.function.ForeachFunction;
+import org.apache.spark.api.java.function.MapFunction;
+import org.apache.spark.sql.*;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.oa.graph.dump.Utils;
+import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
+import eu.dnetlib.dhp.schema.dump.oaf.community.Funder;
+import eu.dnetlib.dhp.schema.dump.oaf.community.Project;

 /**
  * Splits the dumped results by funder and stores them in a folder named as the funder nsp (for all the funders, but the EC
@@ -30,18 +34,18 @@ public class SparkDumpFunderResults implements Serializable {
 	public static void main(String[] args) throws Exception {
 		String jsonConfiguration = IOUtils
 			.toString(
 				SparkDumpFunderResults.class
 					.getResourceAsStream(
 						"/eu/dnetlib/dhp/oa/graph/dump/funder_result_parameters.json"));
 		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
 		parser.parseArgument(args);
 		Boolean isSparkSessionManaged = Optional
 			.ofNullable(parser.get("isSparkSessionManaged"))
 			.map(Boolean::valueOf)
 			.orElse(Boolean.TRUE);
 		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
 		final String inputPath = parser.get("sourcePath");
@@ -50,36 +54,35 @@ public class SparkDumpFunderResults implements Serializable {
 		final String outputPath = parser.get("outputPath");
 		log.info("outputPath: {}", outputPath);
 		SparkConf conf = new SparkConf();
 		runWithSparkSession(
 			conf,
 			isSparkSessionManaged,
 			spark -> {
 				Utils.removeOutputDir(spark, outputPath);
 				writeResultProjectList(spark, inputPath, outputPath);
 			});
 	}

 	private static void writeResultProjectList(SparkSession spark, String inputPath, String outputPath) {
 		Dataset<CommunityResult> result = Utils
 			.readPath(spark, inputPath + "/publication", CommunityResult.class)
 			.union(Utils.readPath(spark, inputPath + "/dataset", CommunityResult.class))
 			.union(Utils.readPath(spark, inputPath + "/otherresearchproduct", CommunityResult.class))
 			.union(Utils.readPath(spark, inputPath + "/software", CommunityResult.class));
-		List<String> funderList = result.flatMap((FlatMapFunction<CommunityResult, String>) cr ->
-			cr.getProjects().stream().map(p -> {
-				String fName = p.getFunder().getShortName();
-				if (fName.equalsIgnoreCase("ec")) {
-					fName += "_" + p.getFunder().getFundingStream();
-				}
-				return fName;
-			}).collect(Collectors.toList()).iterator()
-			, Encoders.STRING()).distinct().collectAsList();
+		log.info("Number of result {}", result.count());
+		Dataset<String> tmp = result
+			.flatMap((FlatMapFunction<CommunityResult, String>) cr -> cr.getProjects().stream().map(p -> {
+				return getFunderName(p);
+			}).collect(Collectors.toList()).iterator(), Encoders.STRING())
+			.distinct();
+		tmp.foreach((ForeachFunction<String>) f -> log.info("Found Funder {}", f));
+		List<String> funderList = tmp.collectAsList();

 		funderList.forEach(funder -> {
 			dumpResults(funder, result, outputPath);
@@ -87,30 +90,55 @@ public class SparkDumpFunderResults implements Serializable {
 	}

+	@NotNull
+	private static String getFunderName(Project p) {
+		Optional<Funder> ofunder = Optional.ofNullable(p.getFunder());
+		if (ofunder.isPresent()) {
+			String fName = ofunder.get().getShortName();
+			if (fName.equalsIgnoreCase("ec")) {
+				fName += "_" + ofunder.get().getFundingStream();
+			}
+			return fName;
+		} else {
+			String fName = p.getId().substring(3, p.getId().indexOf("_")).toUpperCase();
+			if (fName.equalsIgnoreCase("ec")) {
+				if (p.getId().contains("h2020")) {
+					fName += "_H2020";
+				} else {
+					fName += "_FP7";
+				}
+			} else if (fName.equalsIgnoreCase("conicytf")) {
+				fName = "CONICYT";
+			} else if (fName.equalsIgnoreCase("dfgf")) {
+				fName = "DFG";
+			} else if (fName.equalsIgnoreCase("tubitakf")) {
+				fName = "TUBITAK";
+			} else if (fName.equalsIgnoreCase("euenvagency")) {
+				fName = "EEA";
+			}
+			return fName;
+		}
+	}

 	private static void dumpResults(String funder, Dataset<CommunityResult> results, String outputPath) {
 		results.map((MapFunction<CommunityResult, CommunityResult>) r -> {
 			if (!Optional.ofNullable(r.getProjects()).isPresent()) {
 				return null;
 			}
 			for (Project p : r.getProjects()) {
-				String fName = p.getFunder().getShortName();
-				if (fName.equalsIgnoreCase("ec")){
-					fName += "_" + p.getFunder().getFundingStream();
-				}
+				String fName = getFunderName(p);
 				if (fName.equalsIgnoreCase(funder)) {
 					return r;
 				}
 			}
 			return null;
 		}, Encoders.bean(CommunityResult.class))
 			.filter(Objects::nonNull)
 			.write()
 			.mode(SaveMode.Overwrite)
 			.option("compression", "gzip")
 			.json(outputPath + "/" + funder);
 	}
 }
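For context, the interesting change above is the new getFunderName helper: when a Project carries no Funder object, the funder is now derived from the project identifier itself. Below is a minimal, self-contained sketch of that fallback mapping; the sample identifiers are hypothetical (shaped like "40|<prefix>::<hash>", which is what the substring logic implies), not values taken from the graph.

public class FunderNameFallbackSketch {

	// Mirrors the fallback branch of SparkDumpFunderResults.getFunderName:
	// take the substring between the "40|" prefix and the first '_' and
	// normalise a few known funder prefixes to their public short names.
	static String funderFromProjectId(String id) {
		String fName = id.substring(3, id.indexOf("_")).toUpperCase();
		if (fName.equalsIgnoreCase("ec")) {
			fName += id.contains("h2020") ? "_H2020" : "_FP7";
		} else if (fName.equalsIgnoreCase("conicytf")) {
			fName = "CONICYT";
		} else if (fName.equalsIgnoreCase("dfgf")) {
			fName = "DFG";
		} else if (fName.equalsIgnoreCase("tubitakf")) {
			fName = "TUBITAK";
		} else if (fName.equalsIgnoreCase("euenvagency")) {
			fName = "EEA";
		}
		return fName;
	}

	public static void main(String[] args) {
		// Hypothetical project ids, for illustration only
		System.out.println(funderFromProjectId("40|conicytf____::abc")); // CONICYT
		System.out.println(funderFromProjectId("40|dfgf________::abc")); // DFG
		System.out.println(funderFromProjectId("40|tubitakf____::abc")); // TUBITAK
		System.out.println(funderFromProjectId("40|euenvagency_::abc")); // EEA
		System.out.println(funderFromProjectId("40|aka_________::abc")); // AKA (passed through)
	}
}

Note the sketch covers only the id-based branch; in the real class the shortName/fundingStream branch takes precedence whenever funder metadata is present.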

View File

@@ -1,10 +1,14 @@
 package eu.dnetlib.dhp.oa.graph.dump.funderresults;

-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.oa.graph.dump.Utils;
-import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
-import eu.dnetlib.dhp.schema.dump.oaf.community.Project;
+import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
 import org.apache.commons.io.IOUtils;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.function.FlatMapFunction;
@@ -13,13 +17,10 @@ import org.apache.spark.sql.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.Serializable;
-import java.util.List;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.stream.Collectors;
-
-import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
+import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.oa.graph.dump.Utils;
+import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
+import eu.dnetlib.dhp.schema.dump.oaf.community.Project;

 /**
  * Splits the dumped results by funder and stores them in a folder named as the funder nsp (for all the funders, but the EC
@@ -72,16 +73,16 @@ public class SparkDumpFunderResults2 implements Serializable {
 			.union(Utils.readPath(spark, inputPath + "/otherresearchproduct", CommunityResult.class))
 			.union(Utils.readPath(spark, inputPath + "/software", CommunityResult.class));

-		List<String> funderList = result.flatMap((FlatMapFunction<CommunityResult, String>) cr ->
-			cr.getProjects().stream().map(p -> {
-				String fName = p.getFunder().getShortName();
-				if (fName.equalsIgnoreCase("ec")) {
-					fName += "_" + p.getFunder().getFundingStream();
-				}
-				return fName;
-			}).collect(Collectors.toList()).iterator()
-			, Encoders.STRING()).distinct().collectAsList();
+		List<String> funderList = result
+			.flatMap((FlatMapFunction<CommunityResult, String>) cr -> cr.getProjects().stream().map(p -> {
+				String fName = p.getFunder().getShortName();
+				if (fName.equalsIgnoreCase("ec")) {
+					fName += "_" + p.getFunder().getFundingStream();
+				}
+				return fName;
+			}).collect(Collectors.toList()).iterator(), Encoders.STRING())
+			.distinct()
+			.collectAsList();

 		funderList.forEach(funder -> {
@@ -98,7 +99,7 @@ public class SparkDumpFunderResults2 implements Serializable {
 			}
 			for (Project p : r.getProjects()) {
 				String fName = p.getFunder().getShortName();
-				if (fName.equalsIgnoreCase("ec")){
+				if (fName.equalsIgnoreCase("ec")) {
 					fName += "_" + p.getFunder().getFundingStream();
 				}
 				if (fName.equalsIgnoreCase(funder)) {
@@ -114,6 +115,4 @@ public class SparkDumpFunderResults2 implements Serializable {
 			.json(outputPath + "/" + funder);
 	}
 }
-
-

View File

@@ -298,6 +298,7 @@
 --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
 --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
 --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
+--conf spark.sql.shuffle.partitions=3840
 </spark-opts>
 <arg>--sourcePath</arg><arg>${sourcePath}</arg>
 <arg>--outputPath</arg><arg>${workingDir}/validrelation</arg>
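The added spark.sql.shuffle.partitions=3840 raises the number of partitions produced by shuffle operators (joins, distinct, aggregations) for this action, which would otherwise use Spark's default of 200. A minimal sketch of the equivalent programmatic setting, using a local session purely for illustration (the workflow passes it through --conf instead):

import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

public class ShufflePartitionsSketch {
	public static void main(String[] args) {
		// Same setting the workflow adds via <spark-opts>, applied in code
		SparkConf conf = new SparkConf()
			.setAppName("shuffle-partitions-sketch")
			.setMaster("local[*]") // local master only for this sketch
			.set("spark.sql.shuffle.partitions", "3840");
		SparkSession spark = SparkSession.builder().config(conf).getOrCreate();
		// Shuffles (joins, distinct, aggregations) now produce 3840 partitions
		System.out.println(spark.conf().get("spark.sql.shuffle.partitions"));
		spark.stop();
	}
}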

View File

@@ -136,6 +136,7 @@
 <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
 <arg>--outputPath</arg><arg>${workingDir}/result/publication</arg>
 <arg>--graphPath</arg><arg>${workingDir}/preparedInfo</arg>
+<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>
 </spark>
 <ok to="join_link"/>
 <error to="Kill"/>
@@ -162,6 +163,7 @@
 <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
 <arg>--outputPath</arg><arg>${workingDir}/result/dataset</arg>
 <arg>--graphPath</arg><arg>${workingDir}/preparedInfo</arg>
+<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>
 </spark>
 <ok to="join_link"/>
 <error to="Kill"/>
@@ -188,6 +190,7 @@
 <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
 <arg>--outputPath</arg><arg>${workingDir}/result/otherresearchproduct</arg>
 <arg>--graphPath</arg><arg>${workingDir}/preparedInfo</arg>
+<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>
 </spark>
 <ok to="join_link"/>
 <error to="Kill"/>
@@ -214,6 +217,7 @@
 <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
 <arg>--outputPath</arg><arg>${workingDir}/result/software</arg>
 <arg>--graphPath</arg><arg>${workingDir}/preparedInfo</arg>
+<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>
 </spark>
 <ok to="join_link"/>
 <error to="Kill"/>
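Each of the four dump steps above now forwards --communityMapPath to its Spark action. A minimal sketch of how such a parameter would be read on the job side with the same ArgumentApplicationParser used in this commit; the inline JSON is illustrative only (field names follow the dnet-hadoop parameter-file convention as an assumption; the real jobs load their definitions from a resource file such as funder_result_parameters.json):

import eu.dnetlib.dhp.application.ArgumentApplicationParser;

public class CommunityMapPathSketch {
	public static void main(String[] args) throws Exception {
		// Hypothetical inline parameter definition, mirroring the shape of
		// the resource files used elsewhere in this module
		String jsonConfiguration = "[{\"paramName\":\"cmp\", "
			+ "\"paramLongName\":\"communityMapPath\", "
			+ "\"paramDescription\":\"path to the community map\", "
			+ "\"paramRequired\":true}]";
		ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args); // e.g. --communityMapPath /tmp/communityMap
		System.out.println(parser.get("communityMapPath"));
	}
}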

View File

@@ -5,8 +5,8 @@ import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;

-//import eu.dnetlib.dhp.oa.graph.dump.funderresults.SparkDumpFunderResults2;
-//import eu.dnetlib.dhp.oa.graph.dump.funderresults.SparkGetFunderList;
+// import eu.dnetlib.dhp.oa.graph.dump.funderresults.SparkDumpFunderResults2;
+// import eu.dnetlib.dhp.oa.graph.dump.funderresults.SparkGetFunderList;
 import org.apache.commons.io.FileUtils;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
@@ -75,7 +75,6 @@ public class SplitPerFunderTest {
 			.getResource("/eu/dnetlib/dhp/oa/graph/dump/funderresource/ext")
 			.getPath();

-
 		SparkDumpFunderResults.main(new String[] {
 			"-isSparkSessionManaged", Boolean.FALSE.toString(),
 			"-outputPath", workingDir.toString() + "/split",
@@ -147,9 +146,6 @@ public class SplitPerFunderTest {
 			.map(item -> OBJECT_MAPPER.readValue(item, CommunityResult.class));
 		Assertions.assertEquals(1, tmp.count());
 	}
-
-
-
 }