forked from D-Net/dnet-hadoop
[Dump Funders] new code for the dump of products related to funders
parent 5331dea71b
commit f738acb85a
@@ -1,17 +1,7 @@
package eu.dnetlib.dhp.oa.graph.dump.funderresults;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
import eu.dnetlib.dhp.schema.dump.oaf.community.Project;
import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.List;
@@ -19,7 +9,21 @@ import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.ForeachFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
import eu.dnetlib.dhp.schema.dump.oaf.community.Funder;
import eu.dnetlib.dhp.schema.dump.oaf.community.Project;

/**
 * Splits the dumped results by funder and stores them in a folder named as the funder nsp (for all the funders, but the EC
@@ -50,7 +54,6 @@ public class SparkDumpFunderResults implements Serializable {
		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		SparkConf conf = new SparkConf();

		runWithSparkSession(
@@ -70,16 +73,16 @@ public class SparkDumpFunderResults implements Serializable {
			.union(Utils.readPath(spark, inputPath + "/otherresearchproduct", CommunityResult.class))
			.union(Utils.readPath(spark, inputPath + "/software", CommunityResult.class));

		log.info("Number of result {}", result.count());

		List<String> funderList = result.flatMap((FlatMapFunction<CommunityResult, String>) cr ->
			cr.getProjects().stream().map(p -> {
				String fName = p.getFunder().getShortName();
				if (fName.equalsIgnoreCase("ec")) {
					fName += "_" + p.getFunder().getFundingStream();
				}
				return fName;
			}).collect(Collectors.toList()).iterator(), Encoders.STRING()).distinct().collectAsList();
		Dataset<String> tmp = result
			.flatMap((FlatMapFunction<CommunityResult, String>) cr -> cr.getProjects().stream().map(p -> {
				return getFunderName(p);
			}).collect(Collectors.toList()).iterator(), Encoders.STRING())
			.distinct();
		tmp.foreach((ForeachFunction<String>) f -> log.info("Found Funder {}", f));
		List<String> funderList = tmp.collectAsList();

		funderList.forEach(funder -> {
			dumpResults(funder, result, outputPath);
@@ -87,6 +90,36 @@ public class SparkDumpFunderResults implements Serializable {
	}

	@NotNull
	private static String getFunderName(Project p) {
		Optional<Funder> ofunder = Optional.ofNullable(p.getFunder());
		if (ofunder.isPresent()) {
			String fName = ofunder.get().getShortName();
			if (fName.equalsIgnoreCase("ec")) {
				fName += "_" + ofunder.get().getFundingStream();
			}
			return fName;
		} else {
			String fName = p.getId().substring(3, p.getId().indexOf("_")).toUpperCase();
			if (fName.equalsIgnoreCase("ec")) {
				if (p.getId().contains("h2020")) {
					fName += "_H2020";
				} else {
					fName += "_FP7";
				}
			} else if (fName.equalsIgnoreCase("conicytf")) {
				fName = "CONICYT";
			} else if (fName.equalsIgnoreCase("dfgf")) {
				fName = "DFG";
			} else if (fName.equalsIgnoreCase("tubitakf")) {
				fName = "TUBITAK";
			} else if (fName.equalsIgnoreCase("euenvagency")) {
				fName = "EEA";
			}
			return fName;
		}
	}

	private static void dumpResults(String funder, Dataset<CommunityResult> results, String outputPath) {

		results.map((MapFunction<CommunityResult, CommunityResult>) r -> {
@@ -94,10 +127,7 @@ public class SparkDumpFunderResults implements Serializable {
			return null;
		}
		for (Project p : r.getProjects()) {
			String fName = p.getFunder().getShortName();
			if (fName.equalsIgnoreCase("ec")){
				fName += "_" + p.getFunder().getFundingStream();
			}
			String fName = getFunderName(p);
			if (fName.equalsIgnoreCase(funder)) {
				return r;
			}
@@ -111,6 +141,4 @@ public class SparkDumpFunderResults implements Serializable {
			.json(outputPath + "/" + funder);
	}

}
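To make the change above easier to follow outside the diff, here is a small, self-contained sketch of what SparkDumpFunderResults now does: normalise each project's funder into a folder name (mirroring getFunderName) and group results under those names. The Funder, Project and Result records are minimal stand-ins for the eu.dnetlib.dhp.schema.dump.oaf.community beans, and split() condenses the Spark flatMap/distinct/filter pipeline into plain Java streams (JDK 16+ for records); it is an illustration, not the production job.

```java
import java.util.AbstractMap.SimpleEntry;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

public class SplitPerFunderSketch {

	// Minimal stand-ins for the dump-schema beans (illustrative only).
	record Funder(String shortName, String fundingStream) {}
	record Project(String id, Funder funder) {}
	record Result(String id, List<Project> projects) {}

	// Mirrors getFunderName(Project): prefer the funder bean, fall back to the
	// project id, and split EC results per funding stream (FP7 vs H2020).
	static String funderName(Project p) {
		Optional<Funder> ofunder = Optional.ofNullable(p.funder());
		if (ofunder.isPresent()) {
			String fName = ofunder.get().shortName();
			return fName.equalsIgnoreCase("ec")
				? fName + "_" + ofunder.get().fundingStream()
				: fName;
		}
		String fName = p.id().substring(3, p.id().indexOf("_")).toUpperCase();
		if (fName.equalsIgnoreCase("ec")) {
			return p.id().contains("h2020") ? fName + "_H2020" : fName + "_FP7";
		}
		if (fName.equalsIgnoreCase("conicytf")) return "CONICYT";
		if (fName.equalsIgnoreCase("dfgf")) return "DFG";
		if (fName.equalsIgnoreCase("tubitakf")) return "TUBITAK";
		if (fName.equalsIgnoreCase("euenvagency")) return "EEA";
		return fName;
	}

	// Condenses the job's flatMap/distinct pass plus the per-funder filter in
	// dumpResults into one grouping: funder name -> results funded by it.
	static Map<String, List<Result>> split(List<Result> results) {
		return results.stream()
			.flatMap(r -> r.projects().stream()
				.map(p -> new SimpleEntry<>(funderName(p), r)))
			.distinct() // a result with two projects of one funder counts once
			.collect(Collectors.groupingBy(
				e -> e.getKey(),
				Collectors.mapping(e -> e.getValue(), Collectors.toList())));
	}

	public static void main(String[] args) {
		Result r = new Result("res-1",
			List.of(new Project("40|corda__h2020::1234", new Funder("EC", "H2020"))));
		// Prints [EC_H2020]: the job would write r under outputPath + "/EC_H2020"
		System.out.println(split(List.of(r)).keySet());
	}
}
```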
@@ -1,10 +1,14 @@
package eu.dnetlib.dhp.oa.graph.dump.funderresults;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
import eu.dnetlib.dhp.schema.dump.oaf.community.Project;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
@@ -13,13 +17,10 @@ import org.apache.spark.sql.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Serializable;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
import eu.dnetlib.dhp.schema.dump.oaf.community.Project;

/**
 * Splits the dumped results by funder and stores them in a folder named as the funder nsp (for all the funders, but the EC
@@ -72,16 +73,16 @@ public class SparkDumpFunderResults2 implements Serializable {
			.union(Utils.readPath(spark, inputPath + "/otherresearchproduct", CommunityResult.class))
			.union(Utils.readPath(spark, inputPath + "/software", CommunityResult.class));

		List<String> funderList = result.flatMap((FlatMapFunction<CommunityResult, String>) cr ->
			cr.getProjects().stream().map(p -> {
		List<String> funderList = result
			.flatMap((FlatMapFunction<CommunityResult, String>) cr -> cr.getProjects().stream().map(p -> {
				String fName = p.getFunder().getShortName();
				if (fName.equalsIgnoreCase("ec")) {
					fName += "_" + p.getFunder().getFundingStream();
				}
				return fName;
			}).collect(Collectors.toList()).iterator(), Encoders.STRING()).distinct().collectAsList();
			}).collect(Collectors.toList()).iterator(), Encoders.STRING())
			.distinct()
			.collectAsList();

		funderList.forEach(funder -> {
@@ -98,7 +99,7 @@ public class SparkDumpFunderResults2 implements Serializable {
		}
		for (Project p : r.getProjects()) {
			String fName = p.getFunder().getShortName();
			if (fName.equalsIgnoreCase("ec")){
			if (fName.equalsIgnoreCase("ec")) {
				fName += "_" + p.getFunder().getFundingStream();
			}
			if (fName.equalsIgnoreCase(funder)) {
@@ -114,6 +115,4 @@ public class SparkDumpFunderResults2 implements Serializable {
			.json(outputPath + "/" + funder);
	}

}
@@ -298,6 +298,7 @@
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
--conf spark.sql.shuffle.partitions=3840
</spark-opts>
<arg>--sourcePath</arg><arg>${sourcePath}</arg>
<arg>--outputPath</arg><arg>${workingDir}/validrelation</arg>
@@ -136,6 +136,7 @@
<arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
<arg>--outputPath</arg><arg>${workingDir}/result/publication</arg>
<arg>--graphPath</arg><arg>${workingDir}/preparedInfo</arg>
<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>
</spark>
<ok to="join_link"/>
<error to="Kill"/>
@@ -162,6 +163,7 @@
<arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
<arg>--outputPath</arg><arg>${workingDir}/result/dataset</arg>
<arg>--graphPath</arg><arg>${workingDir}/preparedInfo</arg>
<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>
</spark>
<ok to="join_link"/>
<error to="Kill"/>
@@ -188,6 +190,7 @@
<arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
<arg>--outputPath</arg><arg>${workingDir}/result/otherresearchproduct</arg>
<arg>--graphPath</arg><arg>${workingDir}/preparedInfo</arg>
<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>
</spark>
<ok to="join_link"/>
<error to="Kill"/>
@@ -214,6 +217,7 @@
<arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
<arg>--outputPath</arg><arg>${workingDir}/result/software</arg>
<arg>--graphPath</arg><arg>${workingDir}/preparedInfo</arg>
<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>
</spark>
<ok to="join_link"/>
<error to="Kill"/>
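The `--communityMapPath` argument added to each of the four dump steps above travels from the workflow's `<arg>` pairs into the Spark job, which reads parameters by name (the patch shows the same pattern for `outputPath` via `parser.get("outputPath")`). Below is a toy sketch of that flag-to-value pairing; `ArgSketch` is a hypothetical stand-in for `ArgumentApplicationParser`, not its real API.

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for ArgumentApplicationParser, only to show how the
// workflow's "<arg>--communityMapPath</arg><arg>${communityMapPath}</arg>"
// pairs become name -> value lookups inside the job.
public class ArgSketch {

	static Map<String, String> parse(String[] args) {
		Map<String, String> byName = new HashMap<>();
		for (int i = 0; i + 1 < args.length; i += 2) {
			// strip the leading dashes and pair each flag with the value after it
			byName.put(args[i].replaceFirst("^-+", ""), args[i + 1]);
		}
		return byName;
	}

	public static void main(String[] args) {
		Map<String, String> parsed = parse(new String[] {
			"--outputPath", "/tmp/workingDir/result/publication",
			"--communityMapPath", "/tmp/communityMap" // argument added by this commit
		});
		System.out.println(parsed.get("communityMapPath")); // /tmp/communityMap
	}
}
```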
@@ -5,8 +5,8 @@ import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

//import eu.dnetlib.dhp.oa.graph.dump.funderresults.SparkDumpFunderResults2;
//import eu.dnetlib.dhp.oa.graph.dump.funderresults.SparkGetFunderList;
// import eu.dnetlib.dhp.oa.graph.dump.funderresults.SparkDumpFunderResults2;
// import eu.dnetlib.dhp.oa.graph.dump.funderresults.SparkGetFunderList;
import org.apache.commons.io.FileUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
@@ -75,7 +75,6 @@ public class SplitPerFunderTest {
			.getResource("/eu/dnetlib/dhp/oa/graph/dump/funderresource/ext")
			.getPath();

		SparkDumpFunderResults.main(new String[] {
			"-isSparkSessionManaged", Boolean.FALSE.toString(),
			"-outputPath", workingDir.toString() + "/split",
@@ -147,9 +146,6 @@ public class SplitPerFunderTest {
			.map(item -> OBJECT_MAPPER.readValue(item, CommunityResult.class));
		Assertions.assertEquals(1, tmp.count());

	}

}