Merge actionsets of results and projects

Serafeim Chatzopoulos 2023-08-11 15:56:53 +03:00
parent 3a0f09774a
commit 97c1ba8918
4 changed files with 130 additions and 178 deletions

View File: SparkAtomicActionScoreJob.java

@@ -6,13 +6,14 @@ import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
 import java.io.Serializable;
 import java.util.List;
-import java.util.Optional;
 import java.util.stream.Collectors;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaPairRDD;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.api.java.function.MapFunction;
@@ -41,8 +42,6 @@ import scala.Tuple2;
  */
 public class SparkAtomicActionScoreJob implements Serializable {
-    private static final String RESULT = "result";
-    private static final String PROJECT = "project";
     private static final Logger log = LoggerFactory.getLogger(SparkAtomicActionScoreJob.class);
     private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
@@ -61,15 +60,15 @@ public class SparkAtomicActionScoreJob implements Serializable {
         Boolean isSparkSessionManaged = isSparkSessionManaged(parser);
         log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
-        final String inputPath = parser.get("inputPath");
-        log.info("inputPath: {}", inputPath);
+        final String resultsInputPath = parser.get("resultsInputPath");
+        log.info("resultsInputPath: {}", resultsInputPath);
+        final String projectsInputPath = parser.get("projectsInputPath");
+        log.info("projectsInputPath: {}", projectsInputPath);
         final String outputPath = parser.get("outputPath");
         log.info("outputPath: {}", outputPath);
-        final String targetEntity = parser.get("targetEntity");
-        log.info("targetEntity: {}", targetEntity);
         SparkConf conf = new SparkConf();
         runWithSparkSession(
@@ -78,26 +77,23 @@ public class SparkAtomicActionScoreJob implements Serializable {
             spark -> {
                 removeOutputDir(spark, outputPath);
-                // follow different procedures for different target entities
-                switch (targetEntity) {
-                    case RESULT:
-                        prepareResults(spark, inputPath, outputPath);
-                        break;
-                    case PROJECT:
-                        prepareProjects(spark, inputPath, outputPath);
-                        break;
-                    default:
-                        throw new RuntimeException("Unknown target entity: " + targetEntity);
-                }
+                JavaPairRDD<Text, Text> resultsRDD = prepareResults(spark, resultsInputPath, outputPath);
+                JavaPairRDD<Text, Text> projectsRDD = prepareProjects(spark, projectsInputPath, outputPath);
+                resultsRDD
+                    .union(projectsRDD)
+                    .saveAsHadoopFile(
+                        outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
             });
     }
-    private static <I extends Project> void prepareProjects(SparkSession spark, String inputPath, String outputPath) {
+    private static <I extends Project> JavaPairRDD<Text, Text> prepareProjects(SparkSession spark, String inputPath,
+        String outputPath) {
         // read input bip project scores
         Dataset<BipProjectModel> projectScores = readPath(spark, inputPath, BipProjectModel.class);
-        projectScores.map((MapFunction<BipProjectModel, Project>) bipProjectScores -> {
+        return projectScores.map((MapFunction<BipProjectModel, Project>) bipProjectScores -> {
             Project project = new Project();
             project.setId(bipProjectScores.getProjectId());
             project.setMeasures(bipProjectScores.toMeasures());
@@ -107,12 +103,12 @@ public class SparkAtomicActionScoreJob implements Serializable {
             .map(p -> new AtomicAction(Project.class, p))
             .mapToPair(
                 aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
-                    new Text(OBJECT_MAPPER.writeValueAsString(aa))))
-            .saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class);
+                    new Text(OBJECT_MAPPER.writeValueAsString(aa))));
     }
-    private static <I extends Result> void prepareResults(SparkSession spark, String bipScorePath, String outputPath) {
+    private static <I extends Result> JavaPairRDD<Text, Text> prepareResults(SparkSession spark, String bipScorePath,
+        String outputPath) {
         final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
@@ -128,24 +124,20 @@ public class SparkAtomicActionScoreJob implements Serializable {
             return bs;
         }).collect(Collectors.toList()).iterator()).rdd(), Encoders.bean(BipScore.class));
-        bipScores
-            .map((MapFunction<BipScore, Result>) bs -> {
-                Result ret = new Result();
-                ret.setId(bs.getId());
-                ret.setMeasures(getMeasure(bs));
-                return ret;
-            }, Encoders.bean(Result.class))
+        return bipScores.map((MapFunction<BipScore, Result>) bs -> {
+            Result ret = new Result();
+            ret.setId(bs.getId());
+            ret.setMeasures(getMeasure(bs));
+            return ret;
+        }, Encoders.bean(Result.class))
             .toJavaRDD()
             .map(p -> new AtomicAction(Result.class, p))
             .mapToPair(
                 aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
-                    new Text(OBJECT_MAPPER.writeValueAsString(aa))))
-            .saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class);
+                    new Text(OBJECT_MAPPER.writeValueAsString(aa))));
     }
     private static List<Measure> getMeasure(BipScore value) {
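For context, the merged job now writes result and project actions into one gzip-compressed SequenceFile of (payload class name, serialized AtomicAction) pairs. A minimal sketch of reading that output back, mirroring what the updated test below does (the variable names and the ObjectMapper/Spark setup are assumed to match the rest of this module):

    // Each record is (canonical payload class name, JSON-serialized AtomicAction);
    // Result and Project payloads can both be handled as OafEntity.
    JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
    JavaRDD<OafEntity> payloads = sc
        .sequenceFile(outputPath, Text.class, Text.class)
        .map(pair -> OBJECT_MAPPER.readValue(pair._2().toString(), AtomicAction.class))
        .map(aa -> (OafEntity) aa.getPayload());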

View File: job input parameters definition (JSON)

@@ -6,9 +6,15 @@
     "paramRequired": false
   },
   {
-    "paramName": "ip",
-    "paramLongName": "inputPath",
-    "paramDescription": "the URL from where to get the programme file",
+    "paramName": "rip",
+    "paramLongName": "resultsInputPath",
+    "paramDescription": "the URL from where to get the input file for results",
+    "paramRequired": true
+  },
+  {
+    "paramName": "pip",
+    "paramLongName": "projectsInputPath",
+    "paramDescription": "the URL from where to get the input file for projects",
     "paramRequired": true
   },
   {
@@ -16,11 +22,5 @@
     "paramLongName": "outputPath",
     "paramDescription": "the path of the new ActionSet",
     "paramRequired": true
-  },
-  {
-    "paramName": "te",
-    "paramLongName": "targetEntity",
-    "paramDescription": "the type of target entity to be enriched; currently supported one of { 'result', 'project' }",
-    "paramRequired": true
   }
 ]
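With the targetEntity switch removed, a single invocation now produces both action sets. A minimal sketch of the corresponding argument list, mirroring the updated test further down (the input and output paths are placeholders, not real locations):

    // Illustrative invocation of the merged job; paths are examples only.
    SparkAtomicActionScoreJob
        .main(
            new String[] {
                "-isSparkSessionManaged", Boolean.FALSE.toString(),
                "-resultsInputPath", "/path/to/result_bip_scores.json",
                "-projectsInputPath", "/path/to/project_bip_scores.json",
                "-outputPath", "/path/to/actionSet"
            });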

View File: SparkAtomicActionScoreJobTest.java

@@ -7,6 +7,8 @@ import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import javax.xml.crypto.Data;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.spark.SparkConf;
@@ -27,6 +29,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.schema.action.AtomicAction;
 import eu.dnetlib.dhp.schema.oaf.KeyValue;
+import eu.dnetlib.dhp.schema.oaf.OafEntity;
 import eu.dnetlib.dhp.schema.oaf.Project;
 import eu.dnetlib.dhp.schema.oaf.Result;
@@ -38,9 +41,6 @@ public class SparkAtomicActionScoreJobTest {
     private static Path workingDir;
-    private final static String RESULT = "result";
-    private final static String PROJECT = "project";
     private static final Logger log = LoggerFactory.getLogger(SparkAtomicActionScoreJobTest.class);
     @BeforeAll
@@ -72,50 +72,64 @@ public class SparkAtomicActionScoreJobTest {
         spark.stop();
     }
-    private void runJob(String inputPath, String outputPath, String targetEntity) throws Exception {
+    private void runJob(String resultsInputPath, String projectsInputPath, String outputPath) throws Exception {
         SparkAtomicActionScoreJob
             .main(
                 new String[] {
                     "-isSparkSessionManaged", Boolean.FALSE.toString(),
-                    "-inputPath", inputPath,
+                    "-resultsInputPath", resultsInputPath,
+                    "-projectsInputPath", projectsInputPath,
                     "-outputPath", outputPath,
-                    "-targetEntity", targetEntity,
                 });
     }
     @Test
-    void testResultScores() throws Exception {
-        final String targetEntity = RESULT;
-        String inputResultScores = getClass()
+    void testScores() throws Exception {
+        String resultsInputPath = getClass()
             .getResource("/eu/dnetlib/dhp/actionmanager/bipfinder/result_bip_scores.json")
             .getPath();
-        String outputPath = workingDir.toString() + "/" + targetEntity + "/actionSet";
+        String projectsInputPath = getClass()
+            .getResource("/eu/dnetlib/dhp/actionmanager/bipfinder/project_bip_scores.json")
+            .getPath();
+        String outputPath = workingDir.toString() + "/actionSet";
         // execute the job to generate the action sets for result scores
-        runJob(inputResultScores, outputPath, targetEntity);
+        runJob(resultsInputPath, projectsInputPath, outputPath);
         final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-        JavaRDD<Result> tmp = sc
+        JavaRDD<OafEntity> tmp = sc
             .sequenceFile(outputPath, Text.class, Text.class)
             .map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
-            .map(aa -> ((Result) aa.getPayload()));
-        assertEquals(4, tmp.count());
-        Dataset<Result> verificationDataset = spark.createDataset(tmp.rdd(), Encoders.bean(Result.class));
+            .map(aa -> ((OafEntity) aa.getPayload()));
+        assertEquals(8, tmp.count());
+        Dataset<OafEntity> verificationDataset = spark.createDataset(tmp.rdd(), Encoders.bean(OafEntity.class));
         verificationDataset.createOrReplaceTempView("result");
-        Dataset<Row> execVerification = spark
+        Dataset<Row> testDataset = spark
             .sql(
                 "Select p.id oaid, mes.id, mUnit.value from result p " +
                     "lateral view explode(measures) m as mes " +
                     "lateral view explode(mes.unit) u as mUnit ");
-        Assertions.assertEquals(12, execVerification.count());
+        // execVerification.show();
+        Assertions.assertEquals(28, testDataset.count());
+        assertResultImpactScores(testDataset);
+        assertProjectImpactScores(testDataset);
+    }
+    void assertResultImpactScores(Dataset<Row> testDataset) {
         Assertions
             .assertEquals(
-                "6.63451994567e-09", execVerification
+                "6.63451994567e-09", testDataset
                     .filter(
                         "oaid='50|arXiv_dedup_::4a2d5fd8d71daec016c176ec71d957b1' " +
                             "and id = 'influence'")
@@ -125,7 +139,7 @@ public class SparkAtomicActionScoreJobTest {
                     .getString(0));
         Assertions
             .assertEquals(
-                "0.348694533145", execVerification
+                "0.348694533145", testDataset
                     .filter(
                         "oaid='50|arXiv_dedup_::4a2d5fd8d71daec016c176ec71d957b1' " +
                             "and id = 'popularity_alt'")
@@ -135,7 +149,7 @@ public class SparkAtomicActionScoreJobTest {
                     .getString(0));
         Assertions
             .assertEquals(
-                "2.16094680115e-09", execVerification
+                "2.16094680115e-09", testDataset
                     .filter(
                         "oaid='50|arXiv_dedup_::4a2d5fd8d71daec016c176ec71d957b1' " +
                             "and id = 'popularity'")
@@ -143,65 +157,49 @@ public class SparkAtomicActionScoreJobTest {
                     .collectAsList()
                     .get(0)
                     .getString(0));
     }
@Test void assertProjectImpactScores(Dataset<Row> testDataset) throws Exception {
void testProjectScores() throws Exception {
String targetEntity = PROJECT;
String inputResultScores = getClass()
.getResource("/eu/dnetlib/dhp/actionmanager/bipfinder/project_bip_scores.json")
.getPath();
String outputPath = workingDir.toString() + "/" + targetEntity + "/actionSet";
// execute the job to generate the action sets for project scores Assertions
runJob(inputResultScores, outputPath, PROJECT); .assertEquals(
"0", testDataset
final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext()); .filter(
"oaid='40|nih_________::c02a8233e9b60f05bb418f0c9b714833' " +
JavaRDD<Project> projects = sc "and id = 'numOfInfluentialResults'")
.sequenceFile(outputPath, Text.class, Text.class) .select("value")
.map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class)) .collectAsList()
.map(aa -> ((Project) aa.getPayload())); .get(0)
.getString(0));
// test the number of projects Assertions
assertEquals(4, projects.count()); .assertEquals(
"1", testDataset
String testProjectId = "40|nih_________::c02a8233e9b60f05bb418f0c9b714833"; .filter(
"oaid='40|nih_________::c02a8233e9b60f05bb418f0c9b714833' " +
// count that the project with id testProjectId is present "and id = 'numOfPopularResults'")
assertEquals(1, projects.filter(row -> row.getId().equals(testProjectId)).count()); .select("value")
.collectAsList()
projects .get(0)
.filter(row -> row.getId().equals(testProjectId)) .getString(0));
.flatMap(r -> r.getMeasures().iterator()) Assertions
.foreach(m -> { .assertEquals(
log.info(m.getId() + " " + m.getUnit()); "25", testDataset
.filter(
// ensure that only one score is present for each bip impact measure "oaid='40|nih_________::c02a8233e9b60f05bb418f0c9b714833' " +
assertEquals(1, m.getUnit().size()); "and id = 'totalImpulse'")
.select("value")
KeyValue kv = m.getUnit().get(0); .collectAsList()
.get(0)
// ensure that the correct key is provided, i.e. score .getString(0));
assertEquals("score", kv.getKey()); Assertions
.assertEquals(
switch (m.getId()) { "43", testDataset
case "numOfInfluentialResults": .filter(
assertEquals("0", kv.getValue()); "oaid='40|nih_________::c02a8233e9b60f05bb418f0c9b714833' " +
break; "and id = 'totalCitationCount'")
case "numOfPopularResults": .select("value")
assertEquals("1", kv.getValue()); .collectAsList()
break; .get(0)
case "totalImpulse": .getString(0));
assertEquals("25", kv.getValue());
break;
case "totalCitationCount":
assertEquals("43", kv.getValue());
break;
default:
fail("Unknown measure id in the context of projects");
}
});
} }
} }
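The merged test's expected counts are consistent with combining the two fixture files: 8 atomic actions = 4 result payloads + 4 project payloads, and 28 exploded measure rows = 4 results × 3 impact scores (influence, popularity, popularity_alt) + 4 projects × 4 aggregate indicators (numOfInfluentialResults, numOfPopularResults, totalImpulse, totalCitationCount).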

View File: Oozie workflow definition (workflow.xml)

@@ -31,7 +31,7 @@
     <!-- Aggregation of impact scores on the project level -->
     <case to="project-impact-indicators">${wf:conf('resume') eq "projects-impact"}</case>
-    <case to="create-actionset-for-projects">${wf:conf('resume') eq "projects-impact-actionsets"}</case>
+    <case to="create-actionset">${wf:conf('resume') eq "create-actionset"}</case>
     <default to="create-openaire-ranking-graph" />
 </switch>
@@ -455,53 +455,11 @@
         <file>${wfAppPath}/map_scores_to_dois.py#map_scores_to_dois.py</file>
     </spark>
-    <ok to="delete-output-path-for-actionset" />
+    <ok to="project-impact-indicators" />
     <error to="map-scores-fail" />
 </action>
-<!-- Re-create folder for result and project actionsets -->
-<action name="delete-output-path-for-actionset">
-    <fs>
-        <delete path="${actionSetOutputPath}/results/"/>
-        <delete path="${actionSetOutputPath}/projects/"/>
-        <mkdir path="${actionSetOutputPath}/results/"/>
-        <mkdir path="${actionSetOutputPath}/projects/"/>
-    </fs>
-    <ok to="create-actionset-for-results"/>
-    <error to="actionset-delete-fail"/>
-</action>
-<action name="create-actionset-for-results">
-    <spark xmlns="uri:oozie:spark-action:0.2">
-        <master>yarn-cluster</master>
-        <mode>cluster</mode>
-        <name>Produces the atomic action with the bip finder scores for publications</name>
-        <class>eu.dnetlib.dhp.actionmanager.bipfinder.SparkAtomicActionScoreJob</class>
-        <jar>dhp-aggregation-${projectVersion}.jar</jar>
-        <spark-opts>
-            --executor-memory=${sparkNormalExecutorMemory}
-            --executor-cores=${sparkExecutorCores}
-            --driver-memory=${sparkNormalDriverMemory}
-            --conf spark.extraListeners=${spark2ExtraListeners}
-            --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
-            --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
-            --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
-            --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
-        </spark-opts>
-        <arg>--inputPath</arg><arg>${bipScorePath}</arg>
-        <arg>--outputPath</arg><arg>${actionSetOutputPath}/results/</arg>
-        <arg>--targetEntity</arg><arg>result</arg>
-    </spark>
-    <ok to="project-impact-indicators"/>
-    <error to="actionset-creation-fail"/>
-</action>
 <action name="project-impact-indicators">
     <spark xmlns="uri:oozie:spark-action:0.2">
@@ -538,17 +496,26 @@
         <file>${wfAppPath}/projects_impact.py#projects_impact.py</file>
     </spark>
-    <ok to="create-actionset-for-projects" />
+    <ok to="delete-output-path-for-actionset" />
     <error to="project-impact-indicators-fail" />
 </action>
-<action name="create-actionset-for-projects">
+<!-- Re-create folder for actionsets -->
+<action name="delete-output-path-for-actionset">
+    <fs>
+        <delete path="${actionSetOutputPath}"/>
+        <mkdir path="${actionSetOutputPath}"/>
+    </fs>
+    <ok to="create-actionset"/>
+    <error to="actionset-delete-fail"/>
+</action>
+<action name="create-actionset">
     <spark xmlns="uri:oozie:spark-action:0.2">
         <master>yarn-cluster</master>
         <mode>cluster</mode>
-        <name>Produces the atomic action with the bip finder scores for projects</name>
+        <name>Produces the atomic action with the bip finder scores</name>
         <class>eu.dnetlib.dhp.actionmanager.bipfinder.SparkAtomicActionScoreJob</class>
         <jar>dhp-aggregation-${projectVersion}.jar</jar>
@@ -563,14 +530,13 @@
             --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
         </spark-opts>
-        <arg>--inputPath</arg><arg>${projectImpactIndicatorsOutput}</arg>
-        <arg>--outputPath</arg><arg>${actionSetOutputPath}/projects/</arg>
-        <arg>--targetEntity</arg><arg>project</arg>
+        <arg>--resultsInputPath</arg><arg>${bipScorePath}</arg>
+        <arg>--projectsInputPath</arg><arg>${projectImpactIndicatorsOutput}</arg>
+        <arg>--outputPath</arg><arg>${actionSetOutputPath}</arg>
     </spark>
     <ok to="end"/>
-    <error to="actionset-project-creation-fail"/>
+    <error to="actionset-creation-fail"/>
 </action>
 <!-- Definitions of failure messages -->
@@ -630,10 +596,6 @@
     <message>Calculating project impact indicators failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
 </kill>
-<kill name="actionset-project-creation-fail">
-    <message>ActionSet creation for projects failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
-</kill>
 <!-- Define ending node -->
 <end name="end" />