Add actionset creation for project bip indicators in workflow

Serafeim Chatzopoulos 2023-04-26 20:40:06 +03:00
parent ee04cf92bf
commit 815a4ddbba
2 changed files with 53 additions and 35 deletions

eu/dnetlib/dhp/actionmanager/bipfinder/SparkAtomicActionScoreJob.java

@@ -41,7 +41,8 @@ import scala.Tuple2;
*/
public class SparkAtomicActionScoreJob implements Serializable {
private static final String DOI = "doi";
private static final String RESULT = "result";
private static final String PROJECT = "project";
private static final Logger log = LoggerFactory.getLogger(SparkAtomicActionScoreJob.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
@@ -79,10 +80,10 @@ public class SparkAtomicActionScoreJob implements Serializable {
// follow different procedures for different target entities
switch (targetEntity) {
case "result":
case RESULT:
prepareResults(spark, inputPath, outputPath);
break;
case "project":
case PROJECT:
prepareProjects(spark, inputPath, outputPath);
break;
default:
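
For orientation, a minimal Java sketch of the dispatch this hunk introduces: the job now branches on the new PROJECT constant alongside RESULT. The class name and method bodies below are stand-ins (the real prepareResults/prepareProjects read BiP scores with Spark and emit atomic actions); only the shape of the switch is taken from the diff.

    // Sketch only: stubs in place of the real Spark logic.
    public class ScoreJobDispatchSketch {
        private static final String RESULT = "result";
        private static final String PROJECT = "project";

        static void run(String targetEntity, String inputPath, String outputPath) {
            switch (targetEntity) {
                case RESULT:
                    prepareResults(inputPath, outputPath);  // pre-existing result branch
                    break;
                case PROJECT:
                    prepareProjects(inputPath, outputPath); // project branch added by this commit
                    break;
                default:
                    throw new IllegalArgumentException("invalid targetEntity: " + targetEntity);
            }
        }

        static void prepareResults(String inputPath, String outputPath) { /* stub */ }
        static void prepareProjects(String inputPath, String outputPath) { /* stub */ }
    }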

workflow.xml

@@ -35,7 +35,6 @@
<delete path="${synonymFolder}"/>
</prepare>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<mode>cluster</mode>
@@ -90,9 +89,8 @@
<!-- This should give the machine/root of the hdfs, serafeim has provided a link with the required job properties -->
<name-node>${nameNode}</name-node>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<mode>cluster</mode>
<!-- This is the name of our job -->
@@ -132,7 +130,6 @@
<!-- This should give the machine/root of the hdfs, serafeim has provided a link with the required job properties -->
<name-node>${nameNode}</name-node>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<mode>cluster</mode>
@@ -181,9 +178,8 @@
<!-- This should give the machine/root of the hdfs, serafeim has provided a link with the required job properties -->
<name-node>${nameNode}</name-node>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<mode>cluster</mode>
<!-- This is the name of our job -->
@@ -235,7 +231,7 @@
<!-- Reference says: The master element indicates the url of the Spark Master. Ex: spark://host:port, mesos://host:port, yarn-cluster, yarn-master, or local. -->
<!-- <master>local[*]</master> -->
<!-- Reference says: The mode element if present indicates the mode of spark, where to run spark driver program. Ex: client,cluster. | In my case I always have a client -->
<!-- <mode>client</mode> -->
<!-- <mode>client</mode> -->
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
@@ -336,12 +332,12 @@
<!-- This should give the machine/root of the hdfs -->
<name-node>${nameNode}</name-node>
<!-- Exec is needed for shell commands - points to type of shell command -->
<exec>/usr/bin/bash</exec>
<!-- name of script to run -->
<argument>get_ranking_files.sh</argument>
<!-- We only pass the directory where we expect to find the rankings -->
<argument>/${workflowDataDir}</argument>
<!-- Exec is needed for shell commands - points to type of shell command -->
<exec>/usr/bin/bash</exec>
<!-- name of script to run -->
<argument>get_ranking_files.sh</argument>
<!-- We only pass the directory where we expect to find the rankings -->
<argument>/${workflowDataDir}</argument>
<!-- the name of the file run -->
<file>${wfAppPath}/get_ranking_files.sh#get_ranking_files.sh</file>
@@ -374,8 +370,8 @@
<!-- This should give the machine/root of the hdfs, serafeim has provided a link with the required job properties -->
<name-node>${nameNode}</name-node>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<mode>cluster</mode>
<!-- This is the name of our job -->
@@ -422,8 +418,8 @@
<!-- This should give the machine/root of the hdfs, serafeim has provided a link with the required job properties -->
<name-node>${nameNode}</name-node>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<mode>cluster</mode>
<!-- This is the name of our job -->
@@ -477,7 +473,6 @@
<delete path="${synonymFolder}"/>
</prepare>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<mode>cluster</mode>
@@ -520,7 +515,6 @@
<!-- This should give the machine/root of the hdfs, serafeim has provided a link with the required job properties -->
<name-node>${nameNode}</name-node>
<!-- using configs from an example on openaire -->
<master>yarn-cluster</master>
<mode>cluster</mode>
@@ -564,17 +558,12 @@
<fs>
<delete path="${actionSetOutputPath}"/>
<mkdir path="${actionSetOutputPath}"/>
<!--
<delete path="${workingDir}"/>
<mkdir path="${workingDir}"/>
-->
</fs>
<ok to="createActionSet"/>
<ok to="createActionSetForResults"/>
<error to="actionset-delete-fail"/>
</action>
<action name="createActionSet">
<action name="createActionSetForResults">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
@@ -593,12 +582,12 @@
</spark-opts>
<arg>--inputPath</arg><arg>${bipScorePath}</arg>
<arg>--outputPath</arg><arg>${actionSetOutputPath}</arg>
</spark>
<arg>--targetEntity</arg><arg>result</arg>
</spark>
<ok to="project-impact-indicators"/>
<error to="actionset-creation-fail"/>
</action>
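
The renamed createActionSetForResults action now passes --targetEntity result explicitly, so the same class and jar serve both entity types. In programmatic terms the action amounts to an invocation like the one below; the HDFS paths are placeholders for ${bipScorePath} and ${actionSetOutputPath}, and the throws-Exception main signature is an assumption.

    import eu.dnetlib.dhp.actionmanager.bipfinder.SparkAtomicActionScoreJob;

    // Hypothetical driver mirroring the Oozie action's argument list.
    public class InvokeForResultsExample {
        public static void main(String[] args) throws Exception {
            SparkAtomicActionScoreJob.main(new String[] {
                "--inputPath", "hdfs:///data/bip/scores",       // ${bipScorePath}
                "--outputPath", "hdfs:///data/actionsets/bip",  // ${actionSetOutputPath}
                "--targetEntity", "result"
            });
        }
    }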
<!-- PAGERANK here -->
<action name="project-impact-indicators">
<!-- This is required as a tag for spark jobs, regardless of programming language -->
<spark xmlns="uri:oozie:spark-action:0.2">
@@ -645,13 +634,38 @@
</spark>
<!-- Do this after finishing okay -->
<ok to="end" />
<ok to="createActionSetForProjects" />
<!-- Go there if we have an error -->
<error to="project-impact-indicators-fail" />
</action>
<action name="createActionSetForProjects">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Produces the atomic action with the bip finder scores for projects</name>
<class>eu.dnetlib.dhp.actionmanager.bipfinder.SparkAtomicActionScoreJob</class>
<jar>dhp-aggregation-${projectVersion}.jar</jar>
<spark-opts>
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
</spark-opts>
<arg>--inputPath</arg><arg>${projectImpactIndicatorsOutput}</arg>
<arg>--outputPath</arg><arg>${actionSetOutputPath}</arg>
<arg>--targetEntity</arg><arg>project</arg>
</spark>
<ok to="end"/>
<error to="actionset-project-creation-fail"/>
</action>
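
The new createActionSetForProjects action mirrors the result one: same class and jar, but its input is the output of the project-impact-indicators step, and --targetEntity project routes execution to the prepareProjects branch of the switch shown earlier. Note that both action-set jobs write under ${actionSetOutputPath}. A sketch of the equivalent invocation, with the same placeholder-path caveats as above:

    import eu.dnetlib.dhp.actionmanager.bipfinder.SparkAtomicActionScoreJob;

    // Hypothetical driver mirroring the new Oozie action's argument list.
    public class InvokeForProjectsExample {
        public static void main(String[] args) throws Exception {
            SparkAtomicActionScoreJob.main(new String[] {
                "--inputPath", "hdfs:///data/impact/projects",  // ${projectImpactIndicatorsOutput}
                "--outputPath", "hdfs:///data/actionsets/bip",  // ${actionSetOutputPath}
                "--targetEntity", "project"
            });
        }
    }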
<!-- TODO: end the workflow-->
<!-- Define ending node -->
@@ -695,11 +709,14 @@
</kill>
<kill name="actionset-creation-fail">
<message>ActionSet creation failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
<message>ActionSet creation for results failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<kill name="project-impact-indicators-fail">
<message>Calculating project impact indicators failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<kill name="actionset-project-creation-fail">
<message>ActionSet creation for projects failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
</workflow-app>