dnet-hadoop/dhp-workflows/dhp-aggregation/src/main/resources/eu/dnetlib/dhp/collection/plugin/base/oozie_app/workflow.xml

<workflow-app name="Analyze_BASE_Records" xmlns="uri:oozie:workflow:0.5">
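
    <!-- Workflow with a single Spark action that analyzes a dump of BASE
         (Bielefeld Academic Search Engine) records: all input/output paths
         and the re-import flag are supplied as parameters at submission time. -->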
    <parameters>
        <property>
            <name>baseInputPath</name>
            <description>the path of the BASE dump</description>
        </property>
        <property>
            <name>baseDataPath</name>
            <description>the path where to store the BASE records</description>
        </property>
        <property>
            <name>baseReportsPath</name>
            <description>the path where to store the reports</description>
        </property>
        <property>
            <name>baseReimportFlag</name>
            <description>the flag to re-import the records from the dump</description>
        </property>
    </parameters>
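
    <!-- Control flow: the workflow starts directly at the Spark action; any
         failure routes to the Kill node, which reports the error message of
         the last failing node. -->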
    <start to="analyzeBaseRecords"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
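
    <!-- Submits eu.dnetlib.dhp.collection.plugin.base.BaseAnalyzerJob to YARN
         in cluster mode. Judging from its arguments, the job reads the dump
         from baseInputPath, stores the records under baseDataPath (with
         baseReimportFlag controlling whether they are re-imported from the
         dump) and writes its reports to baseReportsPath. -->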
    <action name="analyzeBaseRecords">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
            <name>AnalyzeBaseRecords</name>
            <class>eu.dnetlib.dhp.collection.plugin.base.BaseAnalyzerJob</class>
            <jar>dhp-aggregation-${projectVersion}.jar</jar>
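            <!-- Spark tuning: executor/driver resources and the spark2*
                 listener and history-server settings come from the global job
                 properties; the shuffle-partition count is hard-coded here. -->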
            <spark-opts>
                --executor-cores=${sparkExecutorCores}
                --executor-memory=${sparkExecutorMemory}
                --driver-memory=${sparkDriverMemory}
                --conf spark.extraListeners=${spark2ExtraListeners}
                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.shuffle.partitions=3840
            </spark-opts>
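            <!-- Arguments of BaseAnalyzerJob, bound to the workflow parameters
                 declared above. -->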
            <arg>--inputPath</arg><arg>${baseInputPath}</arg>
            <arg>--dataPath</arg><arg>${baseDataPath}</arg>
            <arg>--outputPath</arg><arg>${baseReportsPath}</arg>
            <arg>--reimport</arg><arg>${baseReimportFlag}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>
    </action>

    <end name="End"/>
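
    <!-- A minimal, illustrative job.properties for submitting this workflow;
         every value below is hypothetical and deployment-specific (the spark2*
         listener and history-server properties referenced in spark-opts must
         be provided as well):

             oozie.wf.application.path=hdfs://nameservice1/lib/dnet/oozie_apps/base_analyze
             baseInputPath=/data/base/dump
             baseDataPath=/data/base/records
             baseReportsPath=/data/base/reports
             baseReimportFlag=true
             sparkExecutorCores=4
             sparkExecutorMemory=8G
             sparkDriverMemory=4G
    -->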
</workflow-app>