
workflow and parameters to execute the dump

Miriam Baglioni 2020-06-09 15:39:38 +02:00
parent 6bbe27587f
commit a089db18f1
5 changed files with 314 additions and 149 deletions

View File

@@ -1,4 +1,11 @@
 [
+  {
+    "paramName":"is",
+    "paramLongName":"isLookUpUrl",
+    "paramDescription": "URL of the isLookUp Service",
+    "paramRequired": true
+  },
   {
     "paramName":"s",
     "paramLongName":"sourcePath",
@@ -16,5 +23,26 @@
     "paramLongName": "isSparkSessionManaged",
     "paramDescription": "true if the spark session is managed, false otherwise",
     "paramRequired": false
+  },
+  {
+    "paramName":"tn",
+    "paramLongName":"resultTableName",
+    "paramDescription": "the name of the result table we are currently working on",
+    "paramRequired": true
+  },
+  {
+    "paramName":"dn",
+    "paramLongName":"dumpTableName",
+    "paramDescription": "the name of the correspondent dump element",
+    "paramRequired": true
+  },
+  {
+    "paramName":"rt",
+    "paramLongName":"resultType",
+    "paramDescription": "the name of the correspondent dump element",
+    "paramRequired": true
   }
 ]

View File

@@ -2,9 +2,17 @@
 <parameters>
     <property>
-        <name>inputPath</name>
+        <name>sourcePath</name>
         <description>the source path</description>
     </property>
+    <property>
+        <name>isLookUpUrl</name>
+        <description>the isLookup service endpoint</description>
+    </property>
+    <property>
+        <name>outputPath</name>
+        <description>the output path</description>
+    </property>
     <property>
         <name>hiveDbName</name>
         <description>the target hive database name</description>
@@ -72,45 +80,35 @@
         </configuration>
     </global>

-    <start to="reset_DB"/>
+    <start to="reset_outputpath"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>

-    <action name="reset_DB">
-        <hive2 xmlns="uri:oozie:hive2-action:0.1">
-            <configuration>
-                <property>
-                    <name>hive.metastore.uris</name>
-                    <value>${hiveMetastoreUris}</value>
-                </property>
-            </configuration>
-            <jdbc-url>${hiveJdbcUrl}/${hiveDbName}</jdbc-url>
-            <script>lib/scripts/reset_db.sql</script>
-            <param>hiveDbName=${hiveDbName}</param>
-        </hive2>
-        <ok to="fork_import"/>
+    <action name="reset_outputpath">
+        <fs>
+            <delete path="${outputPath}"/>
+            <mkdir path="${outputPath}"/>
+        </fs>
+        <ok to="fork_dump"/>
         <error to="Kill"/>
     </action>

-    <fork name="fork_import">
-        <path start="import_publication"/>
-        <path start="import_dataset"/>
-        <path start="import_orp"/>
-        <path start="import_software"/>
-        <path start="import_datasource"/>
-        <path start="import_organization"/>
-        <path start="import_project"/>
-        <path start="import_relation"/>
+    <fork name="fork_dump">
+        <path start="dump_publication"/>
+        <path start="dump_dataset"/>
+        <path start="dump_orp"/>
+        <path start="dump_software"/>
    </fork>

-    <action name="import_publication">
+    <action name="dump_publication">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
-            <name>Import table publication</name>
-            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>
+            <name>Dump table publication for community related products</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkDumpCommunityProducts</class>
            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
@@ -122,21 +120,23 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
-            <arg>--inputPath</arg><arg>${inputPath}/publication</arg>
-            <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>
-            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
-            <arg>--hiveMetastoreUris</arg><arg>${hiveMetastoreUris}</arg>
+            <arg>--sourcePath</arg><arg>${inputPath}/publication</arg>
+            <arg>--inputType</arg><arg>publication</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
+            <arg>--dumpTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Publication</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/publication</arg>
+            <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
        </spark>
-        <ok to="join_import"/>
+        <ok to="join_dump"/>
        <error to="Kill"/>
    </action>

-    <action name="import_dataset">
+    <action name="dump_dataset">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
-            <name>Import table dataset</name>
-            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>
+            <name>Dump table dataset for community related products</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkDumpCommunityProducts</class>
            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
@@ -148,21 +148,23 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
-            <arg>--inputPath</arg><arg>${inputPath}/dataset</arg>
-            <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>
-            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
-            <arg>--hiveMetastoreUris</arg><arg>${hiveMetastoreUris}</arg>
+            <arg>--sourcePath</arg><arg>${inputPath}/dataset</arg>
+            <arg>--inputType</arg><arg>dataset</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
+            <arg>--dumpTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Dataset</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/dataset</arg>
+            <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
        </spark>
-        <ok to="join_import"/>
+        <ok to="join_dump"/>
        <error to="Kill"/>
    </action>

-    <action name="import_orp">
+    <action name="dump_orp">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
-            <name>Import table otherresearchproduct</name>
-            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>
+            <name>Dump table ORP for community related products</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkDumpCommunityProducts</class>
            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
@@ -174,21 +176,23 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
-            <arg>--inputPath</arg><arg>${inputPath}/otherresearchproduct</arg>
-            <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>
-            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
-            <arg>--hiveMetastoreUris</arg><arg>${hiveMetastoreUris}</arg>
+            <arg>--sourcePath</arg><arg>${inputPath}/otherresearchproduct</arg>
+            <arg>--inputType</arg><arg>otherresearchproduct</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
+            <arg>--dumpTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.OtherResearchProduct</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/otherresearchproduct</arg>
+            <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
        </spark>
-        <ok to="join_import"/>
+        <ok to="join_dump"/>
        <error to="Kill"/>
    </action>

-    <action name="import_software">
+    <action name="dump_software">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
-            <name>Import table software</name>
-            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>
+            <name>Dump table software for community related products</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkDumpCommunityProducts</class>
            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
@@ -200,21 +204,25 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
-            <arg>--inputPath</arg><arg>${inputPath}/software</arg>
-            <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>
-            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
-            <arg>--hiveMetastoreUris</arg><arg>${hiveMetastoreUris}</arg>
+            <arg>--sourcePath</arg><arg>${inputPath}/software</arg>
+            <arg>--inputType</arg><arg>software</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
+            <arg>--dumpTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Software</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/software</arg>
+            <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
        </spark>
-        <ok to="join_import"/>
+        <ok to="join_dump"/>
        <error to="Kill"/>
    </action>

-    <action name="import_datasource">
+    <join name="join_dump" to="prepareResultProject"/>
+
+    <action name="prepareResultProject">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
-            <name>Import table datasource</name>
-            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>
+            <name>Prepare association result subset of project info</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkPrepareResultProject</class>
            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
@@ -226,21 +234,26 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
-            <arg>--inputPath</arg><arg>${inputPath}/datasource</arg>
-            <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>
-            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.oaf.Datasource</arg>
-            <arg>--hiveMetastoreUris</arg><arg>${hiveMetastoreUris}</arg>
+            <arg>--sourcePath</arg><arg>${inputPath}</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/preparedInfo</arg>
        </spark>
-        <ok to="join_import"/>
+        <ok to="fork_extendWithProject"/>
        <error to="Kill"/>
    </action>

-    <action name="import_organization">
+    <fork name="fork_extendWithProject">
+        <path start="extend_publication"/>
+        <path start="extend_dataset"/>
+        <path start="extend_orp"/>
+        <path start="extend_software"/>
+    </fork>
+
+    <action name="extend_publication">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
-            <name>Import table organization</name>
-            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>
+            <name>Extend dumped publications with information about projects</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkUpdateProjectInfo</class>
            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
@@ -252,21 +265,21 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
-            <arg>--inputPath</arg><arg>${inputPath}/organization</arg>
-            <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>
-            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.oaf.Organization</arg>
-            <arg>--hiveMetastoreUris</arg><arg>${hiveMetastoreUris}</arg>
+            <arg>--sourcePath</arg><arg>${workingDir}/publication</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/ext/publication</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Publication</arg>
+            <arg>--preparedInfoPath</arg><arg>${workingDir}/preparedInfo</arg>
        </spark>
-        <ok to="join_import"/>
+        <ok to="join_extend"/>
        <error to="Kill"/>
    </action>

-    <action name="import_project">
+    <action name="extend_dataset">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
-            <name>Import table project</name>
-            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>
+            <name>Extend dumped dataset with information about projects</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkUpdateProjectInfo</class>
            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
@@ -278,21 +291,20 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
-            <arg>--inputPath</arg><arg>${inputPath}/project</arg>
-            <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>
-            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.oaf.Project</arg>
-            <arg>--hiveMetastoreUris</arg><arg>${hiveMetastoreUris}</arg>
+            <arg>--sourcePath</arg><arg>${workingDir}/dataset</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/ext/dataset</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Dataset</arg>
+            <arg>--preparedInfoPath</arg><arg>${workingDir}/preparedInfo</arg>
        </spark>
-        <ok to="join_import"/>
+        <ok to="join_extend"/>
        <error to="Kill"/>
    </action>

-    <action name="import_relation">
+    <action name="extend_orp">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
-            <name>Import table project</name>
-            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>
+            <name>Extend dumped ORP with information about projects</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkUpdateProjectInfo</class>
            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
@@ -304,33 +316,182 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
-            <arg>--inputPath</arg><arg>${inputPath}/relation</arg>
-            <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>
-            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.oaf.Relation</arg>
-            <arg>--hiveMetastoreUris</arg><arg>${hiveMetastoreUris}</arg>
+            <arg>--sourcePath</arg><arg>${workingDir}/otherresearchproduct</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/ext/otherresearchproduct</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.OtherResearchProduct</arg>
+            <arg>--preparedInfoPath</arg><arg>${workingDir}/preparedInfo</arg>
        </spark>
-        <ok to="join_import"/>
+        <ok to="join_extend"/>
        <error to="Kill"/>
    </action>

-    <join name="join_import" to="PostProcessing"/>
-
-    <action name="PostProcessing">
-        <hive2 xmlns="uri:oozie:hive2-action:0.1">
-            <configuration>
-                <property>
-                    <name>hive.metastore.uris</name>
-                    <value>${hiveMetastoreUris}</value>
-                </property>
-            </configuration>
-            <jdbc-url>${hiveJdbcUrl}/${hiveDbName}</jdbc-url>
-            <script>lib/scripts/postprocessing.sql</script>
-            <param>hiveDbName=${hiveDbName}</param>
-        </hive2>
-        <ok to="End"/>
-        <error to="Kill"/>
-    </action>
+    <action name="extend_software">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Extend dumped software with information about projects</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkUpdateProjectInfo</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${workingDir}/software</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/ext/software</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Software</arg>
+            <arg>--preparedInfoPath</arg><arg>${workingDir}/preparedInfo</arg>
+        </spark>
+        <ok to="join_extend"/>
+        <error to="Kill"/>
+    </action>
+
+    <join name="join_extend" to="fork_splitForCommunities"/>
+
+    <fork name="fork_splitForCommunities">
+        <path start="split_publication"/>
+        <path start="split_dataset"/>
+        <path start="split_orp"/>
+        <path start="split_software"/>
+    </fork>
+
+    <action name="split_publication">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Split dumped result for community</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkSplitForCommunity</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${workingDir}/ext/publication</arg>
+            <arg>--outputPath</arg><arg>${outputPath}</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Publication</arg>
+            <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
+        </spark>
+        <ok to="join_split"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="split_dataset">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Split dumped result for community</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkSplitForCommunity</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${workingDir}/ext/dataset</arg>
+            <arg>--outputPath</arg><arg>${outputPath}</arg>
+            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Dataset</arg>
+            <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
+        </spark>
+        <ok to="join_split"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="split_orp">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Split dumped result for community</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkSplitForCommunity</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${workingDir}/ext/orp</arg>
+            <arg>--outputPath</arg><arg>${outputPath}</arg>
+            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.OtherResearchProduct</arg>
+            <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
+        </spark>
+        <ok to="join_split"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="split_software">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Split dumped result for community</name>
+            <class>eu.dnetlib.dhp.oa.graph.dump.SparkSplitForCommunity</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${workingDir}/ext/software</arg>
+            <arg>--outputPath</arg><arg>${outputPath}</arg>
+            <arg>--className</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Software</arg>
+            <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
+        </spark>
+        <ok to="join_split"/>
+        <error to="Kill"/>
+    </action>
+
+    <!--    <join name="join_split" to="loadInZenodo"/>-->
+    <join name="join_split" to="End"/>
+
+    <!--    <action name="loadInZenodo">-->
+    <!--        <spark xmlns="uri:oozie:spark-action:0.2">-->
+    <!--            <master>yarn</master>-->
+    <!--            <mode>cluster</mode>-->
+    <!--            <name>Import table software</name>-->
+    <!--            <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>-->
+    <!--            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>-->
+    <!--            <spark-opts>-->
+    <!--                &#45;&#45;executor-memory=${sparkExecutorMemory}-->
+    <!--                &#45;&#45;executor-cores=${sparkExecutorCores}-->
+    <!--                &#45;&#45;driver-memory=${sparkDriverMemory}-->
+    <!--                &#45;&#45;conf spark.extraListeners=${spark2ExtraListeners}-->
+    <!--                &#45;&#45;conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}-->
+    <!--                &#45;&#45;conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}-->
+    <!--                &#45;&#45;conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}-->
+    <!--                &#45;&#45;conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}-->
+    <!--            </spark-opts>-->
+    <!--            <arg>&#45;&#45;inputPath</arg><arg>${workingDir}/ext/publication</arg>-->
+    <!--            <arg>&#45;&#45;hiveDbName</arg><arg>${hiveDbName}</arg>-->
+    <!--            <arg>&#45;&#45;resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Publication</arg>-->
+    <!-- -->
+    <!--        </spark>-->
+    <!--        <ok to="End"/>-->
+    <!--        <error to="Kill"/>-->
+    <!--    </action>-->

    <end name="End"/>
</workflow-app>

View File

@@ -1,11 +1,6 @@
 [
-  {
-    "paramName":"is",
-    "paramLongName":"isLookUpUrl",
-    "paramDescription": "URL of the isLookUp Service",
-    "paramRequired": true
-  },
   {
     "paramName":"s",
     "paramLongName":"sourcePath",
@@ -31,18 +26,11 @@
     "paramRequired": true
   },
   {
-    "paramName":"dn",
-    "paramLongName":"dumpTableName",
-    "paramDescription": "the name of the corresondent dump element ",
-    "paramRequired": true
-  },
-  {
-    "paramName":"rt",
-    "paramLongName":"resultType",
-    "paramDescription": "the name of the corresondent dump element ",
+    "paramName": "pip",
+    "paramLongName": "preparedInfoPath",
+    "paramDescription": "the path of the association result projectlist",
     "paramRequired": true
   }
 ]

View File

@@ -1,20 +1,20 @@
 [
   {
-    "paramName": "issm",
-    "paramLongName": "isSparkSessionManaged",
-    "paramDescription": "when true will stop SparkSession after job execution",
-    "paramRequired": false
-  },
-  {
-    "paramName": "s",
-    "paramLongName": "sourcePath",
-    "paramDescription": "the source path",
-    "paramRequired": true
-  },
-  {
-    "paramName": "g",
-    "paramLongName": "graphRawPath",
-    "paramDescription": "the path of the graph Raw in hdfs",
-    "paramRequired": true
+    "paramName":"s",
+    "paramLongName":"sourcePath",
+    "paramDescription": "the path of the sequential file to read",
+    "paramRequired": true
+  },
+  {
+    "paramName": "out",
+    "paramLongName": "outputPath",
+    "paramDescription": "the path used to store temporary output files",
+    "paramRequired": true
+  },
+  {
+    "paramName": "ssm",
+    "paramLongName": "isSparkSessionManaged",
+    "paramDescription": "true if the spark session is managed, false otherwise",
+    "paramRequired": false
   }
 ]

View File

@@ -29,18 +29,6 @@
     "paramLongName":"resultTableName",
     "paramDescription": "the name of the result table we are currently working on",
     "paramRequired": true
-  },
-  {
-    "paramName":"dn",
-    "paramLongName":"dumpTableName",
-    "paramDescription": "the name of the corresondent dump element ",
-    "paramRequired": true
-  },
-  {
-    "paramName":"rt",
-    "paramLongName":"resultType",
-    "paramDescription": "the name of the corresondent dump element ",
-    "paramRequired": true
   }
 ]