forked from D-Net/dnet-hadoop

add nodes to import hdfs mdstores

parent 03a510859a
commit e950750262
@@ -40,6 +40,16 @@
         <value>false</value>
         <description>should import content from the aggregator or reuse a previous version</description>
     </property>
+    <property>
+        <name>reuseODF_hdfs</name>
+        <value>false</value>
+        <description>should import content from the aggregator or reuse a previous version</description>
+    </property>
+    <property>
+        <name>reuseOAF_hdfs</name>
+        <value>false</value>
+        <description>should import content from the aggregator or reuse a previous version</description>
+    </property>
     <property>
         <name>contentPath</name>
         <description>path location to store (or reuse) content from the aggregator</description>
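
The two new flags mirror the existing reuseODF/reuseOAF switches. As a sketch of how they would be flipped at submission time, here is a fragment of an Oozie job configuration that reuses previously imported HDFS mdstore content instead of downloading it again; only the property names come from this commit, the surrounding fragment and values are illustrative:

<!-- illustrative job configuration fragment; not part of the commit -->
<configuration>
    <property>
        <name>reuseODF_hdfs</name>
        <!-- true: skip ImportODF_hdfs and reuse ${contentPath}/odf_records_hdfs -->
        <value>true</value>
    </property>
    <property>
        <name>reuseOAF_hdfs</name>
        <!-- true: skip ImportOAF_hdfs and reuse ${contentPath}/oaf_records_hdfs -->
        <value>true</value>
    </property>
</configuration>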
@@ -289,7 +299,7 @@
     <decision name="reuse_oaf">
         <switch>
             <case to="ImportOAF">${wf:conf('reuseOAF') eq false}</case>
-            <case to="wait_import">${wf:conf('reuseOAF') eq true}</case>
+            <case to="reuse_odf_hdfs">${wf:conf('reuseOAF') eq true}</case>
             <default to="ImportOAF"/>
         </switch>
     </decision>
@@ -324,6 +334,74 @@
             <arg>--mdLayout</arg><arg>store</arg>
             <arg>--mdInterpretation</arg><arg>intersection</arg>
         </java>
+        <ok to="ImportODF_hdfs"/>
+        <error to="Kill"/>
+    </action>
+
+    <decision name="reuse_odf_hdfs">
+        <switch>
+            <case to="ImportODF_hdfs">${wf:conf('reuseODF_hdfs') eq false}</case>
+            <case to="reuse_oaf_hdfs">${wf:conf('reuseODF_hdfs') eq true}</case>
+            <default to="ImportODF_hdfs"/>
+        </switch>
+    </decision>
+
+    <action name="ImportODF_hdfs">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>ImportODF_hdfs</name>
+            <class>eu.dnetlib.dhp.oa.graph.raw.MigrateHdfsMdstoresApplication</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory ${sparkExecutorMemory}
+                --executor-cores ${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            </spark-opts>
+            <arg>--hdfsPath</arg><arg>${contentPath}/odf_records_hdfs</arg>
+            <arg>--mdstoreManagerUrl</arg><arg>${mdstoreManagerUrl}</arg>
+            <arg>--mdFormat</arg><arg>ODF</arg>
+            <arg>--mdLayout</arg><arg>store</arg>
+            <arg>--mdInterpretation</arg><arg>cleaned</arg>
+        </spark>
+        <ok to="reuse_oaf_hdfs"/>
+        <error to="Kill"/>
+    </action>
+
+    <decision name="reuse_oaf_hdfs">
+        <switch>
+            <case to="ImportOAF_hdfs">${wf:conf('reuseOAF_hdfs') eq false}</case>
+            <case to="wait_import">${wf:conf('reuseOAF_hdfs') eq true}</case>
+            <default to="ImportOAF_hdfs"/>
+        </switch>
+    </decision>
+
+    <action name="ImportOAF_hdfs">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>ImportOAF_hdfs</name>
+            <class>eu.dnetlib.dhp.oa.graph.raw.MigrateHdfsMdstoresApplication</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory ${sparkExecutorMemory}
+                --executor-cores ${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            </spark-opts>
+            <arg>--hdfsPath</arg><arg>${contentPath}/oaf_records_hdfs</arg>
+            <arg>--mdstoreManagerUrl</arg><arg>${mdstoreManagerUrl}</arg>
+            <arg>--mdFormat</arg><arg>OAF</arg>
+            <arg>--mdLayout</arg><arg>store</arg>
+            <arg>--mdInterpretation</arg><arg>cleaned</arg>
+        </spark>
         <ok to="wait_import"/>
         <error to="Kill"/>
     </action>
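
Taken together with the reuse_oaf rewiring above, the import phase now passes through the new HDFS nodes before reaching the join. A sketch of the resulting transitions, using only node names that appear in this diff:

<!--
    reuse_oaf:        reuseOAF=true       -> reuse_odf_hdfs   (was: wait_import)
    (preceding java import action): ok    -> ImportODF_hdfs   (was: wait_import)
    reuse_odf_hdfs:   reuseODF_hdfs=false -> ImportODF_hdfs
                      reuseODF_hdfs=true  -> reuse_oaf_hdfs
    ImportODF_hdfs:   ok                  -> reuse_oaf_hdfs
    reuse_oaf_hdfs:   reuseOAF_hdfs=false -> ImportOAF_hdfs
                      reuseOAF_hdfs=true  -> wait_import
    ImportOAF_hdfs:   ok                  -> wait_import
-->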
@@ -426,7 +504,7 @@
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
         </spark-opts>
-        <arg>--sourcePaths</arg><arg>${contentPath}/db_openaire,${contentPath}/db_openorgs,${contentPath}/oaf_records,${contentPath}/odf_records</arg>
+        <arg>--sourcePaths</arg><arg>${contentPath}/db_openaire,${contentPath}/db_openorgs,${contentPath}/oaf_records,${contentPath}/odf_records,${contentPath}/oaf_records_hdfs,${contentPath}/odf_records_hdfs</arg>
         <arg>--targetPath</arg><arg>${workingDir}/entities</arg>
         <arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg>
         <arg>--shouldHashId</arg><arg>${shouldHashId}</arg>
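
For illustration only: with contentPath set to a hypothetical /data/graph, the widened --sourcePaths argument expands to six input directories, the last two being the new HDFS mdstore dumps:

/data/graph/db_openaire,/data/graph/db_openorgs,/data/graph/oaf_records,/data/graph/odf_records,/data/graph/oaf_records_hdfs,/data/graph/odf_records_hdfs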