master #11

Manually merged
claudio.atzori merged 275 commits from :master into enrichment_wfs 2020-05-11 15:14:56 +02:00
4 changed files with 297 additions and 51 deletions
Showing only changes of commit a5e5c81a2c

View File

@@ -11,18 +11,6 @@
     "paramDescription": "the hive metastore uris",
     "paramRequired": true
   },
-  {
-    "paramName":"wu",
-    "paramLongName":"writeUpdate",
-    "paramDescription": "true if the update must be written. No double check if the information is already present",
-    "paramRequired": true
-  },
-  {
-    "paramName":"sg",
-    "paramLongName":"saveGraph",
-    "paramDescription": "true if the new version of the graph must be saved",
-    "paramRequired": true
-  },
   {
     "paramName":"dop",
     "paramLongName":"datasourceOrganizationPath",

View File

@@ -1,10 +1,4 @@
 [
-  {
-    "paramName":"mt",
-    "paramLongName":"master",
-    "paramDescription": "should be local or yarn",
-    "paramRequired": true
-  },
   {
     "paramName":"s",
     "paramLongName":"sourcePath",
@@ -16,5 +10,53 @@
     "paramLongName":"hive_metastore_uris",
     "paramDescription": "the hive metastore uris",
     "paramRequired": true
+  },
+  {
+    "paramName":"wu",
+    "paramLongName":"writeUpdate",
+    "paramDescription": "true if the update must be written. No double check if the information is already present",
+    "paramRequired": true
+  },
+  {
+    "paramName":"sg",
+    "paramLongName":"saveGraph",
+    "paramDescription": "true if the new version of the graph must be saved",
+    "paramRequired": true
+  },
+  {
+    "paramName":"dop",
+    "paramLongName":"datasourceOrganizationPath",
+    "paramDescription": "path where to store/find the associations between datasources and organizations",
+    "paramRequired": true
+  },
+  {
+    "paramName":"alp",
+    "paramLongName":"alreadyLinkedPath",
+    "paramDescription": "path where to store/find already linked results and organizations",
+    "paramRequired": true
+  },
+  {
+    "paramName": "ssm",
+    "paramLongName": "isSparkSessionManaged",
+    "paramDescription": "true if the spark session is managed, false otherwise",
+    "paramRequired": false
+  },
+  {
+    "paramName": "test",
+    "paramLongName": "isTest",
+    "paramDescription": "true if it is a test run",
+    "paramRequired": false
+  },
+  {
+    "paramName":"tn",
+    "paramLongName":"resultTableName",
+    "paramDescription": "the name of the result table we are currently working on",
+    "paramRequired": true
+  },
+  {
+    "paramName": "out",
+    "paramLongName": "outputPath",
+    "paramDescription": "the path used to store temporary output files",
+    "paramRequired": true
   }
 ]
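
Note: specs like the one above are consumed by eu.dnetlib.dhp.application.ArgumentApplicationParser from dhp-common — paramName is the short option, paramLongName the long one, and paramRequired drives validation. A minimal sketch of the consuming side, assuming that API; the "/params.json" resource path is a hypothetical placeholder:

    import java.nio.charset.StandardCharsets;

    import org.apache.commons.io.IOUtils;

    import eu.dnetlib.dhp.application.ArgumentApplicationParser;

    public class ParseSpecSketch {
        public static void main(String[] args) throws Exception {
            // "/params.json" stands in for the actual classpath location of the spec
            String jsonConf = IOUtils.toString(
                    ParseSpecSketch.class.getResourceAsStream("/params.json"),
                    StandardCharsets.UTF_8);
            ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConf);
            // e.g. --sourcePath /tmp/graph --saveGraph true ...
            parser.parseArgument(args);
            // values are looked up by paramLongName
            String sourcePath = parser.get("sourcePath");
            Boolean saveGraph = Boolean.valueOf(parser.get("saveGraph"));
            System.out.println(sourcePath + " / " + saveGraph);
        }
    }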

View File

@@ -19,4 +19,40 @@
        <name>hive_metastore_uris</name>
        <value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
    </property>
+    <property>
+        <name>spark2YarnHistoryServerAddress</name>
+        <value>http://iis-cdh5-test-gw.ocean.icm.edu.pl:18089</value>
+    </property>
+    <property>
+        <name>spark2EventLogDir</name>
+        <value>/user/spark/spark2ApplicationHistory</value>
+    </property>
+    <property>
+        <name>spark2ExtraListeners</name>
+        <value>com.cloudera.spark.lineage.NavigatorAppListener</value>
+    </property>
+    <property>
+        <name>spark2SqlQueryExecutionListeners</name>
+        <value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
+    </property>
+    <property>
+        <name>sparkExecutorNumber</name>
+        <value>4</value>
+    </property>
+    <property>
+        <name>sparkDriverMemory</name>
+        <value>15G</value>
+    </property>
+    <property>
+        <name>sparkExecutorMemory</name>
+        <value>6G</value>
+    </property>
+    <property>
+        <name>sparkExecutorCores</name>
+        <value>1</value>
+    </property>
+    <property>
+        <name>spark2MaxExecutors</name>
+        <value>50</value>
+    </property>
 </configuration>

View File

@@ -5,47 +5,227 @@
            <description>the source path</description>
        </property>
        <property>
-            <name>sparkDriverMemory</name>
-            <description>memory for driver process</description>
+            <name>writeUpdate</name>
+            <description>writes the information found for the update. No double check done if the information is already present</description>
        </property>
        <property>
-            <name>sparkExecutorMemory</name>
-            <description>memory for individual executor</description>
-        </property>
-        <property>
-            <name>sparkExecutorCores</name>
-            <description>number of cores used by single executor</description>
+            <name>saveGraph</name>
+            <description>writes new version of the graph after the propagation step</description>
        </property>
    </parameters>
-    <start to="AffiliationPropagation"/>
+    <start to="reset-outputpath"/>
+    <!--<start to="prepare_result_organization_association"/>-->
    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
-    <action name="AffiliationPropagation">
-        <spark xmlns="uri:oozie:spark-action:0.2">
+    <action name="reset-outputpath">
+        <fs>
+            <delete path='${workingDir}/resulttoorganization_propagation/relation'/>
+        </fs>
+        <ok to="copy_relation"/>
+        <error to="Kill"/>
+    </action>
+    <action name="copy_relation">
+        <distcp xmlns="uri:oozie:distcp-action:0.2">
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
-            <master>yarn-cluster</master>
+            <arg>${nameNode}/${sourcePath}/relation</arg>
+            <arg>${nameNode}/${workingDir}/resulttoorganization_propagation/relation</arg>
+        </distcp>
+        <ok to="prepare_result_organization_association"/>
+        <error to="Kill"/>
+    </action>
+    <action name="prepare_result_organization_association">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
            <mode>cluster</mode>
-            <name>AffiliationPropagation</name>
-            <class>eu.dnetlib.dhp.resulttoorganizationfrominstrepo.SparkResultToOrganizationFromIstRepoJob</class>
+            <name>PrepareResultOrganizationAssociation</name>
+            <class>eu.dnetlib.dhp.resulttoorganizationfrominstrepo.PrepareResultInstRepoAssociation</class>
            <jar>dhp-propagation-${projectVersion}.jar</jar>
-            <spark-opts>--executor-memory ${sparkExecutorMemory}
-                --executor-cores ${sparkExecutorCores}
+            <spark-opts>
+                --executor-cores=${sparkExecutorCores}
+                --executor-memory=${sparkExecutorMemory}
                --driver-memory=${sparkDriverMemory}
-                --conf spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener"
-                --conf spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener"
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
            </spark-opts>
-            <arg>-mt</arg> <arg>yarn-cluster</arg>
            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
            <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
+            <arg>--datasourceOrganizationPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/datasourceOrganization</arg>
+            <arg>--alreadyLinkedPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/alreadyLinked</arg>
        </spark>
-        <ok to="End"/>
+        <ok to="fork_join_apply_resulttoorganization_propagation"/>
+        <!-- <ok to="End"/>-->
        <error to="Kill"/>
    </action>
+    <fork name="fork_join_apply_resulttoorganization_propagation">
+        <path start="join_propagation_publication"/>
+        <path start="join_propagation_dataset"/>
+        <path start="join_propagation_otherresearchproduct"/>
+        <path start="join_propagation_software"/>
+    </fork>
+    <action name="join_propagation_publication">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>resultToOrganizationFromInstRepoPropagationForPublications</name>
+            <class>eu.dnetlib.dhp.resulttoorganizationfrominstrepo.SparkResultToOrganizationFromIstRepoJob2</class>
+            <jar>dhp-propagation-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-cores=${sparkExecutorCores}
+                --executor-memory=${sparkExecutorMemory}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.dynamicAllocation.enabled=true
+                --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${sourcePath}/publication</arg>
+            <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
+            <arg>--writeUpdate</arg><arg>${writeUpdate}</arg>
+            <arg>--saveGraph</arg><arg>${saveGraph}</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/resulttoorganization_propagation/relation</arg>
+            <arg>--datasourceOrganizationPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/datasourceOrganization</arg>
+            <arg>--alreadyLinkedPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/alreadyLinked</arg>
+        </spark>
+        <ok to="wait"/>
+        <error to="Kill"/>
+    </action>
+    <action name="join_propagation_dataset">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>resultToOrganizationFromInstRepoPropagationForDataset</name>
+            <class>eu.dnetlib.dhp.resulttoorganizationfrominstrepo.SparkResultToOrganizationFromIstRepoJob2</class>
+            <jar>dhp-propagation-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-cores=${sparkExecutorCores}
+                --executor-memory=${sparkExecutorMemory}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.dynamicAllocation.enabled=true
+                --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${sourcePath}/dataset</arg>
+            <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
+            <arg>--writeUpdate</arg><arg>${writeUpdate}</arg>
+            <arg>--saveGraph</arg><arg>${saveGraph}</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/resulttoorganization_propagation/relation</arg>
+            <arg>--datasourceOrganizationPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/datasourceOrganization</arg>
+            <arg>--alreadyLinkedPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/alreadyLinked</arg>
+        </spark>
+        <ok to="wait"/>
+        <error to="Kill"/>
+    </action>
+    <action name="join_propagation_otherresearchproduct">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>resultToOrganizationFromInstRepoPropagationForORP</name>
+            <class>eu.dnetlib.dhp.resulttoorganizationfrominstrepo.SparkResultToOrganizationFromIstRepoJob2</class>
+            <jar>dhp-propagation-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-cores=${sparkExecutorCores}
+                --executor-memory=${sparkExecutorMemory}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.dynamicAllocation.enabled=true
+                --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${sourcePath}/otherresearchproduct</arg>
+            <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
+            <arg>--writeUpdate</arg><arg>${writeUpdate}</arg>
+            <arg>--saveGraph</arg><arg>${saveGraph}</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/resulttoorganization_propagation/relation</arg>
+            <arg>--datasourceOrganizationPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/datasourceOrganization</arg>
+            <arg>--alreadyLinkedPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/alreadyLinked</arg>
+        </spark>
+        <ok to="wait"/>
+        <error to="Kill"/>
+    </action>
+    <action name="join_propagation_software">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>resultToOrganizationFromInstRepoPropagationForSoftware</name>
+            <class>eu.dnetlib.dhp.resulttoorganizationfrominstrepo.SparkResultToOrganizationFromIstRepoJob2</class>
+            <jar>dhp-propagation-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-cores=${sparkExecutorCores}
+                --executor-memory=${sparkExecutorMemory}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.dynamicAllocation.enabled=true
+                --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>${sourcePath}/software</arg>
+            <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
+            <arg>--writeUpdate</arg><arg>${writeUpdate}</arg>
+            <arg>--saveGraph</arg><arg>${saveGraph}</arg>
+            <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
+            <arg>--outputPath</arg><arg>${workingDir}/resulttoorganization_propagation/relation</arg>
+            <arg>--datasourceOrganizationPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/datasourceOrganization</arg>
+            <arg>--alreadyLinkedPath</arg><arg>${workingDir}/resulttoorganization_propagation/preparedInfo/alreadyLinked</arg>
+        </spark>
+        <ok to="wait"/>
+        <error to="Kill"/>
+    </action>
+    <join name="wait" to="End"/>
+    <!-- <action name="AffiliationPropagation">-->
+    <!-- <spark xmlns="uri:oozie:spark-action:0.2">-->
+    <!-- <job-tracker>${jobTracker}</job-tracker>-->
+    <!-- <name-node>${nameNode}</name-node>-->
+    <!-- <master>yarn-cluster</master>-->
+    <!-- <mode>cluster</mode>-->
+    <!-- <name>AffiliationPropagation</name>-->
+    <!-- <class>eu.dnetlib.dhp.resulttoorganizationfrominstrepo.SparkResultToOrganizationFromIstRepoJob</class>-->
+    <!-- <jar>dhp-propagation-${projectVersion}.jar</jar>-->
+    <!-- <spark-opts>-->
+    <!-- &#45;&#45;num-executors=${sparkExecutorNumber}-->
+    <!-- &#45;&#45;executor-memory=${sparkExecutorMemory}-->
+    <!-- &#45;&#45;executor-cores=${sparkExecutorCores}-->
+    <!-- &#45;&#45;driver-memory=${sparkDriverMemory}-->
+    <!-- &#45;&#45;conf spark.extraListeners=${spark2ExtraListeners}-->
+    <!-- &#45;&#45;conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}-->
+    <!-- &#45;&#45;conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}-->
+    <!-- &#45;&#45;conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}-->
+    <!-- &#45;&#45;conf spark.dynamicAllocation.enabled=true-->
+    <!-- &#45;&#45;conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}-->
+    <!-- </spark-opts>-->
+    <!-- <arg>-mt</arg> <arg>yarn-cluster</arg>-->
+    <!-- <arg>&#45;&#45;sourcePath</arg><arg>${sourcePath}</arg>-->
+    <!-- <arg>&#45;&#45;hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>-->
+    <!-- <arg>&#45;&#45;writeUpdate</arg><arg>${writeUpdate}</arg>-->
+    <!-- <arg>&#45;&#45;saveGraph</arg><arg>${saveGraph}</arg>-->
+    <!-- </spark>-->
+    <!-- <ok to="End"/>-->
+    <!-- <error to="Kill"/>-->
+    <!--</action>-->
    <end name="End"/>
 </workflow-app>
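
Note: the isSparkSessionManaged and isTest switches added to the parameter spec follow the usual dhp-common convention: in production the helper owns the SparkSession lifecycle, while tests pass isSparkSessionManaged=false (or isTest=true) and inject their own local session. A minimal sketch, assuming the SparkSessionSupport helper from dhp-common; the run body is a placeholder, not the actual propagation logic:

    import java.util.Optional;

    import org.apache.spark.SparkConf;

    import eu.dnetlib.dhp.application.ArgumentApplicationParser;
    import eu.dnetlib.dhp.common.SparkSessionSupport;

    public class RunWithManagedSessionSketch {
        public static void run(ArgumentApplicationParser parser) {
            // defaults to a managed session, as in the production workflow above
            Boolean isSparkSessionManaged = Optional
                    .ofNullable(parser.get("isSparkSessionManaged"))
                    .map(Boolean::valueOf)
                    .orElse(Boolean.TRUE);

            SparkConf conf = new SparkConf();
            // wires the value passed as --hive_metastore_uris into the Spark/Hive integration
            conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));

            SparkSessionSupport.runWithSparkHiveSession(conf, isSparkSessionManaged, spark -> {
                // placeholder: the real jobs read ${sourcePath}/<resultTable> and write
                // the propagated relations under --outputPath
            });
        }
    }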