first try at adding the ORCID emend step after the cleaning step

Miriam Baglioni 2020-11-27 17:28:29 +01:00
parent 3d66a7c0d6
commit 44a66ad8a6
3 changed files with 428 additions and 4 deletions

View File

@@ -13,6 +13,14 @@
<name>isLookupUrl</name>
<description>the address of the lookUp service</description>
</property>
<property>
<name>orcidInputPath</name>
<description>the path of the ORCID sequence file</description>
</property>
<property>
<name>emend</name>
<description>true if the ORCID emend (cleaning) step should be executed</description>
</property>
<property>
<name>sparkDriverMemory</name>
@@ -109,7 +117,7 @@
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/grouped_entities</arg>
<arg>--outputPath</arg><arg>${graphOutputPath}/publication</arg>
<arg>--outputPath</arg><arg>${workingDir}/cleaned/publication</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
<arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg>
</spark>
@@ -135,7 +143,7 @@
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/grouped_entities</arg>
<arg>--outputPath</arg><arg>${graphOutputPath}/dataset</arg>
<arg>--outputPath</arg><arg>${workingDir}/cleaned/dataset</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
<arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg>
</spark>
@@ -161,7 +169,7 @@
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/grouped_entities</arg>
<arg>--outputPath</arg><arg>${graphOutputPath}/otherresearchproduct</arg>
<arg>--outputPath</arg><arg>${workingDir}/cleaned/otherresearchproduct</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
<arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg>
</spark>
@@ -187,7 +195,7 @@
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/grouped_entities</arg>
<arg>--outputPath</arg><arg>${graphOutputPath}/software</arg>
<arg>--outputPath</arg><arg>${workingDir}/cleaned/software</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
<arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg>
</spark>
@@ -299,6 +307,373 @@
<error to="Kill"/>
</action>
<join name="wait_clean" to="should_emend_orcid"/>
<decision name="should_emend_orcid">
<switch>
<case to="perpare_result">${wf:conf('emend') eq true}</case>
<default to="End"/>
</switch>
</decision>
<fork name="perpare_result">
<path start="prepare_publication"/>
<path start="prepare_dataset"/>
<path start="prepare_software"/>
<path start="prepare_orp"/>
</fork>
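<!-- The four prepare jobs run in parallel, one per result type; each presumably extracts from the cleaned records the author information that is later matched against the ORCID dump. -->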
<action name="prepare_publication">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>orcid prepare publication</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.PrepareResultsSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/publication</arg>
<arg>--outputPath</arg><arg>${workingDir}/prepared/publication</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
</spark>
<ok to="wait_prepare"/>
<error to="Kill"/>
</action>
<action name="prepare_dataset">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>orcid prepare dataset</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.PrepareResultsSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/dataset</arg>
<arg>--outputPath</arg><arg>${workingDir}/prepared/dataset</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
</spark>
<ok to="wait_prepare"/>
<error to="Kill"/>
</action>
<action name="prepare_software">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>orcid prepare software</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.PrepareResultsSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/software</arg>
<arg>--outputPath</arg><arg>${workingDir}/prepared/software</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
</spark>
<ok to="wait_prepare"/>
<error to="Kill"/>
</action>
<action name="prepare_orp">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>orcid prepare orp</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.PrepareResultsSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/otherresearchproduct</arg>
<arg>--outputPath</arg><arg>${workingDir}/prepared/otherresearchproduct</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
</spark>
<ok to="wait_prepare"/>
<error to="Kill"/>
</action>
<join name="wait_prepare" to="cleanorreport"/>
<decision name="cleanorreport">
<switch>
<case to="make_report">${wf:conf('clean') eq false}</case>
<case to="clean_orcid_copy">${wf:conf('clean') eq true}</case>
<default to="make_report"/>
</switch>
</decision>
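<!-- The 'clean' property selects the path: when true, the report phase is skipped and ORCID cleaning starts immediately; otherwise (including when unset) a report is produced first and cleaning follows via wait_report. -->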
<fork name="make_report">
<path start="report_publication"/>
<path start="report_dataset"/>
<path start="report_software"/>
<path start="report_orp"/>
</fork>
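<!-- Each report job compares the ORCIDs found in the cleaned graph with the authoritative ORCID dump (orcidInputPath) and writes its findings under ${workingDir}/report/<type>; the whitelist argument presumably lists pairs to exempt from reporting. -->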
<action name="report_publication">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Report ORCID on Publication</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.MakeReportSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/publication</arg>
<arg>--outputPath</arg><arg>${workingDir}/report/publication</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
<arg>--preparedInfoPath</arg><arg>${workingDir}/prepared/publication</arg>
<arg>--orcidInputPath</arg><arg>${orcidInputPath}</arg>
<arg>--whitelist</arg><arg>${whitelist}</arg>
</spark>
<ok to="wait_report"/>
<error to="Kill"/>
</action>
<action name="report_dataset">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Report ORCID on Dataset</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.MakeReportSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--preparedInfoPath</arg><arg>${workingDir}/prepared/dataset</arg>
<arg>--outputPath</arg><arg>${workingDir}/report/dataset</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/dataset</arg>
<arg>--orcidInputPath</arg><arg>${orcidInputPath}</arg>
<arg>--whitelist</arg><arg>${whitelist}</arg>
</spark>
<ok to="wait_report"/>
<error to="Kill"/>
</action>
<action name="report_orp">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Report ORCID on ORP</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.MakeReportSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--preparedInfoPath</arg><arg>${workingDir}/prepared/otherresearchproduct</arg>
<arg>--outputPath</arg><arg>${workingDir}/report/otherresearchproduct</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/otherresearchproduct</arg>
<arg>--orcidInputPath</arg><arg>${orcidInputPath}</arg>
<arg>--whitelist</arg><arg>${whitelist}</arg>
</spark>
<ok to="wait_report"/>
<error to="Kill"/>
</action>
<action name="report_software">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Report ORCID on Software</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.MakeReportSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--preparedInfoPath</arg><arg>${workingDir}/prepared/software</arg>
<arg>--outputPath</arg><arg>${workingDir}/report/software</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/software</arg>
<arg>--orcidInputPath</arg><arg>${orcidInputPath}</arg>
<arg>--whitelist</arg><arg>${whitelist}</arg>
</spark>
<ok to="wait_report"/>
<error to="Kill"/>
</action>
<join name="wait_report" to="clean_orcid"/>
<fork name="clean_orcid">
<path start="clean_publication_orcid"/>
<path start="clean_dataset_orcid"/>
<path start="clean_orp_orcid"/>
<path start="clean_software_orcid"/>
</fork>
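<!-- The clean jobs read the intermediate ${workingDir}/cleaned tables together with the prepared info and write the final ORCID-cleaned graph to ${graphOutputPath}. -->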
<action name="clean_publication_orcid">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Clean ORCID for Publications</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.CleanAuthorPidsSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/publication</arg>
<arg>--outputPath</arg><arg>${graphOutputPath}/publication</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
<arg>--preparedInfoPath</arg><arg>${workingDir}/prepared/publication</arg>
<arg>--orcidInputPath</arg><arg>${orcidInputPath}</arg>
</spark>
<ok to="wait_clean"/>
<error to="Kill"/>
</action>
<action name="clean_dataset_orcid">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Clean ORCID for Datasets</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.CleanAuthorPidsSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--preparedInfoPath</arg><arg>${workingDir}/prepared/dataset</arg>
<arg>--outputPath</arg><arg>${graphOutputPath}/dataset</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/dataset</arg>
<arg>--orcidInputPath</arg><arg>${orcidInputPath}</arg>
</spark>
<ok to="wait_clean"/>
<error to="Kill"/>
</action>
<action name="clean_orp_orcid">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Clean ORCID for ORP</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.CleanAuthorPidsSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--preparedInfoPath</arg><arg>${workingDir}/prepared/otherresearchproduct</arg>
<arg>--outputPath</arg><arg>${graphOutputPath}/otherresearchproduct</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/otherresearchproduct</arg>
<arg>--orcidInputPath</arg><arg>${orcidInputPath}</arg>
</spark>
<ok to="wait_clean"/>
<error to="Kill"/>
</action>
<action name="clean_software_orcid">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>Clean ORCID for Software</name>
<class>eu.dnetlib.dhp.oa.graph.clean.authorpids.CleanAuthorPidsSparkJob</class>
<jar>dhp-graph-mapper-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7680
</spark-opts>
<arg>--preparedInfoPath</arg><arg>${workingDir}/prepared/software</arg>
<arg>--outputPath</arg><arg>${graphOutputPath}/software</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
<arg>--inputPath</arg><arg>${workingDir}/cleaned/software</arg>
<arg>--orcidInputPath</arg><arg>${orcidInputPath}</arg>
</spark>
<ok to="wait_clean"/>
<error to="Kill"/>
</action>
<join name="wait_clean" to="End"/>
<end name="End"/>

View File

@@ -0,0 +1,45 @@
[
{
"paramName":"i",
"paramLongName":"inputPath",
"paramDescription": "the path of the sequencial file to read",
"paramRequired": true
},
{
"paramName": "out",
"paramLongName": "outputPath",
"paramDescription": "the path used to store temporary output files",
"paramRequired": true
},
{
"paramName": "ssm",
"paramLongName": "isSparkSessionManaged",
"paramDescription": "true if the spark session is managed, false otherwise",
"paramRequired": false
},
{
"paramName": "gtn",
"paramLongName": "graphTableClassName",
"paramDescription": "the table name of the result currently considering",
"paramRequired": true
},
{
"paramName": "pip",
"paramLongName": "preparedInfoPath",
"paramDescription": "The path of the prepared result informaiton",
"paramRequired": true
},
{
"paramName": "oip",
"paramLongName": "orcidInputPath",
"paramDescription": "the path to the authoritative orcid information",
"paramRequired": true
},
{
"paramName":"wl",
"paramLongName":"whitelist",
"paramDescription": "the whitelist",
"paramRequired": true
}
]
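Note: among the actions above, only the MakeReportSparkJob ones pass a --whitelist argument, so this parameter file presumably belongs to the report step; the short names (i, out, ssm, gtn, pip, oip, wl) map to the long option names used as workflow arguments.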

View File

@@ -0,0 +1,4 @@
package eu.dnetlib.dhp.oa.graph.clean;

public class FuzzyLogicTest {
}
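
FuzzyLogicTest is added here as an empty placeholder. Purely as a hedged, self-contained sketch of the kind of check it might grow into (assuming JUnit 5 on the test classpath; the similarity helper below is hypothetical and not part of this commit), a fuzzy author-name matching test could look like:

package eu.dnetlib.dhp.oa.graph.clean;

import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

public class FuzzyLogicTestSketch {

	// Hypothetical helper: normalized Levenshtein similarity in [0, 1].
	private static double similarity(String a, String b) {
		int[][] d = new int[a.length() + 1][b.length() + 1];
		for (int i = 0; i <= a.length(); i++)
			d[i][0] = i;
		for (int j = 0; j <= b.length(); j++)
			d[0][j] = j;
		for (int i = 1; i <= a.length(); i++)
			for (int j = 1; j <= b.length(); j++)
				d[i][j] = Math.min(
					Math.min(d[i - 1][j] + 1, d[i][j - 1] + 1),
					d[i - 1][j - 1] + (a.charAt(i - 1) == b.charAt(j - 1) ? 0 : 1));
		int max = Math.max(a.length(), b.length());
		return max == 0 ? 1.0 : 1.0 - (double) d[a.length()][b.length()] / max;
	}

	@Test
	public void slightVariantsScoreHighUnrelatedNamesScoreLow() {
		// Punctuation or small spelling differences keep the score high...
		assertTrue(similarity("baglioni miriam", "baglioni, miriam") > 0.9);
		// ...while unrelated names score low, so the ORCID would be flagged.
		assertTrue(similarity("miriam baglioni", "john smith") < 0.5);
	}
}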