workflow to generate the ORCID dataset

Enrico Ottonello 2020-12-07 11:02:32 +01:00
parent 8812ab65e1
commit b1b589ada1
5 changed files with 360 additions and 2 deletions


@@ -0,0 +1,79 @@
package eu.dnetlib.dhp.schema.orcid;
import java.io.Serializable;
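/**
 * Bean mapping the ORCID record history section: creation method, completion,
 * submission and last-modified dates, plus the claimed and verified-email flags.
 */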
public class Summary implements Serializable {
private String creationMethod;
private String completionDate;
private String submissionDate;
private String lastModifiedDate;
private boolean claimed;
private String deactivationDate;
private boolean verifiedEmail;
private boolean verifiedPrimaryEmail;
public String getCreationMethod() {
return creationMethod;
}
public void setCreationMethod(String creationMethod) {
this.creationMethod = creationMethod;
}
public String getCompletionDate() {
return completionDate;
}
public void setCompletionDate(String completionDate) {
this.completionDate = completionDate;
}
public String getSubmissionDate() {
return submissionDate;
}
public void setSubmissionDate(String submissionDate) {
this.submissionDate = submissionDate;
}
public String getLastModifiedDate() {
return lastModifiedDate;
}
public void setLastModifiedDate(String lastModifiedDate) {
this.lastModifiedDate = lastModifiedDate;
}
public boolean isClaimed() {
return claimed;
}
public void setClaimed(boolean claimed) {
this.claimed = claimed;
}
public String getDeactivationDate() {
return deactivationDate;
}
public void setDeactivationDate(String deactivationDate) {
this.deactivationDate = deactivationDate;
}
public boolean isVerifiedEmail() {
return verifiedEmail;
}
public void setVerifiedEmail(boolean verifiedEmail) {
this.verifiedEmail = verifiedEmail;
}
public boolean isVerifiedPrimaryEmail() {
return verifiedPrimaryEmail;
}
public void setVerifiedPrimaryEmail(boolean verifiedPrimaryEmail) {
this.verifiedPrimaryEmail = verifiedPrimaryEmail;
}
}
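For reference, a minimal sketch of how a Summary round-trips through Jackson; this is the same bean-convention JSON (de)serialization the Spark job below relies on. The wrapper class and the sample values are illustrative, not part of the commit:

package eu.dnetlib.dhp.schema.orcid;

import com.fasterxml.jackson.databind.ObjectMapper;

public class SummaryRoundTripExample {
	public static void main(String[] args) throws Exception {
		ObjectMapper mapper = new ObjectMapper();
		Summary summary = new Summary();
		summary.setCreationMethod("API"); // sample value, for illustration only
		summary.setClaimed(true);
		// getters/setters follow bean conventions, so no Jackson annotations are needed
		String json = mapper.writeValueAsString(summary);
		Summary back = mapper.readValue(json, Summary.class);
		System.out.println(json + " -> claimed=" + back.isClaimed());
	}
}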


@@ -0,0 +1,140 @@
package eu.dnetlib.doiboost.orcid;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.nio.charset.StandardCharsets;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.orcid.AuthorData;
import eu.dnetlib.doiboost.orcid.model.WorkData;
import eu.dnetlib.doiboost.orcid.xml.XMLRecordParser;
import eu.dnetlib.doiboost.orcidnodoi.xml.XMLRecordParserNoDoi;

import scala.Tuple2;
public class SparkUpdateOrcidDatasets {
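// single shared Jackson mapper, used to serialize the parsed author and work records to JSON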
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
public static void main(String[] args) throws Exception {
Logger logger = LoggerFactory.getLogger(SparkUpdateOrcidDatasets.class);
logger.info("[ SparkUpdateOrcidDatasets STARTED ]");
final ArgumentApplicationParser parser = new ArgumentApplicationParser(
IOUtils
.toString(
SparkUpdateOrcidDatasets.class
.getResourceAsStream(
"/eu/dnetlib/dhp/doiboost/download_orcid_data.json")));
parser.parseArgument(args);
Boolean isSparkSessionManaged = Optional
.ofNullable(parser.get("isSparkSessionManaged"))
.map(Boolean::valueOf)
.orElse(Boolean.TRUE);
logger.info("isSparkSessionManaged: {}", isSparkSessionManaged);
final String workingPath = parser.get("workingPath");
logger.info("workingPath: ", workingPath);
// final String outputPath = parser.get("outputPath");
// logger.info("outputPath: ", outputPath);
SparkConf conf = new SparkConf();
runWithSparkSession(
conf,
isSparkSessionManaged,
spark -> {
JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
sc.hadoopConfiguration().set("mapreduce.output.fileoutputformat.compress", "true");
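// ORCID author summaries: one XML record per entry of the input sequence file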
JavaPairRDD<Text, Text> xmlSummariesRDD = sc
.sequenceFile(workingPath.concat("xml/authors/xml_authors.seq"), Text.class, Text.class);
xmlSummariesRDD
.repartition(5)
.map(seq -> XMLRecordParser.VTDParseAuthorData(seq._2().toString().getBytes(StandardCharsets.UTF_8)))
.filter(summary -> summary != null)
.mapToPair(
summary -> new Tuple2<>(summary.getOid(),
OBJECT_MAPPER.writeValueAsString(summary)))
.mapToPair(t -> new Tuple2<>(new Text(t._1()), new Text(t._2())))
.saveAsNewAPIHadoopFile(
workingPath.concat("orcid_dataset/authors"),
Text.class,
Text.class,
SequenceFileOutputFormat.class,
sc.hadoopConfiguration());
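// ORCID works: XML records spread over several input sequence files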
JavaPairRDD<Text, Text> xmlWorksRDD = sc
.sequenceFile(workingPath.concat("xml/works/*"), Text.class, Text.class);
xmlWorksRDD
.map(seq -> XMLRecordParserNoDoi.VTDParseWorkData(seq._2().toString().getBytes(StandardCharsets.UTF_8)))
.filter(work -> work != null)
.mapToPair(
work -> new Tuple2<>(work.getOid().concat("_").concat(work.getId()),
OBJECT_MAPPER.writeValueAsString(work)))
.mapToPair(t -> new Tuple2<>(new Text(t._1()), new Text(t._2())))
.saveAsNewAPIHadoopFile(
workingPath.concat("orcid_dataset/works"),
Text.class,
Text.class,
SequenceFileOutputFormat.class,
sc.hadoopConfiguration());
});
}
private static AuthorData loadAuthorFromJson(Text orcidId, Text json) {
AuthorData authorData = new AuthorData();
authorData.setOid(orcidId.toString());
JsonElement jElement = new JsonParser().parse(json.toString());
authorData.setName(getJsonValue(jElement, "name"));
authorData.setSurname(getJsonValue(jElement, "surname"));
authorData.setCreditName(getJsonValue(jElement, "creditname"));
return authorData;
}
private static WorkData loadWorkFromJson(Text orcidId, Text json) {
WorkData workData = new WorkData();
workData.setOid(orcidId.toString());
JsonElement jElement = new JsonParser().parse(json.toString());
workData.setDoi(getJsonValue(jElement, "doi"));
return workData;
}
private static String getJsonValue(JsonElement jElement, String property) {
if (jElement.getAsJsonObject().has(property)) {
JsonElement name = jElement.getAsJsonObject().get(property);
if (name != null && !name.isJsonNull()) {
return name.getAsString();
}
}
return null;
}
}
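As a sanity check on the output layout, a hedged sketch that reads the generated sequence files back and counts the records; the paths mirror the ones written above, while the class name and the standalone Spark wiring are assumptions, not part of the commit:

package eu.dnetlib.doiboost.orcid;

import org.apache.hadoop.io.Text;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class OrcidDatasetReadBack {
	public static void main(String[] args) {
		String workingPath = args[0]; // same base path handed to the job above
		SparkConf conf = new SparkConf().setAppName("OrcidDatasetReadBack");
		try (JavaSparkContext sc = new JavaSparkContext(conf)) {
			// keys are ORCID iDs (authors) or oid_workid (works); values are JSON strings
			JavaPairRDD<Text, Text> authors = sc
				.sequenceFile(workingPath.concat("orcid_dataset/authors"), Text.class, Text.class);
			JavaPairRDD<Text, Text> works = sc
				.sequenceFile(workingPath.concat("orcid_dataset/works"), Text.class, Text.class);
			System.out.println("authors: " + authors.count() + ", works: " + works.count());
		}
	}
}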


@@ -4,6 +4,7 @@ package eu.dnetlib.doiboost.orcid.xml;
import java.io.IOException;
import java.util.*;
import org.apache.commons.lang3.StringUtils;
import org.mortbay.log.Log;
import com.ximpleware.*;
@@ -31,6 +32,8 @@ public class XMLRecordParser {
private static final String NS_ACTIVITIES_URL = "http://www.orcid.org/ns/activities";
private static final String NS_WORK = "work";
private static final String NS_WORK_URL = "http://www.orcid.org/ns/work";
private static final String NS_HISTORY = "history";
private static final String NS_HISTORY_URL = "http://www.orcid.org/ns/history";
private static final String NS_ERROR = "error";
@@ -47,6 +50,7 @@
ap.declareXPathNameSpace(NS_OTHER, NS_OTHER_URL);
ap.declareXPathNameSpace(NS_RECORD, NS_RECORD_URL);
ap.declareXPathNameSpace(NS_ERROR, NS_ERROR_URL);
ap.declareXPathNameSpace(NS_HISTORY, NS_HISTORY_URL);
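// the history namespace backs the record-history XPaths in the commented block below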
AuthorData authorData = new AuthorData();
final List<String> errors = VtdUtilityParser.getTextValue(ap, vn, "//error:response-code");
@@ -85,6 +89,46 @@
authorData.setOtherNames(otherNames);
}
// final String creationMethod = VtdUtilityParser.getSingleValue(ap, vn, "//history:creation-method");
// if (StringUtils.isNoneBlank(creationMethod)) {
// authorData.setCreationMethod(creationMethod);
// }
//
// final String completionDate = VtdUtilityParser.getSingleValue(ap, vn, "//history:completion-date");
// if (StringUtils.isNoneBlank(completionDate)) {
// authorData.setCompletionDate(completionDate);
// }
//
// final String submissionDate = VtdUtilityParser.getSingleValue(ap, vn, "//history:submission-date");
// if (StringUtils.isNoneBlank(submissionDate)) {
// authorData.setSubmissionDate(submissionDate);
// }
//
// final String claimed = VtdUtilityParser.getSingleValue(ap, vn, "//history:claimed");
// if (StringUtils.isNoneBlank(claimed)) {
// authorData.setClaimed(Boolean.parseBoolean(claimed));
// }
//
// final String verifiedEmail = VtdUtilityParser.getSingleValue(ap, vn, "//history:verified-email");
// if (StringUtils.isNoneBlank(verifiedEmail)) {
// authorData.setVerifiedEmail(Boolean.parseBoolean(verifiedEmail));
// }
//
// final String verifiedPrimaryEmail = VtdUtilityParser.getSingleValue(ap, vn, "//history:verified-primary-email");
// if (StringUtils.isNoneBlank(verifiedPrimaryEmail)) {
// authorData.setVerifiedPrimaryEmail(Boolean.parseBoolean(verifiedPrimaryEmail));
// }
//
// final String deactivationDate = VtdUtilityParser.getSingleValue(ap, vn, "//history:deactivation-date");
// if (StringUtils.isNoneBlank(deactivationDate)) {
// authorData.setDeactivationDate(deactivationDate);
// }
//
// final String lastModifiedDate = VtdUtilityParser
// .getSingleValue(ap, vn, "//history:history/common:last-modified-date");
// if (StringUtils.isNoneBlank(lastModifiedDate)) {
// authorData.setLastModifiedDate(lastModifiedDate);
// }
return authorData;
}


@@ -0,0 +1,92 @@
<workflow-app name="update_orcid_datasets" xmlns="uri:oozie:workflow:0.5">
<parameters>
<property>
<name>spark2MaxExecutors</name>
<value>5</value>
</property>
<property>
<name>sparkDriverMemory</name>
<description>memory for driver process</description>
</property>
<property>
<name>sparkExecutorMemory</name>
<description>memory for individual executor</description>
</property>
<property>
<name>sparkExecutorCores</name>
<description>number of cores used by single executor</description>
</property>
<property>
<name>oozieActionShareLibForSpark2</name>
<description>oozie action sharelib for spark 2.*</description>
</property>
<property>
<name>spark2ExtraListeners</name>
<value>com.cloudera.spark.lineage.NavigatorAppListener</value>
<description>spark 2.* extra listeners classname</description>
</property>
<property>
<name>spark2SqlQueryExecutionListeners</name>
<value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
<description>spark 2.* sql query execution listeners classname</description>
</property>
<property>
<name>spark2YarnHistoryServerAddress</name>
<description>spark 2.* yarn history server address</description>
</property>
<property>
<name>spark2EventLogDir</name>
<description>spark 2.* event log dir location</description>
</property>
<property>
<name>workingPath</name>
<description>the working dir base path</description>
</property>
</parameters>
<global>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>oozie.action.sharelib.for.spark</name>
<value>${oozieActionShareLibForSpark2}</value>
</property>
</configuration>
</global>
<start to="UpdateOrcidDatasets"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="UpdateOrcidDatasets">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn-cluster</master>
<mode>cluster</mode>
<name>UpdateOrcidDatasets</name>
<class>eu.dnetlib.doiboost.orcid.SparkUpdateOrcidDatasets</class>
<jar>dhp-doiboost-${projectVersion}.jar</jar>
<spark-opts>
--conf spark.dynamicAllocation.enabled=true
--conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
</spark-opts>
<arg>-w</arg><arg>${workingPath}/</arg>
<arg>-n</arg><arg>${nameNode}</arg>
<arg>-f</arg><arg>-</arg>
<arg>-o</arg><arg>-</arg>
<arg>-t</arg><arg>-</arg>
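<!-- SparkUpdateOrcidDatasets currently reads only -w (workingPath); the "-" values appear to be placeholders -->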
</spark>
<ok to="End"/>
<error to="Kill"/>
</action>
<end name="End"/>
</workflow-app>


@@ -12,6 +12,7 @@ import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.junit.jupiter.api.Test;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.ximpleware.*;
import eu.dnetlib.dhp.schema.orcid.AuthorData;
@@ -25,9 +26,10 @@ public class XMLRecordParserTest {
private static final String NS_WORK_URL = "http://www.orcid.org/ns/work";
private static final String NS_COMMON_URL = "http://www.orcid.org/ns/common";
private static final String NS_COMMON = "common";
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
@Test
-private void testOrcidAuthorDataXMLParser() throws Exception {
+public void testOrcidAuthorDataXMLParser() throws Exception {
String xml = IOUtils.toString(this.getClass().getResourceAsStream("summary_0000-0001-6828-479X.xml"));
@@ -39,6 +41,7 @@ public class XMLRecordParserTest {
System.out.println("name: " + authorData.getName());
assertNotNull(authorData.getSurname());
System.out.println("surname: " + authorData.getSurname());
OrcidClientTest.logToFile(OBJECT_MAPPER.writeValueAsString(authorData));
}
@Test
@@ -86,7 +89,7 @@
}
@Test
-public void testWorkIdLastModifiedDateXMLParser() throws Exception {
+private void testWorkIdLastModifiedDateXMLParser() throws Exception {
String xml = IOUtils
.toString(
this.getClass().getResourceAsStream("record_0000-0001-5004-5918.xml"));