forked from antonis.lempesis/dnet-hadoop

commit efe4c2a9c5 (parent 858efbfad1)

    Authors and works are now updated in two separate Spark actions of the workflow.
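Both new Spark jobs introduced below (SparkUpdateOrcidAuthors and SparkUpdateOrcidWorks) apply the same merge rule after a full outer join between the current ORCID dataset and the freshly downloaded updates: a record present only on the downloaded side is new, a record present only on the current side is kept unchanged, and when both sides are present the downloaded version wins. A minimal, hypothetical sketch of that rule follows; the class and method names are illustrative only and are not part of the commit:

    import java.util.Optional;

    // Illustrative merge rule applied to each pair produced by the full outer join
    // (null on one side means the record exists only on the other side).
    final class MergeRuleSketch {

        static <T> T merge(T current, T downloaded) {
            Optional<T> cur = Optional.ofNullable(current);
            Optional<T> down = Optional.ofNullable(downloaded);
            if (!cur.isPresent()) {
                return down.get();   // new record: only the download has it
            }
            if (!down.isPresent()) {
                return cur.get();    // old record: untouched by this update round
            }
            return down.get();       // updated record: prefer the downloaded version
        }
    }

In the jobs below the same three branches also feed the new/old/updated accumulators that are logged at the end of each action.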
@@ -1,7 +1,9 @@
 package eu.dnetlib.dhp.schema.orcid;
 
-public class AuthorSummary extends OrcidData {
+import java.io.Serializable;
+
+public class AuthorSummary extends OrcidData implements Serializable {
     AuthorData authorData;
     AuthorHistory authorHistory;
@@ -1,11 +1,13 @@
 package eu.dnetlib.dhp.schema.orcid;
 
+import java.io.Serializable;
+
 /**
  * This class models the data related to external id, that are retrieved from an orcid publication
  */
-public class ExternalId {
+public class ExternalId implements Serializable {
     private String type;
     private String value;
     private String relationShip;
@@ -1,7 +1,9 @@
 package eu.dnetlib.dhp.schema.orcid;
 
-public class OrcidData {
+import java.io.Serializable;
+
+public class OrcidData implements Serializable {
     protected String base64CompressData;
     protected String statusCode;
     protected String downloadDate;
@@ -1,11 +1,13 @@
 package eu.dnetlib.dhp.schema.orcid;
 
+import java.io.Serializable;
+
 /**
  * This class models the data related to a publication date, that are retrieved from an orcid publication
  */
-public class PublicationDate {
+public class PublicationDate implements Serializable {
     private String year;
     private String month;
     private String day;
@@ -1,7 +1,9 @@
 package eu.dnetlib.dhp.schema.orcid;
 
-public class Work extends OrcidData {
+import java.io.Serializable;
+
+public class Work extends OrcidData implements Serializable {
     WorkDetail workDetail;
 
     public WorkDetail getWorkDetail() {
@@ -0,0 +1,178 @@

package eu.dnetlib.doiboost.orcid;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.util.Objects;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.util.LongAccumulator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.orcid.AuthorSummary;
import eu.dnetlib.doiboost.orcid.xml.XMLRecordParser;
import scala.Tuple2;

public class SparkUpdateOrcidAuthors {

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper()
        .setSerializationInclusion(JsonInclude.Include.NON_NULL);

    public static void main(String[] args) throws IOException, Exception {
        Logger logger = LoggerFactory.getLogger(SparkUpdateOrcidDatasets.class);

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
            IOUtils
                .toString(
                    SparkUpdateOrcidDatasets.class
                        .getResourceAsStream(
                            "/eu/dnetlib/dhp/doiboost/download_orcid_data.json")));
        parser.parseArgument(args);
        Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);
        final String workingPath = parser.get("workingPath");
        // final String outputPath = parser.get("outputPath");

        SparkConf conf = new SparkConf();
        runWithSparkSession(
            conf,
            isSparkSessionManaged,
            spark -> {
                JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());

                LongAccumulator oldAuthorsFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("old_authors_found");
                LongAccumulator updatedAuthorsFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("updated_authors_found");
                LongAccumulator newAuthorsFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("new_authors_found");
                LongAccumulator errorCodeAuthorsFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_code_authors_found");
                LongAccumulator errorLoadingAuthorsJsonFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_loading_authors_json_found");
                LongAccumulator errorParsingAuthorsXMLFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_parsing_authors_xml_found");

                Function<Tuple2<Text, Text>, AuthorSummary> retrieveAuthorSummaryFunction = data -> {
                    AuthorSummary authorSummary = new AuthorSummary();
                    String orcidId = data._1().toString();
                    String jsonData = data._2().toString();
                    JsonElement jElement = new JsonParser().parse(jsonData);
                    String statusCode = getJsonValue(jElement, "statusCode");
                    String downloadDate = getJsonValue(jElement, "lastModifiedDate");
                    if (statusCode.equals("200")) {
                        String compressedData = getJsonValue(jElement, "compressedData");
                        if (StringUtils.isEmpty(compressedData)) {
                            errorLoadingAuthorsJsonFoundAcc.add(1);
                        } else {
                            String xmlAuthor = ArgumentApplicationParser.decompressValue(compressedData);
                            try {
                                authorSummary = XMLRecordParser
                                    .VTDParseAuthorSummary(xmlAuthor.getBytes());
                                authorSummary.setStatusCode(statusCode);
                                authorSummary.setDownloadDate("2020-11-18 00:00:05.644768");
                                authorSummary.setBase64CompressData(compressedData);
                                return authorSummary;
                            } catch (Exception e) {
                                logger.error("parsing xml " + orcidId + " [" + jsonData + "]", e);
                                errorParsingAuthorsXMLFoundAcc.add(1);
                            }
                        }
                    } else {
                        authorSummary.setStatusCode(statusCode);
                        authorSummary.setDownloadDate("2020-11-18 00:00:05.644768");
                        errorCodeAuthorsFoundAcc.add(1);
                    }
                    return authorSummary;
                };

                Dataset<AuthorSummary> downloadedAuthorSummaryDS = spark
                    .createDataset(
                        sc
                            .sequenceFile(workingPath + "downloads/updated_authors/*", Text.class, Text.class)
                            .map(retrieveAuthorSummaryFunction)
                            .rdd(),
                        Encoders.bean(AuthorSummary.class));
                Dataset<AuthorSummary> currentAuthorSummaryDS = spark
                    .createDataset(
                        sc
                            .textFile(workingPath.concat("orcid_dataset/authors/*"))
                            .map(item -> OBJECT_MAPPER.readValue(item, AuthorSummary.class))
                            .rdd(),
                        Encoders.bean(AuthorSummary.class));
                currentAuthorSummaryDS
                    .joinWith(
                        downloadedAuthorSummaryDS,
                        currentAuthorSummaryDS
                            .col("authorData.oid")
                            .equalTo(downloadedAuthorSummaryDS.col("authorData.oid")),
                        "full_outer")
                    .map(value -> {
                        Optional<AuthorSummary> opCurrent = Optional.ofNullable(value._1());
                        Optional<AuthorSummary> opDownloaded = Optional.ofNullable(value._2());
                        if (!opCurrent.isPresent()) {
                            newAuthorsFoundAcc.add(1);
                            return opDownloaded.get();
                        }
                        if (!opDownloaded.isPresent()) {
                            oldAuthorsFoundAcc.add(1);
                            return opCurrent.get();
                        }
                        if (opCurrent.isPresent() && opDownloaded.isPresent()) {
                            updatedAuthorsFoundAcc.add(1);
                            return opDownloaded.get();
                        }
                        return null;
                    },
                        Encoders.bean(AuthorSummary.class))
                    .filter(Objects::nonNull)
                    .toJavaRDD()
                    .map(authorSummary -> OBJECT_MAPPER.writeValueAsString(authorSummary))
                    .saveAsTextFile(workingPath.concat("orcid_dataset/new_authors"), GzipCodec.class);

                logger.info("oldAuthorsFoundAcc: " + oldAuthorsFoundAcc.value().toString());
                logger.info("newAuthorsFoundAcc: " + newAuthorsFoundAcc.value().toString());
                logger.info("updatedAuthorsFoundAcc: " + updatedAuthorsFoundAcc.value().toString());
                logger.info("errorCodeFoundAcc: " + errorCodeAuthorsFoundAcc.value().toString());
                logger.info("errorLoadingJsonFoundAcc: " + errorLoadingAuthorsJsonFoundAcc.value().toString());
                logger.info("errorParsingXMLFoundAcc: " + errorParsingAuthorsXMLFoundAcc.value().toString());

            });
    }

    private static String getJsonValue(JsonElement jElement, String property) {
        if (jElement.getAsJsonObject().has(property)) {
            JsonElement name = null;
            name = jElement.getAsJsonObject().get(property);
            if (name != null && !name.isJsonNull()) {
                return name.getAsString();
            }
        }
        return "";
    }
}
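The sequence files under downloads/updated_authors/* pair each ORCID iD with a small JSON envelope produced by the download step. Judging from the fields that retrieveAuthorSummaryFunction reads (statusCode, lastModifiedDate, compressedData), one record looks roughly like the hypothetical example below; every concrete value is invented for illustration:

    import org.apache.hadoop.io.Text;

    // Hypothetical shape of one (key, value) pair in downloads/updated_authors/*,
    // inferred from the fields the parser reads; all values are made up.
    public class UpdatedAuthorRecordExample {
        public static void main(String[] args) {
            Text orcidId = new Text("0000-0002-1825-0097");
            Text envelope = new Text(
                "{\"statusCode\":\"200\","
                    + "\"lastModifiedDate\":\"2020-11-18T00:00:05\","
                    + "\"compressedData\":\"H4sIAAAA...base64/gzip XML record...\"}");
            System.out.println(orcidId + " -> " + envelope);
        }
    }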
@@ -4,27 +4,23 @@ package eu.dnetlib.doiboost.orcid;
 import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.Objects;
+import java.util.Optional;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.api.java.function.FlatMapFunction;
 import org.apache.spark.api.java.function.Function;
-import org.apache.spark.api.java.function.MapFunction;
-import org.apache.spark.api.java.function.PairFunction;
-import org.apache.spark.rdd.RDD;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Encoders;
 import org.apache.spark.util.LongAccumulator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.fasterxml.jackson.annotation.JsonInclude;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.gson.JsonElement;
 import com.google.gson.JsonParser;
@@ -33,15 +29,14 @@ import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.schema.orcid.AuthorSummary;
 import eu.dnetlib.dhp.schema.orcid.Work;
 import eu.dnetlib.dhp.schema.orcid.WorkDetail;
 import eu.dnetlib.dhp.utils.DHPUtils;
 import eu.dnetlib.doiboost.orcid.xml.XMLRecordParser;
 import eu.dnetlib.doiboost.orcidnodoi.json.JsonWriter;
 import eu.dnetlib.doiboost.orcidnodoi.xml.XMLRecordParserNoDoi;
 import scala.Tuple2;
 
 public class SparkUpdateOrcidDatasets {
 
-    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
+    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper()
+        .setSerializationInclusion(JsonInclude.Include.NON_NULL);
 
     public static void main(String[] args) throws IOException, Exception {
         Logger logger = LoggerFactory.getLogger(SparkUpdateOrcidDatasets.class);
@@ -67,31 +62,40 @@ public class SparkUpdateOrcidDatasets {
            spark -> {
                JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());

                LongAccumulator oldAuthorsFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("old_authors_found");
                LongAccumulator updatedAuthorsFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("updated_authors_found");
                LongAccumulator newAuthorsFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("new_authors_found");
                LongAccumulator errorCodeAuthorsFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_code_authors_found");
                LongAccumulator errorLoadingAuthorsJsonFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_loading_authors_json_found");
                LongAccumulator errorLoadingAuthorsXMLFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_loading_authors_xml_found");
                LongAccumulator errorParsingAuthorsXMLFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_parsing_authors_xml_found");

                LongAccumulator oldWorksFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("old_works_found");
                LongAccumulator updatedWorksFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("updated_works_found");
                LongAccumulator newWorksFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("new_works_found");
                LongAccumulator errorCodeWorksFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_code_works_found");
                LongAccumulator errorLoadingWorksJsonFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_loading_works_json_found");
                LongAccumulator errorLoadingWorksXMLFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_loading_works_xml_found");
                LongAccumulator errorParsingWorksXMLFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_parsing_works_xml_found");
@@ -138,25 +142,21 @@
//                            errorLoadingAuthorsJsonFoundAcc.add(1);
//                        } else {
//                            String xmlAuthor = ArgumentApplicationParser.decompressValue(compressedData);
//                            if (StringUtils.isEmpty(xmlAuthor)) {
//                                errorLoadingAuthorsXMLFoundAcc.add(1);
//                            } else {
//                                try {
//                                    authorSummary = XMLRecordParser
//                                        .VTDParseAuthorSummary(xmlAuthor.getBytes());
//                                    authorSummary.setStatusCode(statusCode);
//                                    authorSummary.setDownloadDate(downloadDate);
//                                    authorSummary.setBase64CompressData(compressedData);
//                                    return authorSummary;
//                                } catch (Exception e) {
//                                    logger.error("parsing xml " + orcidId + " [" + jsonData + "]", e);
//                                    errorParsingAuthorsXMLFoundAcc.add(1);
//                                }
//                            try {
//                                authorSummary = XMLRecordParser
//                                    .VTDParseAuthorSummary(xmlAuthor.getBytes());
//                                authorSummary.setStatusCode(statusCode);
//                                authorSummary.setDownloadDate("2020-11-18 00:00:05.644768");
//                                authorSummary.setBase64CompressData(compressedData);
//                                return authorSummary;
//                            } catch (Exception e) {
//                                logger.error("parsing xml " + orcidId + " [" + jsonData + "]", e);
//                                errorParsingAuthorsXMLFoundAcc.add(1);
//                            }
//                        }
//                    } else {
//                        authorSummary.setStatusCode(statusCode);
//                        authorSummary.setDownloadDate(downloadDate);
//                        authorSummary.setDownloadDate("2020-11-18 00:00:05.644768");
//                        errorCodeAuthorsFoundAcc.add(1);
//                    }
//                    return authorSummary;
@@ -187,12 +187,15 @@
//                        Optional<AuthorSummary> opCurrent = Optional.ofNullable(value._1());
//                        Optional<AuthorSummary> opDownloaded = Optional.ofNullable(value._2());
//                        if (!opCurrent.isPresent()) {
//                            newAuthorsFoundAcc.add(1);
//                            return opDownloaded.get();
//                        }
//                        if (!opDownloaded.isPresent()) {
//                            oldAuthorsFoundAcc.add(1);
//                            return opCurrent.get();
//                        }
//                        if (opCurrent.isPresent() && opDownloaded.isPresent()) {
//                            updatedAuthorsFoundAcc.add(1);
//                            return opDownloaded.get();
//                        }
//                        return null;
@@ -200,12 +203,14 @@
//                    Encoders.bean(AuthorSummary.class))
//                    .filter(Objects::nonNull)
//                    .toJavaRDD()
//                    .map(authorSummary -> JsonWriter.create(authorSummary))
//                    .map(authorSummary -> OBJECT_MAPPER.writeValueAsString(authorSummary))
//                    .saveAsTextFile(workingPath.concat("orcid_dataset/new_authors"), GzipCodec.class);
//
//                logger.info("oldAuthorsFoundAcc: " + oldAuthorsFoundAcc.value().toString());
//                logger.info("newAuthorsFoundAcc: " + newAuthorsFoundAcc.value().toString());
//                logger.info("updatedAuthorsFoundAcc: " + updatedAuthorsFoundAcc.value().toString());
//                logger.info("errorCodeFoundAcc: " + errorCodeAuthorsFoundAcc.value().toString());
//                logger.info("errorLoadingJsonFoundAcc: " + errorLoadingAuthorsJsonFoundAcc.value().toString());
//                logger.info("errorLoadingXMLFoundAcc: " + errorLoadingAuthorsXMLFoundAcc.value().toString());
//                logger.info("errorParsingXMLFoundAcc: " + errorParsingAuthorsXMLFoundAcc.value().toString());

                Function<String, Work> retrieveWorkFunction = jsonData -> {
@@ -214,27 +219,22 @@ public class SparkUpdateOrcidDatasets {
                     String statusCode = getJsonValue(jElement, "statusCode");
                     work.setStatusCode(statusCode);
                     String downloadDate = getJsonValue(jElement, "lastModifiedDate");
-                    work.setDownloadDate(downloadDate);
+                    work.setDownloadDate("2020-11-18 00:00:05.644768");
                     if (statusCode.equals("200")) {
                         String compressedData = getJsonValue(jElement, "compressedData");
                         if (StringUtils.isEmpty(compressedData)) {
                             errorLoadingWorksJsonFoundAcc.add(1);
                         } else {
                             String xmlWork = ArgumentApplicationParser.decompressValue(compressedData);
-                            if (StringUtils.isEmpty(xmlWork)) {
-                                errorLoadingWorksXMLFoundAcc.add(1);
-                            } else {
-                                try {
-                                    WorkDetail workDetail = XMLRecordParserNoDoi
-                                        .VTDParseWorkData(xmlWork.getBytes());
-                                    work.setWorkDetail(workDetail);
-                                    work.setBase64CompressData(compressedData);
-                                    updatedWorksFoundAcc.add(1);
-                                    return work;
-                                } catch (Exception e) {
-                                    logger.error("parsing xml [" + jsonData + "]", e);
-                                    errorParsingWorksXMLFoundAcc.add(1);
-                                }
+                            try {
+                                WorkDetail workDetail = XMLRecordParserNoDoi
+                                    .VTDParseWorkData(xmlWork.getBytes());
+                                work.setWorkDetail(workDetail);
+                                work.setBase64CompressData(compressedData);
+                                return work;
+                            } catch (Exception e) {
+                                logger.error("parsing xml [" + jsonData + "]", e);
+                                errorParsingWorksXMLFoundAcc.add(1);
+                            }
                         }
                     } else {
@@ -275,12 +275,15 @@ public class SparkUpdateOrcidDatasets {
                         Optional<Work> opCurrent = Optional.ofNullable(value._1());
                         Optional<Work> opDownloaded = Optional.ofNullable(value._2());
                         if (!opCurrent.isPresent()) {
+                            newWorksFoundAcc.add(1);
                             return opDownloaded.get();
                         }
                         if (!opDownloaded.isPresent()) {
+                            oldWorksFoundAcc.add(1);
                             return opCurrent.get();
                         }
                         if (opCurrent.isPresent() && opDownloaded.isPresent()) {
+                            updatedWorksFoundAcc.add(1);
                             return opDownloaded.get();
                         }
                         return null;
@@ -288,13 +291,14 @@ public class SparkUpdateOrcidDatasets {
                         Encoders.bean(Work.class))
                     .filter(Objects::nonNull)
                     .toJavaRDD()
-                    .map(work -> JsonWriter.create(work))
+                    .map(work -> OBJECT_MAPPER.writeValueAsString(work))
                     .saveAsTextFile(workingPath.concat("orcid_dataset/new_works"), GzipCodec.class);
 
                 logger.info("oldWorksFoundAcc: " + oldWorksFoundAcc.value().toString());
                 logger.info("newWorksFoundAcc: " + newWorksFoundAcc.value().toString());
                 logger.info("updatedWorksFoundAcc: " + updatedWorksFoundAcc.value().toString());
                 logger.info("errorCodeWorksFoundAcc: " + errorCodeWorksFoundAcc.value().toString());
                 logger.info("errorLoadingJsonWorksFoundAcc: " + errorLoadingWorksJsonFoundAcc.value().toString());
                 logger.info("errorLoadingXMLWorksFoundAcc: " + errorLoadingWorksXMLFoundAcc.value().toString());
                 logger.info("errorParsingXMLWorksFoundAcc: " + errorParsingWorksXMLFoundAcc.value().toString());
 
                 });
@@ -0,0 +1,181 @@

package eu.dnetlib.doiboost.orcid;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.util.Objects;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.util.LongAccumulator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.orcid.Work;
import eu.dnetlib.dhp.schema.orcid.WorkDetail;
import eu.dnetlib.doiboost.orcidnodoi.xml.XMLRecordParserNoDoi;

public class SparkUpdateOrcidWorks {

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper()
        .setSerializationInclusion(JsonInclude.Include.NON_NULL);

    public static void main(String[] args) throws IOException, Exception {
        Logger logger = LoggerFactory.getLogger(SparkUpdateOrcidDatasets.class);

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
            IOUtils
                .toString(
                    SparkUpdateOrcidDatasets.class
                        .getResourceAsStream(
                            "/eu/dnetlib/dhp/doiboost/download_orcid_data.json")));
        parser.parseArgument(args);
        Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);
        final String workingPath = parser.get("workingPath");
        // final String outputPath = parser.get("outputPath");

        SparkConf conf = new SparkConf();
        runWithSparkSession(
            conf,
            isSparkSessionManaged,
            spark -> {
                JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());

                LongAccumulator oldWorksFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("old_works_found");
                LongAccumulator updatedWorksFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("updated_works_found");
                LongAccumulator newWorksFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("new_works_found");
                LongAccumulator errorCodeWorksFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_code_works_found");
                LongAccumulator errorLoadingWorksJsonFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_loading_works_json_found");
                LongAccumulator errorParsingWorksXMLFoundAcc = spark
                    .sparkContext()
                    .longAccumulator("error_parsing_works_xml_found");

                Function<String, Work> retrieveWorkFunction = jsonData -> {
                    Work work = new Work();
                    JsonElement jElement = new JsonParser().parse(jsonData);
                    String statusCode = getJsonValue(jElement, "statusCode");
                    work.setStatusCode(statusCode);
                    String downloadDate = getJsonValue(jElement, "lastModifiedDate");
                    work.setDownloadDate("2020-11-18 00:00:05.644768");
                    if (statusCode.equals("200")) {
                        String compressedData = getJsonValue(jElement, "compressedData");
                        if (StringUtils.isEmpty(compressedData)) {
                            errorLoadingWorksJsonFoundAcc.add(1);
                        } else {
                            String xmlWork = ArgumentApplicationParser.decompressValue(compressedData);
                            try {
                                WorkDetail workDetail = XMLRecordParserNoDoi
                                    .VTDParseWorkData(xmlWork.getBytes());
                                work.setWorkDetail(workDetail);
                                work.setBase64CompressData(compressedData);
                                return work;
                            } catch (Exception e) {
                                logger.error("parsing xml [" + jsonData + "]", e);
                                errorParsingWorksXMLFoundAcc.add(1);
                            }
                        }
                    } else {
                        errorCodeWorksFoundAcc.add(1);
                    }
                    return work;
                };

                Dataset<Work> downloadedWorksDS = spark
                    .createDataset(
                        sc
                            .textFile(workingPath + "downloads/updated_works/*")
                            .map(s -> {
                                return s.substring(21, s.length() - 1);
                            })
                            .map(retrieveWorkFunction)
                            .rdd(),
                        Encoders.bean(Work.class));
                Dataset<Work> currentWorksDS = spark
                    .createDataset(
                        sc
                            .textFile(workingPath.concat("orcid_dataset/works/*"))
                            .map(item -> OBJECT_MAPPER.readValue(item, Work.class))
                            .rdd(),
                        Encoders.bean(Work.class));
                currentWorksDS
                    .joinWith(
                        downloadedWorksDS,
                        currentWorksDS
                            .col("workDetail.id")
                            .equalTo(downloadedWorksDS.col("workDetail.id"))
                            .and(
                                currentWorksDS
                                    .col("workDetail.oid")
                                    .equalTo(downloadedWorksDS.col("workDetail.oid"))),
                        "full_outer")
                    .map(value -> {
                        Optional<Work> opCurrent = Optional.ofNullable(value._1());
                        Optional<Work> opDownloaded = Optional.ofNullable(value._2());
                        if (!opCurrent.isPresent()) {
                            newWorksFoundAcc.add(1);
                            return opDownloaded.get();
                        }
                        if (!opDownloaded.isPresent()) {
                            oldWorksFoundAcc.add(1);
                            return opCurrent.get();
                        }
                        if (opCurrent.isPresent() && opDownloaded.isPresent()) {
                            updatedWorksFoundAcc.add(1);
                            return opDownloaded.get();
                        }
                        return null;
                    },
                        Encoders.bean(Work.class))
                    .filter(Objects::nonNull)
                    .toJavaRDD()
                    .map(work -> OBJECT_MAPPER.writeValueAsString(work))
                    .saveAsTextFile(workingPath.concat("orcid_dataset/new_works"), GzipCodec.class);

                logger.info("oldWorksFoundAcc: " + oldWorksFoundAcc.value().toString());
                logger.info("newWorksFoundAcc: " + newWorksFoundAcc.value().toString());
                logger.info("updatedWorksFoundAcc: " + updatedWorksFoundAcc.value().toString());
                logger.info("errorCodeWorksFoundAcc: " + errorCodeWorksFoundAcc.value().toString());
                logger.info("errorLoadingJsonWorksFoundAcc: " + errorLoadingWorksJsonFoundAcc.value().toString());
                logger.info("errorParsingXMLWorksFoundAcc: " + errorParsingWorksXMLFoundAcc.value().toString());

            });
    }

    private static String getJsonValue(JsonElement jElement, String property) {
        if (jElement.getAsJsonObject().has(property)) {
            JsonElement name = null;
            name = jElement.getAsJsonObject().get(property);
            if (name != null && !name.isJsonNull()) {
                return name.getAsString();
            }
        }
        return "";
    }
}
@@ -55,18 +55,54 @@
         </configuration>
     </global>
 
-    <start to="UpdateOrcidDatasets"/>
+    <start to="promoteOrcidAuthorsDataset"/>
 
     <kill name="Kill">
         <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
     </kill>
 
-    <action name="UpdateOrcidDatasets">
+    <action name="ResetWorkingPath">
+        <fs>
+            <delete path='${workingPath}/orcid_dataset/new_authors'/>
+            <delete path='${workingPath}/orcid_dataset/new_works'/>
+        </fs>
+        <ok to="UpdateOrcidAuthors"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="UpdateOrcidAuthors">
         <spark xmlns="uri:oozie:spark-action:0.2">
             <master>yarn-cluster</master>
             <mode>cluster</mode>
-            <name>UpdateOrcidDatasets</name>
-            <class>eu.dnetlib.doiboost.orcid.SparkUpdateOrcidDatasets</class>
+            <name>UpdateOrcidAuthors</name>
+            <class>eu.dnetlib.doiboost.orcid.SparkUpdateOrcidAuthors</class>
             <jar>dhp-doiboost-${projectVersion}.jar</jar>
             <spark-opts>
                 --conf spark.dynamicAllocation.enabled=true
+                --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}
+                --executor-memory=${sparkExecutorMemory}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+            </spark-opts>
+            <arg>-w</arg><arg>${workingPath}/</arg>
+            <arg>-n</arg><arg>${nameNode}</arg>
+            <arg>-f</arg><arg>-</arg>
+            <arg>-o</arg><arg>-</arg>
+            <arg>-t</arg><arg>-</arg>
+        </spark>
+        <ok to="UpdateOrcidWorks"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="UpdateOrcidWorks">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn-cluster</master>
+            <mode>cluster</mode>
+            <name>UpdateOrcidWorks</name>
+            <class>eu.dnetlib.doiboost.orcid.SparkUpdateOrcidWorks</class>
+            <jar>dhp-doiboost-${projectVersion}.jar</jar>
+            <spark-opts>
+                --conf spark.dynamicAllocation.enabled=true
@@ -88,5 +124,40 @@
         <error to="Kill"/>
     </action>
 
+    <action name="promoteOrcidAuthorsDataset">
+        <distcp xmlns="uri:oozie:distcp-action:0.2">
+            <prepare>
+                <delete path="${workingPath}/orcid_dataset/authors"/>
+                <mkdir path="${workingPath}/orcid_dataset/authors"/>
+            </prepare>
+            <arg>${workingPath}/orcid_dataset/new_authors/*</arg>
+            <arg>${workingPath}/orcid_dataset/authors</arg>
+        </distcp>
+        <ok to="promoteOrcidWorksDataset"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="promoteOrcidWorksDataset">
+        <distcp xmlns="uri:oozie:distcp-action:0.2">
+            <prepare>
+                <delete path="${workingPath}/orcid_dataset/works"/>
+                <mkdir path="${workingPath}/orcid_dataset/works"/>
+            </prepare>
+            <arg>${workingPath}/orcid_dataset/new_works/*</arg>
+            <arg>${workingPath}/orcid_dataset/works</arg>
+        </distcp>
+        <ok to="CleanWorkingPath"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="CleanWorkingPath">
+        <fs>
+            <delete path='${workingPath}/orcid_dataset/new_authors'/>
+            <delete path='${workingPath}/orcid_dataset/new_works'/>
+        </fs>
+        <ok to="End"/>
+        <error to="Kill"/>
+    </action>
+
     <end name="End"/>
 </workflow-app>
@@ -66,7 +66,7 @@
         </configuration>
     </global>
 
-    <start to="DownloadOrcidWorks"/>
+    <start to="ResetWorkingPath"/>
 
 
     <kill name="Kill">
@@ -96,21 +96,6 @@
         <error to="Kill"/>
     </action>
 
-    <action name="DownloadUpdatedXMLAuthors">
-        <java>
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <main-class>eu.dnetlib.doiboost.orcid.OrcidDownloader</main-class>
-            <arg>-w</arg><arg>${workingPath}/</arg>
-            <arg>-n</arg><arg>${nameNode}</arg>
-            <arg>-f</arg><arg>last_modified.csv.tar</arg>
-            <arg>-o</arg><arg>downloads/</arg>
-            <arg>-t</arg><arg>${token}</arg>
-        </java>
-        <ok to="End"/>
-        <error to="Kill"/>
-    </action>
-
     <action name="GenLastModifiedSeq">
         <spark xmlns="uri:oozie:spark-action:0.2">
             <master>yarn-cluster</master>