job execution now based on the last_update.txt file on HDFS

Enrico Ottonello 2021-02-04 10:44:04 +01:00
parent b2de598c1a
commit 465ce39f75
7 changed files with 72 additions and 171 deletions

View File: SparkDownloadOrcidAuthors.java

@@ -28,13 +28,14 @@ import org.slf4j.LoggerFactory;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.doiboost.orcid.model.DownloadedRecordData;
+import eu.dnetlib.doiboost.orcid.util.HDFSUtil;
import scala.Tuple2;
public class SparkDownloadOrcidAuthors {
static Logger logger = LoggerFactory.getLogger(SparkDownloadOrcidAuthors.class);
static final String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";
-static final String lastUpdate = "2020-11-18 00:00:05";
+static String lastUpdate;
public static void main(String[] args) throws IOException, Exception {
@@ -58,6 +59,8 @@ public class SparkDownloadOrcidAuthors {
final String lambdaFileName = parser.get("lambdaFileName");
logger.info("lambdaFileName: ", lambdaFileName);
+lastUpdate = HDFSUtil.readFromTextFile(workingPath.concat("last_update.txt"));
SparkConf conf = new SparkConf();
runWithSparkSession(
conf,
@@ -182,6 +185,9 @@ public class SparkDownloadOrcidAuthors {
if (modifiedDate.length() != 19) {
modifiedDate = modifiedDate.substring(0, 19);
}
+if (lastUpdate.length() != 19) {
+lastUpdate = lastUpdate.substring(0, 19);
+}
modifiedDateDt = new SimpleDateFormat(DATE_FORMAT).parse(modifiedDate);
lastUpdateDt = new SimpleDateFormat(DATE_FORMAT).parse(lastUpdate);
} catch (Exception e) {
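
For context, a minimal, self-contained sketch (not part of this commit) of the check the authors downloader now performs with the value read from last_update.txt: both timestamps are truncated to the 19-character "yyyy-MM-dd HH:mm:ss" prefix and compared. The helper name and sample values below are illustrative only.

```java
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

public class LastUpdateCheckSketch {

	static final String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";

	// Hypothetical helper: true when the record was modified after the last harvesting run.
	static boolean isModifiedAfterLastUpdate(String modifiedDate, String lastUpdate) throws ParseException {
		// Both values may carry extra precision, so keep only the first 19 characters.
		if (modifiedDate.length() != 19) {
			modifiedDate = modifiedDate.substring(0, 19);
		}
		if (lastUpdate.length() != 19) {
			lastUpdate = lastUpdate.substring(0, 19);
		}
		Date modifiedDateDt = new SimpleDateFormat(DATE_FORMAT).parse(modifiedDate);
		Date lastUpdateDt = new SimpleDateFormat(DATE_FORMAT).parse(lastUpdate);
		return modifiedDateDt.after(lastUpdateDt);
	}

	public static void main(String[] args) throws ParseException {
		// Illustrative values only.
		System.out.println(isModifiedAfterLastUpdate("2021-01-20 10:15:30.000", "2020-11-18 00:00:05")); // true
	}
}
```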

View File: SparkDownloadOrcidWorks.java

@@ -31,6 +31,7 @@ import com.google.gson.JsonParser;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.doiboost.orcid.model.DownloadedRecordData;
+import eu.dnetlib.doiboost.orcid.util.HDFSUtil;
import eu.dnetlib.doiboost.orcid.xml.XMLRecordParser;
import scala.Tuple2;
@@ -43,7 +44,7 @@ public class SparkDownloadOrcidWorks {
public static final String ORCID_XML_DATETIME_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
public static final DateTimeFormatter ORCID_XML_DATETIMEFORMATTER = DateTimeFormatter
.ofPattern(ORCID_XML_DATETIME_FORMAT);
-public static final String lastUpdateValue = "2020-11-18 00:00:05";
+public static String lastUpdateValue;
public static void main(String[] args) throws IOException, Exception {
@@ -64,6 +65,11 @@ public class SparkDownloadOrcidWorks {
final String outputPath = parser.get("outputPath");
final String token = parser.get("token");
+lastUpdateValue = HDFSUtil.readFromTextFile(workingPath.concat("last_update.txt"));
+if (lastUpdateValue.length() != 19) {
+lastUpdateValue = lastUpdateValue.substring(0, 19);
+}
SparkConf conf = new SparkConf();
runWithSparkSession(
conf,
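
As a rough sketch (not part of the commit), the works job can compare a work's XML last-modified timestamp, which follows ORCID_XML_DATETIME_FORMAT, against the lastUpdateValue read from last_update.txt. The method name and sample values here are made up for illustration.

```java
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class WorkModifiedCheckSketch {

	static final DateTimeFormatter ORCID_XML_DATETIMEFORMATTER = DateTimeFormatter
		.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
	static final DateTimeFormatter LAST_UPDATE_FORMATTER = DateTimeFormatter
		.ofPattern("yyyy-MM-dd HH:mm:ss");

	// Hypothetical helper: true when the work changed after the date stored in last_update.txt.
	static boolean modifiedAfterLastUpdate(String xmlLastModified, String lastUpdateValue) {
		LocalDateTime modified = LocalDateTime.parse(xmlLastModified, ORCID_XML_DATETIMEFORMATTER);
		// last_update.txt may carry extra precision; keep only the "yyyy-MM-dd HH:mm:ss" prefix.
		if (lastUpdateValue.length() != 19) {
			lastUpdateValue = lastUpdateValue.substring(0, 19);
		}
		LocalDateTime lastUpdate = LocalDateTime.parse(lastUpdateValue, LAST_UPDATE_FORMATTER);
		return modified.isAfter(lastUpdate);
	}

	public static void main(String[] args) {
		System.out.println(modifiedAfterLastUpdate("2021-01-20T10:15:30.000Z", "2020-11-18 00:00:05")); // true
	}
}
```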

View File: SparkGenLastModifiedSeq.java

@@ -3,9 +3,7 @@ package eu.dnetlib.doiboost.orcid;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
+import java.io.*;
import java.net.URI;
import java.util.Arrays;
import java.util.List;
@@ -17,6 +15,7 @@ import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
@@ -26,6 +25,7 @@ import org.apache.spark.SparkConf;
import org.mortbay.log.Log;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.doiboost.orcid.util.HDFSUtil;
public class SparkGenLastModifiedSeq {
private static String hdfsServerUri;
@@ -50,6 +50,9 @@ public class SparkGenLastModifiedSeq {
outputPath = parser.get("outputPath");
lambdaFileName = parser.get("lambdaFileName");
String lambdaFileUri = hdfsServerUri.concat(workingPath).concat(lambdaFileName);
+String lastModifiedDateFromLambdaFileUri = hdfsServerUri
+.concat(workingPath)
+.concat("last_modified_date_from_lambda_file.txt");
SparkConf sparkConf = new SparkConf();
runWithSparkSession(
@@ -57,6 +60,7 @@ public class SparkGenLastModifiedSeq {
isSparkSessionManaged,
spark -> {
int rowsNum = 0;
+String lastModifiedAuthorDate = "";
Path output = new Path(
hdfsServerUri
.concat(workingPath)
@@ -89,10 +93,15 @@ public class SparkGenLastModifiedSeq {
final Text value = new Text(recordInfo.get(3));
writer.append(key, value);
rowsNum++;
+if (rowsNum == 2) {
+lastModifiedAuthorDate = value.toString();
+}
}
}
}
}
}
+HDFSUtil.writeToTextFile(lastModifiedDateFromLambdaFileUri, lastModifiedAuthorDate);
Log.info("Saved rows from lamda csv tar file: " + rowsNum);
});
}
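
For readers unfamiliar with the SequenceFile step above, a minimal sketch (assumptions: in-memory rows instead of the lambda tar file, illustrative path and dates) of appending Text key/value pairs and capturing the date of the second appended row, which this job then persists to last_modified_date_from_lambda_file.txt:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class LambdaSeqWriterSketch {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		Path output = new Path("/tmp/last_modified.seq"); // illustrative path
		String lastModifiedAuthorDate = "";
		int rowsNum = 0;
		// Illustrative rows: ORCID iD and last-modified date, as read from the lambda CSV.
		String[][] rows = {
			{ "0000-0002-1825-0097", "2021-01-20 10:15:30.000" },
			{ "0000-0001-5109-3700", "2020-12-01 08:15:42.000" }
		};
		try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
			SequenceFile.Writer.file(output),
			SequenceFile.Writer.keyClass(Text.class),
			SequenceFile.Writer.valueClass(Text.class))) {
			for (String[] row : rows) {
				writer.append(new Text(row[0]), new Text(row[1]));
				rowsNum++;
				if (rowsNum == 2) {
					// Mirrors the commit: the date on the second appended row is kept
					// as the most recent author modification date.
					lastModifiedAuthorDate = row[1];
				}
			}
		}
		System.out.println("rows: " + rowsNum + ", lastModifiedAuthorDate: " + lastModifiedAuthorDate);
	}
}
```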

View File: SparkUpdateOrcidAuthors.java

@@ -95,7 +95,7 @@ public class SparkUpdateOrcidAuthors {
authorSummary = XMLRecordParser
.VTDParseAuthorSummary(xmlAuthor.getBytes());
authorSummary.setStatusCode(statusCode);
-authorSummary.setDownloadDate("2020-12-15 00:00:01.000000");
+authorSummary.setDownloadDate(Long.toString(System.currentTimeMillis()));
authorSummary.setBase64CompressData(compressedData);
return authorSummary;
} catch (Exception e) {
@@ -105,7 +105,7 @@ public class SparkUpdateOrcidAuthors {
}
} else {
authorSummary.setStatusCode(statusCode);
-authorSummary.setDownloadDate("2020-12-15 00:00:01.000000");
+authorSummary.setDownloadDate(Long.toString(System.currentTimeMillis()));
errorCodeAuthorsFoundAcc.add(1);
}
return authorSummary;
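
The hard-coded download date is replaced with the current time in epoch milliseconds, stored as a string. A small sketch (illustrative only, not project code) of producing that value and converting it back to a readable timestamp:

```java
import java.text.SimpleDateFormat;
import java.util.Date;

public class DownloadDateSketch {

	public static void main(String[] args) {
		// Same expression as in the commit: epoch milliseconds as a string.
		String downloadDate = Long.toString(System.currentTimeMillis());
		// Converting it back for display; the format choice here is illustrative.
		String formatted = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
			.format(new Date(Long.parseLong(downloadDate)));
		System.out.println(downloadDate + " -> " + formatted);
	}
}
```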

View File: SparkUpdateOrcidWorks.java

@@ -27,6 +27,7 @@ import com.google.gson.JsonParser;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.orcid.Work;
import eu.dnetlib.dhp.schema.orcid.WorkDetail;
+import eu.dnetlib.doiboost.orcid.util.HDFSUtil;
import eu.dnetlib.doiboost.orcidnodoi.xml.XMLRecordParserNoDoi;
public class SparkUpdateOrcidWorks {
@@ -83,7 +84,7 @@ public class SparkUpdateOrcidWorks {
String statusCode = getJsonValue(jElement, "statusCode");
work.setStatusCode(statusCode);
String downloadDate = getJsonValue(jElement, "lastModifiedDate");
-work.setDownloadDate("2020-12-15 00:00:01.000000");
+work.setDownloadDate(Long.toString(System.currentTimeMillis()));
if (statusCode.equals("200")) {
String compressedData = getJsonValue(jElement, "compressedData");
if (StringUtils.isEmpty(compressedData)) {
@@ -165,6 +166,10 @@ public class SparkUpdateOrcidWorks {
logger.info("errorLoadingJsonWorksFoundAcc: " + errorLoadingWorksJsonFoundAcc.value().toString());
logger.info("errorParsingXMLWorksFoundAcc: " + errorParsingWorksXMLFoundAcc.value().toString());
+String lastModifiedDateFromLambdaFile = HDFSUtil
+.readFromTextFile(workingPath.concat("last_modified_date_from_lambda_file.txt"));
+HDFSUtil.writeToTextFile(workingPath.concat("last_update.txt"), lastModifiedDateFromLambdaFile);
+logger.info("last_update file updated");
});
}
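
Isolated for clarity, a sketch of the promotion step added above: once the works update finishes, the date captured by SparkGenLastModifiedSeq becomes the new last_update.txt, so the next run downloads only records modified after it. The workingPath value below is illustrative; HDFSUtil is the helper added in this commit.

```java
import java.io.IOException;

import eu.dnetlib.doiboost.orcid.util.HDFSUtil;

public class PromoteLastUpdateSketch {

	public static void main(String[] args) throws IOException {
		String workingPath = "/data/orcid_activities/"; // illustrative
		String lastModifiedDateFromLambdaFile = HDFSUtil
			.readFromTextFile(workingPath.concat("last_modified_date_from_lambda_file.txt"));
		HDFSUtil.writeToTextFile(workingPath.concat("last_update.txt"), lastModifiedDateFromLambdaFile);
	}
}
```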

View File: HDFSUtil.java (new file)

@@ -0,0 +1,38 @@
package eu.dnetlib.doiboost.orcid.util;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class HDFSUtil {
public static String readFromTextFile(String path) throws IOException {
Configuration conf = new Configuration();
FileSystem fileSystem = FileSystem.get(conf);
FSDataInputStream inputStream = new FSDataInputStream(fileSystem.open(new Path(path)));
return IOUtils.toString(inputStream, StandardCharsets.UTF_8.name());
}
public static void writeToTextFile(String pathValue, String text) throws IOException {
Configuration conf = new Configuration();
FileSystem fileSystem = FileSystem.get(conf);
Path path = new Path(pathValue);
if (fileSystem.exists(path)) {
fileSystem.delete(path, true);
}
FSDataOutputStream os = fileSystem.create(path);
BufferedWriter br = new BufferedWriter(new OutputStreamWriter(os, "UTF-8"));
br.write(text);
br.close();
fileSystem.close();
}
}
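
A possible alternative sketch of the same helpers (not what this commit ships): try-with-resources closes the streams deterministically, the shared FileSystem instance is left open for other callers, and create(path, true) overwrites the file instead of deleting it first.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSUtilSketch {

	public static String readFromTextFile(String path) throws IOException {
		FileSystem fileSystem = FileSystem.get(new Configuration());
		// The stream is closed even if reading fails.
		try (FSDataInputStream in = fileSystem.open(new Path(path))) {
			return IOUtils.toString(in, StandardCharsets.UTF_8);
		}
	}

	public static void writeToTextFile(String pathValue, String text) throws IOException {
		FileSystem fileSystem = FileSystem.get(new Configuration());
		// Overwrite an existing file instead of delete-then-create.
		try (FSDataOutputStream out = fileSystem.create(new Path(pathValue), true)) {
			out.write(text.getBytes(StandardCharsets.UTF_8));
		}
	}
}
```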

View File: Oozie workflow update_orcid_datasets (deleted)

@@ -1,163 +0,0 @@
<workflow-app name="update_orcid_datasets" xmlns="uri:oozie:workflow:0.5">
<parameters>
<property>
<name>spark2MaxExecutors</name>
<value>50</value>
</property>
<property>
<name>sparkDriverMemory</name>
<description>memory for driver process</description>
</property>
<property>
<name>sparkExecutorMemory</name>
<description>memory for individual executor</description>
</property>
<property>
<name>sparkExecutorCores</name>
<description>number of cores used by single executor</description>
</property>
<property>
<name>oozieActionShareLibForSpark2</name>
<description>oozie action sharelib for spark 2.*</description>
</property>
<property>
<name>spark2ExtraListeners</name>
<value>com.cloudera.spark.lineage.NavigatorAppListener</value>
<description>spark 2.* extra listeners classname</description>
</property>
<property>
<name>spark2SqlQueryExecutionListeners</name>
<value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
<description>spark 2.* sql query execution listeners classname</description>
</property>
<property>
<name>spark2YarnHistoryServerAddress</name>
<description>spark 2.* yarn history server address</description>
</property>
<property>
<name>spark2EventLogDir</name>
<description>spark 2.* event log dir location</description>
</property>
<property>
<name>workingPath</name>
<description>the working dir base path</description>
</property>
</parameters>
<global>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>oozie.action.sharelib.for.spark</name>
<value>${oozieActionShareLibForSpark2}</value>
</property>
</configuration>
</global>
<start to="promoteOrcidAuthorsDataset"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="ResetWorkingPath">
<fs>
<delete path='${workingPath}/orcid_dataset/new_authors'/>
<delete path='${workingPath}/orcid_dataset/new_works'/>
</fs>
<ok to="UpdateOrcidAuthors"/>
<error to="Kill"/>
</action>
<action name="UpdateOrcidAuthors">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn-cluster</master>
<mode>cluster</mode>
<name>UpdateOrcidAuthors</name>
<class>eu.dnetlib.doiboost.orcid.SparkUpdateOrcidAuthors</class>
<jar>dhp-doiboost-${projectVersion}.jar</jar>
<spark-opts>
--conf spark.dynamicAllocation.enabled=true
--conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
</spark-opts>
<arg>-w</arg><arg>${workingPath}/</arg>
<arg>-n</arg><arg>${nameNode}</arg>
<arg>-f</arg><arg>-</arg>
<arg>-o</arg><arg>-</arg>
<arg>-t</arg><arg>-</arg>
</spark>
<ok to="UpdateOrcidWorks"/>
<error to="Kill"/>
</action>
<action name="UpdateOrcidWorks">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn-cluster</master>
<mode>cluster</mode>
<name>UpdateOrcidWorks</name>
<class>eu.dnetlib.doiboost.orcid.SparkUpdateOrcidWorks</class>
<jar>dhp-doiboost-${projectVersion}.jar</jar>
<spark-opts>
--conf spark.dynamicAllocation.enabled=true
--conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
</spark-opts>
<arg>-w</arg><arg>${workingPath}/</arg>
<arg>-n</arg><arg>${nameNode}</arg>
<arg>-f</arg><arg>-</arg>
<arg>-o</arg><arg>-</arg>
<arg>-t</arg><arg>-</arg>
</spark>
<ok to="End"/>
<error to="Kill"/>
</action>
<action name="promoteOrcidAuthorsDataset">
<distcp xmlns="uri:oozie:distcp-action:0.2">
<prepare>
<delete path="${workingPath}/orcid_dataset/authors"/>
<mkdir path="${workingPath}/orcid_dataset/authors"/>
</prepare>
<arg>${workingPath}/orcid_dataset/new_authors/*</arg>
<arg>${workingPath}/orcid_dataset/authors</arg>
</distcp>
<ok to="promoteOrcidWorksDataset"/>
<error to="Kill"/>
</action>
<action name="promoteOrcidWorksDataset">
<distcp xmlns="uri:oozie:distcp-action:0.2">
<prepare>
<delete path="${workingPath}/orcid_dataset/works"/>
<mkdir path="${workingPath}/orcid_dataset/works"/>
</prepare>
<arg>${workingPath}/orcid_dataset/new_works/*</arg>
<arg>${workingPath}/orcid_dataset/works</arg>
</distcp>
<ok to="CleanWorkingPath"/>
<error to="Kill"/>
</action>
<action name="CleanWorkingPath">
<fs>
<delete path='${workingPath}/orcid_dataset/new_authors'/>
<delete path='${workingPath}/orcid_dataset/new_works'/>
</fs>
<ok to="End"/>
<error to="Kill"/>
</action>
<end name="End"/>
</workflow-app>