dnet-hadoop/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/transformation/TransformSparkJobNode.java

package eu.dnetlib.dhp.transformation;

import static eu.dnetlib.dhp.aggregation.common.AggregationConstants.*;
import static eu.dnetlib.dhp.aggregation.common.AggregationUtility.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.util.Map;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.util.LongAccumulator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.data.mdstore.manager.common.model.MDStoreVersion;
import eu.dnetlib.dhp.aggregation.common.AggregationCounter;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup;
import eu.dnetlib.dhp.model.mdstore.MetadataRecord;
import eu.dnetlib.dhp.utils.ISLookupClientFactory;
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpService;
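/**
 * Spark job applying a transformation plugin to the MetadataRecords of a native MDStore and writing
 * the transformed records into a target MDStore.
 * <p>
 * The expected arguments are declared in transformation_input_parameters.json; the ones read directly
 * here are isSparkSessionManaged, mdstoreInputVersion, mdstoreOutputVersion, isLookupUrl and
 * dateOfTransformation, while the full argument map is forwarded to the transformation plugin factory.
 * </p>
 */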
public class TransformSparkJobNode {

	private static final Logger log = LoggerFactory.getLogger(TransformSparkJobNode.class);
	public static void main(String[] args) throws Exception {

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
			IOUtils
				.toString(
					TransformSparkJobNode.class
						.getResourceAsStream(
							"/eu/dnetlib/dhp/transformation/transformation_input_parameters.json")));
		parser.parseArgument(args);
		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);
		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String mdstoreInputVersion = parser.get("mdstoreInputVersion");
		final String mdstoreOutputVersion = parser.get("mdstoreOutputVersion");
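		// both MDStore versions are passed as JSON; parsing them yields the HDFS paths of the
		// input (native) and output (transformed) stores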
		final MDStoreVersion nativeMdStoreVersion = MAPPER.readValue(mdstoreInputVersion, MDStoreVersion.class);
		final String inputPath = nativeMdStoreVersion.getHdfsPath() + MDSTORE_DATA_PATH;
		log.info("inputPath: {}", inputPath);

		final MDStoreVersion cleanedMdStoreVersion = MAPPER.readValue(mdstoreOutputVersion, MDStoreVersion.class);
		final String outputBasePath = cleanedMdStoreVersion.getHdfsPath();
		log.info("outputBasePath: {}", outputBasePath);
		final String isLookupUrl = parser.get("isLookupUrl");
		log.info(String.format("isLookupUrl: %s", isLookupUrl));

		final String dateOfTransformation = parser.get("dateOfTransformation");
		log.info(String.format("dateOfTransformation: %s", dateOfTransformation));

		final ISLookUpService isLookupService = ISLookupClientFactory.getLookUpService(isLookupUrl);
		final VocabularyGroup vocabularies = VocabularyGroup.loadVocsFromIS(isLookupService);
		log.info("Retrieved {} vocabularies", vocabularies.vocabularyNames().size());
		SparkConf conf = new SparkConf();

		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				transformRecords(
					parser.getObjectMap(), isLookupService, spark, inputPath, outputBasePath);
			});
	}
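	/**
	 * Reads the MetadataRecords available under inputPath, maps each record through the transformation
	 * plugin resolved by {@link TransformationFactory} and saves the result under
	 * outputBasePath + MDSTORE_DATA_PATH, tracking total, transformed and invalid records via Spark
	 * accumulators.
	 */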
	public static void transformRecords(final Map<String, String> args, final ISLookUpService isLookUpService,
		final SparkSession spark, final String inputPath, final String outputBasePath)
		throws DnetTransformationException, IOException {

		final LongAccumulator totalItems = spark.sparkContext().longAccumulator(CONTENT_TOTALITEMS);
		final LongAccumulator errorItems = spark.sparkContext().longAccumulator(CONTENT_INVALIDRECORDS);
		final LongAccumulator transformedItems = spark.sparkContext().longAccumulator(CONTENT_TRANSFORMEDRECORDS);
		final AggregationCounter ct = new AggregationCounter(totalItems, errorItems, transformedItems);
		final Encoder<MetadataRecord> encoder = Encoders.bean(MetadataRecord.class);
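		// load the native records from parquet, apply the transformation plugin to each record and
		// persist the transformed dataset in the output MDStore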
		final Dataset<MetadataRecord> mdstore = spark
			.read()
			.format("parquet")
			.load(inputPath)
			.as(encoder)
			.map(
				TransformationFactory.getTransformationPlugin(args, ct, isLookUpService),
				encoder);
		saveDataset(mdstore, outputBasePath + MDSTORE_DATA_PATH);
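		// report the aggregation counters and store the total record count next to the MDStore data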
log.info("Transformed item " + ct.getProcessedItems().count());
log.info("Total item " + ct.getTotalItems().count());
log.info("Transformation Error item " + ct.getErrorItems().count());
2021-02-02 12:49:29 +01:00
writeTotalSizeOnHDFS(spark, mdstore.count(), outputBasePath + MDSTORE_SIZE_PATH);
}
}