fixed transformation target paths

Claudio Atzori 2021-02-02 12:49:29 +01:00
parent ca4391aa1c
commit bde14b149a
1 changed file with 11 additions and 11 deletions


@@ -11,6 +11,7 @@ import java.util.Optional;
 import org.apache.commons.io.IOUtils;
 import org.apache.spark.SparkConf;
+import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Encoder;
 import org.apache.spark.sql.Encoders;
 import org.apache.spark.sql.SparkSession;
@@ -52,11 +53,11 @@ public class TransformSparkJobNode {
 		final MDStoreVersion nativeMdStoreVersion = MAPPER.readValue(mdstoreInputVersion, MDStoreVersion.class);
 		final String inputPath = nativeMdStoreVersion.getHdfsPath() + MDSTORE_DATA_PATH;
-		log.info("input path: {}", inputPath);
+		log.info("inputPath: {}", inputPath);
 
 		final MDStoreVersion cleanedMdStoreVersion = MAPPER.readValue(mdstoreOutputVersion, MDStoreVersion.class);
-		final String outputPath = cleanedMdStoreVersion.getHdfsPath() + MDSTORE_DATA_PATH;
-		log.info("output path: {}", outputPath);
+		final String outputBasePath = cleanedMdStoreVersion.getHdfsPath();
+		log.info("outputBasePath: {}", outputBasePath);
 
 		final String isLookupUrl = parser.get("isLookupUrl");
 		log.info(String.format("isLookupUrl: %s", isLookupUrl));
@@ -76,12 +77,12 @@ public class TransformSparkJobNode {
 			isSparkSessionManaged,
 			spark -> {
 				transformRecords(
-					parser.getObjectMap(), isLookupService, spark, inputPath, outputPath);
+					parser.getObjectMap(), isLookupService, spark, inputPath, outputBasePath);
 			});
 	}
 
 	public static void transformRecords(final Map<String, String> args, final ISLookUpService isLookUpService,
-		final SparkSession spark, final String inputPath, final String outputPath)
+		final SparkSession spark, final String inputPath, final String outputBasePath)
 		throws DnetTransformationException, IOException {
 
 		final LongAccumulator totalItems = spark.sparkContext().longAccumulator(CONTENT_TOTALITEMS);
@@ -90,22 +91,21 @@ public class TransformSparkJobNode {
 		final AggregationCounter ct = new AggregationCounter(totalItems, errorItems, transformedItems);
 		final Encoder<MetadataRecord> encoder = Encoders.bean(MetadataRecord.class);
 
-		saveDataset(
-			spark
-				.read()
-				.format("parquet")
-				.load(inputPath)
-				.as(encoder)
-				.map(
-					TransformationFactory.getTransformationPlugin(args, ct, isLookUpService),
-					encoder),
-			outputPath + MDSTORE_DATA_PATH);
+		final Dataset<MetadataRecord> mdstore = spark
+			.read()
+			.format("parquet")
+			.load(inputPath)
+			.as(encoder)
+			.map(
+				TransformationFactory.getTransformationPlugin(args, ct, isLookUpService),
+				encoder);
+		saveDataset(mdstore, outputBasePath + MDSTORE_DATA_PATH);
 
 		log.info("Transformed item " + ct.getProcessedItems().count());
 		log.info("Total item " + ct.getTotalItems().count());
 		log.info("Transformation Error item " + ct.getErrorItems().count());
 
-		writeTotalSizeOnHDFS(spark, ct.getProcessedItems().count(), outputPath + MDSTORE_SIZE_PATH);
+		writeTotalSizeOnHDFS(spark, mdstore.count(), outputBasePath + MDSTORE_SIZE_PATH);
 	}
 }
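
The path fix is visible across the second and last hunks: the old outputPath already had MDSTORE_DATA_PATH appended, so the write site produced base + MDSTORE_DATA_PATH + MDSTORE_DATA_PATH for the records and base + MDSTORE_DATA_PATH + MDSTORE_SIZE_PATH for the size file. The patch keeps the bare base path and appends the right suffix at each call site. A minimal standalone sketch of the before/after behavior (the suffix values and the version path below are assumptions for illustration, not the project's actual constants):

// Illustrates the double-appended suffix this commit removes.
public class MdStorePathsDemo {

	private static final String MDSTORE_DATA_PATH = "/store"; // assumed value
	private static final String MDSTORE_SIZE_PATH = "/size";  // assumed value

	public static void main(String[] args) {
		final String hdfsPath = "/data/mdstore/md-1234/v-1"; // hypothetical version path

		// Before: the data suffix was baked into outputPath up front...
		final String outputPath = hdfsPath + MDSTORE_DATA_PATH;
		// ...so the call sites appended a suffix a second time:
		System.out.println(outputPath + MDSTORE_DATA_PATH); // .../v-1/store/store (wrong)
		System.out.println(outputPath + MDSTORE_SIZE_PATH); // .../v-1/store/size  (wrong)

		// After: keep the bare base path and append per target.
		final String outputBasePath = hdfsPath;
		System.out.println(outputBasePath + MDSTORE_DATA_PATH); // .../v-1/store (correct)
		System.out.println(outputBasePath + MDSTORE_SIZE_PATH); // .../v-1/size  (correct)
	}
}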
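
The last hunk also binds the transformed records to a local mdstore Dataset, so one reference feeds both saveDataset and the size record, with mdstore.count() replacing the ct.getProcessedItems() accumulator. A minimal sketch of that read → map → write → count shape in plain Spark, with String records and hypothetical paths standing in for the project's MetadataRecord pipeline:

import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

public class TransformShapeDemo {

	public static void main(String[] args) {
		final SparkSession spark = SparkSession
			.builder()
			.appName("transform-shape-demo")
			.master("local[*]") // assumption: local demo run
			.getOrCreate();

		final Encoder<String> encoder = Encoders.STRING();

		// Bind the transformed Dataset to a variable, as the patched code does,
		// so it can be both written out and counted afterwards.
		final Dataset<String> transformed = spark
			.read()
			.textFile("/tmp/demo/input") // hypothetical input path
			.map((MapFunction<String, String>) String::toUpperCase, encoder);

		transformed.write().text("/tmp/demo/base/store"); // stand-in for saveDataset
		System.out.println("records: " + transformed.count());

		spark.stop();
	}
}

One caveat of this shape: count() after the write re-executes the read/map plan unless the Dataset is cached, and the patched code's mdstore.count() behaves the same way.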