WIP: error handling during XSLT transformation

Claudio Atzori 2022-09-29 11:05:43 +02:00
parent f0a9c370b6
commit a2095dc725
2 changed files with 26 additions and 26 deletions

TransformSparkJobNode.java

@@ -77,11 +77,11 @@ public class TransformSparkJobNode {
             .ofNullable(parser.get("recordsPerTask"))
             .map(Integer::valueOf)
             .orElse(RECORDS_PER_TASK);
         log.info("recordsPerTask: {}", rpt);
 
         final ISLookUpService isLookupService = ISLookupClientFactory.getLookUpService(isLookupUrl);
         final VocabularyGroup vocabularies = VocabularyGroup.loadVocsFromIS(isLookupService);
 
         log.info("Retrieved {} vocabularies", vocabularies.vocabularyNames().size());
 
         SparkConf conf = new SparkConf();
@@ -120,33 +120,24 @@ public class TransformSparkJobNode {
         final MessageSender messageSender = new MessageSender(dnetMessageManagerURL, workflowId);
         try (AggregatorReport report = new AggregatorReport(messageSender)) {
-            try {
-                final MapFunction<MetadataRecord, MetadataRecord> tr = TransformationFactory
-                    .getTransformationPlugin(args, ct, report, isLookUpService);
+            final MapFunction<MetadataRecord, MetadataRecord> tr = TransformationFactory
+                .getTransformationPlugin(args, ct, report, isLookUpService);
 
-                JavaRDD<MetadataRecord> mdstore = inputMDStore
-                    .javaRDD()
-                    .repartition(getRepartitionNumber(totalInput, rpt))
-                    .map((Function<MetadataRecord, MetadataRecord>) tr::call)
-                    .filter((Function<MetadataRecord, Boolean>) Objects::nonNull);
-                saveDataset(spark.createDataset(mdstore.rdd(), encoder), outputBasePath + MDSTORE_DATA_PATH);
+            JavaRDD<MetadataRecord> mdstore = inputMDStore
+                .javaRDD()
+                .repartition(getRepartitionNumber(totalInput, rpt))
+                .map((Function<MetadataRecord, MetadataRecord>) tr::call)
+                .filter((Function<MetadataRecord, Boolean>) Objects::nonNull);
+            saveDataset(spark.createDataset(mdstore.rdd(), encoder), outputBasePath + MDSTORE_DATA_PATH);
 
-                log.info("Transformed item {}", ct.getProcessedItems().count());
-                log.info("Total item {}", ct.getTotalItems().count());
-                log.info("Transformation Error item {}", ct.getErrorItems().count());
+            log.info("Transformed item {}", ct.getProcessedItems().count());
+            log.info("Total item {}", ct.getTotalItems().count());
+            log.info("Transformation Error item {}", ct.getErrorItems().count());
 
-                final long mdStoreSize = spark.read().load(outputBasePath + MDSTORE_DATA_PATH).count();
-                writeHdfsFile(
-                    spark.sparkContext().hadoopConfiguration(),
-                    "" + mdStoreSize, outputBasePath + MDSTORE_SIZE_PATH);
-            } catch (Throwable e) {
-                log.error("error during record transformation", e);
-                report.put(e.getClass().getName(), e.getMessage());
-                report.put(CONTENT_TOTALITEMS, ct.getTotalItems().value().toString());
-                report.put(CONTENT_INVALIDRECORDS, ct.getErrorItems().value().toString());
-                report.put(CONTENT_TRANSFORMEDRECORDS, ct.getProcessedItems().value().toString());
-                throw e;
-            }
+            final long mdStoreSize = spark.read().load(outputBasePath + MDSTORE_DATA_PATH).count();
+            writeHdfsFile(
+                spark.sparkContext().hadoopConfiguration(),
+                "" + mdStoreSize, outputBasePath + MDSTORE_SIZE_PATH);
         }
     }
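With the inner try/catch removed, transformRecords relies entirely on the try-with-resources block: whatever gets recorded into the AggregatorReport is shipped when the block closes, even if the job then fails. A minimal, self-contained sketch of that pattern, using a hypothetical Report stand-in (the real AggregatorReport is constructed with a MessageSender and sends its entries on close()):

import java.io.Closeable;
import java.io.IOException;
import java.util.LinkedHashMap;

public class ReportPatternSketch {

    // Hypothetical stand-in for AggregatorReport: a map of error keys and
    // messages that is shipped to the message manager when closed.
    static class Report extends LinkedHashMap<String, String> implements Closeable {
        @Override
        public void close() throws IOException {
            // the real class sends the collected entries via its MessageSender
            System.out.println("sending report: " + this);
        }
    }

    public static void main(String[] args) {
        try (Report report = new Report()) {
            // a failing transformation records its error, then fails fast;
            // close() still runs, so the report reaches the message manager
            report.put("net.sf.saxon.s9api.SaxonApiException", "XSLT compilation failed");
            throw new IllegalStateException("transformation failed");
        } catch (IllegalStateException | IOException e) {
            System.out.println("workflow fails with: " + e.getMessage());
        }
    }
}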

XSLTTransformationFunction.java

@@ -1,6 +1,9 @@
 package eu.dnetlib.dhp.transformation.xslt;
 
+import static eu.dnetlib.dhp.common.Constants.*;
+
+import java.io.IOException;
 import java.io.Serializable;
 import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
@@ -70,7 +73,13 @@ public class XSLTTransformationFunction implements MapFunction<MetadataRecord, MetadataRecord> {
                 .compile(new StreamSource(IOUtils.toInputStream(transformationRule, StandardCharsets.UTF_8)))
                 .load();
         } catch (SaxonApiException e) {
-            throw new RuntimeException(e);
+            report.put(e.getClass().getName(), e.getMessage());
+            try {
+                report.close();
+            } catch (IOException ex) {
+                throw new IllegalArgumentException("error compiling the XSLT", e);
+            }
+            throw new IllegalArgumentException("error compiling the XSLT", e);
         }
         transformer
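For reference, the compile path guarded by the new catch block looks roughly as follows. A sketch assuming Saxon's s9api (which the class already uses, per SaxonApiException and the .compile(...).load() chain); the Map parameter stands in for the injected AggregatorReport, and the report.close() handling from the diff is left out for brevity:

import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;

import javax.xml.transform.stream.StreamSource;

import net.sf.saxon.s9api.Processor;
import net.sf.saxon.s9api.SaxonApiException;
import net.sf.saxon.s9api.XsltTransformer;

public class XsltCompileSketch {

    // compiles an XSLT rule; on failure, records the error under the
    // exception class name (as the commit does) before failing fast
    public static XsltTransformer compile(String transformationRule, Map<String, String> report) {
        Processor processor = new Processor(false); // Saxon HE
        try {
            return processor
                .newXsltCompiler()
                .compile(new StreamSource(new StringReader(transformationRule)))
                .load();
        } catch (SaxonApiException e) {
            report.put(e.getClass().getName(), e.getMessage());
            throw new IllegalArgumentException("error compiling the XSLT", e);
        }
    }

    public static void main(String[] args) {
        Map<String, String> report = new HashMap<>();
        try {
            compile("<not-an-xslt/>", report); // not a stylesheet: compilation fails
        } catch (IllegalArgumentException e) {
            System.out.println("report: " + report);
        }
    }
}

Note the design choice visible in the committed catch block: the IOException from report.close() is swallowed and the original SaxonApiException is rethrown in both paths, keeping the compilation failure as the primary error.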