dnet-hadoop/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/collection/plugin/base/BaseAnalyzerJob.java

package eu.dnetlib.dhp.collection.plugin.base;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DeflateCodec;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.dom4j.Document;
import org.dom4j.DocumentException;
import org.dom4j.DocumentHelper;
import org.dom4j.Element;
import org.dom4j.Node;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;

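/**
 * Spark job that analyzes records collected from BASE. When requested, it re-imports the raw
 * records into a block-compressed SequenceFile and then extracts, for every record, a
 * {@link BaseRecordInfo} bean (identifier, XML paths, types, collection info) that is written
 * as a parquet dataset for further inspection.
 */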
public class BaseAnalyzerJob {

    private static final Logger log = LoggerFactory.getLogger(BaseAnalyzerJob.class);

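    /*
     * Entry point. The recognized parameters (inputPath, dataPath, outputPath, reimport,
     * isSparkSessionManaged) are read through ArgumentApplicationParser; their flag names are
     * defined in action_set_parameters.json, which is not shown in this file. A sketch of a
     * possible invocation, assuming the long-option names simply mirror the parameter names
     * and with purely illustrative paths:
     *
     *   spark-submit --class eu.dnetlib.dhp.collection.plugin.base.BaseAnalyzerJob <jar> \
     *     --inputPath /data/base/dump --dataPath /data/base/records.seq \
     *     --outputPath /data/base/info --reimport true
     */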
    public static void main(final String[] args) throws Exception {
        final String jsonConfiguration = IOUtils
            .toString(
                BaseAnalyzerJob.class
                    .getResourceAsStream("/eu/dnetlib/dhp/collection/plugin/base/action_set_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
        parser.parseArgument(args);

        final Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);
        log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

        final String inputPath = parser.get("inputPath");
        log.info("inputPath: {}", inputPath);

        final String dataPath = parser.get("dataPath");
        log.info("dataPath: {}", dataPath);

        final String outputPath = parser.get("outputPath");
        log.info("outputPath: {}", outputPath);

        final boolean reimport = Boolean.parseBoolean(parser.get("reimport"));
        log.info("reimport: {}", reimport);

        final SparkConf conf = new SparkConf();

        runWithSparkSession(
            conf, isSparkSessionManaged,
            spark -> processBaseRecords(spark, inputPath, dataPath, outputPath, reimport));
    }

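    /**
     * Re-imports the raw records from {@code inputPath} into the SequenceFile at {@code dataPath}
     * when {@code reimport} is true, then extracts the per-record information into a parquet
     * dataset at {@code outputPath}.
     */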
    private static void processBaseRecords(final SparkSession spark,
        final String inputPath,
        final String dataPath,
        final String outputPath,
        final boolean reimport) throws IOException {

        try (final FileSystem fs = FileSystem.get(new Configuration());
            final AggregatorReport report = new AggregatorReport()) {

            if (reimport) {
                loadRecords(fs, inputPath, dataPath, report);
            }

            // fs.delete(new Path(outputPath), true);
            extractInfo(spark, dataPath, outputPath);
        } catch (final Throwable e) {
            throw new RuntimeException(e);
        }
    }

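    /**
     * Streams the raw records from {@code inputPath} through a {@link BaseCollectorIterator} and
     * appends them to a block-compressed (Deflate) SequenceFile at {@code outputPath}, using a
     * running counter as the key and the record body as the value.
     */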
    private static void loadRecords(final FileSystem fs,
        final String inputPath,
        final String outputPath,
        final AggregatorReport report)
        throws Exception {

        final AtomicLong recordsCounter = new AtomicLong(0);

        final LongWritable key = new LongWritable();
        final Text value = new Text();

        try (final SequenceFile.Writer writer = SequenceFile
            .createWriter(
                fs.getConf(), SequenceFile.Writer.file(new Path(outputPath)),
                SequenceFile.Writer.keyClass(LongWritable.class),
                SequenceFile.Writer.valueClass(Text.class),
                SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK, new DeflateCodec()))) {

            final BaseCollectorIterator iterator = new BaseCollectorIterator(fs, new Path(inputPath), report);

            while (iterator.hasNext()) {
                final String record = iterator.next();

                final long i = recordsCounter.incrementAndGet();
                if ((i % 10000) == 0) {
                    log.info("# Loaded records: {}", i);
                }

                key.set(i);
                value.set(record);

                try {
                    writer.append(key, value);
                } catch (final Throwable e1) {
                    throw new RuntimeException(e1);
                }
            }

            log.info("# COMPLETED - Loaded records: {}", recordsCounter.get());
        }
    }

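    /**
     * Reads the SequenceFile produced by {@link #loadRecords}, maps every record to a
     * {@link BaseRecordInfo} bean and writes the result as a parquet dataset at {@code targetPath}.
     */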
    private static void extractInfo(final SparkSession spark,
        final String inputPath,
        final String targetPath) throws Exception {

        final JavaRDD<BaseRecordInfo> rdd = JavaSparkContext
            .fromSparkContext(spark.sparkContext())
            .sequenceFile(inputPath, LongWritable.class, Text.class)
            .map(s -> s._2.toString())
            .map(BaseAnalyzerJob::extractInfo);

        spark
            .createDataset(rdd.rdd(), Encoders.bean(BaseRecordInfo.class))
            .write()
            .mode(SaveMode.Overwrite)
            .format("parquet")
            .save(targetPath);
    }

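    /**
     * Parses a single BASE record (XML) with dom4j and collects: the record identifier (from the
     * header), the XPath of every element and attribute, the collections the record belongs to
     * (with their opendoar/ror identifiers) and its type / normalized type values.
     *
     * A minimal illustration of the kind of input it expects; the element names come from the
     * XPath expressions used below, while the overall envelope and the sample values are only
     * assumptions and may differ in real records:
     *
     * <pre>{@code
     * <record>
     *   <header><identifier>oai:example.org:1</identifier></header>
     *   <metadata>
     *     <collection opendoar_id="123" ror_id="https://ror.org/xxxx">ftdummy</collection>
     *     <type>article</type>
     *     <typenorm>121</typenorm>
     *   </metadata>
     * </record>
     * }</pre>
     */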
    protected static BaseRecordInfo extractInfo(final String s) {
        try {
            final Document record = DocumentHelper.parseText(s);

            final BaseRecordInfo info = new BaseRecordInfo();

            final Set<String> paths = new LinkedHashSet<>();
            final Set<String> types = new LinkedHashSet<>();
            final List<BaseCollectionInfo> colls = new ArrayList<>();

            for (final Object o : record.selectNodes("//*|//@*")) {
                paths.add(((Node) o).getPath());

                if (o instanceof Element) {
                    final Element n = (Element) o;

                    final String nodeName = n.getName();

                    if ("collection".equals(nodeName)) {
                        final String collName = n.getText().trim();

                        if (StringUtils.isNotBlank(collName)) {
                            final BaseCollectionInfo coll = new BaseCollectionInfo();
                            coll.setId(collName);
                            coll.setOpendoarId(n.valueOf("@opendoar_id").trim());
                            coll.setRorId(n.valueOf("@ror_id").trim());
                            colls.add(coll);
                        }
                    } else if ("type".equals(nodeName)) {
                        types.add("TYPE: " + n.getText().trim());
                    } else if ("typenorm".equals(nodeName)) {
                        types.add("TYPE_NORM: " + n.getText().trim());
                    }
                }
            }

            info.setId(record.valueOf("//*[local-name() = 'header']/*[local-name() = 'identifier']").trim());
            info.getTypes().addAll(types);
            info.getPaths().addAll(paths);
            info.setCollections(colls);

            return info;
        } catch (final DocumentException e) {
            throw new RuntimeException(e);
        }
    }
}