dnet-hadoop/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/ircdl_extention/PrepareNormalizedResultSpar...


package eu.dnetlib.dhp.ircdl_extention;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;

import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.ircdl_extention.model.Result;
public class PrepareNormalizedResultSpark {

    public static void main(String[] args) throws Exception {

        // Read the workflow parameter specification bundled with the jar
        String jsonConfiguration = IOUtils
            .toString(
                PrepareNormalizedResultSpark.class
                    .getResourceAsStream(
                        "/eu/dnetlib/dhp/ircdl_extention/prepare_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
        parser.parseArgument(args);

        Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);

        final String inputPath = parser.get("inputPath");
        final String outputPath = parser.get("outputPath");

        SparkConf conf = new SparkConf();
        conf.set("hive.metastore.uris", "thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083");

        runWithSparkHiveSession(
            conf,
            isSparkSessionManaged,
            spark -> {
                Utils.removeOutputDir(spark, outputPath);
                execNormalize(spark, outputPath, inputPath);
            });
    }
    private static void execNormalize(SparkSession spark, String outputPath, String inputPath) {
        // Merge the four result types and normalize the author name fields
        Dataset<Result> normalized_result = Utils
            .readPath(spark, inputPath + "publicationsWithOrcid", Result.class)
            .union(Utils.readPath(spark, inputPath + "datasetWithOrcid", Result.class))
            .union(Utils.readPath(spark, inputPath + "softwareWithOrcid", Result.class))
            .union(Utils.readPath(spark, inputPath + "otherWithOrcid", Result.class))
            .map((MapFunction<Result, Result>) r -> {
                r.setName(Utils.normalizeString(r.getName()));
                r.setSurname(Utils.normalizeString(r.getSurname()));
                r.setFullname(Utils.normalizeString(r.getFullname()));
                return r;
            }, Encoders.bean(Result.class));

        // All results with an ORCID
        normalized_result
            .write()
            .option("compression", "gzip")
            .mode(SaveMode.Overwrite)
            .json(outputPath + "ResultWithOrcid");

        // Only collected records, i.e. excluding deduplicated ones
        normalized_result
            .filter((FilterFunction<Result>) r -> !r.getId().startsWith("50|dedup"))
            .write()
            .option("compression", "gzip")
            .mode(SaveMode.Overwrite)
            .json(outputPath + "collectedResultWithOrcid");

        // Only records not marked as deleted by inference
        normalized_result
            .filter((FilterFunction<Result>) r -> !r.getDeletedbyinference())
            .write()
            .option("compression", "gzip")
            .mode(SaveMode.Overwrite)
            .json(outputPath + "notDeletedByInferenceResultWithOrcid");
    }
}
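
The class delegates to three helpers on Utils (removeOutputDir, readPath, normalizeString) defined elsewhere in the eu.dnetlib.dhp.ircdl_extention package. Below is a minimal sketch of how such helpers might look, assuming the usual dnet-hadoop conventions (Hadoop FileSystem for directory cleanup, Jackson for JSON deserialization, commons-lang3 for string handling); the class name UtilsSketch and the exact normalization rule are illustrative assumptions, not the project's actual implementation.

// Hypothetical sketch (for illustration only) of the Utils helpers used above.
// The real implementations live in eu.dnetlib.dhp.ircdl_extention.Utils and may differ.
package eu.dnetlib.dhp.ircdl_extention;

import java.io.IOException;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

import com.fasterxml.jackson.databind.ObjectMapper;

public class UtilsSketch {

    // Removes the output directory so the job can overwrite the results of previous runs.
    public static void removeOutputDir(SparkSession spark, String path) {
        try {
            FileSystem
                .get(spark.sparkContext().hadoopConfiguration())
                .delete(new Path(path), true);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    // Reads newline-delimited JSON and deserializes each line into the given bean class.
    public static <R> Dataset<R> readPath(SparkSession spark, String inputPath, Class<R> clazz) {
        return spark
            .read()
            .textFile(inputPath)
            .map(
                (MapFunction<String, R>) value -> new ObjectMapper().readValue(value, clazz),
                Encoders.bean(clazz));
    }

    // Lower-cases, strips accents and keeps only letters and spaces (assumed normalization rule).
    public static String normalizeString(String input) {
        if (StringUtils.isBlank(input)) {
            return "";
        }
        return StringUtils
            .stripAccents(input.toLowerCase())
            .replaceAll("[^a-z\\s]", " ")
            .replaceAll("\\s+", " ")
            .trim();
    }
}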