dnet-hadoop/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/ircdl_extention/PrepareResultFromInstRepo.java

package eu.dnetlib.dhp.ircdl_extention;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;

import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.ircdl_extention.model.Result;
import eu.dnetlib.dhp.schema.oaf.Datasource;
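
/**
 * Selects the results collected from institutional repositories. Results whose identifier
 * was minted by doiboost, scholix, datacite or the deduplication are discarded; the remaining
 * ones are joined, via the keys of their collectedfrom (cf) field, with the datasources of
 * type pubsrepository::institutional, and the matches are written as gzip-compressed JSON.
 */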
public class PrepareResultFromInstRepo {

    public static void main(String[] args) throws Exception {
        String jsonConfiguration = IOUtils
            .toString(
                PrepareResultFromInstRepo.class
                    .getResourceAsStream(
                        "/eu/dnetlib/dhp/ircdl_extention/prepare_instrepo_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
        parser.parseArgument(args);
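
        // when true (the default), the Spark session is created and closed by runWithSparkHiveSession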
        Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);

        final String inputPath = parser.get("inputPath");
        final String outputPath = parser.get("outputPath");
        final String datasourcePath = parser.get("datasourcePath");
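
        // hard-coded Hive metastore contacted by the Hive-enabled Spark session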
        SparkConf conf = new SparkConf();
        conf.set("hive.metastore.uris", "thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083");

        runWithSparkHiveSession(
            conf,
            isSparkSessionManaged,
            spark -> {
                Utils.removeOutputDir(spark, outputPath);
                selectResultFromInstRepo(spark, inputPath, outputPath, datasourcePath);
            });
    }
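
    /**
     * Reads the results at inputPath and the datasources at datasourcePath, keeps only the
     * results collected from a datasource of type pubsrepository::institutional, and writes
     * them to outputPath as gzip-compressed JSON.
     */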
    private static void selectResultFromInstRepo(SparkSession spark, String inputPath, String outputPath,
        String datasourcePath) {
        Dataset<Datasource> datasource = Utils.readPath(spark, datasourcePath, Datasource.class);
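
        // discard the results whose identifier was minted by doiboost, scholix, datacite or dedup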
        Dataset<Result> res = Utils
            .readPath(
                spark, inputPath, Result.class)
            .filter(
                (FilterFunction<Result>) r -> !r.getId().startsWith("50|doiboost")
                    && !r.getId().startsWith("50|scholix")
                    && !r.getId().startsWith("50|datacite")
                    && !r.getId().startsWith("50|dedup"));
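
        // explode the collectedfrom (cf) keys and join them with the institutional repositories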
        datasource.createOrReplaceTempView("datasource");
        res.createOrReplaceTempView("result");
        spark
            .sql(
                "SELECT t.id, t.deletedbyinference, t.name, t.surname, t.cf, t.fullname, t.pid, t.oid " +
                    "FROM " +
                    "(SELECT * " +
                    "FROM result " +
                    "LATERAL VIEW explode(cf.key) c AS cfromkey) AS t " +
                    "JOIN " +
                    "datasource d " +
                    "ON " +
                    "d.id = t.cfromkey " +
                    "AND d.datasourcetype.classid = 'pubsrepository::institutional'")
            .as(Encoders.bean(Result.class))
            .write()
            .option("compression", "gzip")
            .mode(SaveMode.Overwrite)
            .json(outputPath);
    }
}