package eu.dnetlib.dhp.ircdl_extention;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;

import java.util.Objects;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.ircdl_extention.model.Result;

import scala.Tuple2;
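
/**
 * Spark job that isolates "all the rest": the collected results that do not appear in Crossref,
 * in DataCite, or among the results associated with the institutional repositories.
 */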
public class PrepareResultAllTheRestSpark {

    public static void main(String[] args) throws Exception {

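        // load the definitions of the supported job parameters from the bundled json resource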
        String jsonConfiguration = IOUtils
            .toString(
                PrepareResultAllTheRestSpark.class
                    .getResourceAsStream(
                        "/eu/dnetlib/dhp/ircdl_extention/prepare_alltherest_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
        parser.parseArgument(args);

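        // when true (the default) the session helper creates and stops the Spark session itself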
        Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);

        final String inputPath = parser.get("inputPath");
        final String outputPath = parser.get("outputPath");
        final String instRepoPath = parser.get("instRepoPath");
        final String crossrefPath = parser.get("crossrefPath");
        final String datacitePath = parser.get("datacitePath");

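        // note: the Hive metastore URI is hardcoded to a test cluster endpoint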
        SparkConf conf = new SparkConf();
        conf.set("hive.metastore.uris", "thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083");

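        // remove any previous output, then compute and write the complement set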
        runWithSparkHiveSession(
            conf,
            isSparkSessionManaged,
            spark -> {
                Utils.removeOutputDir(spark, outputPath + "allTheRest");
                exec(
                    spark, outputPath + "allTheRest",
                    inputPath, instRepoPath,
                    datacitePath, crossrefPath);
            });
    }

    /**
     * Reads all the results from Crossref, DataCite and those associated with the institutional
     * repositories, reads all the collected results, performs a left join between the collected
     * results and those read at the previous step, and keeps only the records without a match
     * in the join.
     *
     * @param spark          the spark session
     * @param output_path    the path where the unmatched results are written
     * @param result_path    the path of the collected results
     * @param inst_repo_path the path of the results associated with institutional repositories
     * @param datacite_path  the path of the DataCite results
     * @param crossref_path  the path of the Crossref results
     */
    private static void exec(SparkSession spark, String output_path, String result_path, String inst_repo_path,
        String datacite_path, String crossref_path) {

        Dataset<Result> result = Utils.readPath(spark, result_path, Result.class);

        Dataset<Result> inst_repo = Utils
            .readPath(spark, inst_repo_path, Result.class);

        Dataset<Result> datacite = Utils
            .readPath(spark, datacite_path, Result.class);

        Dataset<Result> crossref = Utils
            .readPath(spark, crossref_path, Result.class);

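        // union of all the results already matched to Crossref, DataCite or an institutional repository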
        Dataset<Result> union_dataset = inst_repo.union(datacite).union(crossref);

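        // a left join on the result id followed by a null filter implements a left anti join:
        // only the collected results without a counterpart in the union are kept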
        result
            .joinWith(union_dataset, result.col("id").equalTo(union_dataset.col("id")), "left")
            .map((MapFunction<Tuple2<Result, Result>, Result>) t2 -> {
                if (!Optional.ofNullable(t2._2()).isPresent())
                    return t2._1();
                return null;
            }, Encoders.bean(Result.class))
            .filter(Objects::nonNull)
            .write()
            .option("compression", "gzip")
            .mode(SaveMode.Overwrite)
            .json(output_path);
    }

}