package eu.dnetlib.dhp.oa.graph.dump.subset;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.*;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.*;
import org.apache.spark.sql.*;
import org.apache.spark.sql.Dataset; // pins Dataset against eu.dnetlib.dhp.schema.oaf.Dataset
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.*;
import scala.Tuple2;
/**
 * Selects the subset of the graph needed to complement the results already dumped under
 * outputPath/original: the relations involving those results, and the organizations,
 * datasources and projects connected to them.
 *
 * @author miriam.baglioni
 * @Date 11/11/22
 */
public class SparkSelectSubset implements Serializable {

	private static final Logger log = LoggerFactory.getLogger(SparkSelectSubset.class);

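	// Illustrative invocation only; the authoritative parameter spec lives in
	// input_relationdump_parameters.json and the paths below are placeholders:
	//   --sourcePath /graph/input --outputPath /dump/output --removeSet merges;isMergedIn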
	public static void main(String[] args) throws Exception {
		String jsonConfiguration = IOUtils
			.toString(
				SparkSelectSubset.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/oa/graph/dump/input_relationdump_parameters.json"));

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args);
		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);
		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String inputPath = parser.get("sourcePath");
		log.info("inputPath: {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);
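		// removeSet: optional semicolon-separated list of relation classes (relClass
		// values) to be excluded from the dump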
		Optional<String> rs = Optional.ofNullable(parser.get("removeSet"));
		final Set<String> removeSet = new HashSet<>();
		if (rs.isPresent()) {
			Collections.addAll(removeSet, rs.get().split(";"));
		}

		SparkConf conf = new SparkConf();
		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> selectSubset(spark, inputPath, outputPath, removeSet));
	}
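	/**
	 * Selects the relations and the non-result entities needed to complement the results
	 * already dumped under outputPath/original, skipping records deleted by inference and
	 * the relation classes listed in removeSet.
	 */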
	private static void selectSubset(SparkSession spark, String inputPath, String outputPath, Set<String> removeSet) {
		Dataset<Relation> relation = Utils
			.readPath(spark, inputPath + "/relation", Relation.class)
			.filter(
				(FilterFunction<Relation>) r -> !r.getDataInfo().getDeletedbyinference()
					&& !removeSet.contains(r.getRelClass()));
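		// identifiers of the results already dumped under outputPath/original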
		Dataset<String> resultIds = Utils
			.readPath(spark, outputPath + "/original/publication", Publication.class)
			.map((MapFunction<Publication, String>) p -> p.getId(), Encoders.STRING())
			.union(
				Utils
					.readPath(spark, outputPath + "/original/dataset", eu.dnetlib.dhp.schema.oaf.Dataset.class)
					.map((MapFunction<eu.dnetlib.dhp.schema.oaf.Dataset, String>) d -> d.getId(), Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/software", Software.class)
					.map((MapFunction<Software, String>) s -> s.getId(), Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/otherresearchproduct", OtherResearchProduct.class)
					.map((MapFunction<OtherResearchProduct, String>) o -> o.getId(), Encoders.STRING()));
		// select result -> result relations: both source and target must be among the
		// dumped results
		Dataset<Relation> relResultResult = relation
			.joinWith(resultIds, relation.col("source").equalTo(resultIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));

		relResultResult
			.joinWith(resultIds, relResultResult.col("target").equalTo(resultIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.write()
			.option("compression", "gzip")
			.mode(SaveMode.Overwrite)
			.json(outputPath + "/original/relation");
		// save the relations between the results and the other entities (organization,
		// project, datasource)
		Dataset<String> otherIds = Utils
			.readPath(spark, inputPath + "/organization", Organization.class)
			.filter((FilterFunction<Organization>) e -> !e.getDataInfo().getDeletedbyinference())
			.map((MapFunction<Organization, String>) o -> o.getId(), Encoders.STRING())
			.union(
				Utils
					.readPath(spark, inputPath + "/project", Project.class)
					.filter((FilterFunction<Project>) e -> !e.getDataInfo().getDeletedbyinference())
					.map((MapFunction<Project, String>) p -> p.getId(), Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, inputPath + "/datasource", Datasource.class)
					.filter((FilterFunction<Datasource>) e -> !e.getDataInfo().getDeletedbyinference())
					.map((MapFunction<Datasource, String>) d -> d.getId(), Encoders.STRING()));
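		// relations whose source is a dumped result and whose target is one of the
		// other entities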
		Dataset<Relation> relResultOther = relation
			.joinWith(resultIds, relation.col("source").equalTo(resultIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));

		relResultOther
			.joinWith(otherIds, relResultOther.col("target").equalTo(otherIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.write()
			.mode(SaveMode.Append)
			.option("compression", "gzip")
			.json(outputPath + "/original/relation");
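		// and the symmetric case: relations whose source is one of the other entities
		// and whose target is a dumped result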
		Dataset<Relation> relOtherResult = relation
			.joinWith(resultIds, relation.col("target").equalTo(resultIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));

		relOtherResult
			.joinWith(otherIds, relOtherResult.col("source").equalTo(otherIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.write()
			.mode(SaveMode.Append)
			.option("compression", "gzip")
			.json(outputPath + "/original/relation");
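		// every identifier, source or target, appearing in the dumped relations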
		Dataset<String> relAll = Utils
			.readPath(spark, outputPath + "/original/relation", Relation.class)
			.flatMap(
				(FlatMapFunction<Relation, String>) r -> Arrays.asList(r.getSource(), r.getTarget()).iterator(),
				Encoders.STRING())
			.distinct();
		// save the entities in relation with at least one result; groupByKey/mapGroups
		// deduplicates entities matching more than one relation endpoint
		Dataset<Organization> organization = Utils
			.readPath(spark, inputPath + "/organization", Organization.class)
			.filter((FilterFunction<Organization>) o -> !o.getDataInfo().getDeletedbyinference());
		organization
			.joinWith(relAll, organization.col("id").equalTo(relAll.col("value")))
			.map(
				(MapFunction<Tuple2<Organization, String>, Organization>) t2 -> t2._1(),
				Encoders.bean(Organization.class))
			.groupByKey((MapFunction<Organization, String>) v -> v.getId(), Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Organization, Organization>) (k, it) -> it.next(),
				Encoders.bean(Organization.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "/original/organization");
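		// the datasources in relation with at least one dumped result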
		Dataset<Datasource> datasource = Utils
			.readPath(spark, inputPath + "/datasource", Datasource.class)
			.filter((FilterFunction<Datasource>) d -> !d.getDataInfo().getDeletedbyinference());
		datasource
			.joinWith(relAll, datasource.col("id").equalTo(relAll.col("value")))
			.map((MapFunction<Tuple2<Datasource, String>, Datasource>) t2 -> t2._1(), Encoders.bean(Datasource.class))
			.groupByKey((MapFunction<Datasource, String>) v -> v.getId(), Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Datasource, Datasource>) (k, it) -> it.next(),
				Encoders.bean(Datasource.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "/original/datasource");
		// plus all the datasources appearing in the dumped results as collectedfrom or
		// hostedby (the unknown repository is excluded); the extraction logic is the same
		// for every result type, see getCollectedfromHostedby below
		Dataset<String> cfhb_orig = Utils
			.readPath(spark, outputPath + "/original/publication", Publication.class)
			.flatMap(
				(FlatMapFunction<Publication, String>) SparkSelectSubset::getCollectedfromHostedby,
				Encoders.STRING())
			.union(
				Utils
					.readPath(spark, outputPath + "/original/dataset", eu.dnetlib.dhp.schema.oaf.Dataset.class)
					.flatMap(
						(FlatMapFunction<eu.dnetlib.dhp.schema.oaf.Dataset, String>) SparkSelectSubset::getCollectedfromHostedby,
						Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/software", Software.class)
					.flatMap(
						(FlatMapFunction<Software, String>) SparkSelectSubset::getCollectedfromHostedby,
						Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/otherresearchproduct", OtherResearchProduct.class)
					.flatMap(
						(FlatMapFunction<OtherResearchProduct, String>) SparkSelectSubset::getCollectedfromHostedby,
						Encoders.STRING()))
			.filter((FilterFunction<String>) s -> !s.equals(ModelConstants.UNKNOWN_REPOSITORY.getKey()))
			.distinct();
		datasource
			.joinWith(cfhb_orig, datasource.col("id").equalTo(cfhb_orig.col("value")))
			.map((MapFunction<Tuple2<Datasource, String>, Datasource>) t2 -> t2._1(), Encoders.bean(Datasource.class))
			.write()
			.mode(SaveMode.Append)
			.option("compression", "gzip")
			.json(outputPath + "/original/datasource");
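		// the projects in relation with at least one dumped result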
		Dataset<Project> project = Utils
			.readPath(spark, inputPath + "/project", Project.class)
			.filter((FilterFunction<Project>) d -> !d.getDataInfo().getDeletedbyinference());
		project
			.joinWith(relAll, project.col("id").equalTo(relAll.col("value")))
			.map((MapFunction<Tuple2<Project, String>, Project>) t2 -> t2._1(), Encoders.bean(Project.class))
			.groupByKey((MapFunction<Project, String>) v -> v.getId(), Encoders.STRING())
			.mapGroups((MapGroupsFunction<String, Project, Project>) (k, it) -> it.next(), Encoders.bean(Project.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "/original/project");
		// save the relations among the entities different from the result: both source
		// and target must be among the selected projects, organizations and datasources
		Dataset<String> selectedIDs = Utils
			.readPath(spark, outputPath + "/original/project", Project.class)
			.map((MapFunction<Project, String>) p -> p.getId(), Encoders.STRING())
			.union(
				Utils
					.readPath(spark, outputPath + "/original/organization", Organization.class)
					.map((MapFunction<Organization, String>) o -> o.getId(), Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/datasource", Datasource.class)
					.map((MapFunction<Datasource, String>) d -> d.getId(), Encoders.STRING()));
		Dataset<Relation> relOtherOther = relation
			.joinWith(selectedIDs, relation.col("source").equalTo(selectedIDs.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));

		relOtherOther
			.joinWith(selectedIDs, relOtherOther.col("target").equalTo(selectedIDs.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.write()
			.mode(SaveMode.Append)
			.option("compression", "gzip")
			.json(outputPath + "/original/relation");
	}

	// collects the datasource identifiers a result refers to, either as hostedby of one
	// of its instances or as collectedfrom; shared by all the result types above (the
	// instance list may be absent, hence the defensive null check)
	private static Iterator<String> getCollectedfromHostedby(Result r) {
		List<String> ret = new ArrayList<>();
		if (Optional.ofNullable(r.getInstance()).isPresent()) {
			r.getInstance().forEach(i -> {
				if (Optional.ofNullable(i.getHostedby()).isPresent()
					&& Optional.ofNullable(i.getHostedby().getKey()).isPresent())
					ret.add(i.getHostedby().getKey());
			});
		}
		if (Optional.ofNullable(r.getCollectedfrom()).isPresent()) {
			r.getCollectedfrom().forEach(cf -> {
				if (Optional.ofNullable(cf.getKey()).isPresent())
					ret.add(cf.getKey());
			});
		}
		return ret.iterator();
	}
}