dhp-graph-dump/dump/src/main/java/eu/dnetlib/dhp/oa/graph/dump/filterentities/SelectConnectedEntities.java

package eu.dnetlib.dhp.oa.graph.dump.filterentities;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.Dataset;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.core.JsonProcessingException;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.skgif.Utils;
import eu.dnetlib.dhp.schema.common.EntityType;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.*;

import scala.Tuple2;

/**
 * Spark job that, starting from the identifiers of the filtered results, selects the connected
 * portion of the graph: the relations among those results, the organizations, EC-funded projects
 * and datasources they link to, and the relations among the selected entities.
 *
 * @author miriam.baglioni
 * @Date 12/03/24
 */
public class SelectConnectedEntities implements Serializable {

    private static final Logger log = LoggerFactory.getLogger(SelectConnectedEntities.class);
    public static void main(String[] args) throws Exception {
        String jsonConfiguration = IOUtils
            .toString(
                SelectConnectedEntities.class
                    .getResourceAsStream(
                        "/eu/dnetlib/dhp/oa/graph/dump/select_connected_entities_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
        parser.parseArgument(args);

        Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);
        log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

        final String inputPath = parser.get("sourcePath");
        log.info("inputPath: {}", inputPath);

        final String filterPath = parser.get("filterPath");
        log.info("filterPath: {}", filterPath);

        final String workingDir = parser.get("workingDir");
        log.info("workingDir: {}", workingDir);

        SparkConf conf = new SparkConf();
        runWithSparkSession(
            conf,
            isSparkSessionManaged,
            spark -> selectConnectedEntities(spark, inputPath, filterPath, workingDir));
    }
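
    /**
     * Starting from the identifiers of the filtered results, materializes under workingDir the
     * relations and the organization, project and datasource entities connected to them.
     */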
    private static <R extends Result> void selectConnectedEntities(SparkSession spark, String inputPath,
        String filterPath,
        String workingDir) throws JsonProcessingException {
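        // union the identifiers of all result entity types, read from the <entity>_ids parquet
        // files produced by the filtering step under filterPath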
        Dataset<String> resultIds = spark.emptyDataset(Encoders.STRING());
        for (EntityType entity : ModelSupport.entityTypes.keySet()) {
            if (ModelSupport.isResult(entity)) {
                resultIds = resultIds
                    .union(
                        spark
                            .read()
                            .parquet(filterPath + entity.name() + "_ids")
                            .select("id")
                            .as(Encoders.STRING()));
            }
        }
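
        // load the graph entities, dropping records deleted by inference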
        Dataset<Relation> relation = Utils
            .readPath(spark, inputPath + "relation", Relation.class)
            .filter((FilterFunction<Relation>) r -> !r.getDataInfo().getDeletedbyinference());
        Dataset<Organization> organizations = Utils
            .readPath(spark, inputPath + "organization", Organization.class)
            .filter((FilterFunction<Organization>) o -> !o.getDataInfo().getDeletedbyinference());
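
        // keep only projects funded by the European Commission, as encoded in the first funding tree entry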
        Dataset<Project> projects = Utils
            .readPath(spark, inputPath + "project", Project.class)
            .filter((FilterFunction<Project>) p -> !p.getDataInfo().getDeletedbyinference())
            .filter(
                (FilterFunction<Project>) p -> p.getFundingtree() != null &&
                    !p.getFundingtree().isEmpty() &&
                    Utils
                        .getFunderName(p.getFundingtree().get(0).getValue())
                        .equalsIgnoreCase("European Commission"));
        Dataset<Datasource> datasources = Utils
            .readPath(spark, inputPath + "datasource", Datasource.class)
            .filter((FilterFunction<Datasource>) d -> !d.getDataInfo().getDeletedbyinference());

        // select relations having source in the set of identifiers selected for EOSC
        Dataset<Relation> resultSource = resultIds
            .joinWith(relation, resultIds.col("value").equalTo(relation.col("source")))
            .map((MapFunction<Tuple2<String, Relation>, Relation>) t2 -> t2._2(), Encoders.bean(Relation.class));

        // write relations having source and target in the set
        resultIds
            .joinWith(resultSource, resultIds.col("value").equalTo(resultSource.col("target")))
            .map((MapFunction<Tuple2<String, Relation>, Relation>) t2 -> t2._2(), Encoders.bean(Relation.class))
            .write()
            .option("compression", "gzip")
            .mode(SaveMode.Overwrite)
            .json(workingDir + "relation");

        // write relations between results and organizations
        resultSource
            .joinWith(organizations, resultSource.col("target").equalTo(organizations.col("id")))
            .map((MapFunction<Tuple2<Relation, Organization>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
            .write()
            .mode(SaveMode.Append)
            .option("compression", "gzip")
            .json(workingDir + "relation");
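
        // write relations between results and projects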
        resultSource
            .joinWith(projects, resultSource.col("target").equalTo(projects.col("id")))
            .map((MapFunction<Tuple2<Relation, Project>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
            .write()
            .mode(SaveMode.Append)
            .option("compression", "gzip")
            .json(workingDir + "relation");

        // write organizations linked to results in the set
        resultSource
            .joinWith(organizations, resultSource.col("target").equalTo(organizations.col("id")))
            .map(
                (MapFunction<Tuple2<Relation, Organization>, Organization>) t2 -> t2._2(),
                Encoders.bean(Organization.class))
            .write()
            .mode(SaveMode.Overwrite)
            .option("compression", "gzip")
            .json(workingDir + "organization");

        // write projects linked to results in the set
        resultSource
            .joinWith(projects, resultSource.col("target").equalTo(projects.col("id")))
            .map((MapFunction<Tuple2<Relation, Project>, Project>) t2 -> t2._2(), Encoders.bean(Project.class))
            .write()
            .mode(SaveMode.Append)
            .option("compression", "gzip")
            .json(workingDir + "project");

        // read the written results and collect the distinct datasource identifiers they reference
        // through instance.hostedby and instance.collectedfrom
        Dataset<String> datasourceReferencedIds = spark.emptyDataset(Encoders.STRING());
        for (EntityType entity : ModelSupport.entityTypes.keySet()) {
            if (ModelSupport.isResult(entity)) {
                Class<R> resultClazz = ModelSupport.entityTypes.get(entity);
                datasourceReferencedIds = datasourceReferencedIds
                    .union(
                        Utils
                            .readPath(spark, workingDir + entity.name(), resultClazz)
                            .flatMap(
                                (FlatMapFunction<R, String>) r -> r
                                    .getInstance()
                                    .stream()
                                    .flatMap(i -> Stream.of(i.getHostedby().getKey(), i.getCollectedfrom().getKey()))
                                    .collect(Collectors.toList())
                                    .iterator(),
                                Encoders.STRING()));
            }
        }
        datasourceReferencedIds = datasourceReferencedIds.distinct();

        // join with the datasources and write the datasources appearing in the join
        datasourceReferencedIds
            .joinWith(datasources, datasourceReferencedIds.col("value").equalTo(datasources.col("id")))
            .map((MapFunction<Tuple2<String, Datasource>, Datasource>) t2 -> t2._2(), Encoders.bean(Datasource.class))
            .write()
            .mode(SaveMode.Overwrite)
            .option("compression", "gzip")
            .json(workingDir + "datasource");

        // selecting relations between organizations and projects in the selected set
        Dataset<Organization> organizationSbs = Utils.readPath(spark, workingDir + "organization", Organization.class);
        Dataset<Project> projectSbs = Utils.readPath(spark, workingDir + "project", Project.class);
        Dataset<Relation> orgSourceRels = organizationSbs
            .joinWith(relation, organizationSbs.col("id").equalTo(relation.col("source")))
            .map((MapFunction<Tuple2<Organization, Relation>, Relation>) t2 -> t2._2(), Encoders.bean(Relation.class));
        orgSourceRels
            .joinWith(projectSbs, orgSourceRels.col("target").equalTo(projectSbs.col("id")))
            .map((MapFunction<Tuple2<Relation, Project>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
            .write()
            .mode(SaveMode.Append)
            .option("compression", "gzip")
            .json(workingDir + "relation");

        // selecting relations between datasources and organizations in the selected set
        Dataset<Datasource> datasourceSbs = Utils.readPath(spark, workingDir + "datasource", Datasource.class);
        Dataset<Relation> dsSourceRels = datasourceSbs
            .joinWith(relation, datasourceSbs.col("id").equalTo(relation.col("source")))
            .map((MapFunction<Tuple2<Datasource, Relation>, Relation>) t2 -> t2._2(), Encoders.bean(Relation.class));
        dsSourceRels
            .joinWith(organizationSbs, dsSourceRels.col("target").equalTo(organizationSbs.col("id")))
            .map((MapFunction<Tuple2<Relation, Organization>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
            .write()
            .mode(SaveMode.Append)
            .option("compression", "gzip")
            .json(workingDir + "relation");
    }
}