package eu.dnetlib.dhp.oa.graph.dump.subset;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.*;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.*;
import org.apache.spark.sql.*;
import org.apache.spark.sql.Dataset;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.*;
import scala.Tuple2;

/**
 * Selects the subset of entities and relations to dump: starting from the results already saved under
 * outputPath/original, it adds the relations among them, the entities (organization, project, datasource)
 * related to them, and the relations among those entities.
 *
 * @author miriam.baglioni
 * @Date 11/11/22
 */
public class SparkSelectSubset implements Serializable {

	private static final Logger log = LoggerFactory.getLogger(SparkSelectSubset.class);

	public static void main(String[] args) throws Exception {
		String jsonConfiguration = IOUtils
			.toString(
				SparkSelectSubset.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/oa/graph/dump/input_relationdump_parameters.json"));

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);
		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String inputPath = parser.get("sourcePath");
		log.info("inputPath: {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		// semicolon-separated list of relation semantics to be excluded from the dump
		Optional<String> rs = Optional.ofNullable(parser.get("removeSet"));
		final Set<String> removeSet = new HashSet<>();
		if (rs.isPresent()) {
			Collections.addAll(removeSet, rs.get().split(";"));
		}

		SparkConf conf = new SparkConf();

		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				selectSubset(spark, inputPath, outputPath, removeSet);
			});
	}

	private static void selectSubset(SparkSession spark, String inputPath, String outputPath,
		Set<String> removeSet) {
		Dataset<Relation> relation = Utils
			.readPath(spark, inputPath + "/relation", Relation.class)
			.filter(
				(FilterFunction<Relation>) r -> !r.getDataInfo().getDeletedbyinference()
					&& !removeSet.contains(r.getRelClass()));

		// identifiers of the results already dumped
		Dataset<String> resultIds = Utils
			.readPath(spark, outputPath + "/original/publication", Publication.class)
			.map((MapFunction<Publication, String>) p -> p.getId(), Encoders.STRING())
			.union(
				Utils
					.readPath(spark, outputPath + "/original/dataset", eu.dnetlib.dhp.schema.oaf.Dataset.class)
					.map(
						(MapFunction<eu.dnetlib.dhp.schema.oaf.Dataset, String>) d -> d.getId(),
						Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/software", Software.class)
					.map((MapFunction<Software, String>) s -> s.getId(), Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/otherresearchproduct", OtherResearchProduct.class)
					.map((MapFunction<OtherResearchProduct, String>) o -> o.getId(), Encoders.STRING()));

		// select result -> result relations: both source and target must be among the selected results
		Dataset<Relation> relResultResult = relation
			.joinWith(resultIds, relation.col("source").equalTo(resultIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));
		relResultResult
			.joinWith(resultIds, relResultResult.col("target").equalTo(resultIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.write()
			.option("compression", "gzip")
			.mode(SaveMode.Overwrite)
			.json(outputPath + "/original/relation");
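		// The relation categories selected below (result <-> other entity, other <-> other) are written with
		// SaveMode.Append, so they accumulate in the same /original/relation output created above with Overwrite.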
		// save the relations among other entities and the results
		Dataset<String> otherIds = Utils
			.readPath(spark, inputPath + "/organization", Organization.class)
			.filter((FilterFunction<Organization>) e -> !e.getDataInfo().getDeletedbyinference())
			.map((MapFunction<Organization, String>) o -> o.getId(), Encoders.STRING())
			.union(
				Utils
					.readPath(spark, inputPath + "/project", Project.class)
					.filter((FilterFunction<Project>) e -> !e.getDataInfo().getDeletedbyinference())
					.map((MapFunction<Project, String>) p -> p.getId(), Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, inputPath + "/datasource", Datasource.class)
					.filter((FilterFunction<Datasource>) e -> !e.getDataInfo().getDeletedbyinference())
					.map((MapFunction<Datasource, String>) d -> d.getId(), Encoders.STRING()));

		Dataset<Relation> relResultOther = relation
			.joinWith(resultIds, relation.col("source").equalTo(resultIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));
		relResultOther
			.joinWith(otherIds, relResultOther.col("target").equalTo(otherIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.write()
			.mode(SaveMode.Append)
			.option("compression", "gzip")
			.json(outputPath + "/original/relation");

		Dataset<Relation> relOtherResult = relation
			.joinWith(resultIds, relation.col("target").equalTo(resultIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));
		relOtherResult
			.joinWith(otherIds, relOtherResult.col("source").equalTo(otherIds.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.write()
			.mode(SaveMode.Append)
			.option("compression", "gzip")
			.json(outputPath + "/original/relation");

		// all the identifiers appearing as source or target of a selected relation
		Dataset<String> relAll = Utils
			.readPath(spark, outputPath + "/original/relation", Relation.class)
			.flatMap(
				(FlatMapFunction<Relation, String>) r -> Arrays.asList(r.getSource(), r.getTarget()).iterator(),
				Encoders.STRING())
			.distinct();

		// Save the entities in relations with at least one result
		Dataset<Organization> organization = Utils
			.readPath(spark, inputPath + "/organization", Organization.class)
			.filter((FilterFunction<Organization>) o -> !o.getDataInfo().getDeletedbyinference());
		organization
			.joinWith(relAll, organization.col("id").equalTo(relAll.col("value")))
			.map(
				(MapFunction<Tuple2<Organization, String>, Organization>) t2 -> t2._1(),
				Encoders.bean(Organization.class))
			.groupByKey((MapFunction<Organization, String>) v -> v.getId(), Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Organization, Organization>) (k, it) -> it.next(),
				Encoders.bean(Organization.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "/original/organization");

		Dataset<Datasource> datasource = Utils
			.readPath(spark, inputPath + "/datasource", Datasource.class)
			.filter((FilterFunction<Datasource>) d -> !d.getDataInfo().getDeletedbyinference());
		datasource
			.joinWith(relAll, datasource.col("id").equalTo(relAll.col("value")))
			.map(
				(MapFunction<Tuple2<Datasource, String>, Datasource>) t2 -> t2._1(),
				Encoders.bean(Datasource.class))
			.groupByKey((MapFunction<Datasource, String>) v -> v.getId(), Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Datasource, Datasource>) (k, it) -> it.next(),
				Encoders.bean(Datasource.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "/original/datasource");
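		// Datasources can enter the subset in two ways: through a relation with a selected result (handled above)
		// or because a dumped result refers to them as collectedfrom/hostedby (handled below).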
		// plus we need to dump all the datasources appearing in collectedfrom and hostedby
		Dataset<String> cfhb_orig = Utils
			.readPath(spark, outputPath + "/original/publication", Publication.class)
			.flatMap(
				(FlatMapFunction<Publication, String>) SparkSelectSubset::collectedFromHostedBy,
				Encoders.STRING())
			.union(
				Utils
					.readPath(spark, outputPath + "/original/dataset", eu.dnetlib.dhp.schema.oaf.Dataset.class)
					.flatMap(
						(FlatMapFunction<eu.dnetlib.dhp.schema.oaf.Dataset, String>) SparkSelectSubset::collectedFromHostedBy,
						Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/software", Software.class)
					.flatMap(
						(FlatMapFunction<Software, String>) SparkSelectSubset::collectedFromHostedBy,
						Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/otherresearchproduct", OtherResearchProduct.class)
					.flatMap(
						(FlatMapFunction<OtherResearchProduct, String>) SparkSelectSubset::collectedFromHostedBy,
						Encoders.STRING()))
			.filter((FilterFunction<String>) s -> !s.equals(ModelConstants.UNKNOWN_REPOSITORY.getKey()))
			.distinct();

		datasource
			.joinWith(cfhb_orig, datasource.col("id").equalTo(cfhb_orig.col("value")))
			.map(
				(MapFunction<Tuple2<Datasource, String>, Datasource>) t2 -> t2._1(),
				Encoders.bean(Datasource.class))
			.write()
			.mode(SaveMode.Append)
			.option("compression", "gzip")
			.json(outputPath + "/original/datasource");

		Dataset<Project> project = Utils
			.readPath(spark, inputPath + "/project", Project.class)
			.filter((FilterFunction<Project>) d -> !d.getDataInfo().getDeletedbyinference());
		project
			.joinWith(relAll, project.col("id").equalTo(relAll.col("value")))
			.map((MapFunction<Tuple2<Project, String>, Project>) t2 -> t2._1(), Encoders.bean(Project.class))
			.groupByKey((MapFunction<Project, String>) v -> v.getId(), Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Project, Project>) (k, it) -> it.next(),
				Encoders.bean(Project.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "/original/project");

		// save the relations among entities different from the result
		Dataset<String> selectedIDs = Utils
			.readPath(spark, outputPath + "/original/project", Project.class)
			.map((MapFunction<Project, String>) p -> p.getId(), Encoders.STRING())
			.union(
				Utils
					.readPath(spark, outputPath + "/original/organization", Organization.class)
					.map((MapFunction<Organization, String>) o -> o.getId(), Encoders.STRING()))
			.union(
				Utils
					.readPath(spark, outputPath + "/original/datasource", Datasource.class)
					.map((MapFunction<Datasource, String>) d -> d.getId(), Encoders.STRING()));
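		// as for the result -> result case, both source and target must be among the selected entities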
		Dataset<Relation> relOtherOther = relation
			.joinWith(selectedIDs, relation.col("source").equalTo(selectedIDs.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));
		relOtherOther
			.joinWith(selectedIDs, relOtherOther.col("target").equalTo(selectedIDs.col("value")))
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.write()
			.mode(SaveMode.Append)
			.option("compression", "gzip")
			.json(outputPath + "/original/relation");
	}

	/**
	 * Collects the keys of the datasources a result refers to: the hostedby key of each instance plus the
	 * collectedfrom keys. Shared by the four result types (publication, dataset, software, otherresearchproduct).
	 */
	private static Iterator<String> collectedFromHostedBy(Result p) {
		List<String> ret = new ArrayList<>();
		if (Optional.ofNullable(p.getInstance()).isPresent()) {
			p.getInstance().forEach(i -> {
				if (Optional.ofNullable(i.getHostedby()).isPresent()
					&& Optional.ofNullable(i.getHostedby().getKey()).isPresent())
					ret.add(i.getHostedby().getKey());
			});
		}
		if (Optional.ofNullable(p.getCollectedfrom()).isPresent()) {
			p.getCollectedfrom().forEach(cf -> {
				if (Optional.ofNullable(cf.getKey()).isPresent())
					ret.add(cf.getKey());
			});
		}
		return ret.iterator();
	}
}