package eu.dnetlib.dhp.oa.graph.dump.complete;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.schema.oaf.*;

/**
 * Selects the valid relations among those present in the graph. A relation is valid if it is not deleted by
 * inference and if both its source and target nodes are present in the graph and are neither deleted by inference
 * nor invisible. To verify this, a dataset with the ids of all the entities in the graph is built, and only the
 * relations for which a join with this dataset exists on both the source and the target are kept.
 */
public class SparkSelectValidRelationsJob implements Serializable {

	private static final Logger log = LoggerFactory.getLogger(SparkSelectValidRelationsJob.class);

	public static void main(String[] args) throws Exception {
		String jsonConfiguration = IOUtils
			.toString(
				SparkSelectValidRelationsJob.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/oa/graph/dump/input_relationdump_parameters.json"));

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);
		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String inputPath = parser.get("sourcePath");
		log.info("inputPath: {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		SparkConf conf = new SparkConf();

		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				Utils.removeOutputDir(spark, outputPath);
				selectValidRelation2(spark, inputPath, outputPath);
			});
	}

	private static void selectValidRelation2(SparkSession spark, String inputPath, String outputPath) {
		// minimal schema: only the entity id and the dataInfo flags needed to decide validity
		final StructType structureSchema = new StructType()
			.fromDDL("`id` STRING, `dataInfo` STRUCT<`deletedbyinference`:BOOLEAN,`invisible`:BOOLEAN>");

		Dataset<Row> df = spark.createDataFrame(new ArrayList<Row>(), structureSchema);
		List<String> entities = Arrays
			.asList(
				"publication", "dataset", "otherresearchproduct", "software", "organization", "project",
				"datasource");

		// union the ids of every entity type, keeping only records that are neither deleted by inference nor invisible
		for (String e : entities)
			df = df
				.union(
					spark
						.read()
						.schema(structureSchema)
						.json(inputPath + "/" + e)
						.filter("dataInfo.deletedbyinference != true and dataInfo.invisible != true"));

		// load the relations, discarding those deleted by inference
		Dataset<Row> relations = spark
			.read()
			.schema(Encoders.bean(Relation.class).schema())
			.json(inputPath + "/relation")
			.filter("dataInfo.deletedbyinference == false");

		// keep only relations whose source and target both match a valid entity id (left-semi joins)
		relations
			.join(df, relations.col("source").equalTo(df.col("id")), "leftsemi")
			.join(df, relations.col("target").equalTo(df.col("id")), "leftsemi")
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath);
	}
}