package eu.dnetlib.dhp.oa.graph.merge;

import static eu.dnetlib.dhp.common.GraphSupport.saveGraphTable;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;

import java.util.Objects;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.GraphFormat;
import eu.dnetlib.dhp.common.GraphSupport;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.*;
import scala.Tuple2;

/**
 * Combines the content from two aggregator graph tables of the same type. Entities (or relationships) with the same id
 * are merged, preferring the version from the BETA aggregator over the one from PROD by default (configurable via the
 * 'priority' parameter). The identity of a record is defined by eu.dnetlib.dhp.schema.common.ModelSupport#idFn()
 */
public class MergeGraphSparkJob {

	private static final Logger log = LoggerFactory.getLogger(MergeGraphSparkJob.class);

	private static final String PRIORITY_DEFAULT = "BETA"; // BETA | PROD

	public static void main(String[] args) throws Exception {

		String jsonConfiguration = IOUtils
			.toString(
				MergeGraphSparkJob.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/oa/graph/merge_graphs_parameters.json"));
		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args);

		String priority = Optional
			.ofNullable(parser.get("priority"))
			.orElse(PRIORITY_DEFAULT);
		log.info("priority: {}", priority);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);
		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		String betaInputGraph = parser.get("betaInputGraph");
		log.info("betaInputGraph: {}", betaInputGraph);

		String prodInputGraph = parser.get("prodInputGraph");
		log.info("prodInputGraph: {}", prodInputGraph);

		String outputGraph = parser.get("outputGraph");
		log.info("outputGraph: {}", outputGraph);

		GraphFormat inputGraphFormat = Optional
			.ofNullable(parser.get("inputGraphFormat"))
			.map(GraphFormat::valueOf)
			.orElse(GraphFormat.DEFAULT);
		log.info("inputGraphFormat: {}", inputGraphFormat);

		GraphFormat outputGraphFormat = Optional
			.ofNullable(parser.get("outputGraphFormat"))
			.map(GraphFormat::valueOf)
			.orElse(GraphFormat.DEFAULT);
		log.info("outputGraphFormat: {}", outputGraphFormat);

		String graphTableClassName = parser.get("graphTableClassName");
		log.info("graphTableClassName: {}", graphTableClassName);

		Class<? extends Oaf> clazz = (Class<? extends Oaf>) Class.forName(graphTableClassName);

		String hiveMetastoreUris = parser.get("hiveMetastoreUris");
		log.info("hiveMetastoreUris: {}", hiveMetastoreUris);

		SparkConf conf = new SparkConf();
		conf.set("hive.metastore.uris", hiveMetastoreUris);
		conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
		conf.registerKryoClasses(ModelSupport.getOafModelClasses());

		runWithSparkHiveSession(
			conf,
			isSparkSessionManaged,
			spark -> mergeGraphTable(
				spark, priority, betaInputGraph, clazz, prodInputGraph, clazz, outputGraph, inputGraphFormat,
				outputGraphFormat));
	}

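	/**
	 * Reads the BETA and PROD versions of the same graph table, joins them (full outer) on the record id and keeps,
	 * for each id, the version indicated by the given priority. Records available on one side only are always kept,
	 * regardless of the priority.
	 */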
	private static <P extends Oaf, B extends Oaf> void mergeGraphTable(
		SparkSession spark,
		String priority,
		String betaInputGraph,
		Class<B> b_clazz,
		String prodInputGraph,
		Class<P> p_clazz,
		String outputGraph,
		GraphFormat inputGraphFormat,
		GraphFormat outputGraphFormat) {

		Dataset<Tuple2<String, B>> beta = readGraph(spark, betaInputGraph, b_clazz, inputGraphFormat);
		Dataset<Tuple2<String, P>> prod = readGraph(spark, prodInputGraph, p_clazz, inputGraphFormat);

		Dataset<P> merged = prod
			.joinWith(beta, prod.col("_1").equalTo(beta.col("_1")), "full_outer")
			.map((MapFunction<Tuple2<Tuple2<String, P>, Tuple2<String, B>>, P>) value -> {
				Optional<P> p = Optional.ofNullable(value._1()).map(Tuple2::_2);
				Optional<B> b = Optional.ofNullable(value._2()).map(Tuple2::_2);
				switch (priority) {
					default:
					case "BETA":
						return mergeWithPriorityToBETA(p, b);
					case "PROD":
						return mergeWithPriorityToPROD(p, b);
				}
			}, Encoders.bean(p_clazz))
			.filter((FilterFunction<P>) Objects::nonNull);

		saveGraphTable(merged, p_clazz, outputGraph, outputGraphFormat);
	}

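	/**
	 * Returns the PROD record when available, falling back to the BETA one; null when neither is present.
	 */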
	private static <P extends Oaf, B extends Oaf> P mergeWithPriorityToPROD(Optional<P> p, Optional<B> b) {
		if (b.isPresent() && !p.isPresent()) {
			return (P) b.get();
		}
		if (p.isPresent()) {
			return p.get();
		}
		return null;
	}

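	/**
	 * Returns the BETA record when available, falling back to the PROD one; null when neither is present.
	 */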
	private static <P extends Oaf, B extends Oaf> P mergeWithPriorityToBETA(Optional<P> p, Optional<B> b) {
		if (p.isPresent() && !b.isPresent()) {
			return p.get();
		}
		if (b.isPresent()) {
			return (P) b.get();
		}
		return null;
	}

	/**
	 * Reads a graph table in the given format and keys each record by the identifier produced by
	 * eu.dnetlib.dhp.schema.common.ModelSupport#idFn().
	 */
	private static <T extends Oaf> Dataset<Tuple2<String, T>> readGraph(
		SparkSession spark, String inputGraph, Class<T> clazz, GraphFormat inputGraphFormat) {

		log.info("Reading Graph table from: {}", inputGraph);

		return GraphSupport
			.readGraph(spark, inputGraph, clazz, inputGraphFormat)
			.map((MapFunction<T, Tuple2<String, T>>) t -> {
				final String id = ModelSupport.idFn().apply(t);
				return new Tuple2<>(id, t);
			}, Encoders.tuple(Encoders.STRING(), Encoders.kryo(clazz)));
	}
}