Changed the way results linked to projects are found: we now verify that the project is actually present in the graph before selecting the result (a condensed sketch of the new selection follows the commit metadata below).

Miriam Baglioni 2021-05-20 10:29:13 +02:00
parent 25c5dd744f
commit c6cadacd24
1 changed file with 37 additions and 21 deletions
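In outline, the new version no longer takes a separate relation path: it takes the graph path, reads relations (filtered to isProducedBy) and projects from it, registers result, relation, and project as temporary views, and keeps only results whose relation target is a project that exists in the graph. Below is a condensed, non-authoritative sketch of that selection; the class and method names are made up for illustration, and it assumes the three temp views have already been registered on the session exactly as the diff does.

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

// Hypothetical helper, for illustration only: mirrors the join introduced in this commit.
public class LinkedToProjectSketch {

    // Assumes temp views "result", "relation" (already filtered to isProducedBy relations)
    // and "project" have been registered on this SparkSession, as the new code does.
    public static Dataset<Row> selectLinkedResults(SparkSession spark) {
        return spark
            .sql(
                "SELECT res.* " +
                    "FROM relation rel " +
                    "JOIN result res ON rel.source = res.id " +
                    "JOIN project p ON rel.target = p.id");
    }
}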


@@ -10,10 +10,7 @@ import org.apache.commons.io.IOUtils;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.api.java.function.MapGroupsFunction;
-import org.apache.spark.sql.Dataset;
-import org.apache.spark.sql.Encoders;
-import org.apache.spark.sql.SaveMode;
-import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -21,6 +18,7 @@ import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.oa.graph.dump.Constants;
 import eu.dnetlib.dhp.oa.graph.dump.Utils;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
+import eu.dnetlib.dhp.schema.oaf.Project;
 import eu.dnetlib.dhp.schema.oaf.Relation;
 import eu.dnetlib.dhp.schema.oaf.Result;
 import scala.Tuple2;
@@ -59,8 +57,8 @@ public class SparkResultLinkedToProject implements Serializable {
         final String resultClassName = parser.get("resultTableName");
         log.info("resultTableName: {}", resultClassName);
 
-        final String relationPath = parser.get("relationPath");
-        log.info("relationPath: {}", relationPath);
+        final String graphPath = parser.get("graphPath");
+        log.info("graphPath: {}", graphPath);
 
         Class<? extends Result> inputClazz = (Class<? extends Result>) Class.forName(resultClassName);
 
         SparkConf conf = new SparkConf();
@@ -70,34 +68,52 @@ public class SparkResultLinkedToProject implements Serializable {
             isSparkSessionManaged,
             spark -> {
                 Utils.removeOutputDir(spark, outputPath);
-                writeResultsLinkedToProjects(spark, inputClazz, inputPath, outputPath, relationPath);
+                writeResultsLinkedToProjects(spark, inputClazz, inputPath, outputPath, graphPath);
             });
     }
 
     private static <R extends Result> void writeResultsLinkedToProjects(SparkSession spark, Class<R> inputClazz,
-        String inputPath, String outputPath, String relationPath) {
+        String inputPath, String outputPath, String graphPath) {
 
         Dataset<R> results = Utils
             .readPath(spark, inputPath, inputClazz)
             .filter("dataInfo.deletedbyinference = false and datainfo.invisible = false");
         Dataset<Relation> relations = Utils
-            .readPath(spark, relationPath, Relation.class)
+            .readPath(spark, graphPath + "/relation", Relation.class)
             .filter(
                 "dataInfo.deletedbyinference = false and lower(relClass) = '"
                     + ModelConstants.IS_PRODUCED_BY.toLowerCase() + "'");
+        Dataset<Project> project = Utils.readPath(spark, graphPath + "/project", Project.class);
 
-        relations
-            .joinWith(
-                results, relations.col("source").equalTo(results.col("id")),
-                "inner")
-            .groupByKey(
-                (MapFunction<Tuple2<Relation, R>, String>) value -> value
-                    ._2()
-                    .getId(),
-                Encoders.STRING())
-            .mapGroups((MapGroupsFunction<String, Tuple2<Relation, R>, R>) (k, it) -> {
-                return it.next()._2();
-            }, Encoders.bean(inputClazz))
+        results.createOrReplaceTempView("result");
+        relations.createOrReplaceTempView("relation");
+        project.createOrReplaceTempView("project");
+
+        Dataset<R> tmp = spark
+            .sql(
+                "Select res.* " +
+                    "from relation rel " +
+                    "join result res " +
+                    "on rel.source = res.id " +
+                    "join project p " +
+                    "on rel.target = p.id " +
+                    "")
+            .as(Encoders.bean(inputClazz));
+
+//        relations
+//            .joinWith(
+//                results, relations.col("source").equalTo(results.col("id")),
+//                "inner")
+//            .groupByKey(
+//                (MapFunction<Tuple2<Relation, R>, String>) value -> value
+//                    ._2()
+//                    .getId(),
+//                Encoders.STRING())
+//            .mapGroups((MapGroupsFunction<String, Tuple2<Relation, R>, R>) (k, it) -> {
+//                return it.next()._2();
+//            }, Encoders.bean(inputClazz))
+        tmp
             .write()
             .mode(SaveMode.Overwrite)
             .option("compression", "gzip")