package eu.dnetlib.dhp.oa.graph.dump.eosc;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.eosc.model.Organization;
import eu.dnetlib.dhp.eosc.model.Project;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.Relation;
import scala.Tuple2;
/**
 * Dumps the "isParticipant" relations between the organizations and the projects selected
 * for the EOSC dump. The relations are read from the input graph, filtered to drop those
 * deleted by inference, and joined against the already dumped organizations (on the source)
 * and projects (on the target), so that only relations with both endpoints in the dump are
 * written out, as gzip-compressed JSON, under outputPath + "organizationProject".
 *
 * @author miriam.baglioni
 * @Date 12/01/23
 */
public class SparkDumpOrganizationProject implements Serializable {

	private static final Logger log = LoggerFactory.getLogger(SparkDumpOrganizationProject.class);

	public static void main(String[] args) throws Exception {
		// read the job parameter definitions bundled with the module
		String jsonConfiguration = IOUtils
			.toString(
				SparkDumpOrganizationProject.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/oa/graph/dump/input_relationdump_parameters.json"));

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args);

		// defaults to true, i.e. the SparkSession is created and closed by the job itself
		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);
		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String inputPath = parser.get("sourcePath");
		log.info("inputPath: {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		SparkConf conf = new SparkConf();

		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				// remove only the relation dump this job writes: outputPath also holds the
				// organization and project dumps that dumpRelation reads, so removing the
				// whole outputPath would delete the job's own inputs
				Utils.removeOutputDir(spark, outputPath + "organizationProject");
				dumpRelation(spark, inputPath, outputPath);
			});
	}

	private static void dumpRelation(SparkSession spark, String inputPath, String outputPath) {
		// organizations and projects already dumped for the EOSC; note that outputPath is
		// expected to end with a path separator, since the entity names are appended directly
		Dataset<Organization> organization = Utils.readPath(spark, outputPath + "organization", Organization.class);
		Dataset<Project> project = Utils.readPath(spark, outputPath + "project", Project.class);

		// participation relations from the input graph, skipping those deleted by inference
		Dataset<Relation> relation = Utils
			.readPath(spark, inputPath + "/relation", Relation.class)
			.filter(
				(FilterFunction<Relation>) r -> !r.getDataInfo().getDeletedbyinference()
					&& r.getRelClass().equalsIgnoreCase(ModelConstants.IS_PARTICIPANT));

		// keep the relations whose source is one of the dumped organizations
		Dataset<Relation> eoscOrgs = relation
			.joinWith(organization, relation.col("source").equalTo(organization.col("id")))
			.map((MapFunction<Tuple2<Relation, Organization>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));

		// keep the relations whose target is one of the dumped projects, map them to the
		// dump model, and write them out as gzip-compressed JSON
		eoscOrgs
			.joinWith(project, eoscOrgs.col("target").equalTo(project.col("id")))
			.map(
				(MapFunction<Tuple2<Relation, Project>, eu.dnetlib.dhp.eosc.model.Relation>) t2 -> eu.dnetlib.dhp.eosc.model.Relation
					.newInstance(t2._1().getSource(), t2._1().getTarget()),
				Encoders.bean(eu.dnetlib.dhp.eosc.model.Relation.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "organizationProject");
	}
}
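// A minimal, hypothetical invocation sketch (workingDir and its layout are assumptions;
// the parameter names match the ones read through the parser above, and the trailing
// slash on outputPath matters because the job appends "organization", "project" and
// "organizationProject" to it directly):
//
//   SparkDumpOrganizationProject.main(new String[] {
//       "-isSparkSessionManaged", Boolean.TRUE.toString(),
//       "-sourcePath", workingDir + "/graph",
//       "-outputPath", workingDir + "/dump/"
//   });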