package eu.dnetlib.jobs;

import eu.dnetlib.Deduper;
import eu.dnetlib.pace.config.DedupConfig;
import eu.dnetlib.pace.util.MapDocumentUtil;
import eu.dnetlib.pace.utils.Utility;
import eu.dnetlib.support.ArgumentApplicationParser;
import eu.dnetlib.support.ConnectedComponent;
import eu.dnetlib.support.Relation;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;
import scala.Tuple3;

import java.io.IOException;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

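/**
 * Spark job that materializes the groups of deduplicated records: for each dedup (merge)
 * relation it collects the JSON payloads of the merged entities together with the similarity
 * relations connecting them, and stores the resulting connected components under
 * workingPath + "/groupentities".
 */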
public class SparkCreateGroupEntity extends AbstractSparkJob {

    private static final Logger log = LoggerFactory.getLogger(SparkCreateGroupEntity.class);

    public SparkCreateGroupEntity(ArgumentApplicationParser parser, SparkSession spark) {
        super(parser, spark);
    }

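    // entry point: parse the job parameters and run the job on a Spark session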
    public static void main(String[] args) throws Exception {

        ArgumentApplicationParser parser = new ArgumentApplicationParser(
                Utility.readResource("/jobs/parameters/createGroupEntity_parameters.json", SparkCreateGroupEntity.class)
        );

        parser.parseArgument(args);

        SparkConf conf = new SparkConf();

        new SparkCreateGroupEntity(
                parser,
                getSparkSession(conf)
        ).run();
    }

    @Override
    public void run() throws IOException {

        // read oozie parameters
        final String entitiesPath = parser.get("entitiesPath");
        final String workingPath = parser.get("workingPath");
        final String dedupConfPath = parser.get("dedupConfPath");
        final int numPartitions = Optional
                .ofNullable(parser.get("numPartitions"))
                .map(Integer::valueOf)
                .orElse(NUM_PARTITIONS);

        log.info("entitiesPath: '{}'", entitiesPath);
        log.info("workingPath: '{}'", workingPath);
        log.info("dedupConfPath: '{}'", dedupConfPath);
        log.info("numPartitions: '{}'", numPartitions);

        DedupConfig dedupConf = DedupConfig.load(readFileFromHDFS(dedupConfPath));

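        // load the raw entities and key each JSON record by the identifier extracted
        // with the JSON path configured in the dedup configuration (wf.idPath)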
        // <raw_id, json>
        JavaPairRDD<String, String> entities = spark
                .read()
                .textFile(entitiesPath)
                .map((MapFunction<String, Tuple2<String, String>>) it ->
                                new Tuple2<>(MapDocumentUtil.getJPathString(dedupConf.getWf().getIdPath(), it), it),
                        Encoders.tuple(Encoders.STRING(), Encoders.STRING()))
                .toJavaRDD()
                .mapToPair(t -> t);

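        // similarity relations read from the working directory, keyed by their source raw id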
        // <source_raw_id, relation(source, target)>
        JavaPairRDD<String, Relation> simRels = spark
                .read()
                .load(workingPath + "/simrels")
                .as(Encoders.bean(Relation.class))
                .toJavaRDD()
                .mapToPair(r -> new Tuple2<>(r.getSource(), r));

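        // merge relations (dedup_id -> raw_id) read from the working directory, keyed by their target raw id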
        // <raw_id, relation(dedup_id, raw_id)>
        JavaPairRDD<String, Relation> mergeRels = spark
                .read()
                .load(workingPath + "/mergerels")
                .as(Encoders.bean(Relation.class))
                .toJavaRDD()
                .mapToPair(r -> new Tuple2<>(r.getTarget(), r));

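        // join the similarity relations with the merge relations on the raw id, then re-key each
        // similarity relation by the dedup id of the group it belongs to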
        // <dedup_id, simrel>
        JavaPairRDD<String, Iterable<Relation>> simRelsWithDedupId = simRels
                .join(mergeRels)
                .mapToPair(x -> new Tuple2<>(x._2()._2().getSource(), x._2()._1()))
                .groupByKey();

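        // group the entity JSONs by dedup id and attach the similarity relations of each group,
        // building one ConnectedComponent per deduplicated entity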
        JavaRDD<ConnectedComponent> groupEntity = mergeRels.join(entities)
                .mapToPair(t -> new Tuple2<>(t._2()._1().getSource(), t._2()._2()))
                .groupByKey()
                .join(simRelsWithDedupId)
                .map(x -> new ConnectedComponent(
                        x._1(),
                        x._2()._1(),
                        x._2()._2())
                );

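        // store the grouped entities as gzip-compressed text files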
        groupEntity.saveAsTextFile(workingPath + "/groupentities", GzipCodec.class);
    }
}