Implemented deduplication on Spark
This commit is contained in:
parent 6a7bee5e43
commit cc63706347
@@ -89,6 +89,8 @@ public class TransformationJobTest {
                "-rh", "",
                "-ro", "",
                "-rr", ""});
    }

    @Test
@@ -96,7 +98,7 @@ public class TransformationJobTest {
        final String path = this.getClass().getResource("/eu/dnetlib/dhp/transform/mdstorenative").getFile();
        System.out.println("path = " + path);

-       Path tempDirWithPrefix = Files.createTempDirectory("mdsotre_output");
+       Path tempDirWithPrefix = Files.createTempDirectory("mdstore_output");

        System.out.println(tempDirWithPrefix.toFile().getAbsolutePath());
@@ -0,0 +1,61 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>dhp-workflows</artifactId>
        <groupId>eu.dnetlib.dhp</groupId>
        <version>1.0.5-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>dhp-dedup</artifactId>

    <dependencies>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
        </dependency>

        <dependency>
            <groupId>eu.dnetlib.dhp</groupId>
            <artifactId>dhp-common</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>eu.dnetlib.dhp</groupId>
            <artifactId>dhp-schemas</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>com.arakelian</groupId>
            <artifactId>java-jq</artifactId>
        </dependency>

        <dependency>
            <groupId>eu.dnetlib</groupId>
            <artifactId>dnet-pace-core</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-graphx_2.11</artifactId>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
        </dependency>

    </dependencies>

</project>
@@ -0,0 +1,94 @@
package eu.dnetlib.dedup;

import com.google.common.collect.Sets;
import eu.dnetlib.pace.clustering.BlacklistAwareClusteringCombiner;
import eu.dnetlib.pace.config.DedupConfig;
import eu.dnetlib.pace.model.MapDocument;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.LongAccumulator;

import java.io.IOException;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class DedupUtility {

    public static Map<String, LongAccumulator> constructAccumulator(final DedupConfig dedupConf, final SparkContext context) {

        Map<String, LongAccumulator> accumulators = new HashMap<>();

        String acc1 = String.format("%s::%s", dedupConf.getWf().getEntityType(), "records per hash key = 1");
        accumulators.put(acc1, context.longAccumulator(acc1));
        String acc2 = String.format("%s::%s", dedupConf.getWf().getEntityType(), "missing " + dedupConf.getWf().getOrderField());
        accumulators.put(acc2, context.longAccumulator(acc2));
        String acc3 = String.format("%s::%s", dedupConf.getWf().getEntityType(), String.format("Skipped records for count(%s) >= %s", dedupConf.getWf().getOrderField(), dedupConf.getWf().getGroupMaxSize()));
        accumulators.put(acc3, context.longAccumulator(acc3));
        String acc4 = String.format("%s::%s", dedupConf.getWf().getEntityType(), "skip list");
        accumulators.put(acc4, context.longAccumulator(acc4));
        String acc5 = String.format("%s::%s", dedupConf.getWf().getEntityType(), "dedupSimilarity (x2)");
        accumulators.put(acc5, context.longAccumulator(acc5));
        String acc6 = String.format("%s::%s", dedupConf.getWf().getEntityType(), "d < " + dedupConf.getWf().getThreshold());
        accumulators.put(acc6, context.longAccumulator(acc6));

        return accumulators;
    }

    public static JavaRDD<String> loadDataFromHDFS(String path, JavaSparkContext context) {
        return context.textFile(path);
    }

    public static void deleteIfExists(String path) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fileSystem = FileSystem.get(conf);
        if (fileSystem.exists(new Path(path))) {
            fileSystem.delete(new Path(path), true);
        }
    }

    public static DedupConfig loadConfigFromHDFS(String path) throws IOException {

        Configuration conf = new Configuration();
        FileSystem fileSystem = FileSystem.get(conf);
        FSDataInputStream inputStream = new FSDataInputStream(fileSystem.open(new Path(path)));

        return DedupConfig.load(IOUtils.toString(inputStream, StandardCharsets.UTF_8.name()));
    }

    static <T> String readFromClasspath(final String filename, final Class<T> clazz) {
        final StringWriter sw = new StringWriter();
        try {
            IOUtils.copy(clazz.getResourceAsStream(filename), sw);
            return sw.toString();
        } catch (final IOException e) {
            throw new RuntimeException("cannot load resource from classpath: " + filename);
        }
    }

    static Set<String> getGroupingKeys(DedupConfig conf, MapDocument doc) {
        return Sets.newHashSet(BlacklistAwareClusteringCombiner.filterAndCombine(doc, conf));
    }

    public static String md5(final String s) {
        try {
            final MessageDigest md = MessageDigest.getInstance("MD5");
            md.update(s.getBytes("UTF-8"));
            return new String(Hex.encodeHex(md.digest()));
        } catch (final Exception e) {
            System.err.println("Error creating id");
            return null;
        }
    }
}
@@ -0,0 +1,159 @@
package eu.dnetlib.dedup;

import eu.dnetlib.pace.config.DedupConfig;
import eu.dnetlib.pace.model.Field;
import eu.dnetlib.pace.model.MapDocument;
import eu.dnetlib.pace.util.BlockProcessor;
import eu.dnetlib.pace.util.MapDocumentUtil;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.util.LongAccumulator;
import scala.Serializable;
import scala.Tuple2;

import java.util.*;
import java.util.stream.Collectors;

public class Deduper implements Serializable {

    private static final Log log = LogFactory.getLog(Deduper.class);

    /**
     * @param context  the Spark context
     * @param entities the list of JSON entities to be deduplicated
     * @param config   the dedup configuration
     * @return the list of relations generated by the deduplication
     */
    public static JavaPairRDD<String, String> dedup(JavaSparkContext context, JavaRDD<String> entities, DedupConfig config) {

        Map<String, LongAccumulator> accumulators = DedupUtility.constructAccumulator(config, context.sc());

        // create the vertexes of the graph: <ID, MapDocument>
        JavaPairRDD<String, MapDocument> mapDocs = mapToVertexes(context, entities, config);

        // create blocks for deduplication
        JavaPairRDD<String, Iterable<MapDocument>> blocks = createBlocks(context, mapDocs, config);

        // create relations by comparing only elements in the same group
        return computeRelations(context, blocks, config);

//        final RDD<Edge<String>> edgeRdd = relationRDD.map(it -> new Edge<>(it._1().hashCode(), it._2().hashCode(), "equalTo")).rdd();
//
//        RDD<Tuple2<Object, MapDocument>> vertexes = mapDocs.mapToPair((PairFunction<Tuple2<String, MapDocument>, Object, MapDocument>) t -> new Tuple2<Object, MapDocument>((long) t._1().hashCode(), t._2())).rdd();
//        accumulators.forEach((name, acc) -> log.info(name + " -> " + acc.value()));
//
//        return GraphProcessor.findCCs(vertexes, edgeRdd, 20).toJavaRDD();
    }

    /**
     * @param context the Spark context
     * @param blocks  the list of blocks
     * @param config  the dedup configuration
     * @return the list of relations generated by the deduplication
     */
    public static JavaPairRDD<String, String> computeRelations(JavaSparkContext context, JavaPairRDD<String, Iterable<MapDocument>> blocks, DedupConfig config) {

        Map<String, LongAccumulator> accumulators = DedupUtility.constructAccumulator(config, context.sc());

        return blocks.flatMapToPair((PairFlatMapFunction<Tuple2<String, Iterable<MapDocument>>, String, String>) it -> {
            final SparkReporter reporter = new SparkReporter(accumulators);
            new BlockProcessor(config).process(it._1(), it._2(), reporter);
            return reporter.getRelations().iterator();

        }).mapToPair(
                (PairFunction<Tuple2<String, String>, String, Tuple2<String, String>>) item ->
                        new Tuple2<String, Tuple2<String, String>>(item._1() + item._2(), item))
                .reduceByKey((a, b) -> a)
                .mapToPair((PairFunction<Tuple2<String, Tuple2<String, String>>, String, String>) Tuple2::_2);
    }

    /**
     * @param context the Spark context
     * @param mapDocs the list of entities: <id, entity>
     * @param config  the dedup configuration
     * @return the list of blocks based on the clustering of the dedup configuration
     */
    public static JavaPairRDD<String, Iterable<MapDocument>> createBlocks(JavaSparkContext context, JavaPairRDD<String, MapDocument> mapDocs, DedupConfig config) {
        return mapDocs
                // the reduce is just to be sure that we do not have documents with the same id
                .reduceByKey((a, b) -> a)
                .map(Tuple2::_2)
                // Clustering: from <id, doc> to List<groupkey, doc>
                .flatMapToPair((PairFlatMapFunction<MapDocument, String, MapDocument>) a ->
                        DedupUtility.getGroupingKeys(config, a)
                                .stream()
                                .map(it -> new Tuple2<>(it, a))
                                .collect(Collectors.toList())
                                .iterator())
                .groupByKey();
    }

    public static JavaPairRDD<String, List<MapDocument>> createsortedBlocks(JavaSparkContext context, JavaPairRDD<String, MapDocument> mapDocs, DedupConfig config) {
        final String of = config.getWf().getOrderField();
        final int maxQueueSize = config.getWf().getGroupMaxSize();
        return mapDocs
                // the reduce is just to be sure that we do not have documents with the same id
                .reduceByKey((a, b) -> a)
                .map(Tuple2::_2)
                // Clustering: from <id, doc> to List<groupkey, doc>
                .flatMapToPair((PairFlatMapFunction<MapDocument, String, List<MapDocument>>) a ->
                        DedupUtility.getGroupingKeys(config, a)
                                .stream()
                                .map(it -> {
                                    List<MapDocument> tmp = new ArrayList<>();
                                    tmp.add(a);
                                    return new Tuple2<>(it, tmp);
                                })
                                .collect(Collectors.toList())
                                .iterator())
                .reduceByKey((Function2<List<MapDocument>, List<MapDocument>, List<MapDocument>>) (v1, v2) -> {
                    v1.addAll(v2);
                    v1.sort(Comparator.comparing(a -> a.getFieldMap().get(of).stringValue()));
                    if (v1.size() > maxQueueSize)
                        return new ArrayList<>(v1.subList(0, maxQueueSize));
                    return v1;
                });
    }

    /**
     * @param context  the Spark context
     * @param entities the list of JSON entities
     * @param config   the dedup configuration
     * @return the list of vertexes: <id, mapDocument>
     */
    public static JavaPairRDD<String, MapDocument> mapToVertexes(JavaSparkContext context, JavaRDD<String> entities, DedupConfig config) {

        return entities.mapToPair((PairFunction<String, String, MapDocument>) s -> {

            MapDocument mapDocument = MapDocumentUtil.asMapDocumentWithJPath(config, s);
            return new Tuple2<String, MapDocument>(mapDocument.getIdentifier(), mapDocument);

        });
    }

    public static JavaPairRDD<String, String> computeRelations2(JavaSparkContext context, JavaPairRDD<String, List<MapDocument>> blocks, DedupConfig config) {
        Map<String, LongAccumulator> accumulators = DedupUtility.constructAccumulator(config, context.sc());

        return blocks.flatMapToPair((PairFlatMapFunction<Tuple2<String, List<MapDocument>>, String, String>) it -> {
            final SparkReporter reporter = new SparkReporter(accumulators);
            new BlockProcessor(config).processSortedBlock(it._1(), it._2(), reporter);
            return reporter.getRelations().iterator();

        }).mapToPair(
                (PairFunction<Tuple2<String, String>, String, Tuple2<String, String>>) item ->
                        new Tuple2<String, Tuple2<String, String>>(item._1() + item._2(), item))
                .reduceByKey((a, b) -> a)
                .mapToPair((PairFunction<Tuple2<String, Tuple2<String, String>>, String, String>) Tuple2::_2);
    }
}
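For context, here is a minimal sketch of how the three Deduper steps (mapToVertexes, createBlocks, computeRelations) could be chained in a small local driver. It is illustrative only and not part of the commit: the class name, input path and local master are assumptions, and it reuses the configuration resource referenced by the test class.

package eu.dnetlib.dedup;

import eu.dnetlib.pace.config.DedupConfig;
import eu.dnetlib.pace.model.MapDocument;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;

// Hypothetical local driver, not shipped with the module.
public class DeduperLocalExample {
    public static void main(String[] args) throws Exception {
        final SparkSession spark = SparkSession.builder()
                .appName("DeduperLocalExample")
                .master("local[*]")                       // assumption: local execution
                .getOrCreate();
        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());

        // assumption: one JSON entity per line under this path
        final JavaRDD<String> entities = sc.textFile("/tmp/publications");
        final DedupConfig conf = DedupConfig.load(DedupUtility.readFromClasspath(
                "/eu/dnetlib/dedup/conf/org.curr.conf.json", DeduperLocalExample.class));

        // vertexes -> blocks -> similarity relations, mirroring Deduper.dedup()
        JavaPairRDD<String, MapDocument> docs = Deduper.mapToVertexes(sc, entities, conf);
        JavaPairRDD<String, Iterable<MapDocument>> blocks = Deduper.createBlocks(sc, docs, conf);
        JavaPairRDD<String, String> simRels = Deduper.computeRelations(sc, blocks, conf);

        System.out.println("similarity relations: " + simRels.count());
        spark.stop();
    }
}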
@@ -0,0 +1,80 @@
package eu.dnetlib.dedup;

import eu.dnetlib.dedup.graph.ConnectedComponent;
import eu.dnetlib.dedup.graph.GraphProcessor;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.pace.config.DedupConfig;
import eu.dnetlib.pace.util.MapDocumentUtil;
import org.apache.commons.io.IOUtils;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.graphx.Edge;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;

public class SparkCreateConnectedComponent {

    public static void main(String[] args) throws Exception {
        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkCreateConnectedComponent.class.getResourceAsStream("/eu/dnetlib/dhp/dedup/dedup_parameters.json")));
        parser.parseArgument(args);
        final SparkSession spark = SparkSession
                .builder()
                .appName(SparkCreateConnectedComponent.class.getSimpleName())
                .master(parser.get("master"))
                .getOrCreate();

        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
        final String inputPath = parser.get("sourcePath");
        final String entity = parser.get("entity");
        final String targetPath = parser.get("targetPath");
        final DedupConfig dedupConf = DedupConfig.load(IOUtils.toString(SparkCreateConnectedComponent.class.getResourceAsStream("/eu/dnetlib/dhp/dedup/conf/org.curr.conf2.json")));

        final JavaPairRDD<Object, String> vertexes = sc.textFile(inputPath + "/" + entity)
                .map(s -> MapDocumentUtil.getJPathString(dedupConf.getWf().getIdPath(), s))
                .mapToPair((PairFunction<String, Object, String>)
                        s -> new Tuple2<Object, String>((long) s.hashCode(), s)
                );

        final Dataset<Relation> similarityRelations = spark.read().load(targetPath + "/" + entity + "_simrel").as(Encoders.bean(Relation.class));

        final RDD<Edge<String>> edgeRdd = similarityRelations.javaRDD().map(it -> new Edge<>(it.getSource().hashCode(), it.getTarget().hashCode(), it.getRelClass())).rdd();

        final JavaRDD<ConnectedComponent> cc = GraphProcessor.findCCs(vertexes.rdd(), edgeRdd, 20).toJavaRDD();

        final Dataset<Relation> mergeRelation = spark.createDataset(cc.filter(k -> k.getDocIds().size() > 1).flatMap((FlatMapFunction<ConnectedComponent, Relation>) c ->
                c.getDocIds()
                        .stream()
                        .flatMap(id -> {
                            List<Relation> tmp = new ArrayList<>();
                            Relation r = new Relation();
                            r.setSource(c.getCcId());
                            r.setTarget(id);
                            r.setRelClass("merges");
                            tmp.add(r);
                            r = new Relation();
                            r.setTarget(c.getCcId());
                            r.setSource(id);
                            r.setRelClass("isMergedIn");
                            tmp.add(r);
                            return tmp.stream();
                        }).iterator()).rdd(), Encoders.bean(Relation.class));

        mergeRelation.write().mode("overwrite").save(targetPath + "/" + entity + "_mergeRels");

    }
}
@@ -0,0 +1,101 @@
package eu.dnetlib.dedup;

import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Lists;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.pace.config.DedupConfig;
import eu.dnetlib.pace.util.MapDocumentUtil;
import org.apache.commons.io.IOUtils;
import org.apache.spark.Partitioner;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class SparkCreateDedupRecord {

    public static void main(String[] args) throws Exception {
        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkCreateDedupRecord.class.getResourceAsStream("/eu/dnetlib/dhp/dedup/dedup_parameters.json")));
        parser.parseArgument(args);
        final SparkSession spark = SparkSession
                .builder()
                .appName(SparkCreateDedupRecord.class.getSimpleName())
                .master(parser.get("master"))
                .getOrCreate();

        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
        final String inputPath = parser.get("sourcePath");
        final String entity = parser.get("entity");
        final String targetPath = parser.get("targetPath");
        final DedupConfig dedupConf = DedupConfig.load(IOUtils.toString(SparkCreateDedupRecord.class.getResourceAsStream("/eu/dnetlib/dhp/dedup/conf/org.curr.conf2.json")));

        final JavaPairRDD<String, String> inputJsonEntities = sc.textFile(inputPath + "/" + entity)
                .mapToPair((PairFunction<String, String, String>) it ->
                        new Tuple2<String, String>(MapDocumentUtil.getJPathString(dedupConf.getWf().getIdPath(), it), it)
                );

//        JavaPairRDD<String,String> mergeRels = spark
//                .read().load(targetPath + "/" + entity+"_mergeRels").as(Encoders.bean(Relation.class))
//                .where("relClass=='merges'")
//                .javaRDD()
//                .mapToPair(
//                        (PairFunction<Relation, String,String>)r->
//                                new Tuple2<String,String>(r.getTarget(), r.getSource())
//                );
//
//
//        final JavaPairRDD<String, String> p = mergeRels.join(inputJsonEntities).mapToPair((PairFunction<Tuple2<String, Tuple2<String, String>>, String, String>) Tuple2::_2);
//
//        Comparator<String> c = new Comparator<String>() {
//            @Override
//            public int compare(String s, String t1) {
//                return 0;
//            }
//        };
//        final JavaPairRDD<String, String> stringStringJavaPairRDD = p.repartitionAndSortWithinPartitions(p.partitioner().get(), c);

//        List<Foo> inputValues = Arrays.asList(
//                new Foo("k",5),
//                new Foo("a",1),
//                new Foo("a",30),
//                new Foo("a",18),
//                new Foo("a",22),
//                new Foo("b",22),
//                new Foo("c",5),
//                new Foo("a",5),
//                new Foo("s",1),
//                new Foo("h",4)
//        );
//
//
//        final JavaPairRDD<Foo, Foo> fooFighters = sc.parallelize(inputValues).mapToPair((PairFunction<Foo, Foo, Foo>) i -> new Tuple2<Foo, Foo>(i, i));
//
//
//        FooComparator c = new FooComparator();
//        final List<Tuple2<String, List<Foo>>> result =
//                fooFighters.repartitionAndSortWithinPartitions(new FooPartitioner(fooFighters.getNumPartitions()), c)
//                        .mapToPair((PairFunction<Tuple2<Foo, Foo>, String, Foo>) t-> new Tuple2<String,Foo>(t._1().getValue(), t._2()) )
//                        .groupByKey()
//                        .mapValues((Function<Iterable<Foo>, List<Foo>>) Lists::newArrayList)
//                        .collect();
//
//
//        System.out.println(result);

    }
}
@@ -0,0 +1,94 @@
package eu.dnetlib.dedup;

import eu.dnetlib.dedup.graph.ConnectedComponent;
import eu.dnetlib.dedup.graph.GraphProcessor;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.pace.config.DedupConfig;
import eu.dnetlib.pace.model.MapDocument;
import eu.dnetlib.pace.util.MapDocumentUtil;
import org.apache.commons.io.IOUtils;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.graphx.Edge;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.util.List;


/**
 * This Spark job creates similarity relations between entities and saves the result.
 *
 * Parameters:
 *   sourcePath
 *   entityType
 *   targetPath
 */
public class SparkCreateSimRels {

    public static void main(String[] args) throws Exception {
        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkCreateSimRels.class.getResourceAsStream("/eu/dnetlib/dhp/dedup/dedup_parameters.json")));
        parser.parseArgument(args);
        final SparkSession spark = SparkSession
                .builder()
                .appName(SparkCreateSimRels.class.getSimpleName())
                .master(parser.get("master"))
                .getOrCreate();

        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
        final String inputPath = parser.get("sourcePath");
        final String entity = parser.get("entity");
        final String targetPath = parser.get("targetPath");
        final DedupConfig dedupConf = DedupConfig.load(IOUtils.toString(SparkCreateSimRels.class.getResourceAsStream("/eu/dnetlib/dhp/dedup/conf/org.curr.conf2.json")));

        final long total = sc.textFile(inputPath + "/" + entity).count();

        JavaPairRDD<Object, MapDocument> vertexes = sc.textFile(inputPath + "/" + entity)
                .map(s -> {
                    MapDocument d = MapDocumentUtil.asMapDocumentWithJPath(dedupConf, s);
                    return new Tuple2<>(d.getIdentifier(), d);
                })
                .mapToPair((PairFunction<Tuple2<String, MapDocument>, Object, MapDocument>) t -> new Tuple2<Object, MapDocument>((long) t._1().hashCode(), t._2()));

        JavaPairRDD<String, MapDocument> mapDocument = vertexes.mapToPair((PairFunction<Tuple2<Object, MapDocument>, String, MapDocument>) item -> new Tuple2<String, MapDocument>(item._2().getIdentifier(), item._2()));

        // create blocks for deduplication
        JavaPairRDD<String, List<MapDocument>> blocks = Deduper.createsortedBlocks(sc, mapDocument, dedupConf);

        // create relations by comparing only elements in the same group
        final JavaPairRDD<String, String> dedupRels = Deduper.computeRelations2(sc, blocks, dedupConf);

        final JavaRDD<Relation> isSimilarToRDD = dedupRels.map(simRel -> {
            final Relation r = new Relation();
            r.setSource(simRel._1());
            r.setTarget(simRel._2());
            r.setRelClass("isSimilarTo");
            return r;
        });

        spark.createDataset(isSimilarToRDD.rdd(), Encoders.bean(Relation.class)).write().mode("overwrite").save(targetPath + "/" + entity + "_simrel");

    }
}
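As a rough illustration of what this job produces, the sketch below reloads the <targetPath>/<entity>_simrel dataset that SparkCreateSimRels writes and prints a few relations. It is a hypothetical inspection utility, not part of the commit; the class name, local master and path/entity values are assumptions.

package eu.dnetlib.dedup;

import eu.dnetlib.dhp.schema.oaf.Relation;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

// Hypothetical check of the similarity relations written by SparkCreateSimRels.
public class SimRelInspector {
    public static void main(String[] args) {
        final SparkSession spark = SparkSession.builder()
                .appName("SimRelInspector")
                .master("local[*]")                 // assumption: local run
                .getOrCreate();

        final String targetPath = "/tmp/dedup";     // assumption: same -t used for the job
        final String entity = "publication";        // assumption: same -e used for the job

        final Dataset<Relation> simRels = spark.read()
                .load(targetPath + "/" + entity + "_simrel")
                .as(Encoders.bean(Relation.class));

        simRels.show(10, false);
        System.out.println("isSimilarTo relations: " + simRels.count());
        spark.stop();
    }
}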
@@ -0,0 +1,47 @@
package eu.dnetlib.dedup;

import eu.dnetlib.pace.util.Reporter;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.spark.util.LongAccumulator;
import scala.Serializable;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class SparkReporter implements Serializable, Reporter {

    final List<Tuple2<String, String>> relations = new ArrayList<>();
    private static final Log log = LogFactory.getLog(SparkReporter.class);
    Map<String, LongAccumulator> accumulators;

    public SparkReporter(Map<String, LongAccumulator> accumulators) {
        this.accumulators = accumulators;
    }

    public void incrementCounter(String counterGroup, String counterName, long delta, Map<String, LongAccumulator> accumulators) {

        final String accumulatorName = String.format("%s::%s", counterGroup, counterName);
        if (accumulators.containsKey(accumulatorName)) {
            accumulators.get(accumulatorName).add(delta);
        }

    }

    @Override
    public void incrementCounter(String counterGroup, String counterName, long delta) {

        incrementCounter(counterGroup, counterName, delta, accumulators);
    }

    @Override
    public void emit(String type, String from, String to) {
        relations.add(new Tuple2<>(from, to));
    }

    public List<Tuple2<String, String>> getRelations() {
        return relations;
    }
}
@@ -0,0 +1,80 @@
package eu.dnetlib.dedup.graph;

import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dedup.DedupUtility;
import eu.dnetlib.pace.util.PaceException;
import org.apache.commons.lang.StringUtils;
import org.codehaus.jackson.annotate.JsonIgnore;

import java.io.IOException;
import java.io.Serializable;
import java.util.Set;

public class ConnectedComponent implements Serializable {

    private Set<String> docIds;
    private String ccId;

    public ConnectedComponent() {
    }

    public ConnectedComponent(Set<String> docIds) {
        this.docIds = docIds;
        createID();
    }

    public String createID() {
        if (docIds.size() > 1) {
            final String s = getMin();
            String prefix = s.split("\\|")[0];
            ccId = prefix + "|dedup_______::" + DedupUtility.md5(s);
            return ccId;
        } else {
            return docIds.iterator().next();
        }
    }

    @JsonIgnore
    public String getMin() {

        final StringBuilder min = new StringBuilder();
        docIds.forEach(i -> {
            if (StringUtils.isBlank(min.toString())) {
                min.append(i);
            } else {
                if (min.toString().compareTo(i) > 0) {
                    min.setLength(0);
                    min.append(i);
                }
            }
        });
        return min.toString();
    }

    @Override
    public String toString() {
        ObjectMapper mapper = new ObjectMapper();
        try {
            return mapper.writeValueAsString(this);
        } catch (IOException e) {
            throw new PaceException("Failed to create Json: ", e);
        }
    }

    public Set<String> getDocIds() {
        return docIds;
    }

    public void setDocIds(Set<String> docIds) {
        this.docIds = docIds;
    }

    public String getCcId() {
        return ccId;
    }

    public void setCcId(String ccId) {
        this.ccId = ccId;
    }
}
@@ -0,0 +1,39 @@
package eu.dnetlib.dedup.graph

import eu.dnetlib.pace.model.MapDocument
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD

import scala.collection.JavaConversions;

object GraphProcessor {

  def findCCs(vertexes: RDD[(VertexId, String)], edges: RDD[Edge[String]], maxIterations: Int): RDD[ConnectedComponent] = {
    val graph: Graph[String, String] = Graph(vertexes, edges).partitionBy(PartitionStrategy.RandomVertexCut) // TODO remember to remove the partitionBy
    val cc = graph.connectedComponents(maxIterations).vertices

    val joinResult = vertexes.leftOuterJoin(cc).map {
      case (id, (openaireId, cc)) => {
        if (cc.isEmpty) {
          (id, openaireId)
        }
        else {
          (cc.get, openaireId)
        }
      }
    }
    val connectedComponents = joinResult.groupByKey()
      .map[ConnectedComponent](cc => asConnectedComponent(cc))
    (connectedComponents)
  }

  def asConnectedComponent(group: (VertexId, Iterable[String])): ConnectedComponent = {
    val docs = group._2.toSet[String]
    val connectedComponent = new ConnectedComponent(JavaConversions.setAsJavaSet[String](docs));
    connectedComponent
  }

}
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,280 @@
{
  "wf" : {
    "threshold" : "0.99",
    "dedupRun" : "001",
    "entityType" : "result",
    "subEntityType" : "resulttype",
    "subEntityValue" : "publication",
    "orderField" : "title",
    "queueMaxSize" : "2000",
    "groupMaxSize" : "100",
    "maxChildren" : "100",
    "idPath": "$.id",
    "slidingWindowSize" : "200",
    "rootBuilder" : [ "result", "resultProject_outcome_isProducedBy", "resultResult_publicationDataset_isRelatedTo", "resultResult_similarity_isAmongTopNSimilarDocuments", "resultResult_similarity_hasAmongTopNSimilarDocuments", "resultOrganization_affiliation_isAffiliatedWith", "resultResult_part_hasPart", "resultResult_part_isPartOf", "resultResult_supplement_isSupplementTo", "resultResult_supplement_isSupplementedBy", "resultResult_version_isVersionOf" ],
    "includeChildren" : "true"
  },
  "pace" : {
    "clustering" : [
      { "name" : "ngrampairs", "fields" : [ "title" ], "params" : { "max" : "1", "ngramLen" : "3"} },
      { "name" : "suffixprefix", "fields" : [ "title" ], "params" : { "max" : "1", "len" : "3" } },
      { "name" : "lowercase", "fields" : [ "doi" ], "params" : { } }
    ],
    "strictConditions" : [
      { "name" : "pidMatch", "fields" : [ "pid" ] }
    ],
    "conditions" : [
      { "name" : "titleVersionMatch", "fields" : [ "title" ] },
      { "name" : "sizeMatch", "fields" : [ "authors" ] }
    ],
    "model" : [
      { "name" : "doi", "algo" : "Null", "type" : "String", "weight" : "0.0", "ignoreMissing" : "true", "path" : "$.pid[?(@.qualifier.classid ==\"doi\")].value" },
      { "name" : "pid", "algo" : "Null", "type" : "JSON", "weight" : "0.0", "ignoreMissing" : "true", "path" : "$.pid", "overrideMatch" : "true" },
      { "name" : "title", "algo" : "LevensteinTitle", "type" : "String", "weight" : "1.0", "ignoreMissing" : "false", "path" : "$.title[?(@.qualifier.classid ==\"main title\")].value", "length" : 250, "size" : 5 },
      { "name" : "authors", "algo" : "Null", "type" : "List", "weight" : "0.0", "ignoreMissing" : "true", "path" : "$.author[*].fullname", "size" : 200 },
      { "name" : "resulttype", "algo" : "Null", "type" : "String", "weight" : "0.0", "ignoreMissing" : "false", "path" : "$.resulttype.classid" }
    ],
    "synonyms": {},
    "blacklists" : {
      "title" : [
        "^Inside Front Cover$",
        "(?i)^Poster presentations$",
        "^THE ASSOCIATION AND THE GENERAL MEDICAL COUNCIL$",
        "^Problems with perinatal pathology\\.?$",
        "(?i)^Cases? of Puerperal Convulsions$",
        "(?i)^Operative Gyna?ecology$",
        "(?i)^Mind the gap\\!?\\:?$",
        "^Chronic fatigue syndrome\\.?$",
        "^Cartas? ao editor Letters? to the Editor$",
        "^Note from the Editor$",
        "^Anesthesia Abstract$",
        "^Annual report$",
        "(?i)^“?THE RADICAL PREVENTION OF VENEREAL DISEASE\\.?”?$",
        "(?i)^Graph and Table of Infectious Diseases?$",
        "^Presentation$",
        "(?i)^Reviews and Information on Publications$",
        "(?i)^PUBLIC HEALTH SERVICES?$",
        "(?i)^COMBINED TEXT-?BOOK OF OBSTETRICS AND GYN(Æ|ae)COLOGY$",
        "(?i)^Adrese autora$",
        "(?i)^Systematic Part .*\\. Catalogus Fossilium Austriae, Band 2: Echinoidea neogenica$",
        "(?i)^Acknowledgement to Referees$",
        "(?i)^Behçet's disease\\.?$",
        "(?i)^Isolation and identification of restriction endonuclease.*$",
        "(?i)^CEREBROVASCULAR DISEASES?.?$",
        "(?i)^Screening for abdominal aortic aneurysms?\\.?$",
        "^Event management$",
        "(?i)^Breakfast and Crohn's disease.*\\.?$",
        "^Cálculo de concentraciones en disoluciones acuosas. Ejercicio interactivo\\..*\\.$",
        "(?i)^Genetic and functional analyses of SHANK2 mutations suggest a multiple hit model of Autism spectrum disorders?\\.?$",
        "^Gushi hakubutsugaku$",
        "^Starobosanski nadpisi u Bosni i Hercegovini \\(.*\\)$",
        "^Intestinal spirocha?etosis$",
        "^Treatment of Rodent Ulcer$",
        "(?i)^\\W*Cloud Computing\\W*$",
        "^Compendio mathematico : en que se contienen todas las materias mas principales de las Ciencias que tratan de la cantidad$",
        "^Free Communications, Poster Presentations: Session [A-F]$",
        "^“The Historical Aspects? of Quackery\\.?”$",
        "^A designated centre for people with disabilities operated by St John of God Community Services (Limited|Ltd), Louth$",
        "^P(er|re)-Mile Premiums for Auto Insurance\\.?$",
        "(?i)^Case Report$",
        "^Boletín Informativo$",
        "(?i)^Glioblastoma Multiforme$",
        "(?i)^Nuevos táxones animales descritos en la península Ibérica y Macaronesia desde 1994 \\(.*\\)$",
        "^Zaměstnanecké výhody$",
        "(?i)^The Economics of Terrorism and Counter-Terrorism: A Survey \\(Part .*\\)$",
        "(?i)^Carotid body tumours?\\.?$",
        "(?i)^\\[Españoles en Francia : La condición Emigrante.*\\]$",
        "^Avant-propos$",
        "(?i)^St\\. Patrick's Cathedral, Dublin, County Dublin - Head(s)? and Capital(s)?$",
        "(?i)^St\\. Patrick's Cathedral, Dublin, County Dublin - Bases?$",
        "(?i)^PUBLIC HEALTH VERSUS THE STATE$",
        "^Viñetas de Cortázar$",
        "(?i)^Search for heavy neutrinos and W(\\[|_|\\(|_\\{|-)?R(\\]|\\)|\\})? bosons with right-handed couplings in a left-right symmetric model in pp collisions at.*TeV(\\.)?$",
        "(?i)^Measurement of the pseudorapidity and centrality dependence of the transverse energy density in Pb(-?)Pb collisions at.*tev(\\.?)$",
        "(?i)^Search for resonances decaying into top-quark pairs using fully hadronic decays in pp collisions with ATLAS at.*TeV$",
        "(?i)^Search for neutral minimal supersymmetric standard model Higgs bosons decaying to tau pairs in pp collisions at.*tev$",
        "(?i)^Relatório de Estágio (de|em) Angiologia e Cirurgia Vascular$",
        "^Aus der AGMB$",
        "^Znanstveno-stručni prilozi$",
        "(?i)^Zhodnocení finanční situace podniku a návrhy na zlepšení$",
        "(?i)^Evaluation of the Financial Situation in the Firm and Proposals to its Improvement$",
        "(?i)^Hodnocení finanční situace podniku a návrhy na její zlepšení$",
        "^Finanční analýza podniku$",
        "^Financial analysis( of business)?$",
        "(?i)^Textbook of Gyn(a)?(Æ)?(e)?cology$",
        "^Jikken nihon shūshinsho$",
        "(?i)^CORONER('|s)(s|') INQUESTS$",
        "(?i)^(Μελέτη παραγόντων )?risk management( για ανάπτυξη και εφαρμογή ενός πληροφοριακού συστήματος| και ανάπτυξη συστήματος)?$",
        "(?i)^Consultants' contract(s)?$",
        "(?i)^Upute autorima$",
        "(?i)^Bijdrage tot de Kennis van den Godsdienst der Dajaks van Lan(d|f)ak en Tajan$",
        "^Joshi shin kokubun$",
        "^Kōtō shōgaku dokuhon nōson'yō$",
        "^Jinjō shōgaku shōka$",
        "^Shōgaku shūjichō$",
        "^Nihon joshi dokuhon$",
        "^Joshi shin dokuhon$",
        "^Chūtō kanbun dokuhon$",
        "^Wabun dokuhon$",
        "(?i)^(Analysis of economy selected village or town|Rozbor hospodaření vybrané obce či města)$",
        "(?i)^cardiac rehabilitation$",
        "(?i)^Analytical summary$",
        "^Thesaurus resolutionum Sacrae Congregationis Concilii$",
        "(?i)^Sumario analítico(\\s{1})?(Analitic summary)?$",
        "^Prikazi i osvrti$",
        "^Rodinný dům s provozovnou$",
        "^Family house with an establishment$",
        "^Shinsei chūtō shin kokugun$",
        "^Pulmonary alveolar proteinosis(\\.?)$",
        "^Shinshū kanbun$",
        "^Viñeta(s?) de Rodríguez$",
        "(?i)^RUBRIKA UREDNIKA$",
        "^A Matching Model of the Academic Publication Market$",
        "^Yōgaku kōyō$",
        "^Internetový marketing$",
        "^Internet marketing$",
        "^Chūtō kokugo dokuhon$",
        "^Kokugo dokuhon$",
        "^Antibiotic Cover for Dental Extraction(s?)$",
        "^Strategie podniku$",
        "^Strategy of an Enterprise$",
        "(?i)^respiratory disease(s?)(\\.?)$",
        "^Award(s?) for Gallantry in Civil Defence$",
        "^Podniková kultura$",
        "^Corporate Culture$",
        "^Severe hyponatraemia in hospital inpatient(s?)(\\.?)$",
        "^Pracovní motivace$",
        "^Work Motivation$",
        "^Kaitei kōtō jogaku dokuhon$",
        "^Konsolidovaná účetní závěrka$",
        "^Consolidated Financial Statements$",
        "(?i)^intracranial tumour(s?)$",
        "^Climate Change Mitigation Options and Directed Technical Change: A Decentralized Equilibrium Analysis$",
        "^\\[CERVECERIAS MAHOU(\\.|\\:) INTERIOR\\] \\[Material gráfico\\]$",
        "^Housing Market Dynamics(\\:|\\.) On the Contribution of Income Shocks and Credit Constraint(s?)$",
        "^\\[Funciones auxiliares de la música en Radio París,.*\\]$",
        "^Úroveň motivačního procesu jako způsobu vedení lidí$",
        "^The level of motivation process as a leadership$",
        "^Pay-beds in N(\\.?)H(\\.?)S(\\.?) Hospitals$",
        "(?i)^news and events$",
        "(?i)^NOVOSTI I DOGAĐAJI$",
        "^Sansū no gakushū$",
        "^Posouzení informačního systému firmy a návrh změn$",
        "^Information System Assessment and Proposal for ICT Modification$",
        "^Stresové zatížení pracovníků ve vybrané profesi$",
        "^Stress load in a specific job$",
        "^Sunday: Poster Sessions, Pt.*$",
        "^Monday: Poster Sessions, Pt.*$",
        "^Wednesday: Poster Sessions, Pt.*",
        "^Tuesday: Poster Sessions, Pt.*$",
        "^Analýza reklamy$",
        "^Analysis of advertising$",
        "^Shōgaku shūshinsho$",
        "^Shōgaku sansū$",
        "^Shintei joshi kokubun$",
        "^Taishō joshi kokubun dokuhon$",
        "^Joshi kokubun$",
        "^Účetní uzávěrka a účetní závěrka v ČR$",
        "(?i)^The \"?Causes\"? of Cancer$",
        "^Normas para la publicación de artículos$",
        "^Editor('|s)(s|') [Rr]eply$",
        "^Editor(’|s)(s|’) letter$",
        "^Redaktoriaus žodis$",
        "^DISCUSSION ON THE PRECEDING PAPER$",
        "^Kōtō shōgaku shūshinsho jidōyō$",
        "^Shōgaku nihon rekishi$",
        "^(Theory of the flow of action currents in isolated myelinated nerve fibers).*$",
        "^Préface$",
        "^Occupational [Hh]ealth [Ss]ervices.$",
        "^In Memoriam Professor Toshiyuki TAKESHIMA$",
        "^Účetní závěrka ve vybraném podniku.*$",
        "^Financial statements in selected company$",
        "^Abdominal [Aa]ortic [Aa]neurysms.*$",
        "^Pseudomyxoma peritonei$",
        "^Kazalo autora$",
        "(?i)^uvodna riječ$",
        "^Motivace jako způsob vedení lidí$",
        "^Motivation as a leadership$",
        "^Polyfunkční dům$",
        "^Multi\\-funkcional building$",
        "^Podnikatelský plán$",
        "(?i)^Podnikatelský záměr$",
        "(?i)^Business Plan$",
        "^Oceňování nemovitostí$",
        "^Marketingová komunikace$",
        "^Marketing communication$",
        "^Sumario Analítico$",
        "^Riječ uredništva$",
        "^Savjetovanja i priredbe$",
        "^Índice$",
        "^(Starobosanski nadpisi).*$",
        "^Vzdělávání pracovníků v organizaci$",
        "^Staff training in organization$",
        "^(Life Histories of North American Geometridae).*$",
        "^Strategická analýza podniku$",
        "^Strategic Analysis of an Enterprise$",
        "^Sadržaj$",
        "^Upute suradnicima$",
        "^Rodinný dům$",
        "(?i)^Fami(l)?ly house$",
        "^Upute autorima$",
        "^Strategic Analysis$",
        "^Finanční analýza vybraného podniku$",
        "^Finanční analýza$",
        "^Riječ urednika$",
        "(?i)^Content(s?)$",
        "(?i)^Inhalt$",
        "^Jinjō shōgaku shūshinsho jidōyō$",
        "(?i)^Index$",
        "^Chūgaku kokubun kyōkasho$",
        "^Retrato de una mujer$",
        "^Retrato de un hombre$",
        "^Kōtō shōgaku dokuhon$",
        "^Shotōka kokugo$",
        "^Shōgaku dokuhon$",
        "^Jinjō shōgaku kokugo dokuhon$",
        "^Shinsei kokugo dokuhon$",
        "^Teikoku dokuhon$",
        "^Instructions to Authors$",
        "^KİTAP TAHLİLİ$",
        "^PRZEGLĄD PIŚMIENNICTWA$",
        "(?i)^Presentación$",
        "^İçindekiler$",
        "(?i)^Tabl?e of contents$",
        "^(CODICE DEL BEATO DE LOS REYES FERNANDO I Y SANCHA).*$",
        "^(\\[MADRID\\. BIBL\\. NAC\\. N.*KING FERDINAND I.*FROM SAN ISIDORO DE LEON\\. FACUNDUS SCRIPSIT DATED.*\\]).*",
        "^Editorial( Board)?$",
        "(?i)^Editorial \\(English\\)$",
        "^Editörden$",
        "^(Corpus Oral Dialectal \\(COD\\)\\.).*$",
        "^(Kiri Karl Morgensternile).*$",
        "^(\\[Eksliibris Aleksandr).*\\]$",
        "^(\\[Eksliibris Aleksandr).*$",
        "^(Eksliibris Aleksandr).*$",
        "^(Kiri A\\. de Vignolles).*$",
        "^(2 kirja Karl Morgensternile).*$",
        "^(Pirita kloostri idaosa arheoloogilised).*$",
        "^(Kiri tundmatule).*$",
        "^(Kiri Jenaer Allgemeine Literaturzeitung toimetusele).*$",
        "^(Eksliibris Nikolai Birukovile).*$",
        "^(Eksliibris Nikolai Issakovile).*$",
        "^(WHP Cruise Summary Information of section).*$",
        "^(Measurement of the top quark\\-pair production cross section with ATLAS in pp collisions at).*$",
        "^(Measurement of the spin\\-dependent structure function).*",
        "(?i)^.*authors['’′]? reply\\.?$",
        "(?i)^.*authors['’′]? response\\.?$"
      ]
    }
  }
}
@@ -0,0 +1,7 @@
[
  {"paramName":"mt", "paramLongName":"master", "paramDescription": "should be local or yarn", "paramRequired": true},
  {"paramName":"s", "paramLongName":"sourcePath", "paramDescription": "the path of the sequential file to read", "paramRequired": true},
  {"paramName":"e", "paramLongName":"entity", "paramDescription": "the type of entity to be deduped", "paramRequired": true},
  {"paramName":"c", "paramLongName":"dedupConf", "paramDescription": "dedup configuration to be used", "paramRequired": true},
  {"paramName":"t", "paramLongName":"targetPath", "paramDescription": "target path to save dedup result", "paramRequired": true}
]
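For context, a minimal sketch of how these five parameters are consumed through ArgumentApplicationParser, as the dedup jobs above do. The class name and all argument values are placeholders, and the inlined configuration string is deliberately left elided.

package eu.dnetlib.dedup;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import org.apache.commons.io.IOUtils;

// Hypothetical example: parses the five parameters declared in dedup_parameters.json.
public class DedupArgumentsExample {
    public static void main(String[] args) throws Exception {
        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
                IOUtils.toString(DedupArgumentsExample.class.getResourceAsStream(
                        "/eu/dnetlib/dhp/dedup/dedup_parameters.json")));

        // placeholder values, mirroring the short names -mt, -s, -e, -c, -t
        parser.parseArgument(new String[]{
                "-mt", "local[*]",
                "-s", "/tmp/input",
                "-e", "publication",
                "-c", "{ ...dedup configuration json... }",
                "-t", "/tmp/dedup"});

        System.out.println("master     = " + parser.get("master"));
        System.out.println("sourcePath = " + parser.get("sourcePath"));
        System.out.println("entity     = " + parser.get("entity"));
        System.out.println("targetPath = " + parser.get("targetPath"));
    }
}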
File diff suppressed because one or more lines are too long
@@ -0,0 +1,101 @@
<workflow-app name="Dedup Entities" xmlns="uri:oozie:workflow:0.5">
    <parameters>
        <property>
            <name>sourcePath</name>
            <description>the source path</description>
        </property>
        <property>
            <name>entity</name>
            <description>the entity that should be processed</description>
        </property>
        <property>
            <name>dedupConf</name>
            <description>the dedup configuration</description>
        </property>
        <property>
            <name>targetPath</name>
            <description>the target path</description>
        </property>
        <property>
            <name>sparkDriverMemory</name>
            <description>memory for driver process</description>
        </property>
        <property>
            <name>sparkExecutorMemory</name>
            <description>memory for individual executor</description>
        </property>
        <property>
            <name>sparkExecutorCores</name>
            <description>number of cores used by single executor</description>
        </property>
    </parameters>

    <start to="CreateSimRels"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>

    <!-- <action name="DeleteTargetPath">-->
    <!--     <fs>-->
    <!--         <delete path='${targetPath}/${entity}_simrel'/>-->
    <!--         <delete path='${targetPath}/${entity}_mergeRels'/>-->
    <!--     </fs>-->
    <!--     <ok to="CreateSimRels"/>-->
    <!--     <error to="Kill"/>-->
    <!-- </action>-->

    <action name="CreateSimRels">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <master>yarn-cluster</master>
            <mode>cluster</mode>
            <name>Create Similarity Relations</name>
            <class>eu.dnetlib.dedup.SparkCreateSimRels</class>
            <jar>dhp-dedup-${projectVersion}.jar</jar>
            <spark-opts>--executor-memory ${sparkExecutorMemory} --executor-cores ${sparkExecutorCores}
                --driver-memory=${sparkDriverMemory} --conf
                spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener" --conf
                spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener" --conf
                spark.sql.warehouse.dir="/user/hive/warehouse"
            </spark-opts>
            <arg>-mt</arg><arg>yarn-cluster</arg>
            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
            <arg>--targetPath</arg><arg>${targetPath}</arg>
            <arg>--entity</arg><arg>${entity}</arg>
            <arg>--dedupConf</arg><arg>${dedupConf}</arg>
        </spark>
        <ok to="CreateConnectedComponents"/>
        <error to="Kill"/>
    </action>

    <action name="CreateConnectedComponents">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <master>yarn-cluster</master>
            <mode>cluster</mode>
            <name>Create Connected Components</name>
            <class>eu.dnetlib.dedup.SparkCreateConnectedComponent</class>
            <jar>dhp-dedup-${projectVersion}.jar</jar>
            <spark-opts>--executor-memory ${sparkExecutorMemory} --executor-cores ${sparkExecutorCores}
                --driver-memory=${sparkDriverMemory} --conf
                spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener" --conf
                spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener" --conf
                spark.sql.warehouse.dir="/user/hive/warehouse"
            </spark-opts>
            <arg>-mt</arg><arg>yarn-cluster</arg>
            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
            <arg>--targetPath</arg><arg>${targetPath}</arg>
            <arg>--entity</arg><arg>${entity}</arg>
            <arg>--dedupConf</arg><arg>${dedupConf}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>
    </action>

    <end name="End"/>
</workflow-app>
@@ -0,0 +1,62 @@
package eu.dnetlib.dedup;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;

import java.io.File;
import java.io.IOException;

public class SparkCreateDedupTest {

    @Before
    public void setUp() throws IOException {
        FileUtils.deleteDirectory(new File("/tmp/pub_dedup_vertex"));
        FileUtils.deleteDirectory(new File("/tmp/pub_dedup_rels"));
    }

    @Test
    @Ignore
    public void dedupTest() throws Exception {
        final String configuration = IOUtils.toString(getClass().getResourceAsStream("/eu/dnetlib/dedup/conf/org.curr.conf.json"));

        SparkCreateSimRels.main(new String[]{
                "-mt", "local[*]",
                "-s", "/home/sandro/betadump",
                "-e", "publication",
                "-c", configuration,
                "-t", "/tmp/dedup",
        });

        SparkCreateConnectedComponent.main(new String[]{
                "-mt", "local[*]",
                "-s", "/home/sandro/betadump",
                "-e", "publication",
                "-c", configuration,
                "-t", "/tmp/dedup",
        });
    }

    @Test
    @Ignore
    public void dedupRecordTest() throws Exception {
        SparkCreateDedupRecord.main(new String[]{
                "-mt", "local[*]",
                "-s", "/home/sandro/betadump",
                "-e", "publication",
                "-c", "configuration",
                "-t", "/tmp/dedup",
        });
    }

}
@@ -0,0 +1,31 @@
package eu.dnetlib.dedup.jpath;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.jayway.jsonpath.JsonPath;
import org.apache.commons.io.IOUtils;
import org.junit.Test;

import java.util.List;
import java.util.Map;

public class JsonPathTest {

    @Test
    public void testJPath() throws Exception {
        final String json = IOUtils.toString(getClass().getResourceAsStream("/eu/dnetlib/dedup/conf/sample.json"));
        List<Map<String, Object>> pid = JsonPath.read(json, "$.pid[*]");
//        System.out.println(json);

        pid.forEach(it -> {
            try {
                System.out.println(new ObjectMapper().writeValueAsString(it));
            } catch (JsonProcessingException e) {
                e.printStackTrace();
            }
        });

    }
}
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -17,6 +17,7 @@
     <module>dhp-aggregation</module>
     <module>dhp-distcp</module>
     <module>dhp-graph-mapper</module>
+    <module>dhp-dedup</module>
 </modules>

 <pluginRepositories>
@@ -310,6 +311,7 @@
     </executions>
 </plugin>

 <plugin>
     <!-- this plugin prepares oozie installer package-->

79 pom.xml
@@ -114,6 +114,12 @@
     <version>${dhp.spark.version}</version>
     <scope>provided</scope>
 </dependency>
+<dependency>
+    <groupId>org.apache.spark</groupId>
+    <artifactId>spark-graphx_2.11</artifactId>
+    <version>${dhp.spark.version}</version>
+    <scope>provided</scope>
+</dependency>
 <dependency>
     <groupId>org.apache.commons</groupId>
@@ -177,6 +183,17 @@
     <version>${dhp.jackson.version}</version>
     <scope>provided</scope>
 </dependency>
+
+<dependency>
+    <groupId>com.fasterxml.jackson.core</groupId>
+    <artifactId>jackson-core</artifactId>
+    <version>${dhp.jackson.version}</version>
+    <scope>provided</scope>
+</dependency>
+
 <dependency>
     <groupId>com.fasterxml.jackson.core</groupId>
     <artifactId>jackson-annotations</artifactId>
@@ -190,6 +207,12 @@
     <scope>provided</scope>
 </dependency>
+
+<dependency>
+    <groupId>eu.dnetlib</groupId>
+    <artifactId>dnet-pace-core</artifactId>
+    <version>4.0.0-SNAPSHOT</version>
+</dependency>
+
 <dependency>
     <groupId>javax.persistence</groupId>
@@ -203,6 +226,16 @@
     <artifactId>amqp-client</artifactId>
     <version>5.6.0</version>
 </dependency>
+<dependency>
+    <groupId>com.jayway.jsonpath</groupId>
+    <artifactId>json-path</artifactId>
+    <version>2.4.0</version>
+</dependency>
+<dependency>
+    <groupId>com.arakelian</groupId>
+    <artifactId>java-jq</artifactId>
+    <version>0.10.1</version>
+</dependency>
 <dependency>
     <groupId>org.apache.oozie</groupId>
@@ -259,27 +292,6 @@
 </executions>
 </plugin>
-<plugin>
-    <groupId>eu.dnetlib</groupId>
-    <artifactId>protoc-jar-maven-plugin</artifactId>
-    <version>1.1.0</version>
-    <executions>
-        <execution>
-            <phase>generate-sources</phase>
-            <goals>
-                <goal>run</goal>
-            </goals>
-            <configuration>
-                <protocVersion>${google.protobuf.version}</protocVersion>
-                <inputDirectories>
-                    <include>src/main/resources</include>
-                </inputDirectories>
-                <outputDirectory>src/gen/java</outputDirectory>
-            </configuration>
-        </execution>
-    </executions>
-</plugin>
-
 <plugin>
     <groupId>org.apache.maven.plugins</groupId>
     <artifactId>maven-surefire-plugin</artifactId>
@@ -342,6 +354,31 @@
 </execution>
 </executions>
 </plugin>
+<plugin>
+    <groupId>net.alchim31.maven</groupId>
+    <artifactId>scala-maven-plugin</artifactId>
+    <version>4.0.1</version>
+    <executions>
+        <execution>
+            <id>scala-compile-first</id>
+            <phase>initialize</phase>
+            <goals>
+                <goal>add-source</goal>
+                <goal>compile</goal>
+            </goals>
+        </execution>
+        <execution>
+            <id>scala-test-compile</id>
+            <phase>process-test-resources</phase>
+            <goals>
+                <goal>testCompile</goal>
+            </goals>
+        </execution>
+    </executions>
+    <configuration>
+        <scalaVersion>${scala.version}</scalaVersion>
+    </configuration>
+</plugin>
 </plugins>

 <extensions>