forked from D-Net/dnet-hadoop
Merge branch 'master' of code-repo.d4science.org:D-Net/dnet-hadoop
This commit is contained in: commit 9c3ab11d5b
pom.xml:

@@ -52,6 +52,8 @@
 					</execution>
 				</executions>
 				<configuration>
+					<failOnMultipleScalaVersions>true</failOnMultipleScalaVersions>
+					<scalaCompatVersion>${scala.binary.version}</scalaCompatVersion>
 					<scalaVersion>${scala.version}</scalaVersion>
 				</configuration>
 			</plugin>

@@ -60,6 +62,11 @@
 	</build>

 	<dependencies>
+		<dependency>
+			<groupId>eu.dnetlib.dhp</groupId>
+			<artifactId>dhp-pace-core</artifactId>
+			<version>${project.version}</version>
+		</dependency>
+
 		<dependency>
 			<groupId>org.apache.hadoop</groupId>

@@ -76,11 +83,11 @@
 		<dependency>
 			<groupId>org.apache.spark</groupId>
-			<artifactId>spark-core_2.11</artifactId>
+			<artifactId>spark-core_${scala.binary.version}</artifactId>
 		</dependency>
 		<dependency>
 			<groupId>org.apache.spark</groupId>
-			<artifactId>spark-sql_2.11</artifactId>
+			<artifactId>spark-sql_${scala.binary.version}</artifactId>
 		</dependency>

 		<dependency>

@@ -142,11 +149,6 @@
 			<artifactId>okhttp</artifactId>
 		</dependency>

-		<dependency>
-			<groupId>eu.dnetlib</groupId>
-			<artifactId>dnet-pace-core</artifactId>
-		</dependency>
-
 		<dependency>
 			<groupId>org.apache.httpcomponents</groupId>
 			<artifactId>httpclient</artifactId>

@@ -159,7 +161,7 @@
 		<dependency>
 			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-schemas</artifactId>
+			<artifactId>${dhp-schemas.artifact}</artifactId>
 		</dependency>

 		<dependency>
Constants.java:

@@ -10,6 +10,12 @@ public class Constants {
 	public static final Map<String, String> accessRightsCoarMap = Maps.newHashMap();
 	public static final Map<String, String> coarCodeLabelMap = Maps.newHashMap();

+	public static final String ROR_NS_PREFIX = "ror_________";
+
+	public static final String ROR_OPENAIRE_ID = "10|openaire____::993a7ae7a863813cf95028b50708e222";
+
+	public static final String ROR_DATASOURCE_NAME = "Research Organization Registry (ROR)";
+
 	public static String COAR_ACCESS_RIGHT_SCHEMA = "http://vocabularies.coar-repositories.org/documentation/access_rights/";

 	private Constants() {
MDStoreInfo.java (new file):

@@ -0,0 +1,100 @@

package eu.dnetlib.dhp.common;

/**
 * This utility represents the Metadata Store information
 * needed during the migration from MongoDB to HDFS.
 */
public class MDStoreInfo {
	private String mdstore;
	private String currentId;
	private Long latestTimestamp;

	/**
	 * Instantiates a new Md store info.
	 */
	public MDStoreInfo() {
	}

	/**
	 * Instantiates a new Md store info.
	 *
	 * @param mdstore         the mdstore
	 * @param currentId       the current id
	 * @param latestTimestamp the latest timestamp
	 */
	public MDStoreInfo(String mdstore, String currentId, Long latestTimestamp) {
		this.mdstore = mdstore;
		this.currentId = currentId;
		this.latestTimestamp = latestTimestamp;
	}

	/**
	 * Gets mdstore.
	 *
	 * @return the mdstore
	 */
	public String getMdstore() {
		return mdstore;
	}

	/**
	 * Sets mdstore.
	 *
	 * @param mdstore the mdstore
	 * @return this instance
	 */
	public MDStoreInfo setMdstore(String mdstore) {
		this.mdstore = mdstore;
		return this;
	}

	/**
	 * Gets current id.
	 *
	 * @return the current id
	 */
	public String getCurrentId() {
		return currentId;
	}

	/**
	 * Sets current id.
	 *
	 * @param currentId the current id
	 * @return this instance
	 */
	public MDStoreInfo setCurrentId(String currentId) {
		this.currentId = currentId;
		return this;
	}

	/**
	 * Gets latest timestamp.
	 *
	 * @return the latest timestamp
	 */
	public Long getLatestTimestamp() {
		return latestTimestamp;
	}

	/**
	 * Sets latest timestamp.
	 *
	 * @param latestTimestamp the latest timestamp
	 * @return this instance
	 */
	public MDStoreInfo setLatestTimestamp(Long latestTimestamp) {
		this.latestTimestamp = latestTimestamp;
		return this;
	}

	@Override
	public String toString() {
		return "MDStoreInfo{" +
			"mdstore='" + mdstore + '\'' +
			", currentId='" + currentId + '\'' +
			", latestTimestamp=" + latestTimestamp +
			'}';
	}
}
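A minimal usage sketch of the new value holder (not part of the commit; the values are illustrative):

	// Illustrative values only.
	MDStoreInfo info = new MDStoreInfo()
		.setMdstore("md-collection-1")
		.setCurrentId("md-collection-1-v2")
		.setLatestTimestamp(1650000000000L);
	System.out.println(info);
	// MDStoreInfo{mdstore='md-collection-1', currentId='md-collection-1-v2', latestTimestamp=1650000000000}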
MakeTarArchive.java:

@@ -45,28 +45,17 @@ public class MakeTarArchive implements Serializable {
 			.map(Integer::valueOf)
 			.orElse(10);

-		final boolean rename = Optional
-			.ofNullable(parser.get("rename"))
-			.map(Boolean::valueOf)
-			.orElse(Boolean.FALSE);
-
 		Configuration conf = new Configuration();
 		conf.set("fs.defaultFS", hdfsNameNode);

 		FileSystem fileSystem = FileSystem.get(conf);

-		makeTArArchive(fileSystem, inputPath, outputPath, gBperSplit, rename);
+		makeTArArchive(fileSystem, inputPath, outputPath, gBperSplit);

 	}

 	public static void makeTArArchive(FileSystem fileSystem, String inputPath, String outputPath, int gBperSplit)
-		throws IOException {
-		makeTArArchive(fileSystem, inputPath, outputPath, gBperSplit, false);
-	}
-
-	public static void makeTArArchive(FileSystem fileSystem, String inputPath, String outputPath, int gBperSplit,
-		boolean rename)
 		throws IOException {

 		RemoteIterator<LocatedFileStatus> dirIterator = fileSystem.listLocatedStatus(new Path(inputPath));

@@ -77,7 +66,7 @@ public class MakeTarArchive implements Serializable {
 			String pathString = p.toString();
 			String entity = pathString.substring(pathString.lastIndexOf("/") + 1);

-			MakeTarArchive.tarMaxSize(fileSystem, pathString, outputPath + "/" + entity, entity, gBperSplit, rename);
+			MakeTarArchive.tarMaxSize(fileSystem, pathString, outputPath + "/" + entity, entity, gBperSplit);
 		}
 	}

@@ -90,8 +79,7 @@ public class MakeTarArchive implements Serializable {
 		return new TarArchiveOutputStream(fileSystem.create(hdfsWritePath).getWrappedStream());
 	}

-	private static void write(FileSystem fileSystem, String inputPath, String outputPath, String dirName,
-		boolean rename)
+	private static void write(FileSystem fileSystem, String inputPath, String outputPath, String dirName)
 		throws IOException {

 		Path hdfsWritePath = new Path(outputPath);

@@ -107,20 +95,20 @@ public class MakeTarArchive implements Serializable {
 				new Path(inputPath), true);

 			while (iterator.hasNext()) {
-				writeCurrentFile(fileSystem, dirName, iterator, ar, 0, rename);
+				writeCurrentFile(fileSystem, dirName, iterator, ar, 0);
 			}

 		}
 	}

 	public static void tarMaxSize(FileSystem fileSystem, String inputPath, String outputPath, String dir_name,
-		int gBperSplit, boolean rename) throws IOException {
+		int gBperSplit) throws IOException {
 		final long bytesPerSplit = 1024L * 1024L * 1024L * gBperSplit;

 		long sourceSize = fileSystem.getContentSummary(new Path(inputPath)).getSpaceConsumed();

 		if (sourceSize < bytesPerSplit) {
-			write(fileSystem, inputPath, outputPath + ".tar", dir_name, rename);
+			write(fileSystem, inputPath, outputPath + ".tar", dir_name);
 		} else {
 			int partNum = 0;

@@ -133,8 +121,7 @@ public class MakeTarArchive implements Serializable {
 				long currentSize = 0;
 				while (next && currentSize < bytesPerSplit) {
-					currentSize = writeCurrentFile(
-						fileSystem, dir_name, fileStatusListIterator, ar, currentSize, rename);
+					currentSize = writeCurrentFile(fileSystem, dir_name, fileStatusListIterator, ar, currentSize);
 					next = fileStatusListIterator.hasNext();

 				}

@@ -147,7 +134,7 @@ public class MakeTarArchive implements Serializable {
 	private static long writeCurrentFile(FileSystem fileSystem, String dirName,
 		RemoteIterator<LocatedFileStatus> fileStatusListIterator,
-		TarArchiveOutputStream ar, long currentSize, boolean rename) throws IOException {
+		TarArchiveOutputStream ar, long currentSize) throws IOException {
 		LocatedFileStatus fileStatus = fileStatusListIterator.next();

 		Path p = fileStatus.getPath();

@@ -161,11 +148,6 @@ public class MakeTarArchive implements Serializable {
 			}
 			name = tmp;
 		}
-		if (rename) {
-			if (name.endsWith(".txt.gz"))
-				name = name.replace(".txt.gz", ".json.gz");
-		}

 		TarArchiveEntry entry = new TarArchiveEntry(dirName + "/" + name);
 		entry.setSize(fileStatus.getLen());
 		currentSize += fileStatus.getLen();
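With the rename flag gone, the archive entry point takes four arguments; a hedged invocation sketch (the name node URL and HDFS paths are illustrative):

	Configuration conf = new Configuration();
	conf.set("fs.defaultFS", "hdfs://namenode:8020"); // illustrative name node
	FileSystem fileSystem = FileSystem.get(conf);
	// Produces one tar (or several parts, split at 10 GB) per top-level directory under /graph.
	MakeTarArchive.makeTArArchive(fileSystem, "/graph", "/archives", 10);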
MdstoreClient.java:

@@ -1,12 +1,12 @@

 package eu.dnetlib.dhp.common;

+import static com.mongodb.client.model.Sorts.descending;
+
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
+import java.util.*;
+import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;

 import org.apache.commons.lang3.StringUtils;

@@ -38,6 +38,26 @@ public class MdstoreClient implements Closeable {
 		this.db = getDb(client, dbName);
 	}

+	private Long parseTimestamp(Document f) {
+		if (f == null || !f.containsKey("timestamp"))
+			return null;
+
+		Object ts = f.get("timestamp");
+
+		return Long.parseLong(ts.toString());
+	}
+
+	public Long getLatestTimestamp(final String collectionId) {
+		MongoCollection<Document> collection = db.getCollection(collectionId);
+		FindIterable<Document> result = collection.find().sort(descending("timestamp")).limit(1);
+		if (result == null) {
+			return null;
+		}
+
+		Document f = result.first();
+		return parseTimestamp(f);
+	}
+
 	public MongoCollection<Document> mdStore(final String mdId) {
 		BasicDBObject query = (BasicDBObject) QueryBuilder.start("mdId").is(mdId).get();

@@ -54,6 +74,16 @@ public class MdstoreClient implements Closeable {
 		return getColl(db, currentId, true);
 	}

+	public List<MDStoreInfo> mdStoreWithTimestamp(final String mdFormat, final String mdLayout,
+		final String mdInterpretation) {
+		Map<String, String> res = validCollections(mdFormat, mdLayout, mdInterpretation);
+		return res
+			.entrySet()
+			.stream()
+			.map(e -> new MDStoreInfo(e.getKey(), e.getValue(), getLatestTimestamp(e.getValue())))
+			.collect(Collectors.toList());
+	}
+
 	public Map<String, String> validCollections(
 		final String mdFormat, final String mdLayout, final String mdInterpretation) {
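A sketch of how the new timestamp-aware lookup can be driven (the connection string and store coordinates are illustrative; the same values appear in MdStoreClientTest further below):

	try (MdstoreClient client = new MdstoreClient("mongodb://localhost:27017", "mdstore")) {
		for (MDStoreInfo info : client.mdStoreWithTimestamp("ODF", "store", "cleaned")) {
			System.out.println(info.getMdstore() + " -> " + info.getLatestTimestamp());
		}
	}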
ZenodoAPIClient.java:

@@ -9,13 +9,13 @@ import java.util.concurrent.TimeUnit;
 import org.apache.http.HttpHeaders;
 import org.apache.http.entity.ContentType;
+import org.jetbrains.annotations.NotNull;

 import com.google.gson.Gson;

 import eu.dnetlib.dhp.common.api.zenodo.ZenodoModel;
 import eu.dnetlib.dhp.common.api.zenodo.ZenodoModelList;
 import okhttp3.*;
-import org.jetbrains.annotations.NotNull;

 public class ZenodoAPIClient implements Serializable {

@@ -80,7 +80,7 @@ public class ZenodoAPIClient implements Serializable {
 		int responseCode = conn.getResponseCode();
 		conn.disconnect();

-		if(!checkOKStatus(responseCode))
+		if (!checkOKStatus(responseCode))
 			throw new IOException("Unexpected code " + responseCode + body);

 		ZenodoModel newSubmission = new Gson().fromJson(body, ZenodoModel.class);

@@ -115,7 +115,7 @@ public class ZenodoAPIClient implements Serializable {
 		}
 		int responseCode = conn.getResponseCode();
-		if(! checkOKStatus(responseCode)){
+		if (!checkOKStatus(responseCode)) {
 			throw new IOException("Unexpected code " + responseCode + getBody(conn));
 		}

@@ -155,7 +155,6 @@ public class ZenodoAPIClient implements Serializable {
 		conn.setDoOutput(true);
 		conn.setRequestMethod("PUT");

-
 		try (OutputStream os = conn.getOutputStream()) {
 			byte[] input = metadata.getBytes("utf-8");
 			os.write(input, 0, input.length);

@@ -164,19 +163,18 @@ public class ZenodoAPIClient implements Serializable {
 		final int responseCode = conn.getResponseCode();
 		conn.disconnect();
-		if(!checkOKStatus(responseCode))
+		if (!checkOKStatus(responseCode))
 			throw new IOException("Unexpected code " + responseCode + getBody(conn));

 		return responseCode;

-
 	}

 	private boolean checkOKStatus(int responseCode) {

-		if(HttpURLConnection.HTTP_OK != responseCode ||
+		if (HttpURLConnection.HTTP_OK != responseCode ||
 			HttpURLConnection.HTTP_CREATED != responseCode)
-			return true ;
+			return true;
 		return false;
 	}

@@ -233,7 +231,6 @@ public class ZenodoAPIClient implements Serializable {
 		conn.setDoOutput(true);
 		conn.setRequestMethod("POST");

-
 		try (OutputStream os = conn.getOutputStream()) {
 			byte[] input = json.getBytes("utf-8");
 			os.write(input, 0, input.length);

@@ -245,7 +242,7 @@ public class ZenodoAPIClient implements Serializable {
 		int responseCode = conn.getResponseCode();

 		conn.disconnect();
-		if(!checkOKStatus(responseCode))
+		if (!checkOKStatus(responseCode))
 			throw new IOException("Unexpected code " + responseCode + body);

 		ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);

@@ -290,13 +287,12 @@ public class ZenodoAPIClient implements Serializable {
 		int responseCode = conn.getResponseCode();
 		conn.disconnect();

-		if(!checkOKStatus(responseCode))
+		if (!checkOKStatus(responseCode))
 			throw new IOException("Unexpected code " + responseCode + body);

 		ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);
 		bucket = zenodoModel.getLinks().getBucket();
-
-
 		return responseCode;

 	}

@@ -331,22 +327,16 @@ public class ZenodoAPIClient implements Serializable {
 		conn.setDoOutput(true);
 		conn.setRequestMethod("GET");

-
-
 		String body = getBody(conn);

 		int responseCode = conn.getResponseCode();
-
 		conn.disconnect();
-		if(!checkOKStatus(responseCode))
+		if (!checkOKStatus(responseCode))
 			throw new IOException("Unexpected code " + responseCode + body);

-
-
 		return body;

-
-
 	}

 	private String getBucket(String inputUurl) throws IOException {

@@ -363,15 +353,13 @@ public class ZenodoAPIClient implements Serializable {
 		int responseCode = conn.getResponseCode();

 		conn.disconnect();
-		if(!checkOKStatus(responseCode))
+		if (!checkOKStatus(responseCode))
 			throw new IOException("Unexpected code " + responseCode + body);

 		ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);

 		return zenodoModel.getLinks().getBucket();

-
-
 	}

 }
Vocabulary.java:

@@ -4,6 +4,7 @@ package eu.dnetlib.dhp.common.vocabulary;
 import java.io.Serializable;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;

 import org.apache.commons.lang3.StringUtils;

@@ -66,21 +67,39 @@ public class Vocabulary implements Serializable {
 	}

 	public Qualifier getTermAsQualifier(final String termId) {
-		if (StringUtils.isBlank(termId)) {
+		return getTermAsQualifier(termId, false);
+	}
+
+	public Qualifier getTermAsQualifier(final String termId, boolean strict) {
+		final VocabularyTerm term = getTerm(termId);
+		if (Objects.nonNull(term)) {
+			return OafMapperUtils.qualifier(term.getId(), term.getName(), getId(), getName());
+		} else if (Objects.isNull(term) && strict) {
 			return OafMapperUtils.unknown(getId(), getName());
-		} else if (termExists(termId)) {
-			final VocabularyTerm t = getTerm(termId);
-			return OafMapperUtils.qualifier(t.getId(), t.getName(), getId(), getName());
 		} else {
 			return OafMapperUtils.qualifier(termId, termId, getId(), getName());
 		}
 	}

 	public Qualifier getSynonymAsQualifier(final String syn) {
+		return getSynonymAsQualifier(syn, false);
+	}
+
+	public Qualifier getSynonymAsQualifier(final String syn, boolean strict) {
 		return Optional
 			.ofNullable(getTermBySynonym(syn))
-			.map(term -> getTermAsQualifier(term.getId()))
+			.map(term -> getTermAsQualifier(term.getId(), strict))
 			.orElse(null);
 	}

+	public Qualifier lookup(String id) {
+		return lookup(id, false);
+	}
+
+	public Qualifier lookup(String id, boolean strict) {
+		return Optional
+			.ofNullable(getSynonymAsQualifier(id, strict))
+			.orElse(getTermAsQualifier(id, strict));
+	}
+
 }
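The new strict flag controls what an unresolvable term maps to; a sketch assuming a vocabulary instance that knows the term "eng" but not "xx":

	Qualifier known = vocabulary.lookup("eng");        // resolved term qualifier
	Qualifier loose = vocabulary.lookup("xx");         // non-strict: classid/classname fall back to "xx"
	Qualifier strict = vocabulary.lookup("xx", true);  // strict: the UNKNOWN qualifier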
VocabularyGroup.java:

@@ -81,6 +81,13 @@ public class VocabularyGroup implements Serializable {
 		vocs.put(id.toLowerCase(), new Vocabulary(id, name));
 	}

+	public Optional<Vocabulary> find(final String vocId) {
+		return Optional
+			.ofNullable(vocId)
+			.map(String::toLowerCase)
+			.map(vocs::get);
+	}
+
 	public void addTerm(final String vocId, final String id, final String name) {
 		if (vocabularyExists(vocId)) {
 			vocs.get(vocId.toLowerCase()).addTerm(id, name);
DispatchEntitiesSparkJob.java:

@@ -11,25 +11,18 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.function.FilterFunction;
 import org.apache.spark.api.java.function.MapFunction;
-import org.apache.spark.sql.Encoders;
-import org.apache.spark.sql.SaveMode;
-import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import com.fasterxml.jackson.databind.ObjectMapper;

 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.common.HdfsSupport;
-import eu.dnetlib.dhp.schema.oaf.Oaf;
-import eu.dnetlib.dhp.schema.oaf.OafEntity;
+import eu.dnetlib.dhp.schema.common.ModelSupport;

 public class DispatchEntitiesSparkJob {

 	private static final Logger log = LoggerFactory.getLogger(DispatchEntitiesSparkJob.class);

 	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

 	public static void main(String[] args) throws Exception {

 		String jsonConfiguration = IOUtils

@@ -54,44 +47,51 @@ public class DispatchEntitiesSparkJob {
 		String outputPath = parser.get("outputPath");
 		log.info("outputPath: {}", outputPath);

-		String graphTableClassName = parser.get("graphTableClassName");
-		log.info("graphTableClassName: {}", graphTableClassName);
-
-		@SuppressWarnings("unchecked")
-		Class<? extends OafEntity> entityClazz = (Class<? extends OafEntity>) Class.forName(graphTableClassName);
+		boolean filterInvisible = Boolean.parseBoolean(parser.get("filterInvisible"));
+		log.info("filterInvisible: {}", filterInvisible);

 		SparkConf conf = new SparkConf();
 		runWithSparkSession(
 			conf,
 			isSparkSessionManaged,
-			spark -> {
-				HdfsSupport.remove(outputPath, spark.sparkContext().hadoopConfiguration());
-				dispatchEntities(spark, inputPath, entityClazz, outputPath);
-			});
+			spark -> dispatchEntities(spark, inputPath, outputPath, filterInvisible));
 	}

-	private static <T extends Oaf> void dispatchEntities(
+	private static void dispatchEntities(
 		SparkSession spark,
 		String inputPath,
-		Class<T> clazz,
-		String outputPath) {
+		String outputPath,
+		boolean filterInvisible) {

-		spark
+		Dataset<String> df = spark.read().textFile(inputPath);
+
+		ModelSupport.oafTypes.entrySet().parallelStream().forEach(entry -> {
+			String entityType = entry.getKey();
+			Class<?> clazz = entry.getValue();
+
+			final String entityPath = outputPath + "/" + entityType;
+			if (!entityType.equalsIgnoreCase("relation")) {
+				HdfsSupport.remove(entityPath, spark.sparkContext().hadoopConfiguration());
+				Dataset<Row> entityDF = spark
 					.read()
-			.textFile(inputPath)
-			.filter((FilterFunction<String>) s -> isEntityType(s, clazz))
-			.map((MapFunction<String, String>) s -> StringUtils.substringAfter(s, "|"), Encoders.STRING())
+					.schema(Encoders.bean(clazz).schema())
+					.json(
+						df
+							.filter((FilterFunction<String>) s -> s.startsWith(clazz.getName()))
 							.map(
-				(MapFunction<String, T>) value -> OBJECT_MAPPER.readValue(value, clazz),
-				Encoders.bean(clazz))
+								(MapFunction<String, String>) s -> StringUtils.substringAfter(s, "|"),
+								Encoders.STRING()));
+
+				if (filterInvisible) {
+					entityDF = entityDF.filter("dataInfo.invisible != true");
+				}
+
+				entityDF
 					.write()
 					.mode(SaveMode.Overwrite)
 					.option("compression", "gzip")
-			.json(outputPath);
-	}
+					.json(entityPath);
+			}

-	private static <T extends Oaf> boolean isEntityType(final String s, final Class<T> clazz) {
-		return StringUtils.substringBefore(s, "|").equals(clazz.getName());
+		});
 	}

 }
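The rewritten dispatcher expects each input line to carry the fully qualified entity class name, a '|' separator, then the entity JSON (StringUtils.substringAfter cuts at the first '|'); an illustrative line with a made-up payload:

	eu.dnetlib.dhp.schema.oaf.Publication|{"id":"p1","dataInfo":{"invisible":false}}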
DoiCleaningRule.java (new file):

@@ -0,0 +1,14 @@

package eu.dnetlib.dhp.schema.oaf.utils;

public class DoiCleaningRule {

	public static String clean(final String doi) {
		return doi
			.toLowerCase()
			.replaceAll("\\s", "")
			.replaceAll("^doi:", "")
			.replaceFirst(CleaningFunctions.DOI_PREFIX_REGEX, CleaningFunctions.DOI_PREFIX);
	}

}
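Effect of the rule on an illustrative input (the final step depends on CleaningFunctions.DOI_PREFIX_REGEX, defined elsewhere in the codebase):

	DoiCleaningRule.clean("DOI:10.1000/ABC 123"); // -> "10.1000/abc123"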
FundRefCleaningRule.java (new file):

@@ -0,0 +1,25 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FundRefCleaningRule {

	public static final Pattern PATTERN = Pattern.compile("\\d+");

	public static String clean(final String fundRefId) {

		String s = fundRefId
			.toLowerCase()
			.replaceAll("\\s", "");

		Matcher m = PATTERN.matcher(s);
		if (m.find()) {
			return m.group();
		} else {
			return "";
		}
	}

}
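The rule keeps only the first run of digits; illustrative inputs:

	FundRefCleaningRule.clean("fundref:501100000780"); // -> "501100000780"
	FundRefCleaningRule.clean("no digits here");       // -> ""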
GraphCleaningFunctions.java:

@@ -1,6 +1,8 @@

 package eu.dnetlib.dhp.schema.oaf.utils;

+import static eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils.getProvenance;
+
 import java.time.LocalDate;
 import java.time.ZoneId;
 import java.time.format.DateTimeFormatter;

@@ -16,7 +18,6 @@ import com.github.sisyphsu.dateparser.DateParserUtils;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

-import eu.dnetlib.dhp.common.vocabulary.Vocabulary;
+import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
 import eu.dnetlib.dhp.schema.common.ModelSupport;

@@ -34,21 +35,139 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 	public static final String TITLE_FILTER_REGEX = String.format("(%s)|\\W|\\d", TITLE_TEST);

 	public static final int TITLE_FILTER_RESIDUAL_LENGTH = 5;
+	private static final String NAME_CLEANING_REGEX = "[\\r\\n\\t\\s]+";
+
+	public static <T extends Oaf> T cleanContext(T value, String contextId, String verifyParam) {
+		if (ModelSupport.isSubClass(value, Result.class)) {
+			final Result res = (Result) value;
+			if (shouldCleanContext(res, verifyParam)) {
+				res
+					.setContext(
+						res
+							.getContext()
+							.stream()
+							.filter(c -> !StringUtils.startsWith(c.getId().toLowerCase(), contextId))
+							.collect(Collectors.toList()));
+			}
+			return (T) res;
+		} else {
+			return value;
+		}
+	}
+
+	private static boolean shouldCleanContext(Result res, String verifyParam) {
+		boolean titleMatch = res
+			.getTitle()
+			.stream()
+			.filter(
+				t -> t
+					.getQualifier()
+					.getClassid()
+					.equalsIgnoreCase(ModelConstants.MAIN_TITLE_QUALIFIER.getClassid()))
+			.anyMatch(t -> t.getValue().toLowerCase().startsWith(verifyParam.toLowerCase()));
+
+		return titleMatch && Objects.nonNull(res.getContext());
+	}
+
+	public static <T extends Oaf> T cleanCountry(T value, String[] verifyParam, Set<String> hostedBy,
+		String collectedfrom, String country) {
+		if (ModelSupport.isSubClass(value, Result.class)) {
+			final Result res = (Result) value;
+			if (res.getInstance().stream().anyMatch(i -> hostedBy.contains(i.getHostedby().getKey())) ||
+				!res.getCollectedfrom().stream().anyMatch(cf -> cf.getValue().equals(collectedfrom))) {
+				return (T) res;
+			}
+
+			List<StructuredProperty> ids = getPidsAndAltIds(res).collect(Collectors.toList());
+			if (ids
+				.stream()
+				.anyMatch(
+					p -> p
+						.getQualifier()
+						.getClassid()
+						.equals(PidType.doi.toString()) && pidInParam(p.getValue(), verifyParam))) {
+				res
+					.setCountry(
+						res
+							.getCountry()
+							.stream()
+							.filter(
+								c -> toTakeCountry(c, country))
+							.collect(Collectors.toList()));
+			}
+
+			return (T) res;
+		} else {
+			return value;
+		}
+	}
+
+	private static <T extends Result> Stream<StructuredProperty> getPidsAndAltIds(T r) {
+		final Stream<StructuredProperty> resultPids = Optional
+			.ofNullable(r.getPid())
+			.map(Collection::stream)
+			.orElse(Stream.empty());
+
+		final Stream<StructuredProperty> instancePids = Optional
+			.ofNullable(r.getInstance())
+			.map(
+				instance -> instance
+					.stream()
+					.flatMap(
+						i -> Optional
+							.ofNullable(i.getPid())
+							.map(Collection::stream)
+							.orElse(Stream.empty())))
+			.orElse(Stream.empty());
+
+		final Stream<StructuredProperty> instanceAltIds = Optional
+			.ofNullable(r.getInstance())
+			.map(
+				instance -> instance
+					.stream()
+					.flatMap(
+						i -> Optional
+							.ofNullable(i.getAlternateIdentifier())
+							.map(Collection::stream)
+							.orElse(Stream.empty())))
+			.orElse(Stream.empty());
+
+		return Stream
+			.concat(
+				Stream.concat(resultPids, instancePids),
+				instanceAltIds);
+	}
+
+	private static boolean pidInParam(String value, String[] verifyParam) {
+		for (String s : verifyParam)
+			if (value.startsWith(s))
+				return true;
+		return false;
+	}
+
+	private static boolean toTakeCountry(Country c, String country) {
+		// If dataInfo is not set, or dataInfo.inferenceprovenance is not set or not present then it cannot be
+		// inserted via propagation
+		if (!Optional.ofNullable(c.getDataInfo()).isPresent())
+			return true;
+		if (!Optional.ofNullable(c.getDataInfo().getInferenceprovenance()).isPresent())
+			return true;
+		return !(c
+			.getClassid()
+			.equalsIgnoreCase(country) &&
+			c.getDataInfo().getInferenceprovenance().equals("propagation"));
+	}

 	public static <T extends Oaf> T fixVocabularyNames(T value) {
-		if (value instanceof Datasource) {
-			// nothing to clean here
-		} else if (value instanceof Project) {
-			// nothing to clean here
-		} else if (value instanceof Organization) {
-			Organization o = (Organization) value;
-			if (Objects.nonNull(o.getCountry())) {
-				fixVocabName(o.getCountry(), ModelConstants.DNET_COUNTRY_TYPE);
-			}
-		} else if (value instanceof Relation) {
-			// nothing to clean here
-		} else if (value instanceof Result) {
+		if (value instanceof OafEntity) {
+
+			OafEntity e = (OafEntity) value;
+
+			Optional
+				.ofNullable(e.getPid())
+				.ifPresent(pid -> pid.forEach(p -> fixVocabName(p.getQualifier(), ModelConstants.DNET_PID_TYPES)));
+
+			if (value instanceof Result) {
 				Result r = (Result) value;

 				fixVocabName(r.getLanguage(), ModelConstants.DNET_LANGUAGES);

@@ -62,6 +181,11 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 				for (Instance i : r.getInstance()) {
 					fixVocabName(i.getAccessright(), ModelConstants.DNET_ACCESS_MODES);
 					fixVocabName(i.getRefereed(), ModelConstants.DNET_REVIEW_LEVELS);
+					Optional
+						.ofNullable(i.getPid())
+						.ifPresent(
+							pid -> pid.forEach(p -> fixVocabName(p.getQualifier(), ModelConstants.DNET_PID_TYPES)));
+
 				}
 			}
 			if (Objects.nonNull(r.getAuthor())) {

@@ -82,13 +206,26 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 			} else if (value instanceof Software) {

 			}
+			} else if (value instanceof Datasource) {
+				// nothing to clean here
+			} else if (value instanceof Project) {
+				// nothing to clean here
+			} else if (value instanceof Organization) {
+				Organization o = (Organization) value;
+				if (Objects.nonNull(o.getCountry())) {
+					fixVocabName(o.getCountry(), ModelConstants.DNET_COUNTRY_TYPE);
+				}
+
+			}
+		} else if (value instanceof Relation) {
+			// nothing to clean here
 		}

 		return value;
 	}

 	public static <T extends Oaf> boolean filter(T value) {
-		if (Boolean.TRUE
+		if (!(value instanceof Relation) && (Boolean.TRUE
 			.equals(
 				Optional
 					.ofNullable(value)

@@ -99,15 +236,16 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 						d -> Optional
 							.ofNullable(d.getInvisible())
 							.orElse(true))
-					.orElse(true))
-				.orElse(true))) {
+					.orElse(false))
+				.orElse(true)))) {
 			return true;
 		}

 		if (value instanceof Datasource) {
 			// nothing to evaluate here
 		} else if (value instanceof Project) {
-			// nothing to evaluate here
+			final Project p = (Project) value;
+			return Objects.nonNull(p.getCode()) && StringUtils.isNotBlank(p.getCode().getValue());
 		} else if (value instanceof Organization) {
 			// nothing to evaluate here
 		} else if (value instanceof Relation) {

@@ -134,6 +272,14 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 	}

 	public static <T extends Oaf> T cleanup(T value, VocabularyGroup vocs) {
+
+		if (value instanceof OafEntity) {
+
+			OafEntity e = (OafEntity) value;
+			if (Objects.nonNull(e.getPid())) {
+				e.setPid(processPidCleaning(e.getPid()));
+			}
+
 		if (value instanceof Datasource) {
 			// nothing to clean here
 		} else if (value instanceof Project) {

@@ -143,21 +289,16 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 			if (Objects.isNull(o.getCountry()) || StringUtils.isBlank(o.getCountry().getClassid())) {
 				o.setCountry(ModelConstants.UNKNOWN_COUNTRY);
 			}
-		} else if (value instanceof Relation) {
-			Relation r = (Relation) value;
-
-			Optional<String> validationDate = doCleanDate(r.getValidationDate());
-			if (validationDate.isPresent()) {
-				r.setValidationDate(validationDate.get());
-				r.setValidated(true);
-			} else {
-				r.setValidationDate(null);
-				r.setValidated(false);
-			}
 		} else if (value instanceof Result) {
-
 			Result r = (Result) value;

+			if (Objects.nonNull(r.getFulltext())
+				&& (ModelConstants.SOFTWARE_RESULTTYPE_CLASSID.equals(r.getResulttype().getClassid()) ||
+					ModelConstants.DATASET_RESULTTYPE_CLASSID.equals(r.getResulttype().getClassid()))) {
+				r.setFulltext(null);
+
+			}
+
 			if (Objects.nonNull(r.getDateofacceptance())) {
 				Optional<String> date = cleanDateField(r.getDateofacceptance());
 				if (date.isPresent()) {

@@ -182,8 +323,18 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 						.filter(sp -> StringUtils.isNotBlank(sp.getValue()))
 						.collect(Collectors.toList()));
 			}
-			if (Objects.nonNull(r.getPublisher()) && StringUtils.isBlank(r.getPublisher().getValue())) {
+			if (Objects.nonNull(r.getPublisher())) {
+				if (StringUtils.isBlank(r.getPublisher().getValue())) {
 					r.setPublisher(null);
+				} else {
+					r
+						.getPublisher()
+						.setValue(
+							r
+								.getPublisher()
+								.getValue()
+								.replaceAll(NAME_CLEANING_REGEX, " "));
+				}
 			}
 			if (Objects.isNull(r.getLanguage()) || StringUtils.isBlank(r.getLanguage().getClassid())) {
 				r

@@ -191,8 +342,8 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 						qualifier("und", "Undetermined", ModelConstants.DNET_LANGUAGES));
 			}
 			if (Objects.nonNull(r.getSubject())) {
-				r
-					.setSubject(
+				List<Subject> subjects = Lists
+					.newArrayList(
 						r
 							.getSubject()
 							.stream()

@@ -200,8 +351,26 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 							.filter(sp -> StringUtils.isNotBlank(sp.getValue()))
 							.filter(sp -> Objects.nonNull(sp.getQualifier()))
 							.filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
+							.map(s -> {
+								if ("dnet:result_subject".equals(s.getQualifier().getClassid())) {
+									s.getQualifier().setClassid(ModelConstants.DNET_SUBJECT_TYPOLOGIES);
+									s.getQualifier().setClassname(ModelConstants.DNET_SUBJECT_TYPOLOGIES);
+								}
+								return s;
+							})
 							.map(GraphCleaningFunctions::cleanValue)
-							.collect(Collectors.toList()));
+							.collect(
+								Collectors
+									.toMap(
+										s -> Optional
+											.ofNullable(s.getQualifier())
+											.map(q -> q.getClassid() + s.getValue())
+											.orElse(s.getValue()),
+										Function.identity(),
+										(s1, s2) -> Collections
+											.min(Lists.newArrayList(s1, s2), new SubjectProvenanceComparator())))
+							.values());
+				r.setSubject(subjects);
 			}
 			if (Objects.nonNull(r.getTitle())) {
 				r

@@ -250,9 +419,6 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 						.map(GraphCleaningFunctions::cleanValue)
 						.collect(Collectors.toList()));
 			}
-			if (Objects.nonNull(r.getPid())) {
-				r.setPid(processPidCleaning(r.getPid()));
-			}
 			if (Objects.isNull(r.getResourcetype()) || StringUtils.isBlank(r.getResourcetype().getClassid())) {
 				r
 					.setResourcetype(

@@ -261,13 +427,15 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 			if (Objects.nonNull(r.getInstance())) {

 				for (Instance i : r.getInstance()) {
-					if (!vocs.termExists(ModelConstants.DNET_PUBLICATION_RESOURCE, i.getInstancetype().getClassid())) {
+					if (!vocs
+						.termExists(ModelConstants.DNET_PUBLICATION_RESOURCE, i.getInstancetype().getClassid())) {
 						if (r instanceof Publication) {
 							i
 								.setInstancetype(
 									OafMapperUtils
 										.qualifier(
-											"0038", "Other literature type", ModelConstants.DNET_PUBLICATION_RESOURCE,
+											"0038", "Other literature type",
+											ModelConstants.DNET_PUBLICATION_RESOURCE,
 											ModelConstants.DNET_PUBLICATION_RESOURCE));
 						} else if (r instanceof Dataset) {
 							i

@@ -311,7 +479,8 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 						});
 					});

-				if (Objects.isNull(i.getAccessright()) || StringUtils.isBlank(i.getAccessright().getClassid())) {
+				if (Objects.isNull(i.getAccessright())
+					|| StringUtils.isBlank(i.getAccessright().getClassid())) {
 					i
 						.setAccessright(
 							accessRight(

@@ -321,7 +490,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 				if (Objects.isNull(i.getHostedby()) || StringUtils.isBlank(i.getHostedby().getKey())) {
 					i.setHostedby(ModelConstants.UNKNOWN_REPOSITORY);
 				}
-				if (Objects.isNull(i.getRefereed())) {
+				if (Objects.isNull(i.getRefereed()) || StringUtils.isBlank(i.getRefereed().getClassid())) {
 					i.setRefereed(qualifier("0000", "Unknown", ModelConstants.DNET_REVIEW_LEVELS));
 				}
 				if (Objects.nonNull(i.getDateofacceptance())) {

@@ -332,9 +501,15 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 						i.setDateofacceptance(null);
 					}
 				}
+				if (StringUtils.isNotBlank(i.getFulltext()) &&
+					(ModelConstants.SOFTWARE_RESULTTYPE_CLASSID.equals(r.getResulttype().getClassid()) ||
+						ModelConstants.DATASET_RESULTTYPE_CLASSID.equals(r.getResulttype().getClassid()))) {
+					i.setFulltext(null);
+				}
 			}
-			if (Objects.isNull(r.getBestaccessright()) || StringUtils.isBlank(r.getBestaccessright().getClassid())) {
+			if (Objects.isNull(r.getBestaccessright())
+				|| StringUtils.isBlank(r.getBestaccessright().getClassid())) {
 				Qualifier bestaccessrights = OafMapperUtils.createBestAccessRights(r.getInstance());
 				if (Objects.isNull(bestaccessrights)) {
 					r

@@ -355,6 +530,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 						.filter(Objects::nonNull)
 						.filter(a -> StringUtils.isNotBlank(a.getFullname()))
 						.filter(a -> StringUtils.isNotBlank(a.getFullname().replaceAll("[\\W]", "")))
+						.map(GraphCleaningFunctions::cleanupAuthor)
 						.collect(Collectors.toList()));

 				boolean nullRank = r

@@ -382,14 +558,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 					.filter(p -> StringUtils.isNotBlank(p.getValue()))
 					.map(p -> {
 						// hack to distinguish orcid from orcid_pending
-						String pidProvenance = Optional
-							.ofNullable(p.getDataInfo())
-							.map(
-								d -> Optional
-									.ofNullable(d.getProvenanceaction())
-									.map(Qualifier::getClassid)
-									.orElse(""))
-							.orElse("");
+						String pidProvenance = getProvenance(p.getDataInfo());
 						if (p
 							.getQualifier()
 							.getClassid()

@@ -437,11 +606,54 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 			} else if (value instanceof Software) {

 			}

 		}

+		} else if (value instanceof Relation) {
+			Relation r = (Relation) value;
+
+			Optional<String> validationDate = doCleanDate(r.getValidationDate());
+			if (validationDate.isPresent()) {
+				r.setValidationDate(validationDate.get());
+				r.setValidated(true);
+			} else {
+				r.setValidationDate(null);
+				r.setValidated(false);
+			}
+		}
+
 		return value;
 	}

+	private static Author cleanupAuthor(Author author) {
+		if (StringUtils.isNotBlank(author.getFullname())) {
+			author
+				.setFullname(
+					author
+						.getFullname()
+						.replaceAll(NAME_CLEANING_REGEX, " ")
+						.replace("\"", "\\\""));
+		}
+		if (StringUtils.isNotBlank(author.getName())) {
+			author
+				.setName(
+					author
+						.getName()
+						.replaceAll(NAME_CLEANING_REGEX, " ")
+						.replace("\"", "\\\""));
+		}
+		if (StringUtils.isNotBlank(author.getSurname())) {
+			author
+				.setSurname(
+					author
+						.getSurname()
+						.replaceAll(NAME_CLEANING_REGEX, " ")
+						.replace("\"", "\\\""));
+		}
+
+		return author;
+	}
+
 	private static Optional<String> cleanDateField(Field<String> dateofacceptance) {
 		return Optional
 			.ofNullable(dateofacceptance)

@@ -491,7 +703,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 			.filter(sp -> !PID_BLACKLIST.contains(sp.getValue().trim().toLowerCase()))
 			.filter(sp -> Objects.nonNull(sp.getQualifier()))
 			.filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
-			.map(CleaningFunctions::normalizePidValue)
+			.map(PidCleaner::normalizePidValue)
 			.filter(CleaningFunctions::pidFilter)
 			.collect(Collectors.toList());
 	}

@@ -520,6 +732,11 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 		return s;
 	}

+	protected static Subject cleanValue(Subject s) {
+		s.setValue(s.getValue().replaceAll(CLEANING_REGEX, " "));
+		return s;
+	}
+
 	protected static Field<String> cleanValue(Field<String> s) {
 		s.setValue(s.getValue().replaceAll(CLEANING_REGEX, " "));
 		return s;
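A hedged sketch of how these helpers chain together (the call order is an assumption based on the method semantics, not taken from this commit; 'vocs' is a loaded VocabularyGroup and 'r' an entity deserialised from the graph):

	Result r = ...; // one graph record, e.g. read via ObjectMapper
	r = GraphCleaningFunctions.fixVocabularyNames(r);
	if (GraphCleaningFunctions.filter(r)) {
		r = GraphCleaningFunctions.cleanup(r, vocs);
	}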
GridCleaningRule.java (new file):

@@ -0,0 +1,24 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class GridCleaningRule {

	public static final Pattern PATTERN = Pattern.compile("(?<grid>\\d{4,6}\\.[0-9a-z]{1,2})");

	public static String clean(String grid) {
		String s = grid
			.replaceAll("\\s", "")
			.toLowerCase();

		Matcher m = PATTERN.matcher(s);
		if (m.find()) {
			return "grid." + m.group("grid");
		}

		return "";
	}

}
ISNICleaningRule.java (new file):

@@ -0,0 +1,21 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// https://www.wikidata.org/wiki/Property:P213
public class ISNICleaningRule {

	public static final Pattern PATTERN = Pattern.compile("([0]{4}) ?([0-9]{4}) ?([0-9]{4}) ?([0-9]{3}[0-9X])");

	public static String clean(final String isni) {

		Matcher m = PATTERN.matcher(isni);
		if (m.find()) {
			return String.join("", m.group(1), m.group(2), m.group(3), m.group(4));
		} else {
			return "";
		}
	}
}
OafMapperUtils.java:

@@ -14,6 +14,7 @@ import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;

+import eu.dnetlib.dhp.schema.common.AccessRightComparator;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
 import eu.dnetlib.dhp.schema.common.ModelSupport;
 import eu.dnetlib.dhp.schema.oaf.*;

@@ -141,7 +142,7 @@ public class OafMapperUtils {
 	}

 	public static Qualifier unknown(final String schemeid, final String schemename) {
-		return qualifier("UNKNOWN", "Unknown", schemeid, schemename);
+		return qualifier(UNKNOWN, "Unknown", schemeid, schemename);
 	}

 	public static AccessRight accessRight(

@@ -189,6 +190,17 @@ public class OafMapperUtils {
 		return q;
 	}

+	public static Subject subject(
+		final String value,
+		final String classid,
+		final String classname,
+		final String schemeid,
+		final String schemename,
+		final DataInfo dataInfo) {
+
+		return subject(value, qualifier(classid, classname, schemeid, schemename), dataInfo);
+	}
+
 	public static StructuredProperty structuredProperty(
 		final String value,
 		final String classid,

@@ -200,6 +212,20 @@ public class OafMapperUtils {
 		return structuredProperty(value, qualifier(classid, classname, schemeid, schemename), dataInfo);
 	}

+	public static Subject subject(
+		final String value,
+		final Qualifier qualifier,
+		final DataInfo dataInfo) {
+		if (value == null) {
+			return null;
+		}
+		final Subject s = new Subject();
+		s.setValue(value);
+		s.setQualifier(qualifier);
+		s.setDataInfo(dataInfo);
+		return s;
+	}
+
 	public static StructuredProperty structuredProperty(
 		final String value,
 		final Qualifier qualifier,

@@ -477,4 +503,15 @@ public class OafMapperUtils {
 		rel.setProperties(properties);
 		return rel;
 	}
+
+	public static String getProvenance(DataInfo dataInfo) {
+		return Optional
+			.ofNullable(dataInfo)
+			.map(
+				d -> Optional
+					.ofNullable(d.getProvenanceaction())
+					.map(Qualifier::getClassid)
+					.orElse(""))
+			.orElse("");
+	}
}
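A sketch of the new helpers (the subject values and scheme ids are illustrative; getProvenance degrades to an empty string when dataInfo or its provenance action is missing):

	Subject s = OafMapperUtils
		.subject("climate change", "keyword", "keyword", "dnet:subject", "dnet:subject", null);
	String prov = OafMapperUtils.getProvenance(s.getDataInfo()); // "" because dataInfo is null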
PICCleaningRule.java (new file):

@@ -0,0 +1,21 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PICCleaningRule {

	public static final Pattern PATTERN = Pattern.compile("\\d{9}");

	public static String clean(final String pic) {

		Matcher m = PATTERN.matcher(pic);
		if (m.find()) {
			return m.group();
		} else {
			return "";
		}
	}

}
PidCleaner.java (new file):

@@ -0,0 +1,62 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.Optional;

import eu.dnetlib.dhp.schema.oaf.StructuredProperty;

public class PidCleaner {

	/**
	 * Utility method that normalises PID values on a per-type basis.
	 * @param pid the PID whose value will be normalised.
	 * @return the PID containing the normalised value.
	 */
	public static StructuredProperty normalizePidValue(StructuredProperty pid) {
		pid
			.setValue(
				normalizePidValue(
					pid.getQualifier().getClassid(),
					pid.getValue()));

		return pid;
	}

	public static String normalizePidValue(String pidType, String pidValue) {
		String value = Optional
			.ofNullable(pidValue)
			.map(String::trim)
			.orElseThrow(() -> new IllegalArgumentException("PID value cannot be empty"));

		switch (pidType) {

			// TODO add cleaning for more PID types as needed

			// Result
			case "doi":
				return DoiCleaningRule.clean(value);
			case "pmid":
				return PmidCleaningRule.clean(value);
			case "pmc":
				return PmcCleaningRule.clean(value);
			case "handle":
			case "arXiv":
				return value;

			// Organization
			case "GRID":
				return GridCleaningRule.clean(value);
			case "ISNI":
				return ISNICleaningRule.clean(value);
			case "ROR":
				return RorCleaningRule.clean(value);
			case "PIC":
				return PICCleaningRule.clean(value);
			case "FundRef":
				return FundRefCleaningRule.clean(value);
			default:
				return value;
		}
	}

}
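How the dispatcher routes by PID type (inputs illustrative; unmatched types pass through after trimming):

	PidCleaner.normalizePidValue("doi", " DOI:10.1234/AB12 "); // -> "10.1234/ab12" via DoiCleaningRule
	PidCleaner.normalizePidValue("GRID", " 493784.5 ");        // -> "grid.493784.5" via GridCleaningRule
	PidCleaner.normalizePidValue("mag_id", "2912514537");      // -> "2912514537" (default branch)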
PmcCleaningRule.java (new file):

@@ -0,0 +1,24 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PmcCleaningRule {

	public static final Pattern PATTERN = Pattern.compile("PMC\\d{1,8}");

	public static String clean(String pmc) {
		String s = pmc
			.replaceAll("\\s", "")
			.toUpperCase();

		final Matcher m = PATTERN.matcher(s);

		if (m.find()) {
			return m.group();
		}
		return "";
	}

}
PmidCleaningRule.java (new file):

@@ -0,0 +1,25 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// https://researchguides.stevens.edu/c.php?g=442331&p=6577176
public class PmidCleaningRule {

	public static final Pattern PATTERN = Pattern.compile("[1-9]{1,8}");

	public static String clean(String pmid) {
		String s = pmid
			.toLowerCase()
			.replaceAll("\\s", "");

		final Matcher m = PATTERN.matcher(s);

		if (m.find()) {
			return m.group();
		}
		return "";
	}

}
RorCleaningRule.java (new file):

@@ -0,0 +1,27 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// https://ror.readme.io/docs/ror-identifier-pattern
public class RorCleaningRule {

	public static final String ROR_PREFIX = "https://ror.org/";

	private static final Pattern PATTERN = Pattern.compile("(?<ror>0[a-hj-km-np-tv-z|0-9]{6}[0-9]{2})");

	public static String clean(String ror) {
		String s = ror
			.replaceAll("\\s", "")
			.toLowerCase();

		Matcher m = PATTERN.matcher(s);

		if (m.find()) {
			return ROR_PREFIX + m.group("ror");
		}
		return "";
	}

}
SubjectProvenanceComparator.java (new file):

@@ -0,0 +1,46 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import static eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils.getProvenance;
import static org.apache.commons.lang3.StringUtils.isBlank;

import java.util.Comparator;

import eu.dnetlib.dhp.schema.oaf.Subject;

public class SubjectProvenanceComparator implements Comparator<Subject> {

	@Override
	public int compare(Subject left, Subject right) {

		String lProv = getProvenance(left.getDataInfo());
		String rProv = getProvenance(right.getDataInfo());

		if (isBlank(lProv) && isBlank(rProv))
			return 0;
		if (isBlank(lProv))
			return 1;
		if (isBlank(rProv))
			return -1;
		if (lProv.equals(rProv))
			return 0;
		if (lProv.toLowerCase().contains("crosswalk"))
			return -1;
		if (rProv.toLowerCase().contains("crosswalk"))
			return 1;
		if (lProv.toLowerCase().contains("user"))
			return -1;
		if (rProv.toLowerCase().contains("user"))
			return 1;
		if (lProv.toLowerCase().contains("propagation"))
			return -1;
		if (rProv.toLowerCase().contains("propagation"))
			return 1;
		if (lProv.toLowerCase().contains("iis"))
			return -1;
		if (rProv.toLowerCase().contains("iis"))
			return 1;

		return 0;
	}
}
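The comparator encodes a fixed preference order over provenance class ids (crosswalk, then user, then propagation, then iis; blank provenance sorts last); a usage sketch where getSubjects() is a hypothetical source of subjects:

	List<Subject> subjects = getSubjects(); // hypothetical
	subjects.sort(new SubjectProvenanceComparator()); // preferred provenance first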
MakeTarArchive parameters (JSON):

@@ -23,12 +23,6 @@
 		"paramLongName":"splitSize",
 		"paramDescription": "the maximum size of the archive",
 		"paramRequired": false
-	},
-	{
-		"paramName":"rn",
-		"paramLongName":"rename",
-		"paramDescription": "if the file has to be renamed",
-		"paramRequired": false
 	}
 ]
DispatchEntitiesSparkJob parameters (JSON):

@@ -18,9 +18,9 @@
 		"paramRequired": true
 	},
 	{
-		"paramName": "c",
-		"paramLongName": "graphTableClassName",
-		"paramDescription": "the graph entity class name",
+		"paramName": "fi",
+		"paramLongName": "filterInvisible",
+		"paramDescription": "if true filters out invisible entities",
 		"paramRequired": true
 	}
 ]
DedupLogModel.scala (new file):

@@ -0,0 +1,10 @@
package eu.dnetlib.dhp.application.dedup.log

case class DedupLogModel(
  tag: String,
  configuration: String,
  entity: String,
  startTS: Long,
  endTS: Long,
  totalMs: Long
) {}
DedupLogWriter.scala (new file):

@@ -0,0 +1,14 @@
package eu.dnetlib.dhp.application.dedup.log

import org.apache.spark.sql.{SaveMode, SparkSession}

class DedupLogWriter(path: String) {

  def appendLog(dedupLogModel: DedupLogModel, spark: SparkSession): Unit = {
    import spark.implicits._
    val df = spark.createDataset[DedupLogModel](data = List(dedupLogModel))
    df.write.mode(SaveMode.Append).save(path)

  }

}
MdStoreClientTest.java (new file):

@@ -0,0 +1,36 @@

package eu.dnetlib.dhp.common;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

import org.junit.jupiter.api.Test;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

public class MdStoreClientTest {

	// @Test
	public void testMongoCollection() throws IOException {
		final MdstoreClient client = new MdstoreClient("mongodb://localhost:27017", "mdstore");

		final ObjectMapper mapper = new ObjectMapper();

		final List<MDStoreInfo> infos = client.mdStoreWithTimestamp("ODF", "store", "cleaned");

		infos.forEach(System.out::println);

		final String s = mapper.writeValueAsString(infos);

		Path fileName = Paths.get("/Users/sandro/mdstore_info.json");

		// Writing into the file
		Files.write(fileName, s.getBytes(StandardCharsets.UTF_8));

	}
}
AuthorMergerTest.java (deleted):

@@ -1,100 +0,0 @@

package eu.dnetlib.dhp.oa.merge;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.schema.oaf.Author;
import eu.dnetlib.dhp.schema.oaf.Publication;
import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
import eu.dnetlib.pace.util.MapDocumentUtil;
import scala.Tuple2;

class AuthorMergerTest {

	private String publicationsBasePath;

	private List<List<Author>> authors;

	@BeforeEach
	public void setUp() throws Exception {

		publicationsBasePath = Paths
			.get(AuthorMergerTest.class.getResource("/eu/dnetlib/dhp/oa/merge").toURI())
			.toFile()
			.getAbsolutePath();

		authors = readSample(publicationsBasePath + "/publications_with_authors.json", Publication.class)
			.stream()
			.map(p -> p._2().getAuthor())
			.collect(Collectors.toList());

	}

	@Test
	void mergeTest() { // used in the dedup: threshold set to 0.95

		for (List<Author> authors1 : authors) {
			System.out.println("List " + (authors.indexOf(authors1) + 1));
			for (Author author : authors1) {
				System.out.println(authorToString(author));
			}
		}

		List<Author> merge = AuthorMerger.merge(authors);

		System.out.println("Merge ");
		for (Author author : merge) {
			System.out.println(authorToString(author));
		}

		Assertions.assertEquals(7, merge.size());

	}

	public <T> List<Tuple2<String, T>> readSample(String path, Class<T> clazz) {
		List<Tuple2<String, T>> res = new ArrayList<>();
		BufferedReader reader;
		try {
			reader = new BufferedReader(new FileReader(path));
			String line = reader.readLine();
			while (line != null) {
				res
					.add(
						new Tuple2<>(
							MapDocumentUtil.getJPathString("$.id", line),
							new ObjectMapper().readValue(line, clazz)));
				// read next line
				line = reader.readLine();
			}
			reader.close();
		} catch (IOException e) {
			e.printStackTrace();
		}

		return res;
	}

	public String authorToString(Author a) {

		String print = "Fullname = ";
		print += a.getFullname() + " pid = [";
		if (a.getPid() != null)
			for (StructuredProperty sp : a.getPid()) {
				print += sp.toComparableString() + " ";
			}
		print += "]";
		return print;
	}
}
@@ -0,0 +1,18 @@
|
|||
|
||||
package eu.dnetlib.dhp.schema.oaf.utils;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class GridCleaningRuleTest {
|
||||
|
||||
@Test
|
||||
void testCleaning() {
|
||||
assertEquals("grid.493784.5", GridCleaningRule.clean("grid.493784.5"));
|
||||
assertEquals("grid.493784.5x", GridCleaningRule.clean("grid.493784.5x"));
|
||||
assertEquals("grid.493784.5x", GridCleaningRule.clean("493784.5x"));
|
||||
assertEquals("", GridCleaningRule.clean("493x784.5x"));
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,19 @@
|
|||
|
||||
package eu.dnetlib.dhp.schema.oaf.utils;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class ISNICleaningRuleTest {
|
||||
|
||||
@Test
|
||||
void testCleaning() {
|
||||
assertEquals("0000000463436020", ISNICleaningRule.clean("0000 0004 6343 6020"));
|
||||
assertEquals("0000000463436020", ISNICleaningRule.clean("0000000463436020"));
|
||||
assertEquals("", ISNICleaningRule.clean("Q30256598"));
|
||||
assertEquals("0000000493403529", ISNICleaningRule.clean("ISNI:0000000493403529"));
|
||||
assertEquals("000000008614884X", ISNICleaningRule.clean("0000 0000 8614 884X"));
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,19 @@
|
|||
|
||||
package eu.dnetlib.dhp.schema.oaf.utils;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class PICCleaningRuleTest {
|
||||
|
||||
@Test
|
||||
void testCleaning() {
|
||||
assertEquals("887624982", PICCleaningRule.clean("887624982"));
|
||||
assertEquals("", PICCleaningRule.clean("887 624982"));
|
||||
assertEquals("887624982", PICCleaningRule.clean(" 887624982 "));
|
||||
assertEquals("887624982", PICCleaningRule.clean(" 887624982x "));
|
||||
assertEquals("887624982", PICCleaningRule.clean(" 88762498200 "));
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,19 @@
|
|||
|
||||
package eu.dnetlib.dhp.schema.oaf.utils;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class PmcCleaningRuleTest {
|
||||
|
||||
@Test
|
||||
void testCleaning() {
|
||||
assertEquals("PMC1234", PmcCleaningRule.clean("PMC1234"));
|
||||
assertEquals("PMC1234", PmcCleaningRule.clean(" PMC1234"));
|
||||
assertEquals("PMC12345678", PmcCleaningRule.clean("PMC12345678"));
|
||||
assertEquals("PMC12345678", PmcCleaningRule.clean("PMC123456789"));
|
||||
assertEquals("PMC12345678", PmcCleaningRule.clean("PMC 12345678"));
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,18 @@
|
|||
|
||||
package eu.dnetlib.dhp.schema.oaf.utils;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class PmidCleaningRuleTest {
|
||||
|
||||
@Test
|
||||
void testCleaning() {
|
||||
assertEquals("1234", PmidCleaningRule.clean("01234"));
|
||||
assertEquals("1234567", PmidCleaningRule.clean("0123 4567"));
|
||||
assertEquals("123", PmidCleaningRule.clean("0123x4567"));
|
||||
assertEquals("", PmidCleaningRule.clean("abc"));
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,17 @@
|
|||
|
||||
package eu.dnetlib.dhp.schema.oaf.utils;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class RorCleaningRuleTest {
|
||||
|
||||
@Test
|
||||
void testCleaning() {
|
||||
assertEquals("https://ror.org/05rpz9w55", RorCleaningRule.clean("https://ror.org/05rpz9w55"));
|
||||
assertEquals("https://ror.org/05rpz9w55", RorCleaningRule.clean("05rpz9w55"));
|
||||
assertEquals("", RorCleaningRule.clean("05rpz9w_55"));
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,110 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<parent>
|
||||
<groupId>eu.dnetlib.dhp</groupId>
|
||||
<artifactId>dhp</artifactId>
|
||||
<version>1.2.5-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
<groupId>eu.dnetlib.dhp</groupId>
|
||||
<artifactId>dhp-pace-core</artifactId>
|
||||
<version>1.2.5-SNAPSHOT</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>net.alchim31.maven</groupId>
|
||||
<artifactId>scala-maven-plugin</artifactId>
|
||||
<version>${net.alchim31.maven.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>scala-compile-first</id>
|
||||
<phase>initialize</phase>
|
||||
<goals>
|
||||
<goal>add-source</goal>
|
||||
<goal>compile</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>scala-test-compile</id>
|
||||
<phase>process-test-resources</phase>
|
||||
<goals>
|
||||
<goal>testCompile</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
<configuration>
|
||||
<failOnMultipleScalaVersions>true</failOnMultipleScalaVersions>
|
||||
<scalaCompatVersion>${scala.binary.version}</scalaCompatVersion>
|
||||
<scalaVersion>${scala.version}</scalaVersion>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
|
||||
</build>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>edu.cmu</groupId>
|
||||
<artifactId>secondstring</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.code.gson</groupId>
|
||||
<artifactId>gson</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-lang3</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>stringtemplate</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-logging</groupId>
|
||||
<artifactId>commons-logging</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.reflections</groupId>
|
||||
<artifactId>reflections</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.fasterxml.jackson.core</groupId>
|
||||
<artifactId>jackson-databind</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-math3</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.jayway.jsonpath</groupId>
|
||||
<artifactId>json-path</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.ibm.icu</groupId>
|
||||
<artifactId>icu4j</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.spark</groupId>
|
||||
<artifactId>spark-core_${scala.binary.version}</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.spark</groupId>
|
||||
<artifactId>spark-sql_${scala.binary.version}</artifactId>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
</project>
|
|
@@ -0,0 +1,46 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import eu.dnetlib.pace.common.AbstractPaceFunctions;
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
public abstract class AbstractClusteringFunction extends AbstractPaceFunctions implements ClusteringFunction {
|
||||
|
||||
protected Map<String, Integer> params;
|
||||
|
||||
public AbstractClusteringFunction(final Map<String, Integer> params) {
|
||||
this.params = params;
|
||||
}
|
||||
|
||||
protected abstract Collection<String> doApply(Config conf, String s);
|
||||
|
||||
@Override
|
||||
public Collection<String> apply(Config conf, List<String> fields) {
|
||||
return fields
|
||||
.stream()
|
||||
.filter(f -> !f.isEmpty())
|
||||
.map(this::normalize)
|
||||
.map(s -> filterAllStopWords(s))
|
||||
.map(s -> doApply(conf, s))
|
||||
.map(c -> filterBlacklisted(c, ngramBlacklist))
|
||||
.flatMap(c -> c.stream())
|
||||
.filter(StringUtils::isNotBlank)
|
||||
.collect(Collectors.toCollection(HashSet::new));
|
||||
}
|
||||
|
||||
public Map<String, Integer> getParams() {
|
||||
return params;
|
||||
}
|
||||
|
||||
protected Integer param(String name) {
|
||||
return params.get(name);
|
||||
}
|
||||
}
|
|
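To make the template above concrete, here is a minimal sketch of a custom clustering function; the class name, annotation value and sample titles are invented for illustration, and the Config argument is passed as null only because this particular doApply never consults it.

package eu.dnetlib.pace.clustering;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;

import eu.dnetlib.pace.config.Config;

// Hypothetical example: clusters records on the first token of each normalized field value.
@ClusteringClass("firsttoken")
public class FirstTokenExample extends AbstractClusteringFunction {

	public FirstTokenExample(final Map<String, Integer> params) {
		super(params);
	}

	@Override
	protected Collection<String> doApply(Config conf, String s) {
		// s arrives already normalized, lowercased and stopword-filtered by apply()
		return Collections.singletonList(s.split(" ")[0]);
	}

	public static void main(String[] args) {
		final ClusteringFunction f = new FirstTokenExample(Collections.emptyMap());
		// both titles share the first token, so a single key such as "knowledge" is expected
		System.out.println(f.apply(null, Arrays.asList("Knowledge Graphs", "Knowledge Extraction")));
	}
}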
@@ -0,0 +1,51 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.StringTokenizer;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("acronyms")
|
||||
public class Acronyms extends AbstractClusteringFunction {
|
||||
|
||||
public Acronyms(Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(Config conf, String s) {
|
||||
return extractAcronyms(s, param("max"), param("minLen"), param("maxLen"));
|
||||
}
|
||||
|
||||
private Set<String> extractAcronyms(final String s, int maxAcronyms, int minLen, int maxLen) {
|
||||
|
||||
final Set<String> acronyms = Sets.newLinkedHashSet();
|
||||
|
||||
for (int i = 0; i < maxAcronyms; i++) {
|
||||
|
||||
final StringTokenizer st = new StringTokenizer(s);
|
||||
final StringBuilder sb = new StringBuilder();
|
||||
|
||||
while (st.hasMoreTokens()) {
|
||||
final String token = st.nextToken();
|
||||
if (sb.length() > maxLen) {
|
||||
break;
|
||||
}
|
||||
if (token.length() > 1 && i < token.length()) {
|
||||
sb.append(token.charAt(i));
|
||||
}
|
||||
}
|
||||
String acronym = sb.toString();
|
||||
if (acronym.length() > minLen) {
|
||||
acronyms.add(acronym);
|
||||
}
|
||||
}
|
||||
return acronyms;
|
||||
}
|
||||
|
||||
}
|
|
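A usage sketch for the function above; the input string and the expected key are illustrative. Reading doApply, "max" is the number of character positions tried (i = 0 takes first letters, i = 1 second letters, and so on), while "minLen"/"maxLen" bound the acronym length; the Config argument is unused here, so null is passed.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.clustering.Acronyms;

public class AcronymsExample {
	public static void main(String[] args) {
		final Map<String, Integer> params = new HashMap<>();
		params.put("max", 1); // only i = 0: build the acronym from first letters
		params.put("minLen", 2); // keep acronyms strictly longer than 2 characters
		params.put("maxLen", 4); // stop appending once the acronym exceeds 4 characters
		final Acronyms acronyms = new Acronyms(params);
		// expected to print a key like "who"
		System.out.println(acronyms.apply(null, Arrays.asList("World Health Organization")));
	}
}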
@@ -0,0 +1,14 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.lang.annotation.ElementType;
|
||||
import java.lang.annotation.Retention;
|
||||
import java.lang.annotation.RetentionPolicy;
|
||||
import java.lang.annotation.Target;
|
||||
|
||||
@Retention(RetentionPolicy.RUNTIME)
|
||||
@Target(ElementType.TYPE)
|
||||
public @interface ClusteringClass {
|
||||
|
||||
public String value();
|
||||
}
|
|
@@ -0,0 +1,16 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
public interface ClusteringFunction {
|
||||
|
||||
public Collection<String> apply(Config config, List<String> fields);
|
||||
|
||||
public Map<String, Integer> getParams();
|
||||
|
||||
}
|
|
@@ -0,0 +1,28 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("immutablefieldvalue")
|
||||
public class ImmutableFieldValue extends AbstractClusteringFunction {
|
||||
|
||||
public ImmutableFieldValue(final Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(final Config conf, final String s) {
|
||||
final List<String> res = Lists.newArrayList();
|
||||
|
||||
res.add(s);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,54 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("keywordsclustering")
|
||||
public class KeywordsClustering extends AbstractClusteringFunction {
|
||||
|
||||
public KeywordsClustering(Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(final Config conf, String s) {
|
||||
|
||||
// extract the keywords and city names from the input string (as sets, hence without duplicates)
|
||||
Set<String> keywords = getKeywords(s, conf.translationMap(), params.getOrDefault("windowSize", 4));
|
||||
Set<String> cities = getCities(s, params.getOrDefault("windowSize", 4));
|
||||
|
||||
// set of keyword-city combinations to return as result
|
||||
final Collection<String> combinations = new LinkedHashSet<String>();
|
||||
|
||||
for (String keyword : keywordsToCodes(keywords, conf.translationMap())) {
|
||||
for (String city : citiesToCodes(cities)) {
|
||||
combinations.add(keyword + "-" + city);
|
||||
if (combinations.size() >= params.getOrDefault("max", 2)) {
|
||||
return combinations;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return combinations;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<String> apply(final Config conf, List<String> fields) {
|
||||
return fields
|
||||
.stream()
|
||||
.filter(f -> !f.isEmpty())
|
||||
.map(this::cleanup)
|
||||
.map(this::normalize)
|
||||
.map(s -> filterAllStopWords(s))
|
||||
.map(s -> doApply(conf, s))
|
||||
.map(c -> filterBlacklisted(c, ngramBlacklist))
|
||||
.flatMap(c -> c.stream())
|
||||
.filter(StringUtils::isNotBlank)
|
||||
.collect(Collectors.toCollection(HashSet::new));
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,79 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
import eu.dnetlib.pace.model.Person;
|
||||
|
||||
@ClusteringClass("lnfi")
|
||||
public class LastNameFirstInitial extends AbstractClusteringFunction {
|
||||
|
||||
private static final boolean DEFAULT_AGGRESSIVE = true;
|
||||
|
||||
public LastNameFirstInitial(final Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<String> apply(Config conf, List<String> fields) {
|
||||
return fields
|
||||
.stream()
|
||||
.filter(f -> !f.isEmpty())
|
||||
.map(this::normalize)
|
||||
.map(s -> doApply(conf, s))
|
||||
.map(c -> filterBlacklisted(c, ngramBlacklist))
|
||||
.flatMap(c -> c.stream())
|
||||
.filter(StringUtils::isNotBlank)
|
||||
.collect(Collectors.toCollection(HashSet::new));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String normalize(final String s) {
|
||||
return fixAliases(transliterate(nfd(unicodeNormalization(s))))
|
||||
// do not compact the regexes in a single expression, would cause StackOverflowError in case of large input
|
||||
// strings
|
||||
.replaceAll("[^ \\w]+", "")
|
||||
.replaceAll("(\\p{InCombiningDiacriticalMarks})+", "")
|
||||
.replaceAll("(\\p{Punct})+", " ")
|
||||
.replaceAll("(\\d)+", " ")
|
||||
.replaceAll("(\\n)+", " ")
|
||||
.trim();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(final Config conf, final String s) {
|
||||
|
||||
final List<String> res = Lists.newArrayList();
|
||||
|
||||
final boolean aggressive = (Boolean) (getParams().containsKey("aggressive") ? getParams().get("aggressive")
|
||||
: DEFAULT_AGGRESSIVE);
|
||||
|
||||
Person p = new Person(s, aggressive);
|
||||
|
||||
if (p.isAccurate()) {
|
||||
String lastName = p.getNormalisedSurname().toLowerCase();
|
||||
String firstInitial = p.getNormalisedFirstName().toLowerCase().substring(0, 1);
|
||||
|
||||
res.add(firstInitial.concat(lastName));
|
||||
} else { // not accurate, i.e. the parser could not identify a distinct name and surname
|
||||
List<String> fullname = Arrays.asList(p.getNormalisedFullname().split(" "));
|
||||
if (fullname.size() == 1) {
|
||||
res.add(p.getNormalisedFullname().toLowerCase());
|
||||
} else if (fullname.size() == 2) {
|
||||
res.add(fullname.get(0).substring(0, 1).concat(fullname.get(1)).toLowerCase());
|
||||
res.add(fullname.get(1).substring(0, 1).concat(fullname.get(0)).toLowerCase());
|
||||
} else {
|
||||
res.add(fullname.get(0).substring(0, 1).concat(fullname.get(fullname.size() - 1)).toLowerCase());
|
||||
res.add(fullname.get(fullname.size() - 1).substring(0, 1).concat(fullname.get(0)).toLowerCase());
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
}
|
|
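A sketch of the keys this function produces; the names are invented and the exact keys depend on how Person parses them, so treat the printed values as indicative only. The Config argument is unused by this doApply and is passed as null.

import java.util.Arrays;
import java.util.Collections;

import eu.dnetlib.pace.clustering.LastNameFirstInitial;

public class LastNameFirstInitialExample {
	public static void main(String[] args) {
		final LastNameFirstInitial lnfi = new LastNameFirstInitial(Collections.emptyMap());
		// different spellings of the same author should collapse to a key like "jsmith"
		System.out.println(lnfi.apply(null, Arrays.asList("Smith, John", "J. Smith")));
	}
}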
@@ -0,0 +1,38 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Sets;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("lowercase")
|
||||
public class LowercaseClustering extends AbstractClusteringFunction {
|
||||
|
||||
public LowercaseClustering(final Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<String> apply(Config conf, List<String> fields) {
|
||||
Collection<String> c = Sets.newLinkedHashSet();
|
||||
for (String f : fields) {
|
||||
c.addAll(doApply(conf, f));
|
||||
}
|
||||
return c;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(final Config conf, final String s) {
|
||||
if (StringUtils.isBlank(s)) {
|
||||
return Lists.newArrayList();
|
||||
}
|
||||
return Lists.newArrayList(s.toLowerCase().trim());
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,24 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import eu.dnetlib.pace.common.AbstractPaceFunctions;
|
||||
|
||||
public class NGramUtils extends AbstractPaceFunctions {
|
||||
static private final NGramUtils NGRAMUTILS = new NGramUtils();
|
||||
|
||||
private static final int SIZE = 100;
|
||||
|
||||
private static final Set<String> stopwords = AbstractPaceFunctions
|
||||
.loadFromClasspath("/eu/dnetlib/pace/config/stopwords_en.txt");
|
||||
|
||||
public static String cleanupForOrdering(String s) {
|
||||
return (NGRAMUTILS.filterStopWords(NGRAMUTILS.normalize(s), stopwords) + StringUtils.repeat(" ", SIZE))
|
||||
.substring(0, SIZE)
|
||||
.replaceAll(" ", "");
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,41 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("ngrampairs")
|
||||
public class NgramPairs extends Ngrams {
|
||||
|
||||
public NgramPairs(Map<String, Integer> params) {
|
||||
super(params, false);
|
||||
}
|
||||
|
||||
public NgramPairs(Map<String, Integer> params, boolean sorted) {
|
||||
super(params, sorted);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(Config conf, String s) {
|
||||
return ngramPairs(Lists.newArrayList(getNgrams(s, param("ngramLen"), param("max") * 2, 1, 2)), param("max"));
|
||||
}
|
||||
|
||||
protected Collection<String> ngramPairs(final List<String> ngrams, int maxNgrams) {
|
||||
Collection<String> res = Lists.newArrayList();
|
||||
int j = 0;
|
||||
for (int i = 0; i < ngrams.size() && res.size() < maxNgrams; i++) {
|
||||
if (++j >= ngrams.size()) {
|
||||
break;
|
||||
}
|
||||
res.add(ngrams.get(i) + ngrams.get(j));
|
||||
// System.out.println("-- " + concatNgrams);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
}
|
|
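A usage sketch for the pairing logic above (sample title and keys are illustrative): consecutive ngrams are concatenated, so the keys carry adjacent-token evidence even when single ngrams would be too generic.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.clustering.NgramPairs;

public class NgramPairsExample {
	public static void main(String[] args) {
		final Map<String, Integer> params = new HashMap<>();
		params.put("ngramLen", 3);
		params.put("max", 2);
		final NgramPairs np = new NgramPairs(params);
		// pairs of consecutive 3-char ngrams, e.g. "knoext" and "extfra"
		System.out.println(np.apply(null, Arrays.asList("knowledge extraction framework")));
	}
}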
@@ -0,0 +1,52 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("ngrams")
|
||||
public class Ngrams extends AbstractClusteringFunction {
|
||||
|
||||
private final boolean sorted;
|
||||
|
||||
public Ngrams(Map<String, Integer> params) {
|
||||
this(params, false);
|
||||
}
|
||||
|
||||
public Ngrams(Map<String, Integer> params, boolean sorted) {
|
||||
super(params);
|
||||
this.sorted = sorted;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(Config conf, String s) {
|
||||
return getNgrams(s, param("ngramLen"), param("max"), param("maxPerToken"), param("minNgramLen"));
|
||||
}
|
||||
|
||||
protected Collection<String> getNgrams(String s, int ngramLen, int max, int maxPerToken, int minNgramLen) {
|
||||
|
||||
final Collection<String> ngrams = sorted ? new TreeSet<>() : new LinkedHashSet<String>();
|
||||
final StringTokenizer st = new StringTokenizer(s);
|
||||
|
||||
while (st.hasMoreTokens()) {
|
||||
final String token = st.nextToken();
|
||||
if (!token.isEmpty()) {
|
||||
for (int i = 0; i < maxPerToken && ngramLen + i <= token.length(); i++) {
|
||||
String ngram = token.substring(i, Math.min(ngramLen + i, token.length())).trim();
|
||||
|
||||
if (ngram.length() >= minNgramLen) {
|
||||
ngrams.add(ngram);
|
||||
|
||||
if (ngrams.size() >= max) {
|
||||
return ngrams;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// System.out.println(ngrams + " n: " + ngrams.size());
|
||||
return ngrams;
|
||||
}
|
||||
|
||||
}
|
|
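A quick sketch of the raw ngram extraction (input and keys illustrative): with maxPerToken = 2 each token contributes its first two 3-character windows, up to "max" keys overall.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.clustering.Ngrams;

public class NgramsExample {
	public static void main(String[] args) {
		final Map<String, Integer> params = new HashMap<>();
		params.put("ngramLen", 3);
		params.put("max", 4);
		params.put("maxPerToken", 2);
		params.put("minNgramLen", 1);
		final Ngrams ngrams = new Ngrams(params);
		// expected keys (in some order): kno, now, ext, xtr
		System.out.println(ngrams.apply(null, Arrays.asList("knowledge extraction")));
	}
}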
@@ -0,0 +1,84 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
|
||||
import eu.dnetlib.pace.common.AbstractPaceFunctions;
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
import eu.dnetlib.pace.model.Person;
|
||||
|
||||
@ClusteringClass("personClustering")
|
||||
public class PersonClustering extends AbstractPaceFunctions implements ClusteringFunction {
|
||||
|
||||
private Map<String, Integer> params;
|
||||
|
||||
private static final int MAX_TOKENS = 5;
|
||||
|
||||
public PersonClustering(final Map<String, Integer> params) {
|
||||
this.params = params;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<String> apply(final Config conf, final List<String> fields) {
|
||||
final Set<String> hashes = Sets.newHashSet();
|
||||
|
||||
for (final String f : fields) {
|
||||
|
||||
final Person person = new Person(f, false);
|
||||
|
||||
if (StringUtils.isNotBlank(person.getNormalisedFirstName())
|
||||
&& StringUtils.isNotBlank(person.getNormalisedSurname())) {
|
||||
hashes.add(firstLC(person.getNormalisedFirstName()) + person.getNormalisedSurname().toLowerCase());
|
||||
} else {
|
||||
for (final String token1 : tokens(f, MAX_TOKENS)) {
|
||||
for (final String token2 : tokens(f, MAX_TOKENS)) {
|
||||
if (!token1.equals(token2)) {
|
||||
hashes.add(firstLC(token1) + token2);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return hashes;
|
||||
}
|
||||
|
||||
// @Override
|
||||
// public Collection<String> apply(final List<Field> fields) {
|
||||
// final Set<String> hashes = Sets.newHashSet();
|
||||
//
|
||||
// for (final Field f : fields) {
|
||||
//
|
||||
// final GTAuthor gta = GTAuthor.fromOafJson(f.stringValue());
|
||||
//
|
||||
// final Author a = gta.getAuthor();
|
||||
//
|
||||
// if (StringUtils.isNotBlank(a.getFirstname()) && StringUtils.isNotBlank(a.getSecondnames())) {
|
||||
// hashes.add(firstLC(a.getFirstname()) + a.getSecondnames().toLowerCase());
|
||||
// } else {
|
||||
// for (final String token1 : tokens(f.stringValue(), MAX_TOKENS)) {
|
||||
// for (final String token2 : tokens(f.stringValue(), MAX_TOKENS)) {
|
||||
// if (!token1.equals(token2)) {
|
||||
// hashes.add(firstLC(token1) + token2);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// return hashes;
|
||||
// }
|
||||
|
||||
@Override
|
||||
public Map<String, Integer> getParams() {
|
||||
return params;
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,34 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
import eu.dnetlib.pace.model.Person;
|
||||
|
||||
@ClusteringClass("personHash")
|
||||
public class PersonHash extends AbstractClusteringFunction {
|
||||
|
||||
private static final boolean DEFAULT_AGGRESSIVE = false;
|
||||
|
||||
public PersonHash(final Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(final Config conf, final String s) {
|
||||
final List<String> res = Lists.newArrayList();
|
||||
|
||||
final boolean aggressive = (Boolean) (getParams().containsKey("aggressive") ? getParams().get("aggressive")
|
||||
: DEFAULT_AGGRESSIVE);
|
||||
|
||||
res.add(new Person(s, aggressive).hash());
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,20 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
public class RandomClusteringFunction extends AbstractClusteringFunction {
|
||||
|
||||
public RandomClusteringFunction(Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(final Config conf, String s) {
|
||||
return Collections.emptyList(); // avoid returning null, which would break the stream pipeline in apply()
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,31 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.common.base.Splitter;
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("sortedngrampairs")
|
||||
public class SortedNgramPairs extends NgramPairs {
|
||||
|
||||
public SortedNgramPairs(Map<String, Integer> params) {
|
||||
super(params, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(Config conf, String s) {
|
||||
|
||||
final List<String> tokens = Lists.newArrayList(Splitter.on(" ").omitEmptyStrings().trimResults().split(s));
|
||||
|
||||
Collections.sort(tokens);
|
||||
|
||||
return ngramPairs(
|
||||
Lists.newArrayList(getNgrams(Joiner.on(" ").join(tokens), param("ngramLen"), param("max") * 2, 1, 2)),
|
||||
param("max"));
|
||||
}
|
||||
|
||||
}
|
|
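The point of sorting the tokens first is word-order invariance; a small sketch (titles illustrative), where both permutations should produce the same key.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.clustering.SortedNgramPairs;

public class SortedNgramPairsExample {
	public static void main(String[] args) {
		final Map<String, Integer> params = new HashMap<>();
		params.put("ngramLen", 3);
		params.put("max", 1);
		final SortedNgramPairs snp = new SortedNgramPairs(params);
		// tokens are sorted before pairing, so both calls should print the same key, e.g. "datmin"
		System.out.println(snp.apply(null, Arrays.asList("data mining")));
		System.out.println(snp.apply(null, Arrays.asList("mining data")));
	}
}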
@@ -0,0 +1,34 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.lang3.RandomStringUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("spacetrimmingfieldvalue")
|
||||
public class SpaceTrimmingFieldValue extends AbstractClusteringFunction {
|
||||
|
||||
public SpaceTrimmingFieldValue(final Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(final Config conf, final String s) {
|
||||
final List<String> res = Lists.newArrayList();
|
||||
|
||||
res
|
||||
.add(
|
||||
StringUtils.isBlank(s) ? RandomStringUtils.random(getParams().get("randomLength"))
|
||||
: s.toLowerCase().replaceAll("\\s+", ""));
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,42 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("suffixprefix")
|
||||
public class SuffixPrefix extends AbstractClusteringFunction {
|
||||
|
||||
public SuffixPrefix(Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(Config conf, String s) {
|
||||
return suffixPrefix(s, param("len"), param("max"));
|
||||
}
|
||||
|
||||
private Collection<String> suffixPrefix(String s, int len, int max) {
|
||||
final Set<String> bigrams = Sets.newLinkedHashSet();
|
||||
int i = 0;
|
||||
while (++i < s.length() && bigrams.size() < max) {
|
||||
int j = s.indexOf(" ", i);
|
||||
|
||||
int offset = j + len + 1 < s.length() ? j + len + 1 : s.length();
|
||||
|
||||
if (j - len > 0) {
|
||||
String bigram = s.substring(j - len, offset).replaceAll(" ", "").trim();
|
||||
if (bigram.length() >= 4) {
|
||||
bigrams.add(bigram);
|
||||
}
|
||||
}
|
||||
}
|
||||
return bigrams;
|
||||
}
|
||||
|
||||
}
|
|
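A sketch of the key shape (input and key illustrative): the function glues the last "len" characters of a word to the first "len" characters of the following one, straddling the space between them.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.clustering.SuffixPrefix;

public class SuffixPrefixExample {
	public static void main(String[] args) {
		final Map<String, Integer> params = new HashMap<>();
		params.put("len", 3);
		params.put("max", 2);
		final SuffixPrefix sp = new SuffixPrefix(params);
		// suffix of "search" + prefix of "engines" -> a key like "rcheng"
		System.out.println(sp.apply(null, Arrays.asList("search engines")));
	}
}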
@@ -0,0 +1,52 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import eu.dnetlib.pace.common.AbstractPaceFunctions;
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("urlclustering")
|
||||
public class UrlClustering extends AbstractPaceFunctions implements ClusteringFunction {
|
||||
|
||||
protected Map<String, Integer> params;
|
||||
|
||||
public UrlClustering(final Map<String, Integer> params) {
|
||||
this.params = params;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<String> apply(final Config conf, List<String> fields) {
|
||||
try {
|
||||
return fields
|
||||
.stream()
|
||||
.filter(f -> !f.isEmpty())
|
||||
.map(this::asUrl)
|
||||
.map(URL::getHost)
|
||||
.collect(Collectors.toCollection(HashSet::new));
|
||||
} catch (IllegalStateException e) {
|
||||
return new HashSet<>();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, Integer> getParams() {
|
||||
return params; // return the configured params rather than null
|
||||
}
|
||||
|
||||
private URL asUrl(String value) {
|
||||
try {
|
||||
return new URL(value);
|
||||
} catch (MalformedURLException e) {
|
||||
// should not happen as checked by pace typing
|
||||
throw new IllegalStateException("invalid URL: " + value);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
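A usage sketch (URLs illustrative): fields are parsed as URLs and clustered on the host name only, so different paths on the same host end up sharing a key.

import java.util.Arrays;
import java.util.Collections;

import eu.dnetlib.pace.clustering.UrlClustering;

public class UrlClusteringExample {
	public static void main(String[] args) {
		final UrlClustering uc = new UrlClustering(Collections.emptyMap());
		// both URLs cluster on the host: a single key "www.example.org" is expected
		System.out.println(uc.apply(null, Arrays.asList(
			"http://www.example.org/record/1", "http://www.example.org/record/2")));
	}
}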
@@ -0,0 +1,91 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("wordsStatsSuffixPrefixChain")
|
||||
public class WordsStatsSuffixPrefixChain extends AbstractClusteringFunction {
|
||||
|
||||
public WordsStatsSuffixPrefixChain(Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(Config conf, String s) {
|
||||
return suffixPrefixChain(s, param("mod"));
|
||||
}
|
||||
|
||||
private Collection<String> suffixPrefixChain(String s, int mod) {
|
||||
|
||||
// create the list of words from the string (remove short words)
|
||||
List<String> wordsList = Arrays
|
||||
.stream(s.split(" "))
|
||||
.filter(si -> si.length() > 3)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
final int words = wordsList.size();
|
||||
final int letters = s.length();
|
||||
|
||||
// create the prefix: number of words + number of letters/mod
|
||||
String prefix = words + "-" + letters / mod + "-";
|
||||
|
||||
return doSuffixPrefixChain(wordsList, prefix);
|
||||
|
||||
}
|
||||
|
||||
private Collection<String> doSuffixPrefixChain(List<String> wordsList, String prefix) {
|
||||
|
||||
Set<String> set = Sets.newLinkedHashSet();
|
||||
switch (wordsList.size()) {
|
||||
case 0:
|
||||
case 1:
|
||||
break;
|
||||
case 2:
|
||||
set
|
||||
.add(
|
||||
prefix +
|
||||
suffix(wordsList.get(0), 3) +
|
||||
prefix(wordsList.get(1), 3));
|
||||
|
||||
set
|
||||
.add(
|
||||
prefix +
|
||||
prefix(wordsList.get(0), 3) +
|
||||
suffix(wordsList.get(1), 3));
|
||||
|
||||
break;
|
||||
default:
|
||||
set
|
||||
.add(
|
||||
prefix +
|
||||
suffix(wordsList.get(0), 3) +
|
||||
prefix(wordsList.get(1), 3) +
|
||||
suffix(wordsList.get(2), 3));
|
||||
|
||||
set
|
||||
.add(
|
||||
prefix +
|
||||
prefix(wordsList.get(0), 3) +
|
||||
suffix(wordsList.get(1), 3) +
|
||||
prefix(wordsList.get(2), 3));
|
||||
break;
|
||||
}
|
||||
|
||||
return set;
|
||||
|
||||
}
|
||||
|
||||
private String suffix(String s, int len) {
|
||||
return s.substring(s.length() - len);
|
||||
}
|
||||
|
||||
private String prefix(String s, int len) {
|
||||
return s.substring(0, len);
|
||||
}
|
||||
|
||||
}
|
|
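A sketch of the key shape (title and key illustrative): every key is prefixed with the word count and letters/mod, so strings with very different lengths cannot collide even when their suffix/prefix chains match.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.clustering.WordsStatsSuffixPrefixChain;

public class WordsStatsExample {
	public static void main(String[] args) {
		final Map<String, Integer> params = new HashMap<>();
		params.put("mod", 10);
		final WordsStatsSuffixPrefixChain wsc = new WordsStatsSuffixPrefixChain(params);
		// keys look like "<words>-<letters/mod>-<suffix/prefix chain>", e.g. "3-3-nalconngs"
		System.out.println(wsc.apply(null, Arrays.asList("international conference proceedings")));
	}
}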
@@ -0,0 +1,59 @@
|
|||
|
||||
package eu.dnetlib.pace.clustering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
|
||||
import eu.dnetlib.pace.config.Config;
|
||||
|
||||
@ClusteringClass("wordssuffixprefix")
|
||||
public class WordsSuffixPrefix extends AbstractClusteringFunction {
|
||||
|
||||
public WordsSuffixPrefix(Map<String, Integer> params) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<String> doApply(Config conf, String s) {
|
||||
return suffixPrefix(s, param("len"), param("max"));
|
||||
}
|
||||
|
||||
private Collection<String> suffixPrefix(String s, int len, int max) {
|
||||
|
||||
final int words = s.split(" ").length;
|
||||
|
||||
// adjust the token length according to the number of words
|
||||
switch (words) {
|
||||
case 1:
|
||||
return Sets.newLinkedHashSet();
|
||||
case 2:
|
||||
return doSuffixPrefix(s, len + 2, max, words);
|
||||
case 3:
|
||||
return doSuffixPrefix(s, len + 1, max, words);
|
||||
default:
|
||||
return doSuffixPrefix(s, len, max, words);
|
||||
}
|
||||
}
|
||||
|
||||
private Collection<String> doSuffixPrefix(String s, int len, int max, int words) {
|
||||
final Set<String> bigrams = Sets.newLinkedHashSet();
|
||||
int i = 0;
|
||||
while (++i < s.length() && bigrams.size() < max) {
|
||||
int j = s.indexOf(" ", i);
|
||||
|
||||
int offset = j + len + 1 < s.length() ? j + len + 1 : s.length();
|
||||
|
||||
if (j - len > 0) {
|
||||
String bigram = s.substring(j - len, offset).replaceAll(" ", "").trim();
|
||||
if (bigram.length() >= 4) {
|
||||
bigrams.add(words + bigram);
|
||||
}
|
||||
}
|
||||
}
|
||||
return bigrams;
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,357 @@
|
|||
|
||||
package eu.dnetlib.pace.common;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.text.Normalizer;
|
||||
import java.util.*;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.common.base.Splitter;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Sets;
|
||||
import com.ibm.icu.text.Transliterator;
|
||||
|
||||
import eu.dnetlib.pace.clustering.NGramUtils;
|
||||
|
||||
/**
|
||||
* Set of common functions for the framework
|
||||
*
|
||||
* @author claudio
|
||||
*/
|
||||
public abstract class AbstractPaceFunctions {
|
||||
|
||||
// city map to be used when translating the city names into codes
|
||||
private static Map<String, String> cityMap = AbstractPaceFunctions
|
||||
.loadMapFromClasspath("/eu/dnetlib/pace/config/city_map.csv");
|
||||
|
||||
// list of stopwords in different languages
|
||||
protected static Set<String> stopwords_gr = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_gr.txt");
|
||||
protected static Set<String> stopwords_en = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_en.txt");
|
||||
protected static Set<String> stopwords_de = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_de.txt");
|
||||
protected static Set<String> stopwords_es = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_es.txt");
|
||||
protected static Set<String> stopwords_fr = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_fr.txt");
|
||||
protected static Set<String> stopwords_it = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_it.txt");
|
||||
protected static Set<String> stopwords_pt = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_pt.txt");
|
||||
|
||||
// transliterator
|
||||
protected static Transliterator transliterator = Transliterator.getInstance("Any-Eng");
|
||||
|
||||
// blacklist of ngrams: to avoid generic keys
|
||||
protected static Set<String> ngramBlacklist = loadFromClasspath("/eu/dnetlib/pace/config/ngram_blacklist.txt");
|
||||
|
||||
// html regex for normalization
|
||||
public static final Pattern HTML_REGEX = Pattern.compile("<[^>]*>");
|
||||
|
||||
private static final String alpha = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ";
|
||||
private static final String aliases_from = "⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾ⁿ₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎àáâäæãåāèéêëēėęəîïíīįìôöòóœøōõûüùúūßśšłžźżçćčñń";
|
||||
private static final String aliases_to = "0123456789+-=()n0123456789+-=()aaaaaaaaeeeeeeeeiiiiiioooooooouuuuussslzzzcccnn";
|
||||
|
||||
// doi prefix for normalization
|
||||
public static final Pattern DOI_PREFIX = Pattern.compile("(https?:\\/\\/dx\\.doi\\.org\\/)|(doi:)");
|
||||
|
||||
private static Pattern numberPattern = Pattern.compile("-?\\d+(\\.\\d+)?");
|
||||
|
||||
private static Pattern hexUnicodePattern = Pattern.compile("\\\\u(\\p{XDigit}{4})");
|
||||
|
||||
protected String concat(final List<String> l) {
|
||||
return Joiner.on(" ").skipNulls().join(l);
|
||||
}
|
||||
|
||||
protected String cleanup(final String s) {
|
||||
final String s1 = HTML_REGEX.matcher(s).replaceAll("");
|
||||
final String s2 = unicodeNormalization(s1.toLowerCase());
|
||||
final String s3 = nfd(s2);
|
||||
final String s4 = fixXML(s3);
|
||||
final String s5 = s4.replaceAll("([0-9]+)", " $1 ");
|
||||
final String s6 = transliterate(s5);
|
||||
final String s7 = fixAliases(s6);
|
||||
final String s8 = s7.replaceAll("[^\\p{ASCII}]", "");
|
||||
final String s9 = s8.replaceAll("[\\p{Punct}]", " ");
|
||||
final String s10 = s9.replaceAll("\\n", " ");
|
||||
final String s11 = s10.replaceAll("(?m)\\s+", " ");
|
||||
final String s12 = s11.trim();
|
||||
return s12;
|
||||
}
|
||||
|
||||
protected String fixXML(final String a) {
|
||||
|
||||
return a
|
||||
.replaceAll("–", " ")
|
||||
.replaceAll("&", " ")
|
||||
.replaceAll(""", " ")
|
||||
.replaceAll("−", " ");
|
||||
}
|
||||
|
||||
protected boolean checkNumbers(final String a, final String b) {
|
||||
final String numbersA = getNumbers(a);
|
||||
final String numbersB = getNumbers(b);
|
||||
final String romansA = getRomans(a);
|
||||
final String romansB = getRomans(b);
|
||||
return !numbersA.equals(numbersB) || !romansA.equals(romansB);
|
||||
}
|
||||
|
||||
protected String getRomans(final String s) {
|
||||
final StringBuilder sb = new StringBuilder();
|
||||
for (final String t : s.split(" ")) {
|
||||
sb.append(isRoman(t) ? t : "");
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
protected boolean isRoman(final String s) {
|
||||
return s
|
||||
.replaceAll("^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$", "qwertyuiop")
|
||||
.equals("qwertyuiop");
|
||||
}
|
||||
|
||||
protected String getNumbers(final String s) {
|
||||
final StringBuilder sb = new StringBuilder();
|
||||
for (final String t : s.split(" ")) {
|
||||
sb.append(isNumber(t) ? t : "");
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
public boolean isNumber(String strNum) {
|
||||
if (strNum == null) {
|
||||
return false;
|
||||
}
|
||||
return numberPattern.matcher(strNum).matches();
|
||||
}
|
||||
|
||||
protected static String fixAliases(final String s) {
|
||||
final StringBuilder sb = new StringBuilder();
|
||||
|
||||
s.chars().forEach(ch -> {
|
||||
final int i = StringUtils.indexOf(aliases_from, ch);
|
||||
sb.append(i >= 0 ? aliases_to.charAt(i) : (char) ch);
|
||||
});
|
||||
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
protected static String transliterate(final String s) {
|
||||
try {
|
||||
return transliterator.transliterate(s);
|
||||
} catch (Exception e) {
|
||||
return s;
|
||||
}
|
||||
}
|
||||
|
||||
protected String removeSymbols(final String s) {
|
||||
final StringBuilder sb = new StringBuilder();
|
||||
|
||||
s.chars().forEach(ch -> {
|
||||
sb.append(StringUtils.contains(alpha, ch) ? (char) ch : ' ');
|
||||
});
|
||||
|
||||
return sb.toString().replaceAll("\\s+", " ");
|
||||
}
|
||||
|
||||
protected boolean notNull(final String s) {
|
||||
return s != null;
|
||||
}
|
||||
|
||||
protected String normalize(final String s) {
|
||||
return fixAliases(transliterate(nfd(unicodeNormalization(s))))
|
||||
.toLowerCase()
|
||||
// do not compact the regexes in a single expression, would cause StackOverflowError in case of large input
|
||||
// strings
|
||||
.replaceAll("[^ \\w]+", "")
|
||||
.replaceAll("(\\p{InCombiningDiacriticalMarks})+", "")
|
||||
.replaceAll("(\\p{Punct})+", " ")
|
||||
.replaceAll("(\\d)+", " ")
|
||||
.replaceAll("(\\n)+", " ")
|
||||
.trim();
|
||||
}
|
||||
|
||||
public String nfd(final String s) {
|
||||
return Normalizer.normalize(s, Normalizer.Form.NFD);
|
||||
}
|
||||
|
||||
public String utf8(final String s) {
|
||||
byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
|
||||
return new String(bytes, StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
public String unicodeNormalization(final String s) {
|
||||
|
||||
Matcher m = hexUnicodePattern.matcher(s);
|
||||
StringBuffer buf = new StringBuffer(s.length());
|
||||
while (m.find()) {
|
||||
String ch = String.valueOf((char) Integer.parseInt(m.group(1), 16));
|
||||
m.appendReplacement(buf, Matcher.quoteReplacement(ch));
|
||||
}
|
||||
m.appendTail(buf);
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
protected String filterStopWords(final String s, final Set<String> stopwords) {
|
||||
final StringTokenizer st = new StringTokenizer(s);
|
||||
final StringBuilder sb = new StringBuilder();
|
||||
while (st.hasMoreTokens()) {
|
||||
final String token = st.nextToken();
|
||||
if (!stopwords.contains(token)) {
|
||||
sb.append(token);
|
||||
sb.append(" ");
|
||||
}
|
||||
}
|
||||
return sb.toString().trim();
|
||||
}
|
||||
|
||||
public String filterAllStopWords(String s) {
|
||||
|
||||
s = filterStopWords(s, stopwords_en);
|
||||
s = filterStopWords(s, stopwords_de);
|
||||
s = filterStopWords(s, stopwords_it);
|
||||
s = filterStopWords(s, stopwords_fr);
|
||||
s = filterStopWords(s, stopwords_pt);
|
||||
s = filterStopWords(s, stopwords_es);
|
||||
s = filterStopWords(s, stopwords_gr);
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
protected Collection<String> filterBlacklisted(final Collection<String> set, final Set<String> ngramBlacklist) {
|
||||
final Set<String> newset = Sets.newLinkedHashSet();
|
||||
for (final String s : set) {
|
||||
if (!ngramBlacklist.contains(s)) {
|
||||
newset.add(s);
|
||||
}
|
||||
}
|
||||
return newset;
|
||||
}
|
||||
|
||||
public static Set<String> loadFromClasspath(final String classpath) {
|
||||
|
||||
Transliterator transliterator = Transliterator.getInstance("Any-Eng");
|
||||
|
||||
final Set<String> h = Sets.newHashSet();
|
||||
try {
|
||||
for (final String s : IOUtils
|
||||
.readLines(NGramUtils.class.getResourceAsStream(classpath), StandardCharsets.UTF_8)) {
|
||||
h.add(fixAliases(transliterator.transliterate(s))); // transliteration of the stopwords
|
||||
}
|
||||
} catch (final Throwable e) {
|
||||
return Sets.newHashSet();
|
||||
}
|
||||
return h;
|
||||
}
|
||||
|
||||
public static Map<String, String> loadMapFromClasspath(final String classpath) {
|
||||
|
||||
Transliterator transliterator = Transliterator.getInstance("Any-Eng");
|
||||
|
||||
final Map<String, String> m = new HashMap<>();
|
||||
try {
|
||||
for (final String s : IOUtils
|
||||
.readLines(AbstractPaceFunctions.class.getResourceAsStream(classpath), StandardCharsets.UTF_8)) {
|
||||
// each line has the form: code;word1;word2;word3
|
||||
String[] line = s.split(";");
|
||||
String value = line[0];
|
||||
for (int i = 1; i < line.length; i++) {
|
||||
m.put(fixAliases(transliterator.transliterate(line[i].toLowerCase())), value);
|
||||
}
|
||||
}
|
||||
} catch (final Throwable e) {
|
||||
return new HashMap<>();
|
||||
}
|
||||
return m;
|
||||
}
|
||||
|
||||
public String removeKeywords(String s, Set<String> keywords) {
|
||||
|
||||
s = " " + s + " ";
|
||||
for (String k : keywords) {
|
||||
s = s.replaceAll(k.toLowerCase(), "");
|
||||
}
|
||||
|
||||
return s.trim();
|
||||
}
|
||||
|
||||
public double commonElementsPercentage(Set<String> s1, Set<String> s2) {
|
||||
|
||||
double longer = Math.max(s1.size(), s2.size());
|
||||
return (double) s1.stream().filter(s2::contains).count() / longer;
|
||||
}
|
||||
|
||||
// convert the set of keywords to codes
|
||||
public Set<String> toCodes(Set<String> keywords, Map<String, String> translationMap) {
|
||||
return keywords.stream().map(s -> translationMap.get(s)).collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
public Set<String> keywordsToCodes(Set<String> keywords, Map<String, String> translationMap) {
|
||||
return toCodes(keywords, translationMap);
|
||||
}
|
||||
|
||||
public Set<String> citiesToCodes(Set<String> keywords) {
|
||||
return toCodes(keywords, cityMap);
|
||||
}
|
||||
|
||||
protected String firstLC(final String s) {
|
||||
return StringUtils.substring(s, 0, 1).toLowerCase();
|
||||
}
|
||||
|
||||
protected Iterable<String> tokens(final String s, final int maxTokens) {
|
||||
return Iterables.limit(Splitter.on(" ").omitEmptyStrings().trimResults().split(s), maxTokens);
|
||||
}
|
||||
|
||||
public String normalizePid(String pid) {
|
||||
return DOI_PREFIX.matcher(pid.toLowerCase()).replaceAll("");
|
||||
}
|
||||
|
||||
// extract the keywords appearing in the input string, trying the longest windows first
|
||||
public Set<String> getKeywords(String s1, Map<String, String> translationMap, int windowSize) {
|
||||
|
||||
String s = s1;
|
||||
|
||||
List<String> tokens = Arrays.asList(s.toLowerCase().split(" "));
|
||||
|
||||
Set<String> codes = new HashSet<>();
|
||||
|
||||
if (tokens.size() < windowSize)
|
||||
windowSize = tokens.size();
|
||||
|
||||
int length = windowSize;
|
||||
|
||||
while (length != 0) {
|
||||
|
||||
for (int i = 0; i <= tokens.size() - length; i++) {
|
||||
String candidate = concat(tokens.subList(i, i + length));
|
||||
if (translationMap.containsKey(candidate)) {
|
||||
codes.add(candidate);
|
||||
s = s.replace(candidate, "").trim();
|
||||
}
|
||||
}
|
||||
|
||||
tokens = Arrays.asList(s.split(" "));
|
||||
length -= 1;
|
||||
}
|
||||
|
||||
return codes;
|
||||
}
|
||||
|
||||
public Set<String> getCities(String s1, int windowSize) {
|
||||
return getKeywords(s1, cityMap, windowSize);
|
||||
}
|
||||
|
||||
public static <T> String readFromClasspath(final String filename, final Class<T> clazz) {
|
||||
final StringWriter sw = new StringWriter();
|
||||
try {
|
||||
IOUtils.copy(clazz.getResourceAsStream(filename), sw, StandardCharsets.UTF_8);
|
||||
return sw.toString();
|
||||
} catch (final IOException e) {
|
||||
throw new RuntimeException("cannot load resource from classpath: " + filename);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
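Since the class is abstract but declares no abstract methods, a trivial subclass is enough to exercise the helpers; the input strings and the printed values below are illustrative.

import eu.dnetlib.pace.common.AbstractPaceFunctions;

public class PaceFunctionsExample extends AbstractPaceFunctions {

	public static void main(String[] args) {
		final PaceFunctionsExample f = new PaceFunctionsExample();
		// strips markup, transliterates, drops diacritics and punctuation, collapses whitespace
		System.out.println(f.cleanup("<b>Études</b>, 2nd ed.")); // -> something like "etudes 2 nd ed"
		// doi prefixes are normalized away before pid comparison
		System.out.println(f.normalizePid("doi:10.1000/XYZ123")); // -> "10.1000/xyz123"
	}
}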
@@ -0,0 +1,53 @@
|
|||
|
||||
package eu.dnetlib.pace.config;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import eu.dnetlib.pace.model.ClusteringDef;
|
||||
import eu.dnetlib.pace.model.FieldDef;
|
||||
import eu.dnetlib.pace.tree.support.TreeNodeDef;
|
||||
|
||||
/**
|
||||
* Interface for PACE configuration bean.
|
||||
*
|
||||
* @author claudio
|
||||
*/
|
||||
public interface Config {
|
||||
|
||||
/**
|
||||
* Field configuration definitions.
|
||||
*
|
||||
* @return the list of definitions
|
||||
*/
|
||||
public List<FieldDef> model();
|
||||
|
||||
/**
|
||||
* Decision Tree definition
|
||||
*
|
||||
* @return the map representing the decision tree
|
||||
*/
|
||||
public Map<String, TreeNodeDef> decisionTree();
|
||||
|
||||
/**
|
||||
* Clusterings.
|
||||
*
|
||||
* @return the list
|
||||
*/
|
||||
public List<ClusteringDef> clusterings();
|
||||
|
||||
/**
|
||||
* Blacklists.
|
||||
*
|
||||
* @return the map
|
||||
*/
|
||||
public Map<String, Predicate<String>> blacklists();
|
||||
|
||||
/**
|
||||
* Translation map.
|
||||
*
|
||||
* @return the map
|
||||
*/
|
||||
public Map<String, String> translationMap();
|
||||
}
|
|
@@ -0,0 +1,178 @@
|
|||
|
||||
package eu.dnetlib.pace.config;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.AbstractMap;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.regex.PatternSyntaxException;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.antlr.stringtemplate.StringTemplate;
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnore;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.collect.Maps;
|
||||
|
||||
import eu.dnetlib.pace.model.ClusteringDef;
|
||||
import eu.dnetlib.pace.model.FieldDef;
|
||||
import eu.dnetlib.pace.tree.support.TreeNodeDef;
|
||||
import eu.dnetlib.pace.util.PaceException;
|
||||
|
||||
public class DedupConfig implements Config, Serializable {
|
||||
private static final String CONFIG_TEMPLATE = "dedupConfig.st";
|
||||
|
||||
private PaceConfig pace;
|
||||
|
||||
private WfConfig wf;
|
||||
|
||||
@JsonIgnore
|
||||
private Map<String, Predicate<String>> blacklists;
|
||||
|
||||
private static Map<String, String> defaults = Maps.newHashMap();
|
||||
|
||||
static {
|
||||
defaults.put("dedupRun", "001");
|
||||
defaults.put("entityType", "result");
|
||||
defaults.put("subEntityType", "resulttype");
|
||||
defaults.put("subEntityValue", "publication");
|
||||
defaults.put("orderField", "title");
|
||||
defaults.put("queueMaxSize", "2000");
|
||||
defaults.put("groupMaxSize", "10");
|
||||
defaults.put("slidingWindowSize", "200");
|
||||
defaults.put("rootBuilder", "result");
|
||||
defaults.put("includeChildren", "true");
|
||||
defaults.put("maxIterations", "20");
|
||||
defaults.put("idPath", "$.id");
|
||||
}
|
||||
|
||||
public DedupConfig() {
|
||||
}
|
||||
|
||||
public static DedupConfig load(final String json) {
|
||||
|
||||
final DedupConfig config;
|
||||
try {
|
||||
config = new ObjectMapper().readValue(json, DedupConfig.class);
|
||||
config.getPace().initModel();
|
||||
config.getPace().initTranslationMap();
|
||||
|
||||
config.blacklists = config
|
||||
.getPace()
|
||||
.getBlacklists()
|
||||
.entrySet()
|
||||
.stream()
|
||||
.map(
|
||||
e -> new AbstractMap.SimpleEntry<String, List<Pattern>>(e.getKey(),
|
||||
e
|
||||
.getValue()
|
||||
.stream()
|
||||
.filter(s -> !StringUtils.isBlank(s))
|
||||
.map(Pattern::compile)
|
||||
.collect(Collectors.toList())))
|
||||
.collect(
|
||||
Collectors
|
||||
.toMap(
|
||||
e -> e.getKey(),
|
||||
e -> (Predicate<String> & Serializable) s -> e
|
||||
.getValue()
|
||||
.stream()
|
||||
.filter(p -> p.matcher(s).matches())
|
||||
.findFirst()
|
||||
.isPresent()))
|
||||
|
||||
;
|
||||
|
||||
return config;
|
||||
} catch (IOException | PatternSyntaxException e) {
|
||||
throw new PaceException("Error in parsing configuration json", e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static DedupConfig loadDefault() throws IOException {
|
||||
return loadDefault(new HashMap<String, String>());
|
||||
}
|
||||
|
||||
public static DedupConfig loadDefault(final Map<String, String> params) throws IOException {
|
||||
|
||||
final StringTemplate template = new StringTemplate(new DedupConfig().readFromClasspath(CONFIG_TEMPLATE));
|
||||
|
||||
for (final Entry<String, String> e : defaults.entrySet()) {
|
||||
template.setAttribute(e.getKey(), e.getValue());
|
||||
}
|
||||
for (final Entry<String, String> e : params.entrySet()) {
|
||||
if (template.getAttribute(e.getKey()) != null) {
|
||||
template.getAttributes().computeIfPresent(e.getKey(), (o, o2) -> e.getValue());
|
||||
} else {
|
||||
template.setAttribute(e.getKey(), e.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
final String json = template.toString();
|
||||
return load(json);
|
||||
}
|
||||
|
||||
private String readFromClasspath(final String resource) throws IOException {
|
||||
return IOUtils.toString(getClass().getResource(resource), StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
public PaceConfig getPace() {
|
||||
return pace;
|
||||
}
|
||||
|
||||
public void setPace(final PaceConfig pace) {
|
||||
this.pace = pace;
|
||||
}
|
||||
|
||||
public WfConfig getWf() {
|
||||
return wf;
|
||||
}
|
||||
|
||||
public void setWf(final WfConfig wf) {
|
||||
this.wf = wf;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
try {
|
||||
return new ObjectMapper().writeValueAsString(this);
|
||||
} catch (IOException e) {
|
||||
throw new PaceException("unable to serialise configuration", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, TreeNodeDef> decisionTree() {
|
||||
return getPace().getDecisionTree();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<FieldDef> model() {
|
||||
return getPace().getModel();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ClusteringDef> clusterings() {
|
||||
return getPace().getClustering();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, Predicate<String>> blacklists() {
|
||||
return blacklists;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> translationMap() {
|
||||
return getPace().translationMap();
|
||||
}
|
||||
|
||||
}
|
|
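A sketch of loading a configuration with a couple of the defaults above overridden; it assumes the dedupConfig.st template is available on the classpath, as loadDefault requires.

import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.config.DedupConfig;

public class DedupConfigExample {
	public static void main(String[] args) throws Exception {
		final Map<String, String> params = new HashMap<>();
		params.put("entityType", "organization"); // overrides the "result" default
		params.put("slidingWindowSize", "100"); // overrides the "200" default

		final DedupConfig config = DedupConfig.loadDefault(params);
		System.out.println(config); // serialized back to json by toString()
	}
}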
@@ -0,0 +1,108 @@
|
|||
|
||||
package eu.dnetlib.pace.config;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnore;
|
||||
import com.google.common.collect.Maps;
|
||||
import com.ibm.icu.text.Transliterator;
|
||||
|
||||
import eu.dnetlib.pace.common.AbstractPaceFunctions;
|
||||
import eu.dnetlib.pace.model.ClusteringDef;
|
||||
import eu.dnetlib.pace.model.FieldDef;
|
||||
import eu.dnetlib.pace.tree.support.TreeNodeDef;
|
||||
import eu.dnetlib.pace.util.PaceResolver;
|
||||
|
||||
public class PaceConfig extends AbstractPaceFunctions implements Serializable {
|
||||
|
||||
private List<FieldDef> model;
|
||||
|
||||
private List<ClusteringDef> clustering;
|
||||
private Map<String, TreeNodeDef> decisionTree;
|
||||
|
||||
private Map<String, List<String>> blacklists;
|
||||
private Map<String, List<String>> synonyms;
|
||||
|
||||
@JsonIgnore
|
||||
private Map<String, String> translationMap;
|
||||
|
||||
public Map<String, FieldDef> getModelMap() {
|
||||
return modelMap;
|
||||
}
|
||||
|
||||
@JsonIgnore
|
||||
private Map<String, FieldDef> modelMap;
|
||||
|
||||
@JsonIgnore
|
||||
public static PaceResolver resolver = new PaceResolver();
|
||||
|
||||
public PaceConfig() {
|
||||
}
|
||||
|
||||
public void initModel() {
|
||||
modelMap = Maps.newHashMap();
|
||||
for (FieldDef fd : getModel()) {
|
||||
modelMap.put(fd.getName(), fd);
|
||||
}
|
||||
}
|
||||
|
||||
public void initTranslationMap() {
|
||||
translationMap = Maps.newHashMap();
|
||||
|
||||
Transliterator transliterator = Transliterator.getInstance("Any-Eng");
|
||||
for (String key : synonyms.keySet()) {
|
||||
for (String term : synonyms.get(key)) {
|
||||
translationMap
|
||||
.put(
|
||||
fixAliases(transliterator.transliterate(term.toLowerCase())),
|
||||
key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public Map<String, String> translationMap() {
|
||||
return translationMap;
|
||||
}
|
||||
|
||||
public List<FieldDef> getModel() {
|
||||
return model;
|
||||
}
|
||||
|
||||
public void setModel(final List<FieldDef> model) {
|
||||
this.model = model;
|
||||
}
|
||||
|
||||
public List<ClusteringDef> getClustering() {
|
||||
return clustering;
|
||||
}
|
||||
|
||||
public void setClustering(final List<ClusteringDef> clustering) {
|
||||
this.clustering = clustering;
|
||||
}
|
||||
|
||||
public Map<String, TreeNodeDef> getDecisionTree() {
|
||||
return decisionTree;
|
||||
}
|
||||
|
||||
public void setDecisionTree(Map<String, TreeNodeDef> decisionTree) {
|
||||
this.decisionTree = decisionTree;
|
||||
}
|
||||
|
||||
public Map<String, List<String>> getBlacklists() {
|
||||
return blacklists;
|
||||
}
|
||||
|
||||
public void setBlacklists(final Map<String, List<String>> blacklists) {
|
||||
this.blacklists = blacklists;
|
||||
}
|
||||
|
||||
public Map<String, List<String>> getSynonyms() {
|
||||
return synonyms;
|
||||
}
|
||||
|
||||
public void setSynonyms(Map<String, List<String>> synonyms) {
|
||||
this.synonyms = synonyms;
|
||||
}
|
||||
}
|
|
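A sketch of the expected initialisation sequence, assuming the profile arrives as JSON whose keys mirror the bean properties above; initModel() and initTranslationMap() must be called explicitly after deserialisation because modelMap and translationMap are @JsonIgnore (error handling elided):

	PaceConfig pace = new ObjectMapper().readValue(profileJson, PaceConfig.class);
	pace.initModel();          // builds the name -> FieldDef lookup
	pace.initTranslationMap(); // transliterates every synonym and maps it to its key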
@@ -0,0 +1,6 @@

package eu.dnetlib.pace.config;

public enum Type {
	String, Int, List, JSON, URL, StringConcat, DoubleArray
}
@@ -0,0 +1,294 @@

package eu.dnetlib.pace.config;

import java.io.IOException;
import java.io.Serializable;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.commons.lang3.StringUtils;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;

import eu.dnetlib.pace.util.PaceException;

public class WfConfig implements Serializable {

	/**
	 * Entity type.
	 */
	private String entityType = "";

	/**
	 * Sub-Entity type refers to one of the fields declared in the model. See eu.dnetlib.pace.config.PaceConfig.modelMap
	 */
	private String subEntityType = "";

	/**
	 * Sub-Entity value declares a value for subTypes to be considered.
	 */
	private String subEntityValue = "";

	/**
	 * Field name used to sort the values in the reducer phase.
	 */
	private String orderField = "";

	/**
	 * Column Families involved in the relations redirection.
	 */
	private List<String> rootBuilder = Lists.newArrayList();

	/**
	 * Set of datasource namespace prefixes that won't be deduplicated.
	 */
	private Set<String> skipList = Sets.newHashSet();

	/**
	 * Subprefix used to build the root id, allows multiple dedup runs.
	 */
	private String dedupRun = "";

	/**
	 * Similarity threshold.
	 */
	private double threshold = 0;

	/** The queue max size. */
	private int queueMaxSize = 2000;

	/** The group max size. */
	private int groupMaxSize;

	/** The sliding window size. */
	private int slidingWindowSize;

	/** The configuration id. */
	private String configurationId;

	/** The include children. */
	private boolean includeChildren;

	/** Default maximum number of allowed children. */
	private final static int MAX_CHILDREN = 10;

	/** Maximum number of allowed children. */
	private int maxChildren = MAX_CHILDREN;

	/** Default maximum number of iterations. */
	private final static int MAX_ITERATIONS = 20;

	/** Maximum number of iterations. */
	private int maxIterations = MAX_ITERATIONS;

	/** The JsonPath expression used to retrieve the identifier. */
	private String idPath = "$.id";

	public WfConfig() {
	}

	/**
	 * Instantiates a new dedup config.
	 *
	 * @param entityType
	 *            the entity type
	 * @param orderField
	 *            the order field
	 * @param rootBuilder
	 *            the root builder families
	 * @param dedupRun
	 *            the dedup run
	 * @param skipList
	 *            the skip list
	 * @param queueMaxSize
	 *            the queue max size
	 * @param groupMaxSize
	 *            the group max size
	 * @param slidingWindowSize
	 *            the sliding window size
	 * @param includeChildren
	 *            allows the children to be included in the representative records or not.
	 * @param maxIterations
	 *            the maximum number of iterations
	 * @param idPath
	 *            the path for the id of the entity
	 */
	public WfConfig(final String entityType, final String orderField, final List<String> rootBuilder,
		final String dedupRun,
		final Set<String> skipList, final int queueMaxSize, final int groupMaxSize, final int slidingWindowSize,
		final boolean includeChildren, final int maxIterations, final String idPath) {
		super();
		this.entityType = entityType;
		this.orderField = orderField;
		this.rootBuilder = rootBuilder;
		this.dedupRun = cleanupStringNumber(dedupRun);
		this.skipList = skipList;
		this.queueMaxSize = queueMaxSize;
		this.groupMaxSize = groupMaxSize;
		this.slidingWindowSize = slidingWindowSize;
		this.includeChildren = includeChildren;
		this.maxIterations = maxIterations;
		this.idPath = idPath;
	}

	/**
	 * Cleanup string number.
	 *
	 * @param s
	 *            the string to clean
	 * @return the string without single quotes
	 */
	private String cleanupStringNumber(final String s) {
		return s.contains("'") ? s.replaceAll("'", "") : s;
	}

	public boolean hasSubType() {
		return StringUtils.isNotBlank(getSubEntityType()) && StringUtils.isNotBlank(getSubEntityValue());
	}

	public String getEntityType() {
		return entityType;
	}

	public void setEntityType(final String entityType) {
		this.entityType = entityType;
	}

	public String getSubEntityType() {
		return subEntityType;
	}

	public void setSubEntityType(final String subEntityType) {
		this.subEntityType = subEntityType;
	}

	public String getSubEntityValue() {
		return subEntityValue;
	}

	public void setSubEntityValue(final String subEntityValue) {
		this.subEntityValue = subEntityValue;
	}

	public String getOrderField() {
		return orderField;
	}

	public void setOrderField(final String orderField) {
		this.orderField = orderField;
	}

	public List<String> getRootBuilder() {
		return rootBuilder;
	}

	public void setRootBuilder(final List<String> rootBuilder) {
		this.rootBuilder = rootBuilder;
	}

	public Set<String> getSkipList() {
		return skipList != null ? skipList : new HashSet<String>();
	}

	public void setSkipList(final Set<String> skipList) {
		this.skipList = skipList;
	}

	public String getDedupRun() {
		return dedupRun;
	}

	public void setDedupRun(final String dedupRun) {
		this.dedupRun = dedupRun;
	}

	public double getThreshold() {
		return threshold;
	}

	public void setThreshold(final double threshold) {
		this.threshold = threshold;
	}

	public int getQueueMaxSize() {
		return queueMaxSize;
	}

	public void setQueueMaxSize(final int queueMaxSize) {
		this.queueMaxSize = queueMaxSize;
	}

	public int getGroupMaxSize() {
		return groupMaxSize;
	}

	public void setGroupMaxSize(final int groupMaxSize) {
		this.groupMaxSize = groupMaxSize;
	}

	public int getSlidingWindowSize() {
		return slidingWindowSize;
	}

	public void setSlidingWindowSize(final int slidingWindowSize) {
		this.slidingWindowSize = slidingWindowSize;
	}

	public String getConfigurationId() {
		return configurationId;
	}

	public void setConfigurationId(final String configurationId) {
		this.configurationId = configurationId;
	}

	public boolean isIncludeChildren() {
		return includeChildren;
	}

	public void setIncludeChildren(final boolean includeChildren) {
		this.includeChildren = includeChildren;
	}

	public int getMaxChildren() {
		return maxChildren;
	}

	public void setMaxChildren(final int maxChildren) {
		this.maxChildren = maxChildren;
	}

	public int getMaxIterations() {
		return maxIterations;
	}

	public void setMaxIterations(int maxIterations) {
		this.maxIterations = maxIterations;
	}

	public String getIdPath() {
		return idPath;
	}

	public void setIdPath(String idPath) {
		this.idPath = idPath;
	}

	/*
	 * (non-Javadoc)
	 * @see java.lang.Object#toString()
	 */
	@Override
	public String toString() {
		try {
			return new ObjectMapper().writeValueAsString(this);
		} catch (IOException e) {
			throw new PaceException("unable to serialise " + this.getClass().getName(), e);
		}
	}

}
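A configuration sketch built through the setters above; the values are illustrative, not prescribed defaults:

	WfConfig wf = new WfConfig();
	wf.setEntityType("result");      // hypothetical entity type
	wf.setOrderField("title");       // field used to sort rows inside a block
	wf.setQueueMaxSize(2000);
	wf.setSlidingWindowSize(100);
	wf.setIdPath("$.id");            // JsonPath locating the record identifier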
@@ -0,0 +1,63 @@

package eu.dnetlib.pace.model;

import java.io.IOException;
import java.io.Serializable;
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.pace.clustering.ClusteringFunction;
import eu.dnetlib.pace.config.PaceConfig;
import eu.dnetlib.pace.util.PaceException;

public class ClusteringDef implements Serializable {

	private String name;

	private List<String> fields;

	private Map<String, Integer> params;

	public ClusteringDef() {
	}

	public String getName() {
		return name;
	}

	public void setName(final String name) {
		this.name = name;
	}

	public ClusteringFunction clusteringFunction() {
		return PaceConfig.resolver.getClusteringFunction(getName(), params);
	}

	public List<String> getFields() {
		return fields;
	}

	public void setFields(final List<String> fields) {
		this.fields = fields;
	}

	public Map<String, Integer> getParams() {
		return params;
	}

	public void setParams(final Map<String, Integer> params) {
		this.params = params;
	}

	@Override
	public String toString() {
		try {
			return new ObjectMapper().writeValueAsString(this);
		} catch (IOException e) {
			throw new PaceException("unable to serialise " + this.getClass().getName(), e);
		}
	}

}
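A sketch of how a clustering definition resolves to its function through the static PaceConfig.resolver; "ngrams" is an assumed function name, the registered ones are defined elsewhere in the module:

	ClusteringDef cd = new ClusteringDef();
	cd.setName("ngrams");                            // assumed registered name
	cd.setFields(Lists.newArrayList("title"));       // model fields to cluster on
	cd.setParams(Maps.newHashMap());                 // function-specific parameters
	ClusteringFunction fn = cd.clusteringFunction(); // resolved via PaceConfig.resolver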
@@ -0,0 +1,103 @@

package eu.dnetlib.pace.model;

import java.io.Serializable;
import java.util.List;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;

import eu.dnetlib.pace.config.Type;

/**
 * The schema is composed of field definitions (FieldDef). Each field has a type, a name, and an associated compare algorithm.
 */
public class FieldDef implements Serializable {

	public final static String PATH_SEPARATOR = "/";

	private String name;

	private String path;

	private Type type;

	private boolean overrideMatch;

	/**
	 * Sets maximum size for the repeatable fields in the model. -1 for unbounded size.
	 */
	private int size = -1;

	/**
	 * Sets maximum length for field values in the model. -1 for unbounded length.
	 */
	private int length = -1;

	public FieldDef() {
	}

	public String getName() {
		return name;
	}

	public String getPath() {
		return path;
	}

	public List<String> getPathList() {
		return Lists.newArrayList(Splitter.on(PATH_SEPARATOR).split(getPath()));
	}

	public Type getType() {
		return type;
	}

	public void setType(final Type type) {
		this.type = type;
	}

	public boolean isOverrideMatch() {
		return overrideMatch;
	}

	public void setOverrideMatch(final boolean overrideMatch) {
		this.overrideMatch = overrideMatch;
	}

	public int getSize() {
		return size;
	}

	public void setSize(int size) {
		this.size = size;
	}

	public int getLength() {
		return length;
	}

	public void setLength(int length) {
		this.length = length;
	}

	public void setName(String name) {
		this.name = name;
	}

	public void setPath(String path) {
		this.path = path;
	}

	@Override
	public String toString() {
		try {
			return new ObjectMapper().writeValueAsString(this);
		} catch (JsonProcessingException e) {
			return null;
		}
	}

}
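A model-entry sketch using the setters above; the path is split on PATH_SEPARATOR by getPathList(), and the path value itself is hypothetical:

	FieldDef fd = new FieldDef();
	fd.setName("authors");
	fd.setType(Type.List);
	fd.setPath("author/fullname");         // assumed path
	fd.setSize(200);                       // cap on repeatable values; -1 = unbounded
	fd.setLength(-1);                      // no truncation of individual values
	List<String> steps = fd.getPathList(); // ["author", "fullname"]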
@@ -0,0 +1,156 @@

package eu.dnetlib.pace.model;

import java.nio.charset.Charset;
import java.text.Normalizer;
import java.util.List;
import java.util.Set;

import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.hash.Hashing;

import eu.dnetlib.pace.common.AbstractPaceFunctions;
import eu.dnetlib.pace.util.Capitalise;
import eu.dnetlib.pace.util.DotAbbreviations;

public class Person {

	private static final String UTF8 = "UTF-8";
	private List<String> name = Lists.newArrayList();
	private List<String> surname = Lists.newArrayList();
	private List<String> fullname = Lists.newArrayList();
	private final String original;

	private static Set<String> particles = null;

	public Person(String s, final boolean aggressive) {
		original = s;
		s = Normalizer.normalize(s, Normalizer.Form.NFD);
		s = s.replaceAll("\\(.+\\)", "");
		s = s.replaceAll("\\[.+\\]", "");
		s = s.replaceAll("\\{.+\\}", "");
		s = s.replaceAll("\\s+-\\s+", "-");
		s = s.replaceAll("[\\p{Punct}&&[^,-]]", " ");
		s = s.replaceAll("\\d", " ");
		s = s.replaceAll("\\n", " ");
		s = s.replaceAll("\\.", " ");
		s = s.replaceAll("\\s+", " ");

		if (aggressive) {
			s = s.replaceAll("[\\p{InCombiningDiacriticalMarks}&&[^,-]]", "");
			// s = s.replaceAll("[\\W&&[^,-]]", "");
		}

		if (s.contains(",")) { // a comma makes the surname and the given name easy to derive
			final String[] arr = s.split(",");
			if (arr.length == 1) {
				fullname = splitTerms(arr[0]);
			} else if (arr.length > 1) {
				surname = splitTerms(arr[0]);
				name = splitTerms(arr[1]);
				fullname.addAll(surname);
				fullname.addAll(name);
			}
		} else {
			fullname = splitTerms(s);

			int lastInitialPosition = fullname.size();
			boolean hasSurnameInUpperCase = false;

			for (int i = 0; i < fullname.size(); i++) {
				final String term = fullname.get(i);
				if (term.length() == 1) {
					lastInitialPosition = i;
				} else if (term.equals(term.toUpperCase())) {
					hasSurnameInUpperCase = true;
				}
			}

			if (lastInitialPosition < (fullname.size() - 1)) { // Case: Michele G. Artini
				name = fullname.subList(0, lastInitialPosition + 1);
				surname = fullname.subList(lastInitialPosition + 1, fullname.size());
			} else if (hasSurnameInUpperCase) { // Case: Michele ARTINI
				for (final String term : fullname) {
					if ((term.length() > 1) && term.equals(term.toUpperCase())) {
						surname.add(term);
					} else {
						name.add(term);
					}
				}
			}
		}
	}

	private List<String> splitTerms(final String s) {
		if (particles == null) {
			particles = AbstractPaceFunctions.loadFromClasspath("/eu/dnetlib/pace/config/name_particles.txt");
		}

		final List<String> list = Lists.newArrayList();
		for (final String part : Splitter.on(" ").omitEmptyStrings().split(s)) {
			if (!particles.contains(part.toLowerCase())) {
				list.add(part);
			}
		}
		return list;
	}

	public List<String> getName() {
		return name;
	}

	public String getNameString() {
		return Joiner.on(" ").join(getName());
	}

	public List<String> getSurname() {
		return surname;
	}

	public List<String> getFullname() {
		return fullname;
	}

	public String getOriginal() {
		return original;
	}

	public String hash() {
		return Hashing.murmur3_128().hashString(getNormalisedFullname(), Charset.forName(UTF8)).toString();
	}

	public String getNormalisedFirstName() {
		return Joiner.on(" ").join(getCapitalFirstnames());
	}

	public String getNormalisedSurname() {
		return Joiner.on(" ").join(getCapitalSurname());
	}

	public String getSurnameString() {
		return Joiner.on(" ").join(getSurname());
	}

	public String getNormalisedFullname() {
		return isAccurate() ? getNormalisedSurname() + ", " + getNormalisedFirstName() : Joiner.on(" ").join(fullname);
	}

	public List<String> getCapitalFirstnames() {
		return Lists.newArrayList(Iterables.transform(getNameWithAbbreviations(), new Capitalise()));
	}

	public List<String> getCapitalSurname() {
		return Lists.newArrayList(Iterables.transform(surname, new Capitalise()));
	}

	public List<String> getNameWithAbbreviations() {
		return Lists.newArrayList(Iterables.transform(name, new DotAbbreviations()));
	}

	public boolean isAccurate() {
		return ((name != null) && (surname != null) && !name.isEmpty() && !surname.isEmpty());
	}
}
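A quick sketch of the parsing behaviour implemented above, for the comma-separated case:

	Person p = new Person("Artini, Michele", false);
	p.getSurnameString();      // "Artini"  (the token before the comma)
	p.getNameString();         // "Michele"
	p.isAccurate();            // true: both name and surname were derived
	p.getNormalisedFullname(); // typically "Artini, Michele" (exact form depends on Capitalise/DotAbbreviations)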
@@ -0,0 +1,119 @@

package eu.dnetlib.pace.model;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import com.google.common.collect.Lists;
import com.google.common.collect.Sets;

public class PersonComparatorUtils {

	private static final int MAX_FULLNAME_LENGTH = 50;

	public static Set<String> getNgramsForPerson(String fullname) {

		Set<String> set = Sets.newHashSet();

		if (fullname.length() > MAX_FULLNAME_LENGTH) {
			return set;
		}

		Person p = new Person(fullname, true);

		if (p.isAccurate()) {
			for (String name : p.getName()) {
				for (String surname : p.getSurname()) {
					set.add((name.charAt(0) + "_" + surname).toLowerCase());
				}
			}
		} else {
			List<String> list = p.getFullname();
			for (int i = 0; i < list.size(); i++) {
				if (list.get(i).length() > 1) {
					for (int j = 0; j < list.size(); j++) {
						if (i != j) {
							set.add((list.get(j).charAt(0) + "_" + list.get(i)).toLowerCase());
						}
					}
				}
			}
		}

		return set;
	}

	public static boolean areSimilar(String s1, String s2) {
		Person p1 = new Person(s1, true);
		Person p2 = new Person(s2, true);

		if (p1.isAccurate() && p2.isAccurate()) {
			return verifyNames(p1.getName(), p2.getName()) && verifySurnames(p1.getSurname(), p2.getSurname());
		} else {
			return verifyFullnames(p1.getFullname(), p2.getFullname());
		}
	}

	private static boolean verifyNames(List<String> list1, List<String> list2) {
		return verifySimilarity(extractExtendedNames(list1), extractExtendedNames(list2))
			&& verifySimilarity(extractInitials(list1), extractInitials(list2));
	}

	private static boolean verifySurnames(List<String> list1, List<String> list2) {
		if (list1.size() != list2.size()) {
			return false;
		}
		for (int i = 0; i < list1.size(); i++) {
			if (!list1.get(i).equalsIgnoreCase(list2.get(i))) {
				return false;
			}
		}
		return true;
	}

	private static boolean verifyFullnames(List<String> list1, List<String> list2) {
		Collections.sort(list1);
		Collections.sort(list2);
		return verifySimilarity(extractExtendedNames(list1), extractExtendedNames(list2))
			&& verifySimilarity(extractInitials(list1), extractInitials(list2));
	}

	private static List<String> extractExtendedNames(List<String> list) {
		ArrayList<String> res = Lists.newArrayList();
		for (String s : list) {
			if (s.length() > 1) {
				res.add(s.toLowerCase());
			}
		}
		return res;
	}

	private static List<String> extractInitials(List<String> list) {
		ArrayList<String> res = Lists.newArrayList();
		for (String s : list) {
			res.add(s.substring(0, 1).toLowerCase());
		}
		return res;
	}

	private static boolean verifySimilarity(List<String> list1, List<String> list2) {
		if (list1.size() > list2.size()) {
			return verifySimilarity(list2, list1);
		}

		// NB: list2 is greater than or equal to list1
		int pos = -1;
		for (String s : list1) {
			int curr = list2.indexOf(s);
			if (curr > pos) {
				list2.set(curr, "*"); // invalidate the matched element so duplicated tokens (e.g. "amm - amm") cannot match twice
				pos = curr;
			} else {
				return false;
			}
		}
		return true;
	}
}
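Two illustrative calls whose outcomes follow from the logic above (assuming the standard name-particle list):

	PersonComparatorUtils.areSimilar("Artini, Michele", "Artini, M."); // true: surnames equal, initials compatible
	PersonComparatorUtils.getNgramsForPerson("Artini, Michele");       // {"m_artini"}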
@@ -0,0 +1,65 @@

package eu.dnetlib.pace.model;

import java.util.Comparator;

import org.apache.spark.sql.Row;

import eu.dnetlib.pace.clustering.NGramUtils;

/**
 * The Class RowDataOrderingComparator.
 */
public class RowDataOrderingComparator implements Comparator<Row> {

	/** The comparator field. */
	private final int comparatorField;
	private final int identityFieldPosition;

	/**
	 * Instantiates a new row data ordering comparator.
	 *
	 * @param comparatorField
	 *            the comparator field
	 * @param identityFieldPosition
	 *            the position of the identity field, used as the final tie-breaker
	 */
	public RowDataOrderingComparator(final int comparatorField, int identityFieldPosition) {
		this.comparatorField = comparatorField;
		this.identityFieldPosition = identityFieldPosition;
	}

	/*
	 * (non-Javadoc)
	 * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
	 */
	@Override
	public int compare(final Row d1, final Row d2) {
		if (d1 == null)
			return d2 == null ? 0 : -1;
		else if (d2 == null) {
			return 1;
		}

		final String o1 = d1.getString(comparatorField);
		final String o2 = d2.getString(comparatorField);

		if (o1 == null)
			return o2 == null ? 0 : -1;
		else if (o2 == null) {
			return 1;
		}

		final String to1 = NGramUtils.cleanupForOrdering(o1);
		final String to2 = NGramUtils.cleanupForOrdering(o2);

		int res = to1.compareTo(to2);
		if (res == 0) {
			res = o1.compareTo(o2);
			if (res == 0) {
				return d1.getString(identityFieldPosition).compareTo(d2.getString(identityFieldPosition));
			}
		}

		return res;
	}

}
@@ -0,0 +1,131 @@
package eu.dnetlib.pace.model

import eu.dnetlib.pace.config.{DedupConfig, Type}
import eu.dnetlib.pace.util.{BlockProcessor, SparkReporter}
import org.apache.spark.SparkContext
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.expressions._
import org.apache.spark.sql.functions.{col, lit, udf}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, Dataset, Row, functions}

import java.util.function.Predicate
import java.util.stream.Collectors
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.collection.mutable

case class SparkDeduper(conf: DedupConfig) extends Serializable {

  val model: SparkModel = SparkModel(conf)

  val dedup: (Dataset[Row] => Dataset[Row]) = df => {
    df.transform(filterAndCleanup)
      .transform(generateClustersWithCollect)
      .transform(processBlocks)
  }

  val filterAndCleanup: (Dataset[Row] => Dataset[Row]) = df => {
    val df_with_filters = conf.getPace.getModel.asScala.foldLeft(df)((res, fdef) => {
      if (conf.blacklists.containsKey(fdef.getName)) {
        res.withColumn(
          fdef.getName + "_filtered",
          filterColumnUDF(fdef).apply(new Column(fdef.getName))
        )
      } else {
        res
      }
    })

    df_with_filters
  }

  def filterColumnUDF(fdef: FieldDef): UserDefinedFunction = {
    val blacklist: Predicate[String] = conf.blacklists().get(fdef.getName)

    if (blacklist == null) {
      throw new IllegalArgumentException("Column: " + fdef.getName + " does not have any filter")
    } else {
      fdef.getType match {
        case Type.List | Type.JSON =>
          udf[Array[String], Array[String]](values => {
            values.filter((v: String) => !blacklist.test(v))
          })

        case _ =>
          udf[String, String](v => {
            if (blacklist.test(v)) ""
            else v
          })
      }
    }
  }

  val generateClustersWithCollect: (Dataset[Row] => Dataset[Row]) = df_with_filters => {
    var df_with_clustering_keys: Dataset[Row] = null

    for ((cd, idx) <- conf.clusterings().zipWithIndex) {
      val inputColumns = cd.getFields().foldLeft(Seq[Column]())((acc, fName) => {
        val column = if (conf.blacklists.containsKey(fName))
          Seq(col(fName + "_filtered"))
        else
          Seq(col(fName))

        acc ++ column
      })

      // Add 'key' column with the value generated by the given clustering definition
      val ds: Dataset[Row] = df_with_filters
        .withColumn("clustering", lit(cd.getName + "::" + idx))
        .withColumn("key", functions.explode(clusterValuesUDF(cd).apply(functions.array(inputColumns: _*))))
        // Add position column having the position of the row within the set of rows having the same key value ordered by the sorting value
        .withColumn("position", functions.row_number().over(Window.partitionBy("key").orderBy(col(model.orderingFieldName), col(model.identifierFieldName))))

      if (df_with_clustering_keys == null)
        df_with_clustering_keys = ds
      else
        df_with_clustering_keys = df_with_clustering_keys.union(ds)
    }

    //TODO: analytics

    val df_with_blocks = df_with_clustering_keys
      // filter out rows with position exceeding the maxqueuesize parameter
      .filter(col("position").leq(conf.getWf.getQueueMaxSize))
      .groupBy("clustering", "key")
      .agg(functions.collect_set(functions.struct(model.schema.fieldNames.map(col): _*)).as("block"))
      .filter(functions.size(new Column("block")).gt(1))

    df_with_blocks
  }

  def clusterValuesUDF(cd: ClusteringDef) = {
    udf[mutable.WrappedArray[String], mutable.WrappedArray[Any]](values => {
      values.flatMap(f => cd.clusteringFunction().apply(conf, Seq(f.toString).asJava).asScala)
    })
  }

  val processBlocks: (Dataset[Row] => Dataset[Row]) = df => {
    df.filter(functions.size(new Column("block")).geq(new Literal(2, DataTypes.IntegerType)))
      .withColumn("relations", processBlock(df.sqlContext.sparkContext).apply(new Column("block")))
      .select(functions.explode(new Column("relations")).as("relation"))
  }

  def processBlock(implicit sc: SparkContext) = {
    val accumulators = SparkReporter.constructAccumulator(conf, sc)

    udf[Array[(String, String)], mutable.WrappedArray[Row]](block => {
      val reporter = new SparkReporter(accumulators)

      val mapDocuments = block.asJava.stream()
        .sorted(new RowDataOrderingComparator(model.orderingFieldPosition, model.identityFieldPosition))
        .limit(conf.getWf.getQueueMaxSize)
        .collect(Collectors.toList[Row]())

      new BlockProcessor(conf, model.identityFieldPosition, model.orderingFieldPosition).processSortedRows(mapDocuments, reporter)

      reporter.getRelations.asScala.toArray
    }).asNondeterministic()
  }

}
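A driver-side sketch of the pipeline wired by the three transforms above, written as a Java caller (a sketch, not the authoritative entry point; the Scala function values are invoked through their apply method, and parseJsonDataset is defined in SparkModel below):

	SparkDeduper deduper = new SparkDeduper(conf);
	// jsonDataset: Dataset<String> of serialised entities
	Dataset<Row> entities = deduper.model().parseJsonDataset().apply(jsonDataset);
	Dataset<Row> relations = deduper.dedup().apply(entities); // filter -> cluster -> compare blocks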
@@ -0,0 +1,108 @@
package eu.dnetlib.pace.model

import com.jayway.jsonpath.{Configuration, JsonPath}
import eu.dnetlib.pace.config.{DedupConfig, Type}
import eu.dnetlib.pace.util.MapDocumentUtil
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
import org.apache.spark.sql.types.{DataTypes, Metadata, StructField, StructType}
import org.apache.spark.sql.{Dataset, Row}

import java.util.regex.Pattern
import scala.collection.JavaConverters._

case class SparkModel(conf: DedupConfig) {
  private val URL_REGEX: Pattern = Pattern.compile("^\\s*(http|https|ftp)\\://.*")

  private val CONCAT_REGEX: Pattern = Pattern.compile("\\|\\|\\|")

  val identifierFieldName = "identifier"

  val orderingFieldName = if (!conf.getWf.getOrderField.isEmpty) conf.getWf.getOrderField else identifierFieldName

  val schema: StructType = {
    // create an implicit identifier field
    val identifier = new FieldDef()
    identifier.setName(identifierFieldName)
    identifier.setType(Type.String)

    // Construct a Spark StructType representing the schema of the model
    (Seq(identifier) ++ conf.getPace.getModel.asScala)
      .foldLeft(
        new StructType()
      )((resType, fieldDef) => {
        resType.add(fieldDef.getType match {
          case Type.List | Type.JSON =>
            StructField(fieldDef.getName, DataTypes.createArrayType(DataTypes.StringType), true, Metadata.empty)
          case Type.DoubleArray =>
            StructField(fieldDef.getName, DataTypes.createArrayType(DataTypes.DoubleType), true, Metadata.empty)
          case _ =>
            StructField(fieldDef.getName, DataTypes.StringType, true, Metadata.empty)
        })
      })
  }

  val identityFieldPosition: Int = schema.fieldIndex(identifierFieldName)

  val orderingFieldPosition: Int = schema.fieldIndex(orderingFieldName)

  val parseJsonDataset: (Dataset[String] => Dataset[Row]) = df => {
    df.map(r => rowFromJson(r))(RowEncoder(schema))
  }

  def rowFromJson(json: String): Row = {
    val documentContext =
      JsonPath.using(Configuration.defaultConfiguration.addOptions(com.jayway.jsonpath.Option.SUPPRESS_EXCEPTIONS)).parse(json)
    val values = new Array[Any](schema.size)

    values(identityFieldPosition) = MapDocumentUtil.getJPathString(conf.getWf.getIdPath, documentContext)

    schema.fieldNames.zipWithIndex.foldLeft(values) {
      case ((res, (fname, index))) => {
        val fdef = conf.getPace.getModelMap.get(fname)

        if (fdef != null) {
          res(index) = fdef.getType match {
            case Type.String | Type.Int =>
              MapDocumentUtil.truncateValue(
                MapDocumentUtil.getJPathString(fdef.getPath, documentContext),
                fdef.getLength
              )

            case Type.URL =>
              var uv = MapDocumentUtil.getJPathString(fdef.getPath, documentContext)
              if (!URL_REGEX.matcher(uv).matches)
                uv = ""
              uv

            case Type.List | Type.JSON =>
              MapDocumentUtil.truncateList(
                MapDocumentUtil.getJPathList(fdef.getPath, documentContext, fdef.getType),
                fdef.getSize
              ).toArray

            case Type.StringConcat =>
              val jpaths = CONCAT_REGEX.split(fdef.getPath)

              MapDocumentUtil.truncateValue(
                jpaths
                  .map(jpath => MapDocumentUtil.getJPathString(jpath, documentContext))
                  .mkString(" "),
                fdef.getLength
              )

            case Type.DoubleArray =>
              MapDocumentUtil.getJPathArray(fdef.getPath, json)
          }
        }

        res
      }
    }

    new GenericRowWithSchema(values, schema)
  }
}
@@ -0,0 +1,42 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("alwaysMatch")
public class AlwaysMatch<T> extends AbstractComparator<T> {

	public AlwaysMatch(final Map<String, String> params) {
		super(params, new com.wcohen.ss.JaroWinkler());
	}

	public AlwaysMatch(final double weight) {
		super(weight, new com.wcohen.ss.JaroWinkler());
	}

	protected AlwaysMatch(final double weight, final AbstractStringDistance ssalgo) {
		super(weight, ssalgo);
	}

	@Override
	public double compare(final Object a, final Object b, final Config conf) {
		return 1.0;
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(final double d) {
		return d;
	}

}
@@ -0,0 +1,157 @@

package eu.dnetlib.pace.tree;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.model.Person;
import eu.dnetlib.pace.tree.support.AbstractListComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("authorsMatch")
public class AuthorsMatch extends AbstractListComparator {

	Map<String, String> params;

	private double SURNAME_THRESHOLD;
	private double NAME_THRESHOLD;
	private double FULLNAME_THRESHOLD;
	private String MODE; // full or surname
	private int SIZE_THRESHOLD;
	private String TYPE; // count or percentage
	private int common;

	public AuthorsMatch(Map<String, String> params) {
		super(params, new com.wcohen.ss.JaroWinkler());
		this.params = params;

		MODE = params.getOrDefault("mode", "full");
		SURNAME_THRESHOLD = Double.parseDouble(params.getOrDefault("surname_th", "0.95"));
		NAME_THRESHOLD = Double.parseDouble(params.getOrDefault("name_th", "0.95"));
		FULLNAME_THRESHOLD = Double.parseDouble(params.getOrDefault("fullname_th", "0.9"));
		SIZE_THRESHOLD = Integer.parseInt(params.getOrDefault("size_th", "20"));
		TYPE = params.getOrDefault("type", "percentage");
		common = 0;
	}

	protected AuthorsMatch(double w, AbstractStringDistance ssalgo) {
		super(w, ssalgo);
	}

	@Override
	public double compare(final List<String> a, final List<String> b, final Config conf) {

		if (a.isEmpty() || b.isEmpty())
			return -1;

		if (a.size() > SIZE_THRESHOLD || b.size() > SIZE_THRESHOLD)
			return 1.0;

		List<Person> aList = a.stream().map(author -> new Person(author, false)).collect(Collectors.toList());
		List<Person> bList = b.stream().map(author -> new Person(author, false)).collect(Collectors.toList());

		common = 0;
		// compare each element of List1 with each element of List2
		for (Person p1 : aList)

			for (Person p2 : bList) {

				// both persons are inaccurate
				if (!p1.isAccurate() && !p2.isAccurate()) {
					// compare just normalized fullnames
					String fullname1 = normalization(
						p1.getNormalisedFullname().isEmpty() ? p1.getOriginal() : p1.getNormalisedFullname());
					String fullname2 = normalization(
						p2.getNormalisedFullname().isEmpty() ? p2.getOriginal() : p2.getNormalisedFullname());

					if (ssalgo.score(fullname1, fullname2) > FULLNAME_THRESHOLD) {
						common += 1;
						break;
					}
				}

				// one person is inaccurate
				if (p1.isAccurate() ^ p2.isAccurate()) {
					// prepare data
					// data for the accurate person
					String name = normalization(
						p1.isAccurate() ? p1.getNormalisedFirstName() : p2.getNormalisedFirstName());
					String surname = normalization(
						p1.isAccurate() ? p1.getNormalisedSurname() : p2.getNormalisedSurname());

					// data for the inaccurate person
					String fullname = normalization(
						p1.isAccurate()
							? ((p2.getNormalisedFullname().isEmpty()) ? p2.getOriginal() : p2.getNormalisedFullname())
							: (p1.getNormalisedFullname().isEmpty() ? p1.getOriginal() : p1.getNormalisedFullname()));

					if (fullname.contains(surname)) {
						if (MODE.equals("full")) {
							if (fullname.contains(name)) {
								common += 1;
								break;
							}
						} else { // MODE equals "surname"
							common += 1;
							break;
						}
					}
				}

				// both persons are accurate
				if (p1.isAccurate() && p2.isAccurate()) {

					if (compareSurname(p1, p2)) {
						if (MODE.equals("full")) {
							if (compareFirstname(p1, p2)) {
								common += 1;
								break;
							}
						} else { // MODE equals "surname"
							common += 1;
							break;
						}
					}

				}

			}

		// normalization factor to compute the score
		int normFactor = aList.size() == bList.size() ? aList.size() : (aList.size() + bList.size() - common);

		if (TYPE.equals("percentage")) {
			return (double) common / normFactor;
		} else {
			return (double) common;
		}
	}

	public boolean compareSurname(Person p1, Person p2) {
		return ssalgo
			.score(
				normalization(p1.getNormalisedSurname()), normalization(p2.getNormalisedSurname())) > SURNAME_THRESHOLD;
	}

	public boolean compareFirstname(Person p1, Person p2) {

		if (p1.getNormalisedFirstName().length() <= 2 || p2.getNormalisedFirstName().length() <= 2) {
			if (firstLC(p1.getNormalisedFirstName()).equals(firstLC(p2.getNormalisedFirstName())))
				return true;
		}

		return ssalgo
			.score(
				normalization(p1.getNormalisedFirstName()),
				normalization(p2.getNormalisedFirstName())) > NAME_THRESHOLD;
	}

	public String normalization(String s) {
		return normalize(utf8(cleanup(s)));
	}

}
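An invocation sketch; the Config argument is not used by this compare implementation, so null is passed purely for illustration:

	Map<String, String> params = new HashMap<>();
	params.put("mode", "full");
	params.put("type", "percentage");
	AuthorsMatch am = new AuthorsMatch(params);
	double score = am.compare(
		Arrays.asList("Artini, Michele", "Atzori, Claudio"),
		Arrays.asList("M. Artini", "C. Atzori"),
		null); // 1.0: every author finds a counterpart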
@@ -0,0 +1,48 @@

package eu.dnetlib.pace.tree;

import java.util.Map;
import java.util.Set;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("cityMatch")
public class CityMatch extends AbstractStringComparator {

	private Map<String, String> params;

	public CityMatch(Map<String, String> params) {
		super(params);
		this.params = params;
	}

	@Override
	public double distance(final String a, final String b, final Config conf) {

		String ca = cleanup(a);
		String cb = cleanup(b);

		ca = normalize(ca);
		cb = normalize(cb);

		ca = filterAllStopWords(ca);
		cb = filterAllStopWords(cb);

		Set<String> cities1 = getCities(ca, Integer.parseInt(params.getOrDefault("windowSize", "4")));
		Set<String> cities2 = getCities(cb, Integer.parseInt(params.getOrDefault("windowSize", "4")));

		Set<String> codes1 = citiesToCodes(cities1);
		Set<String> codes2 = citiesToCodes(cities2);

		// if no cities are detected, the comparator gives 1.0
		if (codes1.isEmpty() && codes2.isEmpty())
			return 1.0;
		else {
			if (codes1.isEmpty() ^ codes2.isEmpty())
				return -1; // undefined if one of the two has no cities
			return commonElementsPercentage(codes1, codes2);
		}
	}
}
@@ -0,0 +1,47 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("cosineSimilarity")
public class CosineSimilarity extends AbstractComparator<double[]> {

	Map<String, String> params;

	public CosineSimilarity(Map<String, String> params) {
		super(params);
		this.params = params;
	}

	@Override
	public double compare(Object a, Object b, Config config) {
		return compare((double[]) a, (double[]) b, config);
	}

	public double compare(final double[] a, final double[] b, final Config conf) {

		if (a.length == 0 || b.length == 0)
			return -1;

		return cosineSimilarity(a, b);
	}

	double cosineSimilarity(double[] a, double[] b) {
		double dotProduct = 0;
		double normASum = 0;
		double normBSum = 0;

		for (int i = 0; i < a.length; i++) {
			dotProduct += a[i] * b[i];
			normASum += a[i] * a[i];
			normBSum += b[i] * b[i];
		}

		// product of the two vector norms, i.e. |a| * |b|
		double normProduct = Math.sqrt(normASum) * Math.sqrt(normBSum);
		return dotProduct / normProduct;
	}

}
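A worked call: for a = (1, 0) and b = (1, 1) the dot product is 1 and the product of the norms is sqrt(2), so the score is 1/sqrt(2) ≈ 0.707 (conf is unused by this comparator and passed as null only for illustration):

	double score = new CosineSimilarity(new HashMap<>())
		.compare(new double[] { 1, 0 }, new double[] { 1, 1 }, null); // ≈ 0.707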
@@ -0,0 +1,27 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * The Class DoiExactMatch.
 *
 * @author claudio
 */
@ComparatorClass("doiExactMatch")
public class DoiExactMatch extends ExactMatchIgnoreCase {

	public final String PREFIX = "(http:\\/\\/dx\\.doi\\.org\\/)|(doi:)";

	public DoiExactMatch(final Map<String, String> params) {
		super(params);
	}

	@Override
	protected String toString(final Object f) {
		return super.toString(f).replaceAll(PREFIX, "");
	}

}
@@ -0,0 +1,30 @@

package eu.dnetlib.pace.tree;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.Map;

import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("domainExactMatch")
public class DomainExactMatch extends ExactMatchIgnoreCase {

	public DomainExactMatch(final Map<String, String> params) {
		super(params);
	}

	@Override
	protected String toString(final Object f) {

		try {
			return asUrl(super.toString(f)).getHost();
		} catch (MalformedURLException e) {
			return "";
		}
	}

	private URL asUrl(final String value) throws MalformedURLException {
		return new URL(value);
	}
}
@@ -0,0 +1,44 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("exactMatch")
public class ExactMatch extends AbstractStringComparator {

	public ExactMatch(Map<String, String> params) {
		super(params, new com.wcohen.ss.JaroWinkler());
	}

	public ExactMatch(final double weight) {
		super(weight, new com.wcohen.ss.JaroWinkler());
	}

	protected ExactMatch(final double weight, final AbstractStringDistance ssalgo) {
		super(weight, ssalgo);
	}

	@Override
	public double distance(final String a, final String b, final Config conf) {
		if (a.isEmpty() || b.isEmpty()) {
			return -1.0; // return -1 if a field is missing
		}
		return a.equals(b) ? 1.0 : 0;
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(final double d) {
		return d;
	}
}
@@ -0,0 +1,29 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("exactMatchIgnoreCase")
public class ExactMatchIgnoreCase extends AbstractStringComparator {

	public ExactMatchIgnoreCase(Map<String, String> params) {
		super(params);
	}

	@Override
	public double compare(String a, String b, final Config conf) {

		if (a.isEmpty() || b.isEmpty())
			return -1;

		return a.equalsIgnoreCase(b) ? 1 : 0;
	}

	protected String toString(final Object object) {
		return toFirstString(object);
	}
}
@@ -0,0 +1,80 @@

package eu.dnetlib.pace.tree;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import com.google.common.collect.Sets;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractListComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("instanceTypeMatch")
public class InstanceTypeMatch extends AbstractListComparator {

	final Map<String, String> translationMap = new HashMap<>();

	public InstanceTypeMatch(Map<String, String> params) {
		super(params);

		// jolly types
		translationMap.put("Conference object", "*");
		translationMap.put("Other literature type", "*");
		translationMap.put("Unknown", "*");

		// article types
		translationMap.put("Article", "Article");
		translationMap.put("Data Paper", "Article");
		translationMap.put("Software Paper", "Article");
		translationMap.put("Preprint", "Article");

		// thesis types
		translationMap.put("Thesis", "Thesis");
		translationMap.put("Master thesis", "Thesis");
		translationMap.put("Bachelor thesis", "Thesis");
		translationMap.put("Doctoral thesis", "Thesis");
	}

	@Override
	public double compare(final List<String> a, final List<String> b, final Config conf) {

		if (a == null || b == null) {
			return -1;
		}

		if (a.isEmpty() || b.isEmpty()) {
			return -1;
		}

		final Set<String> ca = a.stream().map(this::translate).collect(Collectors.toSet());
		final Set<String> cb = b.stream().map(this::translate).collect(Collectors.toSet());

		// if at least one is a jolly type, it must produce a match
		if (ca.contains("*") || cb.contains("*"))
			return 1.0;

		int incommon = Sets.intersection(ca, cb).size();

		// if at least one is in common, it must produce a match
		return incommon >= 1 ? 1 : 0;
	}

	public String translate(String term) {
		return translationMap.getOrDefault(term, term);
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(final double d) {
		return d;
	}

}
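Two illustrative outcomes of the translation table above (conf is unused by this compare implementation and passed as null for illustration):

	InstanceTypeMatch itm = new InstanceTypeMatch(new HashMap<>());
	itm.compare(Arrays.asList("Master thesis"), Arrays.asList("Doctoral thesis"), null); // 1.0: both map to "Thesis"
	itm.compare(Arrays.asList("Unknown"), Arrays.asList("Article"), null);               // 1.0: "Unknown" is a jolly type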
@@ -0,0 +1,46 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

//case class JaroWinkler(w: Double) extends SecondStringDistanceAlgo(w, new com.wcohen.ss.JaroWinkler())
@ComparatorClass("jaroWinkler")
public class JaroWinkler extends AbstractStringComparator {

	public JaroWinkler(Map<String, String> params) {
		super(params, new com.wcohen.ss.JaroWinkler());
	}

	public JaroWinkler(double weight) {
		super(weight, new com.wcohen.ss.JaroWinkler());
	}

	protected JaroWinkler(double weight, AbstractStringDistance ssalgo) {
		super(weight, ssalgo);
	}

	@Override
	public double distance(String a, String b, final Config conf) {
		String ca = cleanup(a);
		String cb = cleanup(b);

		return normalize(ssalgo.score(ca, cb));
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(double d) {
		return d;
	}

}
@@ -0,0 +1,74 @@

package eu.dnetlib.pace.tree;

import java.util.Map;
import java.util.Set;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("jaroWinklerNormalizedName")
public class JaroWinklerNormalizedName extends AbstractStringComparator {

	private Map<String, String> params;

	public JaroWinklerNormalizedName(Map<String, String> params) {
		super(params, new com.wcohen.ss.JaroWinkler());
		this.params = params;
	}

	public JaroWinklerNormalizedName(double weight) {
		super(weight, new com.wcohen.ss.JaroWinkler());
	}

	protected JaroWinklerNormalizedName(double weight, AbstractStringDistance ssalgo) {
		super(weight, ssalgo);
	}

	@Override
	public double distance(String a, String b, final Config conf) {
		String ca = cleanup(a);
		String cb = cleanup(b);

		ca = normalize(ca);
		cb = normalize(cb);

		ca = filterAllStopWords(ca);
		cb = filterAllStopWords(cb);

		Set<String> keywords1 = getKeywords(
			ca, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));
		Set<String> keywords2 = getKeywords(
			cb, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));

		Set<String> cities1 = getCities(ca, Integer.parseInt(params.getOrDefault("windowSize", "4")));
		Set<String> cities2 = getCities(cb, Integer.parseInt(params.getOrDefault("windowSize", "4")));

		ca = removeKeywords(ca, keywords1);
		ca = removeKeywords(ca, cities1);
		cb = removeKeywords(cb, keywords2);
		cb = removeKeywords(cb, cities2);

		ca = ca.replaceAll("[ ]{2,}", " ");
		cb = cb.replaceAll("[ ]{2,}", " ");

		if (ca.isEmpty() && cb.isEmpty())
			return 1.0;
		else
			return normalize(ssalgo.score(ca, cb));
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(double d) {
		return d;
	}

}
@@ -0,0 +1,47 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

//case class JaroWinkler(w: Double) extends SecondStringDistanceAlgo(w, new com.wcohen.ss.JaroWinkler())
@ComparatorClass("jaroWinklerTitle")
public class JaroWinklerTitle extends AbstractStringComparator {

	public JaroWinklerTitle(Map<String, String> params) {
		super(params, new com.wcohen.ss.JaroWinkler());
	}

	public JaroWinklerTitle(double weight) {
		super(weight, new com.wcohen.ss.JaroWinkler());
	}

	protected JaroWinklerTitle(double weight, AbstractStringDistance ssalgo) {
		super(weight, ssalgo);
	}

	@Override
	public double distance(String a, String b, final Config conf) {
		String ca = cleanup(a);
		String cb = cleanup(b);

		boolean check = checkNumbers(ca, cb);
		return check ? 0.5 : normalize(ssalgo.score(ca, cb));
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(double d) {
		return d;
	}

}
@@ -0,0 +1,82 @@

package eu.dnetlib.pace.tree;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.google.common.collect.Sets;
import com.jayway.jsonpath.Configuration;
import com.jayway.jsonpath.DocumentContext;
import com.jayway.jsonpath.JsonPath;
import com.jayway.jsonpath.Option;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractListComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;
import eu.dnetlib.pace.util.MapDocumentUtil;

@ComparatorClass("jsonListMatch")
public class JsonListMatch extends AbstractListComparator {

	private static final Log log = LogFactory.getLog(JsonListMatch.class);
	private Map<String, String> params;

	private String MODE; // "percentage" or "count"

	public JsonListMatch(final Map<String, String> params) {
		super(params);
		this.params = params;

		MODE = params.getOrDefault("mode", "percentage");
	}

	@Override
	public double compare(final List<String> sa, final List<String> sb, final Config conf) {
		if (sa.isEmpty() || sb.isEmpty()) {
			return -1;
		}

		final Set<String> ca = sa.stream().map(this::toComparableString).collect(Collectors.toSet());
		final Set<String> cb = sb.stream().map(this::toComparableString).collect(Collectors.toSet());

		int incommon = Sets.intersection(ca, cb).size();
		int simDiff = Sets.symmetricDifference(ca, cb).size();

		if (incommon + simDiff == 0) {
			return 0.0;
		}

		if (MODE.equals("percentage"))
			return (double) incommon / (incommon + simDiff);
		else
			return incommon;
	}

	// converts every JSON into a comparable string, based on the jpath expressions in the parameters
	private String toComparableString(String json) {

		// builds the string used for comparison from the jpath parameters
		StringBuilder st = new StringBuilder();
		final DocumentContext documentContext = JsonPath
			.using(Configuration.defaultConfiguration().addOptions(Option.SUPPRESS_EXCEPTIONS))
			.parse(json);

		// for each path in the param list
		for (String key : params.keySet().stream().filter(k -> k.contains("jpath")).collect(Collectors.toList())) {
			String path = params.get(key);
			String value = MapDocumentUtil.getJPathString(path, documentContext);
			if (value == null || value.isEmpty())
				value = "";
			st.append(value);
			st.append("::");
		}

		// drop the trailing "::" separator; the guard avoids a negative length when no jpath parameter is set
		if (st.length() >= 2) {
			st.setLength(st.length() - 2);
		}
		return st.toString();
	}
}
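To make the behaviour concrete, a small sketch of how jsonListMatch could be configured and invoked; the jpath_* parameter names are hypothetical (anything containing "jpath" is picked up by the filter above), and the null Config is an assumption that holds only because compare never touches it:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import eu.dnetlib.pace.tree.JsonListMatch;

public class JsonListMatchExample {
	public static void main(String[] args) {
		Map<String, String> params = new HashMap<>();
		params.put("jpath_classid", "$.qualifier.classid"); // hypothetical jpath parameters
		params.put("jpath_value", "$.value");

		List<String> a = Arrays.asList(
			"{\"qualifier\":{\"classid\":\"doi\"},\"value\":\"10.1/abc\"}",
			"{\"qualifier\":{\"classid\":\"pmid\"},\"value\":\"12345\"}");
		List<String> b = Arrays.asList(
			"{\"qualifier\":{\"classid\":\"doi\"},\"value\":\"10.1/abc\"}");

		// one pid in common, one in the symmetric difference -> 1 / (1 + 1) = 0.5 in "percentage" mode
		double score = new JsonListMatch(params).compare(a, b, null);
		System.out.println(score);
	}
}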
@@ -0,0 +1,50 @@

package eu.dnetlib.pace.tree;

import java.util.Map;
import java.util.Set;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("keywordMatch")
public class KeywordMatch extends AbstractStringComparator {

	Map<String, String> params;

	public KeywordMatch(Map<String, String> params) {
		super(params);
		this.params = params;
	}

	@Override
	public double distance(final String a, final String b, final Config conf) {

		String ca = cleanup(a);
		String cb = cleanup(b);

		ca = normalize(ca);
		cb = normalize(cb);

		ca = filterAllStopWords(ca);
		cb = filterAllStopWords(cb);

		Set<String> keywords1 = getKeywords(
			ca, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));
		Set<String> keywords2 = getKeywords(
			cb, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));

		Set<String> codes1 = toCodes(keywords1, conf.translationMap());
		Set<String> codes2 = toCodes(keywords2, conf.translationMap());

		// if no keywords are detected on either side, the comparator gives 1.0
		if (codes1.isEmpty() && codes2.isEmpty())
			return 1.0;
		else {
			if (codes1.isEmpty() ^ codes2.isEmpty())
				return -1.0; // undefined if only one of the two has keywords
			return commonElementsPercentage(codes1, codes2);
		}
	}
}
@@ -0,0 +1,36 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("level2JaroWinkler")
public class Level2JaroWinkler extends AbstractStringComparator {

	public Level2JaroWinkler(Map<String, String> params) {
		super(params, new com.wcohen.ss.Level2JaroWinkler());
	}

	public Level2JaroWinkler(double w) {
		super(w, new com.wcohen.ss.Level2JaroWinkler());
	}

	protected Level2JaroWinkler(double w, AbstractStringDistance ssalgo) {
		super(w, ssalgo);
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(double d) {
		return d;
	}

}
@@ -0,0 +1,50 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("level2JaroWinklerTitle")
public class Level2JaroWinklerTitle extends AbstractStringComparator {

	public Level2JaroWinklerTitle(Map<String, String> params) {
		super(params, new com.wcohen.ss.Level2JaroWinkler());
	}

	public Level2JaroWinklerTitle(final double w) {
		super(w, new com.wcohen.ss.Level2JaroWinkler());
	}

	protected Level2JaroWinklerTitle(final double w, final AbstractStringDistance ssalgo) {
		super(w, ssalgo);
	}

	@Override
	public double distance(final String a, final String b, final Config conf) {
		final String ca = cleanup(a);
		final String cb = cleanup(b);

		final boolean check = checkNumbers(ca, cb);

		if (check)
			return 0.5;

		return ssalgo.score(ca, cb);
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(final double d) {
		return d;
	}

}
@@ -0,0 +1,36 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("level2Levenstein")
public class Level2Levenstein extends AbstractStringComparator {

	public Level2Levenstein(Map<String, String> params) {
		super(params, new com.wcohen.ss.Level2Levenstein());
	}

	public Level2Levenstein(double w) {
		super(w, new com.wcohen.ss.Level2Levenstein());
	}

	protected Level2Levenstein(double w, AbstractStringDistance ssalgo) {
		super(w, ssalgo);
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(double d) {
		return 1 / Math.pow(Math.abs(d) + 1, 0.1);
	}

}
@@ -0,0 +1,36 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("levenstein")
public class Levenstein extends AbstractStringComparator {

	public Levenstein(Map<String, String> params) {
		super(params, new com.wcohen.ss.Levenstein());
	}

	public Levenstein(double w) {
		super(w, new com.wcohen.ss.Levenstein());
	}

	protected Levenstein(double w, AbstractStringDistance ssalgo) {
		super(w, ssalgo);
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(double d) {
		return 1 / Math.pow(Math.abs(d) + 1, 0.1);
	}

}
@@ -0,0 +1,59 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("levensteinTitle")
public class LevensteinTitle extends AbstractStringComparator {

	private static final Log log = LogFactory.getLog(LevensteinTitle.class);

	public LevensteinTitle(Map<String, String> params) {
		super(params, new com.wcohen.ss.Levenstein());
	}

	public LevensteinTitle(final double w) {
		super(w, new com.wcohen.ss.Levenstein());
	}

	protected LevensteinTitle(final double w, final AbstractStringDistance ssalgo) {
		super(w, ssalgo);
	}

	@Override
	public double distance(final String a, final String b, final Config conf) {
		final String ca = cleanup(a);
		final String cb = cleanup(b);

		final boolean check = checkNumbers(ca, cb);

		if (check)
			return 0.5;

		return normalize(ssalgo.score(ca, cb), ca.length(), cb.length());
	}

	private double normalize(final double score, final int la, final int lb) {
		return 1 - (Math.abs(score) / Math.max(la, lb));
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(final double d) {
		return 1 / Math.pow(Math.abs(d) + 1, 0.1);
	}

}
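As a worked illustration of the length normalization above (1 - |score| / max(la, lb), where secondstring's Levenstein score is the negative edit distance), a sketch assuming cleanup leaves these strings essentially unchanged apart from case; the null Config is passed only because distance never dereferences it:

import eu.dnetlib.pace.tree.LevensteinTitle;

public class LevensteinTitleExample {
	public static void main(String[] args) {
		LevensteinTitle cmp = new LevensteinTitle(1.0);

		// one edit over a 26-character title -> 1 - 1/26, roughly 0.96
		double d = cmp.distance(
			"Introduction to Algorithms",
			"Introduction to Algorithm",
			null);
		System.out.println(d);
	}
}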
@@ -0,0 +1,58 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * Compares two titles, ignoring version numbers. Suitable for Software entities.
 */
@ComparatorClass("levensteinTitleIgnoreVersion")
public class LevensteinTitleIgnoreVersion extends AbstractStringComparator {

	public LevensteinTitleIgnoreVersion(Map<String, String> params) {
		super(params, new com.wcohen.ss.Levenstein());
	}

	public LevensteinTitleIgnoreVersion(final double w) {
		super(w, new com.wcohen.ss.Levenstein());
	}

	protected LevensteinTitleIgnoreVersion(final double w, final AbstractStringDistance ssalgo) {
		super(w, ssalgo);
	}

	@Override
	public double distance(final String a, final String b, final Config conf) {
		String ca = cleanup(a);
		String cb = cleanup(b);

		ca = ca.replaceAll("\\d", "").replaceAll(getRomans(ca), "").trim();
		cb = cb.replaceAll("\\d", "").replaceAll(getRomans(cb), "").trim();

		ca = filterAllStopWords(ca);
		cb = filterAllStopWords(cb);

		return normalize(ssalgo.score(ca, cb), ca.length(), cb.length());
	}

	private double normalize(final double score, final int la, final int lb) {
		return 1 - (Math.abs(score) / Math.max(la, lb));
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(final double d) {
		return 1 / Math.pow(Math.abs(d) + 1, 0.1);
	}

}
@@ -0,0 +1,66 @@

package eu.dnetlib.pace.tree;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractListComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * The Class ListContainsMatch: checks whether the two lists contain a given target string.
 *
 * @author miconis
 * */
@ComparatorClass("listContainsMatch")
public class ListContainsMatch extends AbstractListComparator {

	private Map<String, String> params;
	private boolean CASE_SENSITIVE;
	private String STRING;
	private String AGGREGATOR;

	public ListContainsMatch(Map<String, String> params) {
		super(params);
		this.params = params;

		// read parameters
		CASE_SENSITIVE = Boolean.parseBoolean(params.getOrDefault("caseSensitive", "false"));
		STRING = params.get("string");
		AGGREGATOR = params.get("bool"); // AND, OR or XOR
	}

	@Override
	public double compare(List<String> sa, List<String> sb, Config conf) {
		if (sa.isEmpty() || sb.isEmpty()) {
			return -1;
		}

		// guard against missing parameters, which would otherwise raise a NullPointerException below
		if (STRING == null || AGGREGATOR == null) {
			return 0.0;
		}

		if (!CASE_SENSITIVE) {
			sa = sa.stream().map(String::toLowerCase).collect(Collectors.toList());
			sb = sb.stream().map(String::toLowerCase).collect(Collectors.toList());
			STRING = STRING.toLowerCase();
		}

		switch (AGGREGATOR) {
			case "AND":
				if (sa.contains(STRING) && sb.contains(STRING))
					return 1.0;
				break;
			case "OR":
				if (sa.contains(STRING) || sb.contains(STRING))
					return 1.0;
				break;
			case "XOR":
				if (sa.contains(STRING) ^ sb.contains(STRING))
					return 1.0;
				break;
			default:
				return 0.0;
		}
		return 0.0;
	}
}
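A usage sketch for listContainsMatch, again passing null as the Config, which compare never dereferences:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import eu.dnetlib.pace.tree.ListContainsMatch;

public class ListContainsMatchExample {
	public static void main(String[] args) {
		Map<String, String> params = new HashMap<>();
		params.put("string", "Open Access");
		params.put("bool", "AND"); // the aggregator is read from the "bool" parameter
		params.put("caseSensitive", "false");

		List<String> a = Arrays.asList("Open Access", "Journal Article");
		List<String> b = Arrays.asList("open access");

		// both lists contain the (lower-cased) target string -> 1.0
		double score = new ListContainsMatch(params).compare(a, b, null);
		System.out.println(score);
	}
}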
@@ -0,0 +1,42 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("mustBeDifferent")
public class MustBeDifferent extends AbstractStringComparator {

	public MustBeDifferent(Map<String, String> params) {
		super(params, new com.wcohen.ss.Levenstein());
	}

	public MustBeDifferent(final double weight) {
		super(weight, new com.wcohen.ss.JaroWinkler());
	}

	protected MustBeDifferent(final double weight, final AbstractStringDistance ssalgo) {
		super(weight, ssalgo);
	}

	@Override
	public double distance(final String a, final String b, final Config conf) {
		return !a.equals(b) ? 1.0 : 0;
	}

	@Override
	public double getWeight() {
		return super.weight;
	}

	@Override
	protected double normalize(final double d) {
		return d;
	}

}
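The semantics are simply an inverted equality check, as a quick sketch shows (distance uses neither the underlying ssalgo nor the Config, so the null argument is safe here):

import eu.dnetlib.pace.tree.MustBeDifferent;

public class MustBeDifferentExample {
	public static void main(String[] args) {
		MustBeDifferent cmp = new MustBeDifferent(1.0);

		System.out.println(cmp.distance("10.1234/a", "10.1234/b", null)); // different -> 1.0
		System.out.println(cmp.distance("10.1234/a", "10.1234/a", null)); // identical -> 0.0
	}
}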
@@ -0,0 +1,24 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.Comparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * Not all fields of a document need to participate in the compare measure. We model those fields as having a
 * NullDistanceAlgo.
 */
@ComparatorClass("null")
public class NullDistanceAlgo<T> implements Comparator<T> {

	public NullDistanceAlgo(Map<String, String> params) {
	}

	@Override
	public double compare(Object a, Object b, Config config) {
		return 0;
	}
}
@@ -0,0 +1,35 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("numbersComparator")
public class NumbersComparator extends AbstractStringComparator {

	Map<String, String> params;

	public NumbersComparator(Map<String, String> params) {
		super(params);
		this.params = params;
	}

	@Override
	public double distance(String a, String b, Config conf) {

		// extracts numbers from the field
		String numbers1 = getNumbers(nfd(a));
		String numbers2 = getNumbers(nfd(b));

		if (numbers1.isEmpty() || numbers2.isEmpty())
			return -1.0;

		int n1 = Integer.parseInt(numbers1);
		int n2 = Integer.parseInt(numbers2);

		return Math.abs(n1 - n2);
	}
}
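Note that the result is a raw absolute difference rather than a similarity in [0, 1]. A sketch, assuming the inherited getNumbers/nfd helpers extract the digit runs as their use above implies:

import java.util.HashMap;

import eu.dnetlib.pace.tree.NumbersComparator;

public class NumbersComparatorExample {
	public static void main(String[] args) {
		NumbersComparator cmp = new NumbersComparator(new HashMap<>());

		// "Volume 12" -> 12, "Volume 15" -> 15: the distance is |12 - 15| = 3.0
		System.out.println(cmp.distance("Volume 12", "Volume 15", null));

		// no digits on one side -> -1.0 (undefined)
		System.out.println(cmp.distance("Volume 12", "Volume", null));
	}
}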
@@ -0,0 +1,35 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("numbersMatch")
public class NumbersMatch extends AbstractStringComparator {

	public NumbersMatch(Map<String, String> params) {
		super(params);
	}

	@Override
	public double distance(String a, String b, Config conf) {

		// extracts numbers from the field
		String numbers1 = getNumbers(nfd(a));
		String numbers2 = getNumbers(nfd(b));

		if (numbers1.isEmpty() && numbers2.isEmpty())
			return 1.0;

		if (numbers1.isEmpty() || numbers2.isEmpty())
			return -1.0;

		if (numbers1.equals(numbers2))
			return 1.0;

		return 0.0;
	}
}
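The comparator is tri-state: 1.0 (numbers match, or neither side has any), 0.0 (mismatch), -1.0 (undefined). A sketch under the same assumption about the inherited helpers:

import java.util.HashMap;

import eu.dnetlib.pace.tree.NumbersMatch;

public class NumbersMatchExample {
	public static void main(String[] args) {
		NumbersMatch cmp = new NumbersMatch(new HashMap<>());

		System.out.println(cmp.distance("Part 2", "Part 2", null)); // same numbers -> 1.0
		System.out.println(cmp.distance("Part 2", "Part 3", null)); // different numbers -> 0.0
		System.out.println(cmp.distance("Part", "Part", null));     // no numbers at all -> 1.0
		System.out.println(cmp.distance("Part 2", "Part", null));   // numbers on one side only -> -1.0
	}
}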
@@ -0,0 +1,35 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("romansMatch")
public class RomansMatch extends AbstractStringComparator {

	public RomansMatch(Map<String, String> params) {
		super(params);
	}

	@Override
	public double distance(String a, String b, Config conf) {

		// extracts romans from the field
		String romans1 = getRomans(nfd(a));
		String romans2 = getRomans(nfd(b));

		if (romans1.isEmpty() && romans2.isEmpty())
			return 1.0;

		if (romans1.isEmpty() || romans2.isEmpty())
			return -1.0;

		if (romans1.equals(romans2))
			return 1.0;

		return 0.0;
	}
}
@@ -0,0 +1,38 @@

package eu.dnetlib.pace.tree;

import java.util.List;
import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractListComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * Returns 1.0 if the number of values in the two fields is the same, 0.0 otherwise.
 *
 * @author claudio
 */
@ComparatorClass("sizeMatch")
public class SizeMatch extends AbstractListComparator {

	/**
	 * Instantiates a new size match.
	 *
	 * @param params
	 *            the parameters
	 */
	public SizeMatch(final Map<String, String> params) {
		super(params);
	}

	@Override
	public double compare(final List<String> a, final List<String> b, final Config conf) {

		if (a.isEmpty() || b.isEmpty())
			return -1.0;

		return a.size() == b.size() ? 1.0 : 0.0;
	}

}
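A quick sketch of the three possible outcomes (the null Config is never dereferenced by compare):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;

import eu.dnetlib.pace.tree.SizeMatch;

public class SizeMatchExample {
	public static void main(String[] args) {
		SizeMatch cmp = new SizeMatch(new HashMap<>());

		System.out.println(cmp.compare(Arrays.asList("a", "b"), Arrays.asList("x", "y"), null)); // same size -> 1.0
		System.out.println(cmp.compare(Arrays.asList("a", "b"), Arrays.asList("x"), null));      // different size -> 0.0
		System.out.println(cmp.compare(Collections.emptyList(), Arrays.asList("x"), null));      // empty side -> -1.0
	}
}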
@@ -0,0 +1,61 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.tree.support.AbstractSortedComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * The Class SortedJaroWinkler.
 */
@ComparatorClass("sortedJaroWinkler")
public class SortedJaroWinkler extends AbstractSortedComparator {

	public SortedJaroWinkler(Map<String, String> params) {
		// use JaroWinkler as the underlying algorithm, consistently with the class name and
		// with the weight-based constructor below (the original passed a Levenstein here)
		super(params, new com.wcohen.ss.JaroWinkler());
	}

	/**
	 * Instantiates a new sorted jaro winkler.
	 *
	 * @param weight
	 *            the weight
	 */
	public SortedJaroWinkler(final double weight) {
		super(weight, new com.wcohen.ss.JaroWinkler());
	}

	/**
	 * Instantiates a new sorted jaro winkler.
	 *
	 * @param weight
	 *            the weight
	 * @param ssalgo
	 *            the ssalgo
	 */
	protected SortedJaroWinkler(final double weight, final AbstractStringDistance ssalgo) {
		super(weight, ssalgo);
	}

	/*
	 * (non-Javadoc)
	 * @see eu.dnetlib.pace.compare.DistanceAlgo#getWeight()
	 */
	@Override
	public double getWeight() {
		return super.weight;
	}

	/*
	 * (non-Javadoc)
	 * @see eu.dnetlib.pace.compare.SecondStringDistanceAlgo#normalize(double)
	 */
	@Override
	protected double normalize(final double d) {
		return d;
	}

}
@@ -0,0 +1,61 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.tree.support.AbstractSortedComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * The Class SortedLevel2JaroWinkler.
 */
@ComparatorClass("sortedLevel2JaroWinkler")
public class SortedLevel2JaroWinkler extends AbstractSortedComparator {

	/**
	 * Instantiates a new sorted level 2 jaro winkler.
	 *
	 * @param weight
	 *            the weight
	 */
	public SortedLevel2JaroWinkler(final double weight) {
		super(weight, new com.wcohen.ss.Level2JaroWinkler());
	}

	public SortedLevel2JaroWinkler(final Map<String, String> params) {
		super(params, new com.wcohen.ss.Level2JaroWinkler());
	}

	/**
	 * Instantiates a new sorted level 2 jaro winkler.
	 *
	 * @param weight
	 *            the weight
	 * @param ssalgo
	 *            the ssalgo
	 */
	protected SortedLevel2JaroWinkler(final double weight, final AbstractStringDistance ssalgo) {
		super(weight, ssalgo);
	}

	/*
	 * (non-Javadoc)
	 * @see eu.dnetlib.pace.compare.DistanceAlgo#getWeight()
	 */
	@Override
	public double getWeight() {
		return super.weight;
	}

	/*
	 * (non-Javadoc)
	 * @see eu.dnetlib.pace.compare.SecondStringDistanceAlgo#normalize(double)
	 */
	@Override
	protected double normalize(final double d) {
		return d;
	}

}
@@ -0,0 +1,67 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * The Class StringContainsMatch: checks whether the two strings contain a given target string.
 *
 * @author miconis
 * */
@ComparatorClass("stringContainsMatch")
public class StringContainsMatch extends AbstractStringComparator {

	private Map<String, String> params;

	private boolean CASE_SENSITIVE;
	private String STRING;
	private String AGGREGATOR;

	public StringContainsMatch(Map<String, String> params) {
		super(params);
		this.params = params;

		// read parameters
		CASE_SENSITIVE = Boolean.parseBoolean(params.getOrDefault("caseSensitive", "false"));
		STRING = params.get("string");
		AGGREGATOR = params.get("aggregator");
	}

	@Override
	public double distance(final String a, final String b, final Config conf) {

		// guard against a missing "string" parameter, which would otherwise raise a NullPointerException
		if (STRING == null) {
			return 0.0;
		}

		String ca = a;
		String cb = b;
		if (!CASE_SENSITIVE) {
			ca = a.toLowerCase();
			cb = b.toLowerCase();
			STRING = STRING.toLowerCase();
		}

		if (AGGREGATOR != null) {
			switch (AGGREGATOR) {
				case "AND":
					if (ca.contains(STRING) && cb.contains(STRING))
						return 1.0;
					break;
				case "OR":
					if (ca.contains(STRING) || cb.contains(STRING))
						return 1.0;
					break;
				case "XOR":
					if (ca.contains(STRING) ^ cb.contains(STRING))
						return 1.0;
					break;
				default:
					return 0.0;
			}
		}

		return 0.0;
	}
}
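A sketch of the XOR aggregator on plain strings, with the parameters read exactly as in the constructor above (null Config, never dereferenced):

import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.tree.StringContainsMatch;

public class StringContainsMatchExample {
	public static void main(String[] args) {
		Map<String, String> params = new HashMap<>();
		params.put("string", "PhD thesis");
		params.put("aggregator", "XOR");

		StringContainsMatch cmp = new StringContainsMatch(params);

		// exactly one of the two (lower-cased) values contains the target -> 1.0
		System.out.println(cmp.distance("PhD thesis on record linkage", "A survey on record linkage", null));
	}
}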
@@ -0,0 +1,56 @@

package eu.dnetlib.pace.tree;

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.google.common.collect.Sets;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractListComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("stringListMatch")
public class StringListMatch extends AbstractListComparator {

	private static final Log log = LogFactory.getLog(StringListMatch.class);
	private Map<String, String> params;

	private final String TYPE; // "percentage" or "count"

	public StringListMatch(final Map<String, String> params) {
		super(params);
		this.params = params;

		TYPE = params.getOrDefault("type", "percentage");
	}

	@Override
	public double compare(final List<String> a, final List<String> b, final Config conf) {

		final Set<String> pa = new HashSet<>(a);
		final Set<String> pb = new HashSet<>(b);

		if (pa.isEmpty() || pb.isEmpty()) {
			return -1; // return undefined if one of the two lists is empty
		}

		int incommon = Sets.intersection(pa, pb).size();
		int simDiff = Sets.symmetricDifference(pa, pb).size();

		if (incommon + simDiff == 0) {
			return 0.0;
		}

		if (TYPE.equals("percentage"))
			return (double) incommon / (incommon + simDiff);
		else
			return incommon;
	}
}
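A sketch of the "count" mode, which returns the raw size of the intersection instead of a percentage:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.tree.StringListMatch;

public class StringListMatchExample {
	public static void main(String[] args) {
		Map<String, String> params = new HashMap<>();
		params.put("type", "count");

		StringListMatch cmp = new StringListMatch(params);

		// two identifiers in common -> 2.0 in "count" mode (it would be 2/3 in "percentage" mode)
		double score = cmp.compare(
			Arrays.asList("doi:10.1/a", "pmid:1", "arxiv:2"),
			Arrays.asList("doi:10.1/a", "pmid:1"),
			null);
		System.out.println(score);
	}
}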
@@ -0,0 +1,90 @@

package eu.dnetlib.pace.tree;

import java.util.Map;

import org.apache.commons.lang3.StringUtils;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

/**
 * The Class SubStringLevenstein: applies the Levenstein distance to the leading characters only.
 */
@ComparatorClass("subStringLevenstein")
public class SubStringLevenstein extends AbstractStringComparator {

	/**
	 * The number of leading characters to compare.
	 */
	protected int limit;

	/**
	 * Instantiates a new sub string levenstein.
	 *
	 * @param w the weight
	 */
	public SubStringLevenstein(final double w) {
		super(w, new com.wcohen.ss.Levenstein());
	}

	public SubStringLevenstein(Map<String, String> params) {
		super(params, new com.wcohen.ss.Levenstein());
		this.limit = Integer.parseInt(params.getOrDefault("limit", "1"));
	}

	/**
	 * Instantiates a new sub string levenstein.
	 *
	 * @param w the weight
	 * @param limit the limit
	 */
	public SubStringLevenstein(final double w, final int limit) {
		super(w, new com.wcohen.ss.Levenstein());
		this.limit = limit;
	}

	/**
	 * Instantiates a new sub string levenstein.
	 *
	 * @param w the weight
	 * @param limit the limit
	 * @param ssalgo the underlying secondstring distance
	 */
	protected SubStringLevenstein(final double w, final int limit, final AbstractStringDistance ssalgo) {
		super(w, ssalgo);
		this.limit = limit;
	}

	/*
	 * (non-Javadoc)
	 * @see eu.dnetlib.pace.compare.SecondStringDistanceAlgo#compare(eu.dnetlib.pace.model.Field,
	 * eu.dnetlib.pace.model.Field)
	 */
	@Override
	public double distance(final String a, final String b, final Config conf) {
		// delegate to the parent implementation on the truncated strings: calling this.distance here
		// would recurse forever, since StringUtils.left is idempotent
		return super.distance(StringUtils.left(a, limit), StringUtils.left(b, limit), conf);
	}

	/*
	 * (non-Javadoc)
	 * @see eu.dnetlib.pace.compare.DistanceAlgo#getWeight()
	 */
	@Override
	public double getWeight() {
		return super.weight;
	}

	/*
	 * (non-Javadoc)
	 * @see eu.dnetlib.pace.compare.SecondStringDistanceAlgo#normalize(double)
	 */
	@Override
	protected double normalize(final double d) {
		return 1 / Math.pow(Math.abs(d) + 1, 0.1);
	}

}
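A usage sketch: with limit = 10 only the ten-character prefixes take part in the comparison, so the differing suffixes below do not matter. This assumes the parent's distance applies ssalgo and the normalize above in the usual way; the null Config is passed purely for illustration:

import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.tree.SubStringLevenstein;

public class SubStringLevensteinExample {
	public static void main(String[] args) {
		Map<String, String> params = new HashMap<>();
		params.put("limit", "10");

		SubStringLevenstein cmp = new SubStringLevenstein(params);

		// identical 10-character prefixes ("0001-12345"), different suffixes:
		// edit distance 0 on the prefixes should yield the maximum score (1.0 under the normalize above)
		System.out.println(cmp.distance("0001-12345-xyz", "0001-12345-abc", null));
	}
}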