diff --git a/dhp-common/pom.xml b/dhp-common/pom.xml
index 6df11f4ea2..6198bd81ee 100644
--- a/dhp-common/pom.xml
+++ b/dhp-common/pom.xml
@@ -52,6 +52,8 @@
+ <failOnMultipleScalaVersions>true</failOnMultipleScalaVersions>
+ <scalaCompatVersion>${scala.binary.version}</scalaCompatVersion>
<scalaVersion>${scala.version}</scalaVersion>
@@ -60,6 +62,11 @@
+ <dependency>
+ <groupId>eu.dnetlib.dhp</groupId>
+ <artifactId>dhp-pace-core</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<groupId>org.apache.hadoop</groupId>
@@ -76,11 +83,11 @@
<groupId>org.apache.spark</groupId>
- <artifactId>spark-core_2.11</artifactId>
+ <artifactId>spark-core_${scala.binary.version}</artifactId>
<groupId>org.apache.spark</groupId>
- <artifactId>spark-sql_2.11</artifactId>
+ <artifactId>spark-sql_${scala.binary.version}</artifactId>
@@ -142,11 +149,6 @@
<artifactId>okhttp</artifactId>
- <dependency>
- <groupId>eu.dnetlib</groupId>
- <artifactId>dnet-pace-core</artifactId>
- </dependency>
-
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
@@ -159,7 +161,7 @@
<groupId>eu.dnetlib.dhp</groupId>
- <artifactId>dhp-schemas</artifactId>
+ <artifactId>${dhp-schemas.artifact}</artifactId>
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/common/Constants.java b/dhp-common/src/main/java/eu/dnetlib/dhp/common/Constants.java
index a62a0ac799..4f2c6341eb 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/common/Constants.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/common/Constants.java
@@ -10,6 +10,12 @@ public class Constants {
public static final Map<String, String> accessRightsCoarMap = Maps.newHashMap();
public static final Map<String, String> coarCodeLabelMap = Maps.newHashMap();
+ public static final String ROR_NS_PREFIX = "ror_________";
+
+ public static final String ROR_OPENAIRE_ID = "10|openaire____::993a7ae7a863813cf95028b50708e222";
+
+ public static final String ROR_DATASOURCE_NAME = "Research Organization Registry (ROR)";
+
public static String COAR_ACCESS_RIGHT_SCHEMA = "http://vocabularies.coar-repositories.org/documentation/access_rights/";
private Constants() {
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/common/MDStoreInfo.java b/dhp-common/src/main/java/eu/dnetlib/dhp/common/MDStoreInfo.java
new file mode 100644
index 0000000000..bd1ccca50d
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/common/MDStoreInfo.java
@@ -0,0 +1,100 @@
+
+package eu.dnetlib.dhp.common;
+
+/**
+ * This utility class represents the Metadata Store information
+ * needed during the migration from MongoDB to HDFS.
+ */
+public class MDStoreInfo {
+ private String mdstore;
+ private String currentId;
+ private Long latestTimestamp;
+
+ /**
+ * Instantiates a new Md store info.
+ */
+ public MDStoreInfo() {
+ }
+
+ /**
+ * Instantiates a new Md store info.
+ *
+ * @param mdstore the mdstore
+ * @param currentId the current id
+ * @param latestTimestamp the latest timestamp
+ */
+ public MDStoreInfo(String mdstore, String currentId, Long latestTimestamp) {
+ this.mdstore = mdstore;
+ this.currentId = currentId;
+ this.latestTimestamp = latestTimestamp;
+ }
+
+ /**
+ * Gets mdstore.
+ *
+ * @return the mdstore
+ */
+ public String getMdstore() {
+ return mdstore;
+ }
+
+ /**
+ * Sets mdstore.
+ *
+ * @param mdstore the mdstore
+ * @return this MDStoreInfo instance, for chaining
+ */
+ public MDStoreInfo setMdstore(String mdstore) {
+ this.mdstore = mdstore;
+ return this;
+ }
+
+ /**
+ * Gets current id.
+ *
+ * @return the current id
+ */
+ public String getCurrentId() {
+ return currentId;
+ }
+
+ /**
+ * Sets current id.
+ *
+ * @param currentId the current id
+ * @return this MDStoreInfo instance, for chaining
+ */
+ public MDStoreInfo setCurrentId(String currentId) {
+ this.currentId = currentId;
+ return this;
+ }
+
+ /**
+ * Gets latest timestamp.
+ *
+ * @return the latest timestamp
+ */
+ public Long getLatestTimestamp() {
+ return latestTimestamp;
+ }
+
+ /**
+ * Sets latest timestamp.
+ *
+ * @param latestTimestamp the latest timestamp
+ * @return this MDStoreInfo instance, for chaining
+ */
+ public MDStoreInfo setLatestTimestamp(Long latestTimestamp) {
+ this.latestTimestamp = latestTimestamp;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return "MDStoreInfo{" +
+ "mdstore='" + mdstore + '\'' +
+ ", currentId='" + currentId + '\'' +
+ ", latestTimestamp=" + latestTimestamp +
+ '}';
+ }
+}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/common/MakeTarArchive.java b/dhp-common/src/main/java/eu/dnetlib/dhp/common/MakeTarArchive.java
index 43e61e5fca..eca433e9e7 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/common/MakeTarArchive.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/common/MakeTarArchive.java
@@ -45,28 +45,17 @@ public class MakeTarArchive implements Serializable {
.map(Integer::valueOf)
.orElse(10);
- final boolean rename = Optional
- .ofNullable(parser.get("rename"))
- .map(Boolean::valueOf)
- .orElse(Boolean.FALSE);
-
Configuration conf = new Configuration();
conf.set("fs.defaultFS", hdfsNameNode);
FileSystem fileSystem = FileSystem.get(conf);
- makeTArArchive(fileSystem, inputPath, outputPath, gBperSplit, rename);
+ makeTArArchive(fileSystem, inputPath, outputPath, gBperSplit);
}
public static void makeTArArchive(FileSystem fileSystem, String inputPath, String outputPath, int gBperSplit)
throws IOException {
- makeTArArchive(fileSystem, inputPath, outputPath, gBperSplit, false);
- }
-
- public static void makeTArArchive(FileSystem fileSystem, String inputPath, String outputPath, int gBperSplit,
- boolean rename)
- throws IOException {
RemoteIterator<LocatedFileStatus> dirIterator = fileSystem.listLocatedStatus(new Path(inputPath));
@@ -77,7 +66,7 @@ public class MakeTarArchive implements Serializable {
String pathString = p.toString();
String entity = pathString.substring(pathString.lastIndexOf("/") + 1);
- MakeTarArchive.tarMaxSize(fileSystem, pathString, outputPath + "/" + entity, entity, gBperSplit, rename);
+ MakeTarArchive.tarMaxSize(fileSystem, pathString, outputPath + "/" + entity, entity, gBperSplit);
}
}
@@ -90,8 +79,7 @@ public class MakeTarArchive implements Serializable {
return new TarArchiveOutputStream(fileSystem.create(hdfsWritePath).getWrappedStream());
}
- private static void write(FileSystem fileSystem, String inputPath, String outputPath, String dirName,
- boolean rename)
+ private static void write(FileSystem fileSystem, String inputPath, String outputPath, String dirName)
throws IOException {
Path hdfsWritePath = new Path(outputPath);
@@ -107,20 +95,20 @@ public class MakeTarArchive implements Serializable {
new Path(inputPath), true);
while (iterator.hasNext()) {
- writeCurrentFile(fileSystem, dirName, iterator, ar, 0, rename);
+ writeCurrentFile(fileSystem, dirName, iterator, ar, 0);
}
}
}
public static void tarMaxSize(FileSystem fileSystem, String inputPath, String outputPath, String dir_name,
- int gBperSplit, boolean rename) throws IOException {
+ int gBperSplit) throws IOException {
final long bytesPerSplit = 1024L * 1024L * 1024L * gBperSplit;
long sourceSize = fileSystem.getContentSummary(new Path(inputPath)).getSpaceConsumed();
if (sourceSize < bytesPerSplit) {
- write(fileSystem, inputPath, outputPath + ".tar", dir_name, rename);
+ write(fileSystem, inputPath, outputPath + ".tar", dir_name);
} else {
int partNum = 0;
@@ -133,8 +121,7 @@ public class MakeTarArchive implements Serializable {
long currentSize = 0;
while (next && currentSize < bytesPerSplit) {
- currentSize = writeCurrentFile(
- fileSystem, dir_name, fileStatusListIterator, ar, currentSize, rename);
+ currentSize = writeCurrentFile(fileSystem, dir_name, fileStatusListIterator, ar, currentSize);
next = fileStatusListIterator.hasNext();
}
@@ -147,7 +134,7 @@ public class MakeTarArchive implements Serializable {
private static long writeCurrentFile(FileSystem fileSystem, String dirName,
RemoteIterator<LocatedFileStatus> fileStatusListIterator,
- TarArchiveOutputStream ar, long currentSize, boolean rename) throws IOException {
+ TarArchiveOutputStream ar, long currentSize) throws IOException {
LocatedFileStatus fileStatus = fileStatusListIterator.next();
Path p = fileStatus.getPath();
@@ -161,11 +148,6 @@ public class MakeTarArchive implements Serializable {
}
name = tmp;
}
- if (rename) {
- if (name.endsWith(".txt.gz"))
- name = name.replace(".txt.gz", ".json.gz");
- }
-
TarArchiveEntry entry = new TarArchiveEntry(dirName + "/" + name);
entry.setSize(fileStatus.getLen());
currentSize += fileStatus.getLen();
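With the rename flag gone, the archiver is driven purely by the input/output paths and the split size. A minimal caller sketch mirroring what main() now does (the namenode URI and paths below are hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

import eu.dnetlib.dhp.common.MakeTarArchive;

public class MakeTarArchiveSketch {
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://namenode:8020"); // hypothetical namenode
		FileSystem fileSystem = FileSystem.get(conf);
		// one .tar (split into parts capped at 10 GB) per subdirectory of the input path
		MakeTarArchive.makeTArArchive(fileSystem, "/data/graph/dump", "/data/graph/tar", 10);
	}
}
```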
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/common/MdstoreClient.java b/dhp-common/src/main/java/eu/dnetlib/dhp/common/MdstoreClient.java
index d06544ae16..34aa37be5b 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/common/MdstoreClient.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/common/MdstoreClient.java
@@ -1,12 +1,12 @@
package eu.dnetlib.dhp.common;
+import static com.mongodb.client.model.Sorts.descending;
+
import java.io.Closeable;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
+import java.util.*;
+import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.commons.lang3.StringUtils;
@@ -38,6 +38,26 @@ public class MdstoreClient implements Closeable {
this.db = getDb(client, dbName);
}
+ private Long parseTimestamp(Document f) {
+ if (f == null || !f.containsKey("timestamp"))
+ return null;
+
+ Object ts = f.get("timestamp");
+
+ return Long.parseLong(ts.toString());
+ }
+
+ public Long getLatestTimestamp(final String collectionId) {
+ MongoCollection<Document> collection = db.getCollection(collectionId);
+ FindIterable<Document> result = collection.find().sort(descending("timestamp")).limit(1);
+ if (result == null) {
+ return null;
+ }
+
+ Document f = result.first();
+ return parseTimestamp(f);
+ }
+
public MongoCollection<Document> mdStore(final String mdId) {
BasicDBObject query = (BasicDBObject) QueryBuilder.start("mdId").is(mdId).get();
@@ -54,6 +74,16 @@ public class MdstoreClient implements Closeable {
return getColl(db, currentId, true);
}
+ public List<MDStoreInfo> mdStoreWithTimestamp(final String mdFormat, final String mdLayout,
+ final String mdInterpretation) {
+ Map<String, String> res = validCollections(mdFormat, mdLayout, mdInterpretation);
+ return res
+ .entrySet()
+ .stream()
+ .map(e -> new MDStoreInfo(e.getKey(), e.getValue(), getLatestTimestamp(e.getValue())))
+ .collect(Collectors.toList());
+ }
+
public Map<String, String> validCollections(
final String mdFormat, final String mdLayout, final String mdInterpretation) {
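The new mdStoreWithTimestamp pairs each valid collection with the latest timestamp found in it, which is what the mongo-to-HDFS incremental import needs. A usage sketch (the connection string and the format/layout/interpretation values are illustrative):

```java
import java.util.List;

import eu.dnetlib.dhp.common.MDStoreInfo;
import eu.dnetlib.dhp.common.MdstoreClient;

public class MdstoreTimestampSketch {
	public static void main(String[] args) throws Exception {
		try (MdstoreClient client = new MdstoreClient("mongodb://localhost:27017", "mdstore")) {
			List<MDStoreInfo> stores = client.mdStoreWithTimestamp("ODF", "store", "cleaned");
			for (MDStoreInfo info : stores) {
				// latestTimestamp is null when the collection has no timestamp field
				System.out.println(info);
			}
		}
	}
}
```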
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/common/api/ZenodoAPIClient.java b/dhp-common/src/main/java/eu/dnetlib/dhp/common/api/ZenodoAPIClient.java
index 2aeccfcf23..544da78f53 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/common/api/ZenodoAPIClient.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/common/api/ZenodoAPIClient.java
@@ -9,13 +9,13 @@ import java.util.concurrent.TimeUnit;
import org.apache.http.HttpHeaders;
import org.apache.http.entity.ContentType;
+import org.jetbrains.annotations.NotNull;
import com.google.gson.Gson;
import eu.dnetlib.dhp.common.api.zenodo.ZenodoModel;
import eu.dnetlib.dhp.common.api.zenodo.ZenodoModelList;
import okhttp3.*;
-import org.jetbrains.annotations.NotNull;
public class ZenodoAPIClient implements Serializable {
@@ -80,7 +80,7 @@ public class ZenodoAPIClient implements Serializable {
int responseCode = conn.getResponseCode();
conn.disconnect();
- if(!checkOKStatus(responseCode))
+ if (!checkOKStatus(responseCode))
throw new IOException("Unexpected code " + responseCode + body);
ZenodoModel newSubmission = new Gson().fromJson(body, ZenodoModel.class);
@@ -115,7 +115,7 @@ public class ZenodoAPIClient implements Serializable {
}
int responseCode = conn.getResponseCode();
- if(! checkOKStatus(responseCode)){
+ if (!checkOKStatus(responseCode)) {
throw new IOException("Unexpected code " + responseCode + getBody(conn));
}
@@ -126,7 +126,7 @@ public class ZenodoAPIClient implements Serializable {
private String getBody(HttpURLConnection conn) throws IOException {
String body = "{}";
try (BufferedReader br = new BufferedReader(
- new InputStreamReader(conn.getInputStream(), "utf-8"))) {
+ new InputStreamReader(conn.getInputStream(), "utf-8"))) {
StringBuilder response = new StringBuilder();
String responseLine = null;
while ((responseLine = br.readLine()) != null) {
@@ -155,7 +155,6 @@ public class ZenodoAPIClient implements Serializable {
conn.setDoOutput(true);
conn.setRequestMethod("PUT");
-
try (OutputStream os = conn.getOutputStream()) {
byte[] input = metadata.getBytes("utf-8");
os.write(input, 0, input.length);
@@ -164,19 +163,18 @@ public class ZenodoAPIClient implements Serializable {
final int responseCode = conn.getResponseCode();
conn.disconnect();
- if(!checkOKStatus(responseCode))
+ if (!checkOKStatus(responseCode))
throw new IOException("Unexpected code " + responseCode + getBody(conn));
return responseCode;
-
}
- private boolean checkOKStatus(int responseCode) {
+ private boolean checkOKStatus(int responseCode) {
- if(HttpURLConnection.HTTP_OK != responseCode ||
- HttpURLConnection.HTTP_CREATED != responseCode)
- return true ;
+ if (HttpURLConnection.HTTP_OK != responseCode ||
+ HttpURLConnection.HTTP_CREATED != responseCode)
+ return true;
return false;
}
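Note that `HTTP_OK != responseCode || HTTP_CREATED != responseCode` holds for every status code (no code equals both 200 and 201), so `checkOKStatus` always returns true and the `if (!checkOKStatus(...)) throw ...` guards never fire. A sketch of the presumably intended check, assuming only 200 and 201 count as success:

```java
import java.net.HttpURLConnection;

final class StatusCheckSketch {
	// presumably intended logic: only 200 (OK) and 201 (Created) are success codes
	static boolean checkOKStatus(int responseCode) {
		return responseCode == HttpURLConnection.HTTP_OK
			|| responseCode == HttpURLConnection.HTTP_CREATED;
	}
}
```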
@@ -233,7 +231,6 @@ public class ZenodoAPIClient implements Serializable {
conn.setDoOutput(true);
conn.setRequestMethod("POST");
-
try (OutputStream os = conn.getOutputStream()) {
byte[] input = json.getBytes("utf-8");
os.write(input, 0, input.length);
@@ -245,7 +242,7 @@ public class ZenodoAPIClient implements Serializable {
int responseCode = conn.getResponseCode();
conn.disconnect();
- if(!checkOKStatus(responseCode))
+ if (!checkOKStatus(responseCode))
throw new IOException("Unexpected code " + responseCode + body);
ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);
@@ -290,13 +287,12 @@ public class ZenodoAPIClient implements Serializable {
int responseCode = conn.getResponseCode();
conn.disconnect();
- if(!checkOKStatus(responseCode))
+ if (!checkOKStatus(responseCode))
throw new IOException("Unexpected code " + responseCode + body);
ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);
bucket = zenodoModel.getLinks().getBucket();
-
return responseCode;
}
@@ -331,22 +327,16 @@ public class ZenodoAPIClient implements Serializable {
conn.setDoOutput(true);
conn.setRequestMethod("GET");
-
-
String body = getBody(conn);
int responseCode = conn.getResponseCode();
conn.disconnect();
- if(!checkOKStatus(responseCode))
+ if (!checkOKStatus(responseCode))
throw new IOException("Unexpected code " + responseCode + body);
-
-
return body;
-
-
}
private String getBucket(String inputUurl) throws IOException {
@@ -363,15 +353,13 @@ public class ZenodoAPIClient implements Serializable {
int responseCode = conn.getResponseCode();
conn.disconnect();
- if(!checkOKStatus(responseCode))
+ if (!checkOKStatus(responseCode))
throw new IOException("Unexpected code " + responseCode + body);
ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);
return zenodoModel.getLinks().getBucket();
-
-
}
}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/common/vocabulary/Vocabulary.java b/dhp-common/src/main/java/eu/dnetlib/dhp/common/vocabulary/Vocabulary.java
index b3eb98d4fa..2ab23bda6f 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/common/vocabulary/Vocabulary.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/common/vocabulary/Vocabulary.java
@@ -4,6 +4,7 @@ package eu.dnetlib.dhp.common.vocabulary;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
+import java.util.Objects;
import java.util.Optional;
import org.apache.commons.lang3.StringUtils;
@@ -66,21 +67,39 @@ public class Vocabulary implements Serializable {
}
public Qualifier getTermAsQualifier(final String termId) {
- if (StringUtils.isBlank(termId)) {
+ return getTermAsQualifier(termId, false);
+ }
+
+ public Qualifier getTermAsQualifier(final String termId, boolean strict) {
+ final VocabularyTerm term = getTerm(termId);
+ if (Objects.nonNull(term)) {
+ return OafMapperUtils.qualifier(term.getId(), term.getName(), getId(), getName());
+ } else if (Objects.isNull(term) && strict) {
return OafMapperUtils.unknown(getId(), getName());
- } else if (termExists(termId)) {
- final VocabularyTerm t = getTerm(termId);
- return OafMapperUtils.qualifier(t.getId(), t.getName(), getId(), getName());
} else {
return OafMapperUtils.qualifier(termId, termId, getId(), getName());
}
}
public Qualifier getSynonymAsQualifier(final String syn) {
+ return getSynonymAsQualifier(syn, false);
+ }
+
+ public Qualifier getSynonymAsQualifier(final String syn, boolean strict) {
return Optional
.ofNullable(getTermBySynonym(syn))
- .map(term -> getTermAsQualifier(term.getId()))
+ .map(term -> getTermAsQualifier(term.getId(), strict))
.orElse(null);
}
+ public Qualifier lookup(String id) {
+ return lookup(id, false);
+ }
+
+ public Qualifier lookup(String id, boolean strict) {
+ return Optional
+ .ofNullable(getSynonymAsQualifier(id, strict))
+ .orElse(getTermAsQualifier(id, strict));
+ }
+
}
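The strict flag changes what an unresolvable id yields: strict lookups degrade to the UNKNOWN qualifier, while non-strict lookups echo the id back. A small sketch of the difference (vocabulary id and terms are illustrative):

```java
import eu.dnetlib.dhp.common.vocabulary.Vocabulary;
import eu.dnetlib.dhp.schema.oaf.Qualifier;

public class VocabularyLookupSketch {
	public static void main(String[] args) {
		Vocabulary voc = new Vocabulary("dnet:languages", "dnet:languages");
		voc.addTerm("eng", "English");

		Qualifier known = voc.lookup("eng");        // resolved: classname "English"
		Qualifier lax = voc.lookup("xyz");          // non-strict miss: echoes "xyz" back
		Qualifier strict = voc.lookup("xyz", true); // strict miss: falls back to UNKNOWN

		System.out.println(known.getClassname() + " / " + lax.getClassid() + " / " + strict.getClassid());
	}
}
```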
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/common/vocabulary/VocabularyGroup.java b/dhp-common/src/main/java/eu/dnetlib/dhp/common/vocabulary/VocabularyGroup.java
index 1c129ff9ce..fc71752706 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/common/vocabulary/VocabularyGroup.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/common/vocabulary/VocabularyGroup.java
@@ -81,6 +81,13 @@ public class VocabularyGroup implements Serializable {
vocs.put(id.toLowerCase(), new Vocabulary(id, name));
}
+ public Optional<Vocabulary> find(final String vocId) {
+ return Optional
+ .ofNullable(vocId)
+ .map(String::toLowerCase)
+ .map(vocs::get);
+ }
+
public void addTerm(final String vocId, final String id, final String name) {
if (vocabularyExists(vocId)) {
vocs.get(vocId.toLowerCase()).addTerm(id, name);
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/oa/merge/DispatchEntitiesSparkJob.java b/dhp-common/src/main/java/eu/dnetlib/dhp/oa/merge/DispatchEntitiesSparkJob.java
index 3f65d754fc..cf0a183d72 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/oa/merge/DispatchEntitiesSparkJob.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/oa/merge/DispatchEntitiesSparkJob.java
@@ -11,25 +11,18 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
-import org.apache.spark.sql.Encoders;
-import org.apache.spark.sql.SaveMode;
-import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.fasterxml.jackson.databind.ObjectMapper;
-
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
-import eu.dnetlib.dhp.schema.oaf.Oaf;
-import eu.dnetlib.dhp.schema.oaf.OafEntity;
+import eu.dnetlib.dhp.schema.common.ModelSupport;
public class DispatchEntitiesSparkJob {
private static final Logger log = LoggerFactory.getLogger(DispatchEntitiesSparkJob.class);
- private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
public static void main(String[] args) throws Exception {
String jsonConfiguration = IOUtils
@@ -54,44 +47,51 @@ public class DispatchEntitiesSparkJob {
String outputPath = parser.get("outputPath");
log.info("outputPath: {}", outputPath);
- String graphTableClassName = parser.get("graphTableClassName");
- log.info("graphTableClassName: {}", graphTableClassName);
-
- @SuppressWarnings("unchecked")
- Class<? extends OafEntity> entityClazz = (Class<? extends OafEntity>) Class.forName(graphTableClassName);
+ boolean filterInvisible = Boolean.parseBoolean(parser.get("filterInvisible"));
+ log.info("filterInvisible: {}", filterInvisible);
SparkConf conf = new SparkConf();
runWithSparkSession(
conf,
isSparkSessionManaged,
- spark -> {
- HdfsSupport.remove(outputPath, spark.sparkContext().hadoopConfiguration());
- dispatchEntities(spark, inputPath, entityClazz, outputPath);
- });
+ spark -> dispatchEntities(spark, inputPath, outputPath, filterInvisible));
}
- private static <T extends Oaf> void dispatchEntities(
+ private static void dispatchEntities(
SparkSession spark,
String inputPath,
- Class<T> clazz,
- String outputPath) {
+ String outputPath,
+ boolean filterInvisible) {
- spark
- .read()
- .textFile(inputPath)
- .filter((FilterFunction<String>) s -> isEntityType(s, clazz))
- .map((MapFunction<String, String>) s -> StringUtils.substringAfter(s, "|"), Encoders.STRING())
- .map(
- (MapFunction<String, T>) value -> OBJECT_MAPPER.readValue(value, clazz),
- Encoders.bean(clazz))
- .write()
- .mode(SaveMode.Overwrite)
- .option("compression", "gzip")
- .json(outputPath);
+ Dataset<String> df = spark.read().textFile(inputPath);
+
+ ModelSupport.oafTypes.entrySet().parallelStream().forEach(entry -> {
+ String entityType = entry.getKey();
+ Class<?> clazz = entry.getValue();
+
+ final String entityPath = outputPath + "/" + entityType;
+ if (!entityType.equalsIgnoreCase("relation")) {
+ HdfsSupport.remove(entityPath, spark.sparkContext().hadoopConfiguration());
+ Dataset<Row> entityDF = spark
+ .read()
+ .schema(Encoders.bean(clazz).schema())
+ .json(
+ df
+ .filter((FilterFunction<String>) s -> s.startsWith(clazz.getName()))
+ .map(
+ (MapFunction<String, String>) s -> StringUtils.substringAfter(s, "|"),
+ Encoders.STRING()));
+
+ if (filterInvisible) {
+ entityDF = entityDF.filter("dataInfo.invisible != true");
+ }
+
+ entityDF
+ .write()
+ .mode(SaveMode.Overwrite)
+ .option("compression", "gzip")
+ .json(entityPath);
+ }
+ });
}
-
- private static <T extends Oaf> boolean isEntityType(final String s, final Class<T> clazz) {
- return StringUtils.substringBefore(s, "|").equals(clazz.getName());
- }
-
}
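For reference, the job consumes the `<classname>|<json>` lines produced by the upstream merge step; the `startsWith(clazz.getName())` filter and the `substringAfter` call above rely on exactly that shape. An illustrative line (the id value is made up):

```java
import org.apache.commons.lang3.StringUtils;

public class DispatchLineSketch {
	public static void main(String[] args) {
		// shape produced by the merge step: concrete class name, '|', JSON payload
		String line = "eu.dnetlib.dhp.schema.oaf.Publication" + "|" + "{\"id\":\"50|doi_________::abc\"}";

		boolean isPublication = line.startsWith("eu.dnetlib.dhp.schema.oaf.Publication");
		String json = StringUtils.substringAfter(line, "|");
		System.out.println(isPublication + " -> " + json);
	}
}
```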
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/DoiCleaningRule.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/DoiCleaningRule.java
new file mode 100644
index 0000000000..1a74826851
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/DoiCleaningRule.java
@@ -0,0 +1,14 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+public class DoiCleaningRule {
+
+ public static String clean(final String doi) {
+ return doi
+ .toLowerCase()
+ .replaceAll("\\s", "")
+ .replaceAll("^doi:", "")
+ .replaceFirst(CleaningFunctions.DOI_PREFIX_REGEX, CleaningFunctions.DOI_PREFIX);
+ }
+
+}
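Assuming `CleaningFunctions.DOI_PREFIX_REGEX`/`DOI_PREFIX` normalise the leading `10.` prefix, the rule lowercases, strips whitespace and drops a leading `doi:`. A couple of illustrative calls:

```java
import eu.dnetlib.dhp.schema.oaf.utils.DoiCleaningRule;

public class DoiCleaningSketch {
	public static void main(String[] args) {
		// both inputs should normalise to "10.1000/abc" (hypothetical DOI)
		System.out.println(DoiCleaningRule.clean("doi:10.1000/ABC"));
		System.out.println(DoiCleaningRule.clean(" 10.1000/abc "));
	}
}
```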
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/FundRefCleaningRule.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/FundRefCleaningRule.java
new file mode 100644
index 0000000000..a267b8b88e
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/FundRefCleaningRule.java
@@ -0,0 +1,25 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class FundRefCleaningRule {
+
+ public static final Pattern PATTERN = Pattern.compile("\\d+");
+
+ public static String clean(final String fundRefId) {
+
+ String s = fundRefId
+ .toLowerCase()
+ .replaceAll("\\s", "");
+
+ Matcher m = PATTERN.matcher(s);
+ if (m.find()) {
+ return m.group();
+ } else {
+ return "";
+ }
+ }
+
+}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/GraphCleaningFunctions.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/GraphCleaningFunctions.java
index 351bd2dd5f..8afa41f952 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/GraphCleaningFunctions.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/GraphCleaningFunctions.java
@@ -1,6 +1,8 @@
package eu.dnetlib.dhp.schema.oaf.utils;
+import static eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils.getProvenance;
+
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
@@ -16,7 +18,6 @@ import com.github.sisyphsu.dateparser.DateParserUtils;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-import eu.dnetlib.dhp.common.vocabulary.Vocabulary;
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.common.ModelSupport;
@@ -34,61 +35,197 @@ public class GraphCleaningFunctions extends CleaningFunctions {
public static final String TITLE_FILTER_REGEX = String.format("(%s)|\\W|\\d", TITLE_TEST);
public static final int TITLE_FILTER_RESIDUAL_LENGTH = 5;
+ private static final String NAME_CLEANING_REGEX = "[\\r\\n\\t\\s]+";
+
+ public static <T extends Oaf> T cleanContext(T value, String contextId, String verifyParam) {
+ if (ModelSupport.isSubClass(value, Result.class)) {
+ final Result res = (Result) value;
+ if (shouldCleanContext(res, verifyParam)) {
+ res
+ .setContext(
+ res
+ .getContext()
+ .stream()
+ .filter(c -> !StringUtils.startsWith(c.getId().toLowerCase(), contextId))
+ .collect(Collectors.toList()));
+ }
+ return (T) res;
+ } else {
+ return value;
+ }
+ }
+
+ private static boolean shouldCleanContext(Result res, String verifyParam) {
+ boolean titleMatch = res
+ .getTitle()
+ .stream()
+ .filter(
+ t -> t
+ .getQualifier()
+ .getClassid()
+ .equalsIgnoreCase(ModelConstants.MAIN_TITLE_QUALIFIER.getClassid()))
+ .anyMatch(t -> t.getValue().toLowerCase().startsWith(verifyParam.toLowerCase()));
+
+ return titleMatch && Objects.nonNull(res.getContext());
+ }
+
+ public static <T extends Oaf> T cleanCountry(T value, String[] verifyParam, Set<String> hostedBy,
+ String collectedfrom, String country) {
+ if (ModelSupport.isSubClass(value, Result.class)) {
+ final Result res = (Result) value;
+ if (res.getInstance().stream().anyMatch(i -> hostedBy.contains(i.getHostedby().getKey())) ||
+ !res.getCollectedfrom().stream().anyMatch(cf -> cf.getValue().equals(collectedfrom))) {
+ return (T) res;
+ }
+
+ List<StructuredProperty> ids = getPidsAndAltIds(res).collect(Collectors.toList());
+ if (ids
+ .stream()
+ .anyMatch(
+ p -> p
+ .getQualifier()
+ .getClassid()
+ .equals(PidType.doi.toString()) && pidInParam(p.getValue(), verifyParam))) {
+ res
+ .setCountry(
+ res
+ .getCountry()
+ .stream()
+ .filter(
+ c -> toTakeCountry(c, country))
+ .collect(Collectors.toList()));
+ }
+
+ return (T) res;
+ } else {
+ return value;
+ }
+ }
+
+ private static <T extends Result> Stream<StructuredProperty> getPidsAndAltIds(T r) {
+ final Stream<StructuredProperty> resultPids = Optional
+ .ofNullable(r.getPid())
+ .map(Collection::stream)
+ .orElse(Stream.empty());
+
+ final Stream<StructuredProperty> instancePids = Optional
+ .ofNullable(r.getInstance())
+ .map(
+ instance -> instance
+ .stream()
+ .flatMap(
+ i -> Optional
+ .ofNullable(i.getPid())
+ .map(Collection::stream)
+ .orElse(Stream.empty())))
+ .orElse(Stream.empty());
+
+ final Stream<StructuredProperty> instanceAltIds = Optional
+ .ofNullable(r.getInstance())
+ .map(
+ instance -> instance
+ .stream()
+ .flatMap(
+ i -> Optional
+ .ofNullable(i.getAlternateIdentifier())
+ .map(Collection::stream)
+ .orElse(Stream.empty())))
+ .orElse(Stream.empty());
+
+ return Stream
+ .concat(
+ Stream.concat(resultPids, instancePids),
+ instanceAltIds);
+ }
+
+ private static boolean pidInParam(String value, String[] verifyParam) {
+ for (String s : verifyParam)
+ if (value.startsWith(s))
+ return true;
+ return false;
+ }
+
+ private static boolean toTakeCountry(Country c, String country) {
+ // if dataInfo is not set, or dataInfo.inferenceprovenance is missing, the country
+ // cannot have been inserted via propagation, so it is kept
+ if (!Optional.ofNullable(c.getDataInfo()).isPresent())
+ return true;
+ if (!Optional.ofNullable(c.getDataInfo().getInferenceprovenance()).isPresent())
+ return true;
+ return !(c
+ .getClassid()
+ .equalsIgnoreCase(country) &&
+ c.getDataInfo().getInferenceprovenance().equals("propagation"));
+ }
public static <T extends Oaf> T fixVocabularyNames(T value) {
- if (value instanceof Datasource) {
- // nothing to clean here
- } else if (value instanceof Project) {
- // nothing to clean here
- } else if (value instanceof Organization) {
- Organization o = (Organization) value;
- if (Objects.nonNull(o.getCountry())) {
- fixVocabName(o.getCountry(), ModelConstants.DNET_COUNTRY_TYPE);
+ if (value instanceof OafEntity) {
+
+ OafEntity e = (OafEntity) value;
+
+ Optional
+ .ofNullable(e.getPid())
+ .ifPresent(pid -> pid.forEach(p -> fixVocabName(p.getQualifier(), ModelConstants.DNET_PID_TYPES)));
+
+ if (value instanceof Result) {
+ Result r = (Result) value;
+
+ fixVocabName(r.getLanguage(), ModelConstants.DNET_LANGUAGES);
+ fixVocabName(r.getResourcetype(), ModelConstants.DNET_DATA_CITE_RESOURCE);
+ fixVocabName(r.getBestaccessright(), ModelConstants.DNET_ACCESS_MODES);
+
+ if (Objects.nonNull(r.getSubject())) {
+ r.getSubject().forEach(s -> fixVocabName(s.getQualifier(), ModelConstants.DNET_SUBJECT_TYPOLOGIES));
+ }
+ if (Objects.nonNull(r.getInstance())) {
+ for (Instance i : r.getInstance()) {
+ fixVocabName(i.getAccessright(), ModelConstants.DNET_ACCESS_MODES);
+ fixVocabName(i.getRefereed(), ModelConstants.DNET_REVIEW_LEVELS);
+ Optional
+ .ofNullable(i.getPid())
+ .ifPresent(
+ pid -> pid.forEach(p -> fixVocabName(p.getQualifier(), ModelConstants.DNET_PID_TYPES)));
+
+ }
+ }
+ if (Objects.nonNull(r.getAuthor())) {
+ r.getAuthor().stream().filter(Objects::nonNull).forEach(a -> {
+ if (Objects.nonNull(a.getPid())) {
+ a.getPid().stream().filter(Objects::nonNull).forEach(p -> {
+ fixVocabName(p.getQualifier(), ModelConstants.DNET_PID_TYPES);
+ });
+ }
+ });
+ }
+ if (value instanceof Publication) {
+
+ } else if (value instanceof Dataset) {
+
+ } else if (value instanceof OtherResearchProduct) {
+
+ } else if (value instanceof Software) {
+
+ }
+ } else if (value instanceof Datasource) {
+ // nothing to clean here
+ } else if (value instanceof Project) {
+ // nothing to clean here
+ } else if (value instanceof Organization) {
+ Organization o = (Organization) value;
+ if (Objects.nonNull(o.getCountry())) {
+ fixVocabName(o.getCountry(), ModelConstants.DNET_COUNTRY_TYPE);
+ }
+
}
} else if (value instanceof Relation) {
// nothing to clean here
- } else if (value instanceof Result) {
-
- Result r = (Result) value;
-
- fixVocabName(r.getLanguage(), ModelConstants.DNET_LANGUAGES);
- fixVocabName(r.getResourcetype(), ModelConstants.DNET_DATA_CITE_RESOURCE);
- fixVocabName(r.getBestaccessright(), ModelConstants.DNET_ACCESS_MODES);
-
- if (Objects.nonNull(r.getSubject())) {
- r.getSubject().forEach(s -> fixVocabName(s.getQualifier(), ModelConstants.DNET_SUBJECT_TYPOLOGIES));
- }
- if (Objects.nonNull(r.getInstance())) {
- for (Instance i : r.getInstance()) {
- fixVocabName(i.getAccessright(), ModelConstants.DNET_ACCESS_MODES);
- fixVocabName(i.getRefereed(), ModelConstants.DNET_REVIEW_LEVELS);
- }
- }
- if (Objects.nonNull(r.getAuthor())) {
- r.getAuthor().stream().filter(Objects::nonNull).forEach(a -> {
- if (Objects.nonNull(a.getPid())) {
- a.getPid().stream().filter(Objects::nonNull).forEach(p -> {
- fixVocabName(p.getQualifier(), ModelConstants.DNET_PID_TYPES);
- });
- }
- });
- }
- if (value instanceof Publication) {
-
- } else if (value instanceof Dataset) {
-
- } else if (value instanceof OtherResearchProduct) {
-
- } else if (value instanceof Software) {
-
- }
}
return value;
}
public static <T extends Oaf> boolean filter(T value) {
- if (Boolean.TRUE
+ if (!(value instanceof Relation) && (Boolean.TRUE
.equals(
Optional
.ofNullable(value)
@@ -99,15 +236,16 @@ public class GraphCleaningFunctions extends CleaningFunctions {
d -> Optional
.ofNullable(d.getInvisible())
.orElse(true))
- .orElse(true))
- .orElse(true))) {
+ .orElse(false))
+ .orElse(true)))) {
return true;
}
if (value instanceof Datasource) {
// nothing to evaluate here
} else if (value instanceof Project) {
- // nothing to evaluate here
+ final Project p = (Project) value;
+ return Objects.nonNull(p.getCode()) && StringUtils.isNotBlank(p.getCode().getValue());
} else if (value instanceof Organization) {
// nothing to evaluate here
} else if (value instanceof Relation) {
@@ -134,15 +272,343 @@ public class GraphCleaningFunctions extends CleaningFunctions {
}
public static <T extends Oaf> T cleanup(T value, VocabularyGroup vocs) {
- if (value instanceof Datasource) {
- // nothing to clean here
- } else if (value instanceof Project) {
- // nothing to clean here
- } else if (value instanceof Organization) {
- Organization o = (Organization) value;
- if (Objects.isNull(o.getCountry()) || StringUtils.isBlank(o.getCountry().getClassid())) {
- o.setCountry(ModelConstants.UNKNOWN_COUNTRY);
+
+ if (value instanceof OafEntity) {
+
+ OafEntity e = (OafEntity) value;
+ if (Objects.nonNull(e.getPid())) {
+ e.setPid(processPidCleaning(e.getPid()));
}
+
+ if (value instanceof Datasource) {
+ // nothing to clean here
+ } else if (value instanceof Project) {
+ // nothing to clean here
+ } else if (value instanceof Organization) {
+ Organization o = (Organization) value;
+ if (Objects.isNull(o.getCountry()) || StringUtils.isBlank(o.getCountry().getClassid())) {
+ o.setCountry(ModelConstants.UNKNOWN_COUNTRY);
+ }
+ } else if (value instanceof Result) {
+ Result r = (Result) value;
+
+ if (Objects.nonNull(r.getFulltext())
+ && (ModelConstants.SOFTWARE_RESULTTYPE_CLASSID.equals(r.getResulttype().getClassid()) ||
+ ModelConstants.DATASET_RESULTTYPE_CLASSID.equals(r.getResulttype().getClassid()))) {
+ r.setFulltext(null);
+
+ }
+
+ if (Objects.nonNull(r.getDateofacceptance())) {
+ Optional<String> date = cleanDateField(r.getDateofacceptance());
+ if (date.isPresent()) {
+ r.getDateofacceptance().setValue(date.get());
+ } else {
+ r.setDateofacceptance(null);
+ }
+ }
+ if (Objects.nonNull(r.getRelevantdate())) {
+ r
+ .setRelevantdate(
+ r
+ .getRelevantdate()
+ .stream()
+ .filter(Objects::nonNull)
+ .filter(sp -> Objects.nonNull(sp.getQualifier()))
+ .filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
+ .map(sp -> {
+ sp.setValue(GraphCleaningFunctions.cleanDate(sp.getValue()));
+ return sp;
+ })
+ .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
+ .collect(Collectors.toList()));
+ }
+ if (Objects.nonNull(r.getPublisher())) {
+ if (StringUtils.isBlank(r.getPublisher().getValue())) {
+ r.setPublisher(null);
+ } else {
+ r
+ .getPublisher()
+ .setValue(
+ r
+ .getPublisher()
+ .getValue()
+ .replaceAll(NAME_CLEANING_REGEX, " "));
+ }
+ }
+ if (Objects.isNull(r.getLanguage()) || StringUtils.isBlank(r.getLanguage().getClassid())) {
+ r
+ .setLanguage(
+ qualifier("und", "Undetermined", ModelConstants.DNET_LANGUAGES));
+ }
+ if (Objects.nonNull(r.getSubject())) {
+ List<Subject> subjects = Lists
+ .newArrayList(
+ r
+ .getSubject()
+ .stream()
+ .filter(Objects::nonNull)
+ .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
+ .filter(sp -> Objects.nonNull(sp.getQualifier()))
+ .filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
+ .map(s -> {
+ if ("dnet:result_subject".equals(s.getQualifier().getClassid())) {
+ s.getQualifier().setClassid(ModelConstants.DNET_SUBJECT_TYPOLOGIES);
+ s.getQualifier().setClassname(ModelConstants.DNET_SUBJECT_TYPOLOGIES);
+ }
+ return s;
+ })
+ .map(GraphCleaningFunctions::cleanValue)
+ .collect(
+ Collectors
+ .toMap(
+ s -> Optional
+ .ofNullable(s.getQualifier())
+ .map(q -> q.getClassid() + s.getValue())
+ .orElse(s.getValue()),
+ Function.identity(),
+ (s1, s2) -> Collections
+ .min(Lists.newArrayList(s1, s2), new SubjectProvenanceComparator())))
+ .values());
+ r.setSubject(subjects);
+ }
+ if (Objects.nonNull(r.getTitle())) {
+ r
+ .setTitle(
+ r
+ .getTitle()
+ .stream()
+ .filter(Objects::nonNull)
+ .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
+ .filter(
+ sp -> {
+ final String title = sp
+ .getValue()
+ .toLowerCase();
+ final String decoded = Unidecode.decode(title);
+
+ if (StringUtils.contains(decoded, TITLE_TEST)) {
+ return decoded
+ .replaceAll(TITLE_FILTER_REGEX, "")
+ .length() > TITLE_FILTER_RESIDUAL_LENGTH;
+ }
+ return !decoded
+ .replaceAll("\\W|\\d", "")
+ .isEmpty();
+ })
+ .map(GraphCleaningFunctions::cleanValue)
+ .collect(Collectors.toList()));
+ }
+ if (Objects.nonNull(r.getFormat())) {
+ r
+ .setFormat(
+ r
+ .getFormat()
+ .stream()
+ .map(GraphCleaningFunctions::cleanValue)
+ .collect(Collectors.toList()));
+ }
+ if (Objects.nonNull(r.getDescription())) {
+ r
+ .setDescription(
+ r
+ .getDescription()
+ .stream()
+ .filter(Objects::nonNull)
+ .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
+ .map(GraphCleaningFunctions::cleanValue)
+ .collect(Collectors.toList()));
+ }
+ if (Objects.isNull(r.getResourcetype()) || StringUtils.isBlank(r.getResourcetype().getClassid())) {
+ r
+ .setResourcetype(
+ qualifier(ModelConstants.UNKNOWN, "Unknown", ModelConstants.DNET_DATA_CITE_RESOURCE));
+ }
+ if (Objects.nonNull(r.getInstance())) {
+
+ for (Instance i : r.getInstance()) {
+ if (!vocs
+ .termExists(ModelConstants.DNET_PUBLICATION_RESOURCE, i.getInstancetype().getClassid())) {
+ if (r instanceof Publication) {
+ i
+ .setInstancetype(
+ OafMapperUtils
+ .qualifier(
+ "0038", "Other literature type",
+ ModelConstants.DNET_PUBLICATION_RESOURCE,
+ ModelConstants.DNET_PUBLICATION_RESOURCE));
+ } else if (r instanceof Dataset) {
+ i
+ .setInstancetype(
+ OafMapperUtils
+ .qualifier(
+ "0039", "Other dataset type", ModelConstants.DNET_PUBLICATION_RESOURCE,
+ ModelConstants.DNET_PUBLICATION_RESOURCE));
+ } else if (r instanceof Software) {
+ i
+ .setInstancetype(
+ OafMapperUtils
+ .qualifier(
+ "0040", "Other software type", ModelConstants.DNET_PUBLICATION_RESOURCE,
+ ModelConstants.DNET_PUBLICATION_RESOURCE));
+ } else if (r instanceof OtherResearchProduct) {
+ i
+ .setInstancetype(
+ OafMapperUtils
+ .qualifier(
+ "0020", "Other ORP type", ModelConstants.DNET_PUBLICATION_RESOURCE,
+ ModelConstants.DNET_PUBLICATION_RESOURCE));
+ }
+ }
+
+ if (Objects.nonNull(i.getPid())) {
+ i.setPid(processPidCleaning(i.getPid()));
+ }
+ if (Objects.nonNull(i.getAlternateIdentifier())) {
+ i.setAlternateIdentifier(processPidCleaning(i.getAlternateIdentifier()));
+ }
+ Optional
+ .ofNullable(i.getPid())
+ .ifPresent(pid -> {
+ final Set<StructuredProperty> pids = Sets.newHashSet(pid);
+ Optional
+ .ofNullable(i.getAlternateIdentifier())
+ .ifPresent(altId -> {
+ final Set<StructuredProperty> altIds = Sets.newHashSet(altId);
+ i.setAlternateIdentifier(Lists.newArrayList(Sets.difference(altIds, pids)));
+ });
+ });
+
+ if (Objects.isNull(i.getAccessright())
+ || StringUtils.isBlank(i.getAccessright().getClassid())) {
+ i
+ .setAccessright(
+ accessRight(
+ ModelConstants.UNKNOWN, ModelConstants.NOT_AVAILABLE,
+ ModelConstants.DNET_ACCESS_MODES));
+ }
+ if (Objects.isNull(i.getHostedby()) || StringUtils.isBlank(i.getHostedby().getKey())) {
+ i.setHostedby(ModelConstants.UNKNOWN_REPOSITORY);
+ }
+ if (Objects.isNull(i.getRefereed()) || StringUtils.isBlank(i.getRefereed().getClassid())) {
+ i.setRefereed(qualifier("0000", "Unknown", ModelConstants.DNET_REVIEW_LEVELS));
+ }
+ if (Objects.nonNull(i.getDateofacceptance())) {
+ Optional<String> date = cleanDateField(i.getDateofacceptance());
+ if (date.isPresent()) {
+ i.getDateofacceptance().setValue(date.get());
+ } else {
+ i.setDateofacceptance(null);
+ }
+ }
+ if (StringUtils.isNotBlank(i.getFulltext()) &&
+ (ModelConstants.SOFTWARE_RESULTTYPE_CLASSID.equals(r.getResulttype().getClassid()) ||
+ ModelConstants.DATASET_RESULTTYPE_CLASSID.equals(r.getResulttype().getClassid()))) {
+ i.setFulltext(null);
+ }
+ }
+ }
+ if (Objects.isNull(r.getBestaccessright())
+ || StringUtils.isBlank(r.getBestaccessright().getClassid())) {
+ Qualifier bestaccessrights = OafMapperUtils.createBestAccessRights(r.getInstance());
+ if (Objects.isNull(bestaccessrights)) {
+ r
+ .setBestaccessright(
+ qualifier(
+ ModelConstants.UNKNOWN, ModelConstants.NOT_AVAILABLE,
+ ModelConstants.DNET_ACCESS_MODES));
+ } else {
+ r.setBestaccessright(bestaccessrights);
+ }
+ }
+ if (Objects.nonNull(r.getAuthor())) {
+ r
+ .setAuthor(
+ r
+ .getAuthor()
+ .stream()
+ .filter(Objects::nonNull)
+ .filter(a -> StringUtils.isNotBlank(a.getFullname()))
+ .filter(a -> StringUtils.isNotBlank(a.getFullname().replaceAll("[\\W]", "")))
+ .map(GraphCleaningFunctions::cleanupAuthor)
+ .collect(Collectors.toList()));
+
+ boolean nullRank = r
+ .getAuthor()
+ .stream()
+ .anyMatch(a -> Objects.isNull(a.getRank()));
+ if (nullRank) {
+ int i = 1;
+ for (Author author : r.getAuthor()) {
+ author.setRank(i++);
+ }
+ }
+
+ for (Author a : r.getAuthor()) {
+ if (Objects.isNull(a.getPid())) {
+ a.setPid(Lists.newArrayList());
+ } else {
+ a
+ .setPid(
+ a
+ .getPid()
+ .stream()
+ .filter(Objects::nonNull)
+ .filter(p -> Objects.nonNull(p.getQualifier()))
+ .filter(p -> StringUtils.isNotBlank(p.getValue()))
+ .map(p -> {
+ // hack to distinguish orcid from orcid_pending
+ String pidProvenance = getProvenance(p.getDataInfo());
+ if (p
+ .getQualifier()
+ .getClassid()
+ .toLowerCase()
+ .contains(ModelConstants.ORCID)) {
+ if (pidProvenance
+ .equals(ModelConstants.SYSIMPORT_CROSSWALK_ENTITYREGISTRY)) {
+ p.getQualifier().setClassid(ModelConstants.ORCID);
+ } else {
+ p.getQualifier().setClassid(ModelConstants.ORCID_PENDING);
+ }
+ final String orcid = p
+ .getValue()
+ .trim()
+ .toLowerCase()
+ .replaceAll(ORCID_CLEANING_REGEX, "$1-$2-$3-$4");
+ if (orcid.length() == ORCID_LEN) {
+ p.setValue(orcid);
+ } else {
+ p.setValue("");
+ }
+ }
+ return p;
+ })
+ .filter(p -> StringUtils.isNotBlank(p.getValue()))
+ .collect(
+ Collectors
+ .toMap(
+ p -> p.getQualifier().getClassid() + p.getValue(),
+ Function.identity(),
+ (p1, p2) -> p1,
+ LinkedHashMap::new))
+ .values()
+ .stream()
+ .collect(Collectors.toList()));
+ }
+ }
+ }
+ if (value instanceof Publication) {
+
+ } else if (value instanceof Dataset) {
+
+ } else if (value instanceof OtherResearchProduct) {
+
+ } else if (value instanceof Software) {
+
+ }
+
+ }
+
} else if (value instanceof Relation) {
Relation r = (Relation) value;
@@ -154,294 +620,40 @@ public class GraphCleaningFunctions extends CleaningFunctions {
r.setValidationDate(null);
r.setValidated(false);
}
- } else if (value instanceof Result) {
-
- Result r = (Result) value;
-
- if (Objects.nonNull(r.getDateofacceptance())) {
- Optional<String> date = cleanDateField(r.getDateofacceptance());
- if (date.isPresent()) {
- r.getDateofacceptance().setValue(date.get());
- } else {
- r.setDateofacceptance(null);
- }
- }
- if (Objects.nonNull(r.getRelevantdate())) {
- r
- .setRelevantdate(
- r
- .getRelevantdate()
- .stream()
- .filter(Objects::nonNull)
- .filter(sp -> Objects.nonNull(sp.getQualifier()))
- .filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
- .map(sp -> {
- sp.setValue(GraphCleaningFunctions.cleanDate(sp.getValue()));
- return sp;
- })
- .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
- .collect(Collectors.toList()));
- }
- if (Objects.nonNull(r.getPublisher()) && StringUtils.isBlank(r.getPublisher().getValue())) {
- r.setPublisher(null);
- }
- if (Objects.isNull(r.getLanguage()) || StringUtils.isBlank(r.getLanguage().getClassid())) {
- r
- .setLanguage(
- qualifier("und", "Undetermined", ModelConstants.DNET_LANGUAGES));
- }
- if (Objects.nonNull(r.getSubject())) {
- r
- .setSubject(
- r
- .getSubject()
- .stream()
- .filter(Objects::nonNull)
- .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
- .filter(sp -> Objects.nonNull(sp.getQualifier()))
- .filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
- .map(GraphCleaningFunctions::cleanValue)
- .collect(Collectors.toList()));
- }
- if (Objects.nonNull(r.getTitle())) {
- r
- .setTitle(
- r
- .getTitle()
- .stream()
- .filter(Objects::nonNull)
- .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
- .filter(
- sp -> {
- final String title = sp
- .getValue()
- .toLowerCase();
- final String decoded = Unidecode.decode(title);
-
- if (StringUtils.contains(decoded, TITLE_TEST)) {
- return decoded
- .replaceAll(TITLE_FILTER_REGEX, "")
- .length() > TITLE_FILTER_RESIDUAL_LENGTH;
- }
- return !decoded
- .replaceAll("\\W|\\d", "")
- .isEmpty();
- })
- .map(GraphCleaningFunctions::cleanValue)
- .collect(Collectors.toList()));
- }
- if (Objects.nonNull(r.getFormat())) {
- r
- .setFormat(
- r
- .getFormat()
- .stream()
- .map(GraphCleaningFunctions::cleanValue)
- .collect(Collectors.toList()));
- }
- if (Objects.nonNull(r.getDescription())) {
- r
- .setDescription(
- r
- .getDescription()
- .stream()
- .filter(Objects::nonNull)
- .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
- .map(GraphCleaningFunctions::cleanValue)
- .collect(Collectors.toList()));
- }
- if (Objects.nonNull(r.getPid())) {
- r.setPid(processPidCleaning(r.getPid()));
- }
- if (Objects.isNull(r.getResourcetype()) || StringUtils.isBlank(r.getResourcetype().getClassid())) {
- r
- .setResourcetype(
- qualifier(ModelConstants.UNKNOWN, "Unknown", ModelConstants.DNET_DATA_CITE_RESOURCE));
- }
- if (Objects.nonNull(r.getInstance())) {
-
- for (Instance i : r.getInstance()) {
- if (!vocs.termExists(ModelConstants.DNET_PUBLICATION_RESOURCE, i.getInstancetype().getClassid())) {
- if (r instanceof Publication) {
- i
- .setInstancetype(
- OafMapperUtils
- .qualifier(
- "0038", "Other literature type", ModelConstants.DNET_PUBLICATION_RESOURCE,
- ModelConstants.DNET_PUBLICATION_RESOURCE));
- } else if (r instanceof Dataset) {
- i
- .setInstancetype(
- OafMapperUtils
- .qualifier(
- "0039", "Other dataset type", ModelConstants.DNET_PUBLICATION_RESOURCE,
- ModelConstants.DNET_PUBLICATION_RESOURCE));
- } else if (r instanceof Software) {
- i
- .setInstancetype(
- OafMapperUtils
- .qualifier(
- "0040", "Other software type", ModelConstants.DNET_PUBLICATION_RESOURCE,
- ModelConstants.DNET_PUBLICATION_RESOURCE));
- } else if (r instanceof OtherResearchProduct) {
- i
- .setInstancetype(
- OafMapperUtils
- .qualifier(
- "0020", "Other ORP type", ModelConstants.DNET_PUBLICATION_RESOURCE,
- ModelConstants.DNET_PUBLICATION_RESOURCE));
- }
- }
-
- if (Objects.nonNull(i.getPid())) {
- i.setPid(processPidCleaning(i.getPid()));
- }
- if (Objects.nonNull(i.getAlternateIdentifier())) {
- i.setAlternateIdentifier(processPidCleaning(i.getAlternateIdentifier()));
- }
- Optional
- .ofNullable(i.getPid())
- .ifPresent(pid -> {
- final Set<StructuredProperty> pids = Sets.newHashSet(pid);
- Optional
- .ofNullable(i.getAlternateIdentifier())
- .ifPresent(altId -> {
- final Set<StructuredProperty> altIds = Sets.newHashSet(altId);
- i.setAlternateIdentifier(Lists.newArrayList(Sets.difference(altIds, pids)));
- });
- });
-
- if (Objects.isNull(i.getAccessright()) || StringUtils.isBlank(i.getAccessright().getClassid())) {
- i
- .setAccessright(
- accessRight(
- ModelConstants.UNKNOWN, ModelConstants.NOT_AVAILABLE,
- ModelConstants.DNET_ACCESS_MODES));
- }
- if (Objects.isNull(i.getHostedby()) || StringUtils.isBlank(i.getHostedby().getKey())) {
- i.setHostedby(ModelConstants.UNKNOWN_REPOSITORY);
- }
- if (Objects.isNull(i.getRefereed())) {
- i.setRefereed(qualifier("0000", "Unknown", ModelConstants.DNET_REVIEW_LEVELS));
- }
- if (Objects.nonNull(i.getDateofacceptance())) {
- Optional<String> date = cleanDateField(i.getDateofacceptance());
- if (date.isPresent()) {
- i.getDateofacceptance().setValue(date.get());
- } else {
- i.setDateofacceptance(null);
- }
- }
- }
- }
- if (Objects.isNull(r.getBestaccessright()) || StringUtils.isBlank(r.getBestaccessright().getClassid())) {
- Qualifier bestaccessrights = OafMapperUtils.createBestAccessRights(r.getInstance());
- if (Objects.isNull(bestaccessrights)) {
- r
- .setBestaccessright(
- qualifier(
- ModelConstants.UNKNOWN, ModelConstants.NOT_AVAILABLE,
- ModelConstants.DNET_ACCESS_MODES));
- } else {
- r.setBestaccessright(bestaccessrights);
- }
- }
- if (Objects.nonNull(r.getAuthor())) {
- r
- .setAuthor(
- r
- .getAuthor()
- .stream()
- .filter(Objects::nonNull)
- .filter(a -> StringUtils.isNotBlank(a.getFullname()))
- .filter(a -> StringUtils.isNotBlank(a.getFullname().replaceAll("[\\W]", "")))
- .collect(Collectors.toList()));
-
- boolean nullRank = r
- .getAuthor()
- .stream()
- .anyMatch(a -> Objects.isNull(a.getRank()));
- if (nullRank) {
- int i = 1;
- for (Author author : r.getAuthor()) {
- author.setRank(i++);
- }
- }
-
- for (Author a : r.getAuthor()) {
- if (Objects.isNull(a.getPid())) {
- a.setPid(Lists.newArrayList());
- } else {
- a
- .setPid(
- a
- .getPid()
- .stream()
- .filter(Objects::nonNull)
- .filter(p -> Objects.nonNull(p.getQualifier()))
- .filter(p -> StringUtils.isNotBlank(p.getValue()))
- .map(p -> {
- // hack to distinguish orcid from orcid_pending
- String pidProvenance = Optional
- .ofNullable(p.getDataInfo())
- .map(
- d -> Optional
- .ofNullable(d.getProvenanceaction())
- .map(Qualifier::getClassid)
- .orElse(""))
- .orElse("");
- if (p
- .getQualifier()
- .getClassid()
- .toLowerCase()
- .contains(ModelConstants.ORCID)) {
- if (pidProvenance
- .equals(ModelConstants.SYSIMPORT_CROSSWALK_ENTITYREGISTRY)) {
- p.getQualifier().setClassid(ModelConstants.ORCID);
- } else {
- p.getQualifier().setClassid(ModelConstants.ORCID_PENDING);
- }
- final String orcid = p
- .getValue()
- .trim()
- .toLowerCase()
- .replaceAll(ORCID_CLEANING_REGEX, "$1-$2-$3-$4");
- if (orcid.length() == ORCID_LEN) {
- p.setValue(orcid);
- } else {
- p.setValue("");
- }
- }
- return p;
- })
- .filter(p -> StringUtils.isNotBlank(p.getValue()))
- .collect(
- Collectors
- .toMap(
- p -> p.getQualifier().getClassid() + p.getValue(),
- Function.identity(),
- (p1, p2) -> p1,
- LinkedHashMap::new))
- .values()
- .stream()
- .collect(Collectors.toList()));
- }
- }
- }
- if (value instanceof Publication) {
-
- } else if (value instanceof Dataset) {
-
- } else if (value instanceof OtherResearchProduct) {
-
- } else if (value instanceof Software) {
-
- }
}
return value;
}
+ private static Author cleanupAuthor(Author author) {
+ if (StringUtils.isNotBlank(author.getFullname())) {
+ author
+ .setFullname(
+ author
+ .getFullname()
+ .replaceAll(NAME_CLEANING_REGEX, " ")
+ .replace("\"", "\\\""));
+ }
+ if (StringUtils.isNotBlank(author.getName())) {
+ author
+ .setName(
+ author
+ .getName()
+ .replaceAll(NAME_CLEANING_REGEX, " ")
+ .replace("\"", "\\\""));
+ }
+ if (StringUtils.isNotBlank(author.getSurname())) {
+ author
+ .setSurname(
+ author
+ .getSurname()
+ .replaceAll(NAME_CLEANING_REGEX, " ")
+ .replace("\"", "\\\""));
+ }
+
+ return author;
+ }
+
private static Optional<String> cleanDateField(Field<String> dateofacceptance) {
return Optional
.ofNullable(dateofacceptance)
@@ -491,7 +703,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
.filter(sp -> !PID_BLACKLIST.contains(sp.getValue().trim().toLowerCase()))
.filter(sp -> Objects.nonNull(sp.getQualifier()))
.filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
- .map(CleaningFunctions::normalizePidValue)
+ .map(PidCleaner::normalizePidValue)
.filter(CleaningFunctions::pidFilter)
.collect(Collectors.toList());
}
@@ -520,6 +732,11 @@ public class GraphCleaningFunctions extends CleaningFunctions {
return s;
}
+ protected static Subject cleanValue(Subject s) {
+ s.setValue(s.getValue().replaceAll(CLEANING_REGEX, " "));
+ return s;
+ }
+
protected static Field<String> cleanValue(Field<String> s) {
s.setValue(s.getValue().replaceAll(CLEANING_REGEX, " "));
return s;
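Taken together, the typical per-record order is: fix vocabulary names, run cleanup with the vocabularies, then gate on filter. A sketch of that chaining (assumed orchestration; the actual wiring lives in the graph cleaning Spark job):

```java
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup;
import eu.dnetlib.dhp.schema.oaf.Oaf;
import eu.dnetlib.dhp.schema.oaf.utils.GraphCleaningFunctions;

public class CleaningPipelineSketch {
	// returns the cleaned record, or null when the filter drops it
	public static <T extends Oaf> T cleanAndFilter(T value, VocabularyGroup vocs) {
		T cleaned = GraphCleaningFunctions.cleanup(GraphCleaningFunctions.fixVocabularyNames(value), vocs);
		return GraphCleaningFunctions.filter(cleaned) ? cleaned : null;
	}
}
```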
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/GridCleaningRule.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/GridCleaningRule.java
new file mode 100644
index 0000000000..37ab91dd52
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/GridCleaningRule.java
@@ -0,0 +1,24 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class GridCleaningRule {
+
+ public static final Pattern PATTERN = Pattern.compile("(?\\d{4,6}\\.[0-9a-z]{1,2})");
+
+ public static String clean(String grid) {
+ String s = grid
+ .replaceAll("\\s", "")
+ .toLowerCase();
+
+ Matcher m = PATTERN.matcher(s);
+ if (m.find()) {
+ return "grid." + m.group("grid");
+ }
+
+ return "";
+ }
+
+}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/ISNICleaningRule.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/ISNICleaningRule.java
new file mode 100644
index 0000000000..bcd8279cc3
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/ISNICleaningRule.java
@@ -0,0 +1,21 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+// https://www.wikidata.org/wiki/Property:P213
+public class ISNICleaningRule {
+
+ public static final Pattern PATTERN = Pattern.compile("([0]{4}) ?([0-9]{4}) ?([0-9]{4}) ?([0-9]{3}[0-9X])");
+
+ public static String clean(final String isni) {
+
+ Matcher m = PATTERN.matcher(isni);
+ if (m.find()) {
+ return String.join("", m.group(1), m.group(2), m.group(3), m.group(4));
+ } else {
+ return "";
+ }
+ }
+}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/OafMapperUtils.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/OafMapperUtils.java
index 6f452e846f..c58096d353 100644
--- a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/OafMapperUtils.java
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/OafMapperUtils.java
@@ -14,6 +14,7 @@ import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import eu.dnetlib.dhp.schema.common.AccessRightComparator;
+import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.*;
@@ -141,7 +142,7 @@ public class OafMapperUtils {
}
public static Qualifier unknown(final String schemeid, final String schemename) {
- return qualifier("UNKNOWN", "Unknown", schemeid, schemename);
+ return qualifier(UNKNOWN, "Unknown", schemeid, schemename);
}
public static AccessRight accessRight(
@@ -189,6 +190,17 @@ public class OafMapperUtils {
return q;
}
+ public static Subject subject(
+ final String value,
+ final String classid,
+ final String classname,
+ final String schemeid,
+ final String schemename,
+ final DataInfo dataInfo) {
+
+ return subject(value, qualifier(classid, classname, schemeid, schemename), dataInfo);
+ }
+
public static StructuredProperty structuredProperty(
final String value,
final String classid,
@@ -200,6 +212,20 @@ public class OafMapperUtils {
return structuredProperty(value, qualifier(classid, classname, schemeid, schemename), dataInfo);
}
+ public static Subject subject(
+ final String value,
+ final Qualifier qualifier,
+ final DataInfo dataInfo) {
+ if (value == null) {
+ return null;
+ }
+ final Subject s = new Subject();
+ s.setValue(value);
+ s.setQualifier(qualifier);
+ s.setDataInfo(dataInfo);
+ return s;
+ }
+
public static StructuredProperty structuredProperty(
final String value,
final Qualifier qualifier,
@@ -477,4 +503,15 @@ public class OafMapperUtils {
rel.setProperties(properties);
return rel;
}
+
+ public static String getProvenance(DataInfo dataInfo) {
+ return Optional
+ .ofNullable(dataInfo)
+ .map(
+ d -> Optional
+ .ofNullable(d.getProvenanceaction())
+ .map(Qualifier::getClassid)
+ .orElse(""))
+ .orElse("");
+ }
}
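The new subject(...) factories mirror the existing structuredProperty(...) ones; the classid/classname values below are illustrative:

```java
import eu.dnetlib.dhp.schema.oaf.Subject;
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;

public class SubjectFactorySketch {
	public static void main(String[] args) {
		// dataInfo left null for brevity; real callers pass the record provenance
		Subject s = OafMapperUtils
			.subject("machine learning", "keyword", "keyword", "dnet:subject_classification_typologies",
				"dnet:subject_classification_typologies", null);
		System.out.println(s.getValue() + " [" + s.getQualifier().getClassid() + "]");
	}
}
```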
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PICCleaningRule.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PICCleaningRule.java
new file mode 100644
index 0000000000..a2213ed9f5
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PICCleaningRule.java
@@ -0,0 +1,21 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class PICCleaningRule {
+
+ public static final Pattern PATTERN = Pattern.compile("\\d{9}");
+
+ public static String clean(final String pic) {
+
+ Matcher m = PATTERN.matcher(pic);
+ if (m.find()) {
+ return m.group();
+ } else {
+ return "";
+ }
+ }
+
+}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PidCleaner.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PidCleaner.java
new file mode 100644
index 0000000000..114c2b3af0
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PidCleaner.java
@@ -0,0 +1,62 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.Optional;
+
+import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
+
+public class PidCleaner {
+
+ /**
+ * Utility method that normalises PID values on a per-type basis.
+ * @param pid the PID whose value will be normalised.
+ * @return the PID containing the normalised value.
+ */
+ public static StructuredProperty normalizePidValue(StructuredProperty pid) {
+ pid
+ .setValue(
+ normalizePidValue(
+ pid.getQualifier().getClassid(),
+ pid.getValue()));
+
+ return pid;
+ }
+
+ public static String normalizePidValue(String pidType, String pidValue) {
+ String value = Optional
+ .ofNullable(pidValue)
+ .map(String::trim)
+ .orElseThrow(() -> new IllegalArgumentException("PID value cannot be empty"));
+
+ switch (pidType) {
+
+ // TODO add cleaning for more PID types as needed
+
+ // Result
+ case "doi":
+ return DoiCleaningRule.clean(value);
+ case "pmid":
+ return PmidCleaningRule.clean(value);
+ case "pmc":
+ return PmcCleaningRule.clean(value);
+ case "handle":
+ case "arXiv":
+ return value;
+
+ // Organization
+ case "GRID":
+ return GridCleaningRule.clean(value);
+ case "ISNI":
+ return ISNICleaningRule.clean(value);
+ case "ROR":
+ return RorCleaningRule.clean(value);
+ case "PIC":
+ return PICCleaningRule.clean(value);
+ case "FundRef":
+ return FundRefCleaningRule.clean(value);
+ default:
+ return value;
+ }
+ }
+
+}
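The switch keys are case-sensitive and mix lowercase result PID types with uppercase organization ones. A few illustrative calls (input values are made up):

```java
import eu.dnetlib.dhp.schema.oaf.utils.PidCleaner;

public class PidCleanerSketch {
	public static void main(String[] args) {
		System.out.println(PidCleaner.normalizePidValue("doi", " DOI:10.1000/XYZ ")); // 10.1000/xyz
		System.out.println(PidCleaner.normalizePidValue("pmc", "pmc1234567"));        // PMC1234567
		System.out.println(PidCleaner.normalizePidValue("ROR", "05rrcem69"));         // https://ror.org/05rrcem69
		System.out.println(PidCleaner.normalizePidValue("urn", " some-value "));      // default: trimmed pass-through
	}
}
```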
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PmcCleaningRule.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PmcCleaningRule.java
new file mode 100644
index 0000000000..903041d436
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PmcCleaningRule.java
@@ -0,0 +1,24 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class PmcCleaningRule {
+
+ public static final Pattern PATTERN = Pattern.compile("PMC\\d{1,8}");
+
+ public static String clean(String pmc) {
+ String s = pmc
+ .replaceAll("\\s", "")
+ .toUpperCase();
+
+ final Matcher m = PATTERN.matcher(s);
+
+ if (m.find()) {
+ return m.group();
+ }
+ return "";
+ }
+
+}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PmidCleaningRule.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PmidCleaningRule.java
new file mode 100644
index 0000000000..d0f5a3b27e
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/PmidCleaningRule.java
@@ -0,0 +1,25 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+// https://researchguides.stevens.edu/c.php?g=442331&p=6577176
+public class PmidCleaningRule {
+
+ public static final Pattern PATTERN = Pattern.compile("[1-9]{1,8}");
+
+ public static String clean(String pmid) {
+ String s = pmid
+ .toLowerCase()
+ .replaceAll("\\s", "");
+
+ final Matcher m = PATTERN.matcher(s);
+
+ if (m.find()) {
+ return m.group();
+ }
+ return "";
+ }
+
+}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/RorCleaningRule.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/RorCleaningRule.java
new file mode 100644
index 0000000000..f6685f19da
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/RorCleaningRule.java
@@ -0,0 +1,27 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+// https://ror.readme.io/docs/ror-identifier-pattern
+public class RorCleaningRule {
+
+ public static final String ROR_PREFIX = "https://ror.org/";
+
+ private static final Pattern PATTERN = Pattern.compile("(?<ror>0[a-hj-km-np-tv-z|0-9]{6}[0-9]{2})");
+
+ public static String clean(String ror) {
+ String s = ror
+ .replaceAll("\\s", "")
+ .toLowerCase();
+
+ Matcher m = PATTERN.matcher(s);
+
+ if (m.find()) {
+ return ROR_PREFIX + m.group("ror");
+ }
+ return "";
+ }
+
+}
diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/SubjectProvenanceComparator.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/SubjectProvenanceComparator.java
new file mode 100644
index 0000000000..f4e3c8841c
--- /dev/null
+++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/SubjectProvenanceComparator.java
@@ -0,0 +1,46 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import static eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils.getProvenance;
+import static org.apache.commons.lang3.StringUtils.isBlank;
+
+import java.util.Comparator;
+
+import eu.dnetlib.dhp.schema.oaf.Subject;
+
+public class SubjectProvenanceComparator implements Comparator<Subject> {
+
+ @Override
+ public int compare(Subject left, Subject right) {
+
+ String lProv = getProvenance(left.getDataInfo());
+ String rProv = getProvenance(right.getDataInfo());
+
+ if (isBlank(lProv) && isBlank(rProv))
+ return 0;
+ if (isBlank(lProv))
+ return 1;
+ if (isBlank(rProv))
+ return -1;
+ if (lProv.equals(rProv))
+ return 0;
+ if (lProv.toLowerCase().contains("crosswalk"))
+ return -1;
+ if (rProv.toLowerCase().contains("crosswalk"))
+ return 1;
+ if (lProv.toLowerCase().contains("user"))
+ return -1;
+ if (rProv.toLowerCase().contains("user"))
+ return 1;
+ if (lProv.toLowerCase().contains("propagation"))
+ return -1;
+ if (rProv.toLowerCase().contains("propagation"))
+ return 1;
+ if (lProv.toLowerCase().contains("iis"))
+ return -1;
+ if (rProv.toLowerCase().contains("iis"))
+ return 1;
+
+ return 0;
+ }
+}
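
Note: the comparator ranks provenance classes crosswalk > user > propagation > iis, with blank provenance sorting last, so the most trusted subject ends up first. A sketch, assuming crosswalkSubject and iisSubject are hypothetical fixtures whose provenanceaction classids contain "crosswalk" and "iis" respectively:

    List<Subject> subjects = new ArrayList<>(Arrays.asList(iisSubject, crosswalkSubject));
    subjects.sort(new SubjectProvenanceComparator());
    // subjects.get(0) == crosswalkSubject; entries with blank provenance would sort last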
diff --git a/dhp-common/src/main/resources/eu/dnetlib/dhp/common/input_maketar_parameters.json b/dhp-common/src/main/resources/eu/dnetlib/dhp/common/input_maketar_parameters.json
index c57f67ebdc..a153188651 100644
--- a/dhp-common/src/main/resources/eu/dnetlib/dhp/common/input_maketar_parameters.json
+++ b/dhp-common/src/main/resources/eu/dnetlib/dhp/common/input_maketar_parameters.json
@@ -23,12 +23,6 @@
"paramLongName":"splitSize",
"paramDescription": "the maximum size of the archive",
"paramRequired": false
- },
- {
- "paramName":"rn",
- "paramLongName":"rename",
- "paramDescription": "if the file has to be renamed",
- "paramRequired": false
}
]
diff --git a/dhp-common/src/main/resources/eu/dnetlib/dhp/oa/merge/dispatch_entities_parameters.json b/dhp-common/src/main/resources/eu/dnetlib/dhp/oa/merge/dispatch_entities_parameters.json
index aa8d2a7c22..60f11ac844 100644
--- a/dhp-common/src/main/resources/eu/dnetlib/dhp/oa/merge/dispatch_entities_parameters.json
+++ b/dhp-common/src/main/resources/eu/dnetlib/dhp/oa/merge/dispatch_entities_parameters.json
@@ -18,9 +18,9 @@
"paramRequired": true
},
{
- "paramName": "c",
- "paramLongName": "graphTableClassName",
- "paramDescription": "the graph entity class name",
+ "paramName": "fi",
+ "paramLongName": "filterInvisible",
+ "paramDescription": "if true filters out invisible entities",
"paramRequired": true
}
]
\ No newline at end of file
diff --git a/dhp-common/src/main/scala/eu/dnetlib/dhp/application/dedup/log/DedupLogModel.scala b/dhp-common/src/main/scala/eu/dnetlib/dhp/application/dedup/log/DedupLogModel.scala
new file mode 100644
index 0000000000..d74ec3f696
--- /dev/null
+++ b/dhp-common/src/main/scala/eu/dnetlib/dhp/application/dedup/log/DedupLogModel.scala
@@ -0,0 +1,10 @@
+package eu.dnetlib.dhp.application.dedup.log
+
+case class DedupLogModel(
+ tag: String,
+ configuration: String,
+ entity: String,
+ startTS: Long,
+ endTS: Long,
+ totalMs: Long
+) {}
diff --git a/dhp-common/src/main/scala/eu/dnetlib/dhp/application/dedup/log/DedupLogWriter.scala b/dhp-common/src/main/scala/eu/dnetlib/dhp/application/dedup/log/DedupLogWriter.scala
new file mode 100644
index 0000000000..4409c01d92
--- /dev/null
+++ b/dhp-common/src/main/scala/eu/dnetlib/dhp/application/dedup/log/DedupLogWriter.scala
@@ -0,0 +1,14 @@
+package eu.dnetlib.dhp.application.dedup.log
+
+import org.apache.spark.sql.{SaveMode, SparkSession}
+
+class DedupLogWriter(path: String) {
+
+ def appendLog(dedupLogModel: DedupLogModel, spark: SparkSession): Unit = {
+ import spark.implicits._
+ val df = spark.createDataset[DedupLogModel](data = List(dedupLogModel))
+ df.write.mode(SaveMode.Append).save(path)
+
+ }
+
+}
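
Note: DedupLogModel/DedupLogWriter add a small audit trail for dedup runs. A hedged Java sketch (path, tag, configuration JSON and timestamps are placeholders; the write appends rows in Spark's default format):

    DedupLogModel model = new DedupLogModel(
        "organization_run", dedupConfJson, "organization", startTs, endTs, endTs - startTs);
    new DedupLogWriter("/tmp/dedup/log").appendLog(model, spark);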
diff --git a/dhp-common/src/test/java/eu/dnetlib/dhp/common/MdStoreClientTest.java b/dhp-common/src/test/java/eu/dnetlib/dhp/common/MdStoreClientTest.java
new file mode 100644
index 0000000000..f87f6e3130
--- /dev/null
+++ b/dhp-common/src/test/java/eu/dnetlib/dhp/common/MdStoreClientTest.java
@@ -0,0 +1,36 @@
+
+package eu.dnetlib.dhp.common;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+
+import org.junit.jupiter.api.Test;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+public class MdStoreClientTest {
+
+ // @Test
+ public void testMongoCollection() throws IOException {
+ final MdstoreClient client = new MdstoreClient("mongodb://localhost:27017", "mdstore");
+
+ final ObjectMapper mapper = new ObjectMapper();
+
+ final List<MDStoreInfo> infos = client.mdStoreWithTimestamp("ODF", "store", "cleaned");
+
+ infos.forEach(System.out::println);
+
+ final String s = mapper.writeValueAsString(infos);
+
+ Path fileName = Paths.get("/Users/sandro/mdstore_info.json");
+
+ // Writing into the file
+ Files.write(fileName, s.getBytes(StandardCharsets.UTF_8));
+
+ }
+}
diff --git a/dhp-common/src/test/java/eu/dnetlib/dhp/oa/merge/AuthorMergerTest.java b/dhp-common/src/test/java/eu/dnetlib/dhp/oa/merge/AuthorMergerTest.java
deleted file mode 100644
index 3a7a41a1b0..0000000000
--- a/dhp-common/src/test/java/eu/dnetlib/dhp/oa/merge/AuthorMergerTest.java
+++ /dev/null
@@ -1,100 +0,0 @@
-
-package eu.dnetlib.dhp.oa.merge;
-
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import eu.dnetlib.dhp.schema.oaf.Author;
-import eu.dnetlib.dhp.schema.oaf.Publication;
-import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
-import eu.dnetlib.pace.util.MapDocumentUtil;
-import scala.Tuple2;
-
-class AuthorMergerTest {
-
- private String publicationsBasePath;
-
- private List<List<Author>> authors;
-
- @BeforeEach
- public void setUp() throws Exception {
-
- publicationsBasePath = Paths
- .get(AuthorMergerTest.class.getResource("/eu/dnetlib/dhp/oa/merge").toURI())
- .toFile()
- .getAbsolutePath();
-
- authors = readSample(publicationsBasePath + "/publications_with_authors.json", Publication.class)
- .stream()
- .map(p -> p._2().getAuthor())
- .collect(Collectors.toList());
-
- }
-
- @Test
- void mergeTest() { // used in the dedup: threshold set to 0.95
-
- for (List<Author> authors1 : authors) {
- System.out.println("List " + (authors.indexOf(authors1) + 1));
- for (Author author : authors1) {
- System.out.println(authorToString(author));
- }
- }
-
- List<Author> merge = AuthorMerger.merge(authors);
-
- System.out.println("Merge ");
- for (Author author : merge) {
- System.out.println(authorToString(author));
- }
-
- Assertions.assertEquals(7, merge.size());
-
- }
-
- public <T> List<Tuple2<String, T>> readSample(String path, Class<T> clazz) {
- List<Tuple2<String, T>> res = new ArrayList<>();
- BufferedReader reader;
- try {
- reader = new BufferedReader(new FileReader(path));
- String line = reader.readLine();
- while (line != null) {
- res
- .add(
- new Tuple2<>(
- MapDocumentUtil.getJPathString("$.id", line),
- new ObjectMapper().readValue(line, clazz)));
- // read next line
- line = reader.readLine();
- }
- reader.close();
- } catch (IOException e) {
- e.printStackTrace();
- }
-
- return res;
- }
-
- public String authorToString(Author a) {
-
- String print = "Fullname = ";
- print += a.getFullname() + " pid = [";
- if (a.getPid() != null)
- for (StructuredProperty sp : a.getPid()) {
- print += sp.toComparableString() + " ";
- }
- print += "]";
- return print;
- }
-}
diff --git a/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/GridCleaningRuleTest.java b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/GridCleaningRuleTest.java
new file mode 100644
index 0000000000..1b9163d464
--- /dev/null
+++ b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/GridCleaningRuleTest.java
@@ -0,0 +1,18 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.Test;
+
+class GridCleaningRuleTest {
+
+ @Test
+ void testCleaning() {
+ assertEquals("grid.493784.5", GridCleaningRule.clean("grid.493784.5"));
+ assertEquals("grid.493784.5x", GridCleaningRule.clean("grid.493784.5x"));
+ assertEquals("grid.493784.5x", GridCleaningRule.clean("493784.5x"));
+ assertEquals("", GridCleaningRule.clean("493x784.5x"));
+ }
+
+}
diff --git a/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/ISNICleaningRuleTest.java b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/ISNICleaningRuleTest.java
new file mode 100644
index 0000000000..e51d1e05c9
--- /dev/null
+++ b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/ISNICleaningRuleTest.java
@@ -0,0 +1,19 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.Test;
+
+class ISNICleaningRuleTest {
+
+ @Test
+ void testCleaning() {
+ assertEquals("0000000463436020", ISNICleaningRule.clean("0000 0004 6343 6020"));
+ assertEquals("0000000463436020", ISNICleaningRule.clean("0000000463436020"));
+ assertEquals("", ISNICleaningRule.clean("Q30256598"));
+ assertEquals("0000000493403529", ISNICleaningRule.clean("ISNI:0000000493403529"));
+ assertEquals("000000008614884X", ISNICleaningRule.clean("0000 0000 8614 884X"));
+ }
+
+}
diff --git a/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PICCleaningRuleTest.java b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PICCleaningRuleTest.java
new file mode 100644
index 0000000000..3736033c33
--- /dev/null
+++ b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PICCleaningRuleTest.java
@@ -0,0 +1,19 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.Test;
+
+class PICCleaningRuleTest {
+
+ @Test
+ void testCleaning() {
+ assertEquals("887624982", PICCleaningRule.clean("887624982"));
+ assertEquals("", PICCleaningRule.clean("887 624982"));
+ assertEquals("887624982", PICCleaningRule.clean(" 887624982 "));
+ assertEquals("887624982", PICCleaningRule.clean(" 887624982x "));
+ assertEquals("887624982", PICCleaningRule.clean(" 88762498200 "));
+ }
+
+}
diff --git a/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PmcCleaningRuleTest.java b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PmcCleaningRuleTest.java
new file mode 100644
index 0000000000..e53ebae897
--- /dev/null
+++ b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PmcCleaningRuleTest.java
@@ -0,0 +1,19 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.Test;
+
+class PmcCleaningRuleTest {
+
+ @Test
+ void testCleaning() {
+ assertEquals("PMC1234", PmcCleaningRule.clean("PMC1234"));
+ assertEquals("PMC1234", PmcCleaningRule.clean(" PMC1234"));
+ assertEquals("PMC12345678", PmcCleaningRule.clean("PMC12345678"));
+ assertEquals("PMC12345678", PmcCleaningRule.clean("PMC123456789"));
+ assertEquals("PMC12345678", PmcCleaningRule.clean("PMC 12345678"));
+ }
+
+}
diff --git a/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PmidCleaningRuleTest.java b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PmidCleaningRuleTest.java
new file mode 100644
index 0000000000..9562adf7e0
--- /dev/null
+++ b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/PmidCleaningRuleTest.java
@@ -0,0 +1,18 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.Test;
+
+class PmidCleaningRuleTest {
+
+ @Test
+ void testCleaning() {
+ assertEquals("1234", PmidCleaningRule.clean("01234"));
+ assertEquals("1234567", PmidCleaningRule.clean("0123 4567"));
+ assertEquals("123", PmidCleaningRule.clean("0123x4567"));
+ assertEquals("", PmidCleaningRule.clean("abc"));
+ }
+
+}
diff --git a/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/RorCleaningRuleTest.java b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/RorCleaningRuleTest.java
new file mode 100644
index 0000000000..5d5c03959d
--- /dev/null
+++ b/dhp-common/src/test/java/eu/dnetlib/dhp/schema/oaf/utils/RorCleaningRuleTest.java
@@ -0,0 +1,17 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.Test;
+
+class RorCleaningRuleTest {
+
+ @Test
+ void testCleaning() {
+ assertEquals("https://ror.org/05rpz9w55", RorCleaningRule.clean("https://ror.org/05rpz9w55"));
+ assertEquals("https://ror.org/05rpz9w55", RorCleaningRule.clean("05rpz9w55"));
+ assertEquals("", RorCleaningRule.clean("05rpz9w_55"));
+ }
+
+}
diff --git a/dhp-common/src/test/resources/eu/dnetlib/dhp/oa/merge/publications_with_authors.json b/dhp-common/src/test/resources/eu/dnetlib/dhp/oa/merge/publications_with_authors.json
deleted file mode 100644
index 600181ba56..0000000000
--- a/dhp-common/src/test/resources/eu/dnetlib/dhp/oa/merge/publications_with_authors.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{ "journal":{ "dataInfo":null, "conferenceplace":null, "issnPrinted":"0009-9260", "issnOnline":null, "issnLinking":null, "ep":"636", "iss":null, "sp":"632", "vol":"55", "edition":null, "conferencedate":null, "name":"Clinical Radiology" }, "measures":null, "author":[ { "rank":null, "fullname":"KARL TURETSCHEK", "affiliation":null, "pid":null, "surname":"TURETSCHEK", "name":"KARL" }, { "rank":null, "fullname":"WOLFGANG EBNER", "affiliation":null, "pid":null, "surname":"EBNER", "name":"WOLFGANG" }, { "rank":null, "fullname":"DOMINIK FLEISCHMANN", "affiliation":null, "pid":null, "surname":"FLEISCHMANN", "name":"DOMINIK" }, { "rank":null, "fullname":"PATRICK WUNDERBALDINGER", "affiliation":null, "pid":null, "surname":"WUNDERBALDINGER", "name":"PATRICK" }, { "rank":null, "fullname":"LUDWIG ERLACHER", "affiliation":null, "pid":null, "surname":"ERLACHER", "name":"LUDWIG" }, { "rank":null, "fullname":"THOMAS ZONTSICH", "affiliation":null, "pid":null, "surname":"ZONTSICH", "name":"THOMAS" }, { "rank":null, "fullname":"ALEXANDER A. BANKIER", "affiliation":null, "pid":null, "surname":"BANKIER", "name":"ALEXANDER A." } ], "resulttype":{ "classid":"publication", "schemeid":"dnet:result_typologies", "schemename":"dnet:result_typologies", "classname":"publication"}, "title":[ { "qualifier":{ "classid":"main title", "schemeid":"dnet:dataCite_title", "schemename":"dnet:dataCite_title", "classname":"main title" }, "dataInfo":null, "value":"Early Pulmonary Involvement in Ankylosing Spondylitis: Assessment With Thin-section CT" } ], "relevantdate":[ { "qualifier":{ "classid":"created", "schemeid":"dnet:dataCite_date", "schemename":"dnet:dataCite_date", "classname":"created" }, "dataInfo":null, "value":"2002-09-19T13:54:50Z" } ], "dateofacceptance":{ "dataInfo":null, "value":"2002-09-19T13:54:50Z" }, "publisher":{ "dataInfo":null, "value":"Elsevier BV" }, "embargoenddate":null, "fulltext":null, "contributor":null, "resourcetype":{ "classid":"0001", "schemeid":"dnet:dataCite_resource", "schemename":"dnet:dataCite_resource", "classname":"0001"}, "coverage":null, "bestaccessright":null, "externalReference":null, "format":null, "description":[ ], "source":[ { "dataInfo":null, "value":"Crossref" } ], "subject":[ { "qualifier":{ "classid":"keywords", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"keywords" }, "dataInfo":null, "value":"Radiology Nuclear Medicine and imaging" }, { "qualifier":{ "classid":"keywords", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"keywords" }, "dataInfo":null, "value":"General Medicine" } ], "language":null, "instance":[ { "processingchargecurrency":null, "refereed":null, "instancetype":{ "classid":"0001", "schemeid":"dnet:publication_resource", "schemename":"dnet:publication_resource", "classname":"Article" }, "hostedby":null, "distributionlocation":null, "processingchargeamount":null, "license":{ "dataInfo":null, "value":"https://www.elsevier.com/tdm/userlicense/1.0/" }, "accessright":{ "classid":"RESTRICTED", "schemeid":"dnet:access_modes", "schemename":"dnet:access_modes", "classname":"Restricted" }, "dateofacceptance":{ "dataInfo":null, "value":"2002-09-19T13:54:50Z" }, "collectedfrom":{ "dataInfo":null, "value":"Crossref", "key":"10|openaire____::081b82f96300b6a6e3d282bad31cb6e2" }, "url":[ "https://api.elsevier.com/content/article/PII:S0009926000904987?httpAccept=text/xml", 
"https://api.elsevier.com/content/article/PII:S0009926000904987?httpAccept=text/plain", "http://dx.doi.org/10.1053/crad.2000.0498" ] } ], "context":null, "country":null, "originalId":[ "S0009926000904987", "10.1053/crad.2000.0498" ], "pid":[ { "qualifier":{ "classid":"doi", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"doi" }, "dataInfo":null, "value":"10.1053/crad.2000.0498" } ], "dateofcollection":"2020-02-06T20:40:22Z", "dateoftransformation":null, "oaiprovenance":null, "extraInfo":null, "id":"50|doiboost____::994b7e47b9e225ab6d5e14841cb45a7f", "collectedfrom":[ { "dataInfo":null, "value":"Crossref", "key":"10|openaire____::081b82f96300b6a6e3d282bad31cb6e2" } ], "dataInfo":{ "trust":"0.9", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "lastupdatetimestamp":1581021622595 }
-{ "journal":null, "measures":null, "author":[ { "rank":null, "fullname":"Dominik Fleischmann", "affiliation":null, "pid":[ { "qualifier":{ "classid":"ORCID", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"ORCID" }, "dataInfo":{ "trust":"0.91", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:crosswalk:entityregistry", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"Harvested"} }, "value":"0000-0003-0715-0952" } ], "surname":"Fleischmann", "name":"Dominik" } ], "resulttype":{ "classid":"publication", "schemeid":"dnet:result_typologies", "schemename":"dnet:result_typologies", "classname":"publication"}, "title":[ ], "relevantdate":[ ], "dateofacceptance":null, "publisher":null, "embargoenddate":null, "fulltext":[ ], "contributor":[ ], "resourcetype":null, "coverage":[ ], "bestaccessright":null, "externalReference":[ ], "format":[ ], "description":null, "source":[ ], "subject":[ ], "language":null, "instance":[ ], "context":[ ], "country":[ ], "originalId":[ ], "pid":[ { "qualifier":{ "classid":"doi", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"doi"}, "dataInfo":null, "value":"10.1053/crad.2000.0498" } ], "dateofcollection":null, "dateoftransformation":null, "oaiprovenance":null, "extraInfo":[ ], "id":"50|doiboost____::994b7e47b9e225ab6d5e14841cb45a7f", "collectedfrom":[ { "dataInfo":null, "value":"ORCID", "key":"10|openaire____::806360c771262b4d6770e7cdf04b5c5a" } ], "dataInfo":{ "trust":"0.9", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "lastupdatetimestamp":null }
-{ "journal":{ "dataInfo":null, "conferenceplace":null, "issnPrinted":"0009-9260", "issnOnline":null, "issnLinking":null, "ep":"636", "iss":"8", "sp":"632", "vol":"55", "edition":null, "conferencedate":null, "name":"Clinical Radiology" }, "measures":null, "author":[ { "rank":null, "fullname":"T. Zontsich", "affiliation":[ { "dataInfo":null, "value":"University of Vienna" } ], "pid":[ { "qualifier":{ "classid":"URL", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"URL"}, "dataInfo":null, "value":"https://academic.microsoft.com/#/detail/1966908432" } ], "surname":null, "name":null }, { "rank":null, "fullname":"L Erlacher", "affiliation":[ { "dataInfo":null, "value":"University of Vienna" } ], "pid":[ { "qualifier":{ "classid":"URL", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"URL"}, "dataInfo":null, "value":"https://academic.microsoft.com/#/detail/687931320" } ], "surname":null, "name":null }, { "rank":null, "fullname":"Dominik Fleischmann", "affiliation":[ { "dataInfo":null, "value":"University of Vienna" } ], "pid":[ { "qualifier":{ "classid":"URL", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"URL"}, "dataInfo":null, "value":"https://academic.microsoft.com/#/detail/2156559961" } ], "surname":null, "name":null }, { "rank":null, "fullname":"Alexander A. Bankier", "affiliation":[ { "dataInfo":null, "value":"University of Vienna" } ], "pid":[ { "qualifier":{ "classid":"URL", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"URL"}, "dataInfo":null, "value":"https://academic.microsoft.com/#/detail/1107971609" } ], "surname":null, "name":null }, { "rank":null, "fullname":"Patrick Wunderbaldinger", "affiliation":[ { "dataInfo":null, "value":"University of Vienna" } ], "pid":[ { "qualifier":{ "classid":"URL", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"URL" }, "dataInfo":null, "value":"https://academic.microsoft.com/#/detail/2422340537" } ], "surname":null, "name":null }, { "rank":null, "fullname":"Wolfgang Ebner", "affiliation":null, "pid":[ { "qualifier":{ "classid":"URL", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"URL" }, "dataInfo":null, "value":"https://academic.microsoft.com/#/detail/2186462571" } ], "surname":null, "name":null }, { "rank":null, "fullname":"K. Turetschek", "affiliation":[ { "dataInfo":null, "value":"University of Vienna" } ], "pid":[ { "qualifier":{ "classid":"URL", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"URL" }, "dataInfo":null, "value":"https://academic.microsoft.com/#/detail/321765676" } ], "surname":null, "name":null } ], "resulttype":{ "classid":"publication", "schemeid":"dnet:result_typologies", "schemename":"dnet:result_typologies", "classname":"publication" }, "title":[ { "qualifier":{ "classid":"main title", "schemeid":"dnet:dataCite_title", "schemename":"dnet:dataCite_title", "classname":"main title" }, "dataInfo":null, "value":"early pulmonary involvement in ankylosing spondylitis assessment with thin section ct" }, { "qualifier":{ "classid":"alternative title", "schemeid":"dnet:dataCite_title", "schemename":"dnet:dataCite_title", "classname":"alternative title" }, "dataInfo":null, "value":"Early pulmonary involvement in ankylosing spondylitis: assessment with thin-section CT." 
} ], "relevantdate":null, "dateofacceptance":{ "dataInfo":null, "value":"2000-08-01" }, "publisher":{ "dataInfo":null, "value":"Elsevier" }, "embargoenddate":null, "fulltext":null, "contributor":null, "resourcetype":null, "coverage":null, "bestaccessright":null, "externalReference":null, "format":null, "description":[ { "dataInfo":null, "value":"Abstract AIM: To determine the frequency and the distribution of early pulmonary lesions in patients with ankylosing spondylitis (AS) and a normal chest X-ray on thin-section CT and to correlate the CT findings with the results of pulmonary function tests and clinical data. MATERIALS AND METHODS: Twenty-five patients with clinically proven AS and no history of smoking underwent clinical examinations, pulmonary function tests (PFT), chest radiography, and thin-section CT. Four of 25 patients (16%), who had obvious signs on plain films suggestive of pre-existing disorders unrelated to AS were excluded. RESULTS: Fifteen of 21 patients (71%) had abnormalities on thin-section CT. The most frequent abnormalities were thickening of the interlobular septa in seven of 21 patients (33%), mild bronchial wall thickening in (6/21, 29%), pleural thickening and pleuropulmonary irregularities (both 29%) and linear septal thickening (6/21, 29%). In six patients there were no signs of pleuropulmonary involvement. Eight of 15 patients (53%) with abnormal and four of six patients (67%) with normal CT findings revealed mild restrictive lung function impairment. CONCLUSION: Patients with AS but a normal chest radiograph frequently have abnormalities on thin-section CT. As these abnormalities are usually subtle and their extent does not correlate with functional and clinical data, the overall routine impact of thin-section CT in the diagnosis of AS is limited. Turetschek, K , (2000) Clinical Radiology53, 632–636." 
} ], "source":[ { "dataInfo":null, "value":null } ], "subject":[ { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"Complication" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"Chest radiograph" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.580897", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"medicine.diagnostic_test" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.580897", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"medicine" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"In patient" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"Radiography" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.4582326", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"business.industry" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.4582326", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"business" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"Thin section ct" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph 
classification" }, "dataInfo":null, "value":"Respiratory disease" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.49358836", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"medicine.disease" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.49358836", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"medicine" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"Ankylosing spondylitis" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.49937168", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"medicine.disease" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.49937168", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"medicine" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"Radiology" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.4573571", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"medicine.medical_specialty" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.4573571", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", 
"schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"medicine" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"Medicine" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.40295774", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"business.industry" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":{ "trust":"0.40295774", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset" } }, "value":"business" }, { "qualifier":{ "classid":"MAG", "schemeid":"dnet:subject_classification_typologies", "schemename":"dnet:subject_classification_typologies", "classname":"Microsoft Academic Graph classification" }, "dataInfo":null, "value":"Pulmonary function testing" } ], "language":null, "instance":[ { "processingchargecurrency":null, "refereed":null, "instancetype":null, "hostedby":null, "distributionlocation":null, "processingchargeamount":null, "license":null, "accessright":null, "dateofacceptance":null, "collectedfrom":{ "dataInfo":null, "value":"Microsoft Academic Graph", "key":"10|openaire____::5f532a3fc4f1ea403f37070f59a7a53a" }, "url":[ "https://www.ncbi.nlm.nih.gov/pubmed/10964736", "https://www.sciencedirect.com/science/article/pii/S0009926000904987", "https://academic.microsoft.com/#/detail/1990704599" ] } ], "context":null, "country":null, "originalId":[ "1990704599", "10.1053/crad.2000.0498" ], "pid":[ { "qualifier":{ "classid":"doi", "schemeid":"dnet:pid_types", "schemename":"dnet:pid_types", "classname":"doi" }, "dataInfo":null, "value":"10.1053/crad.2000.0498" } ], "dateofcollection":null, "dateoftransformation":null, "oaiprovenance":null, "extraInfo":null, "id":"50|doiboost____::994b7e47b9e225ab6d5e14841cb45a7f", "collectedfrom":[ { "dataInfo":null, "value":"Microsoft Academic Graph", "key":"10|openaire____::5f532a3fc4f1ea403f37070f59a7a53a" } ], "dataInfo":{ "trust":"0.9", "invisible":false, "inferred":false, "deletedbyinference":false, "inferenceprovenance":null, "provenanceaction":{ "classid":"sysimport:actionset", "schemeid":"dnet:provenanceActions", "schemename":"dnet:provenanceActions", "classname":"sysimport:actionset"} }, "lastupdatetimestamp":null }
\ No newline at end of file
diff --git a/dhp-pace-core/pom.xml b/dhp-pace-core/pom.xml
new file mode 100644
index 0000000000..fd7f44fc94
--- /dev/null
+++ b/dhp-pace-core/pom.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>eu.dnetlib.dhp</groupId>
+        <artifactId>dhp</artifactId>
+        <version>1.2.5-SNAPSHOT</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <groupId>eu.dnetlib.dhp</groupId>
+    <artifactId>dhp-pace-core</artifactId>
+    <version>1.2.5-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+                <version>${net.alchim31.maven.version}</version>
+                <executions>
+                    <execution>
+                        <id>scala-compile-first</id>
+                        <phase>initialize</phase>
+                        <goals>
+                            <goal>add-source</goal>
+                            <goal>compile</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>scala-test-compile</id>
+                        <phase>process-test-resources</phase>
+                        <goals>
+                            <goal>testCompile</goal>
+                        </goals>
+                    </execution>
+                </executions>
+                <configuration>
+                    <failOnMultipleScalaVersions>true</failOnMultipleScalaVersions>
+                    <scalaCompatVersion>${scala.binary.version}</scalaCompatVersion>
+                    <scalaVersion>${scala.version}</scalaVersion>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <dependency>
+            <groupId>edu.cmu</groupId>
+            <artifactId>secondstring</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>stringtemplate</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>commons-logging</groupId>
+            <artifactId>commons-logging</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.reflections</groupId>
+            <artifactId>reflections</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-math3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.jayway.jsonpath</groupId>
+            <artifactId>json-path</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.ibm.icu</groupId>
+            <artifactId>icu4j</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.spark</groupId>
+            <artifactId>spark-core_${scala.binary.version}</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.spark</groupId>
+            <artifactId>spark-sql_${scala.binary.version}</artifactId>
+        </dependency>
+    </dependencies>
+
+</project>
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/AbstractClusteringFunction.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/AbstractClusteringFunction.java
new file mode 100644
index 0000000000..3da8eb4900
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/AbstractClusteringFunction.java
@@ -0,0 +1,46 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.StringUtils;
+
+import eu.dnetlib.pace.common.AbstractPaceFunctions;
+import eu.dnetlib.pace.config.Config;
+
+public abstract class AbstractClusteringFunction extends AbstractPaceFunctions implements ClusteringFunction {
+
+ protected Map<String, Integer> params;
+
+ public AbstractClusteringFunction(final Map<String, Integer> params) {
+ this.params = params;
+ }
+
+ protected abstract Collection<String> doApply(Config conf, String s);
+
+ @Override
+ public Collection<String> apply(Config conf, List<String> fields) {
+ return fields
+ .stream()
+ .filter(f -> !f.isEmpty())
+ .map(this::normalize)
+ .map(s -> filterAllStopWords(s))
+ .map(s -> doApply(conf, s))
+ .map(c -> filterBlacklisted(c, ngramBlacklist))
+ .flatMap(c -> c.stream())
+ .filter(StringUtils::isNotBlank)
+ .collect(Collectors.toCollection(HashSet::new));
+ }
+
+ public Map<String, Integer> getParams() {
+ return params;
+ }
+
+ protected Integer param(String name) {
+ return params.get(name);
+ }
+}
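
Note: AbstractClusteringFunction is a template method: apply() normalizes each field, strips stopwords, delegates to doApply(), and filters blacklisted ngrams, so subclasses only implement doApply(). A minimal sketch of a new function under that contract (not part of this change):

    @ClusteringClass("wordprefix")
    public class WordPrefix extends AbstractClusteringFunction {

        public WordPrefix(Map<String, Integer> params) {
            super(params);
        }

        @Override
        protected Collection<String> doApply(Config conf, String s) {
            // cluster on the first param("len") characters of the normalized field
            return Collections.singletonList(s.substring(0, Math.min(param("len"), s.length())));
        }
    }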
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/Acronyms.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/Acronyms.java
new file mode 100644
index 0000000000..9072fbb4b2
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/Acronyms.java
@@ -0,0 +1,51 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import com.google.common.collect.Sets;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("acronyms")
+public class Acronyms extends AbstractClusteringFunction {
+
+ public Acronyms(Map<String, Integer> params) {
+ super(params);
+ }
+
+ @Override
+ protected Collection<String> doApply(Config conf, String s) {
+ return extractAcronyms(s, param("max"), param("minLen"), param("maxLen"));
+ }
+
+ private Set<String> extractAcronyms(final String s, int maxAcronyms, int minLen, int maxLen) {
+
+ final Set<String> acronyms = Sets.newLinkedHashSet();
+
+ for (int i = 0; i < maxAcronyms; i++) {
+
+ final StringTokenizer st = new StringTokenizer(s);
+ final StringBuilder sb = new StringBuilder();
+
+ while (st.hasMoreTokens()) {
+ final String token = st.nextToken();
+ if (sb.length() > maxLen) {
+ break;
+ }
+ if (token.length() > 1 && i < token.length()) {
+ sb.append(token.charAt(i));
+ }
+ }
+ String acronym = sb.toString();
+ if (acronym.length() > minLen) {
+ acronyms.add(acronym);
+ }
+ }
+ return acronyms;
+ }
+
+}
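
Note: each pass i over the tokens collects the i-th character of every token longer than one character, stopping once the acronym exceeds maxLen. Roughly (stopwords are already removed by apply() before doApply() runs):

    Acronyms acronyms = new Acronyms(Map.of("max", 1, "minLen", 1, "maxLen", 4));
    Collection<String> keys = acronyms.apply(null, List.of("European Organization for Nuclear Research"));
    // ~ ["eonr"]: one pass, first letters of the non-stopword tokens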
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ClusteringClass.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ClusteringClass.java
new file mode 100644
index 0000000000..3bb845b15d
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ClusteringClass.java
@@ -0,0 +1,14 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface ClusteringClass {
+
+ public String value();
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ClusteringFunction.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ClusteringFunction.java
new file mode 100644
index 0000000000..8b78524182
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ClusteringFunction.java
@@ -0,0 +1,16 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import eu.dnetlib.pace.config.Config;
+
+public interface ClusteringFunction {
+
+ public Collection<String> apply(Config config, List<String> fields);
+
+ public Map<String, Integer> getParams();
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ImmutableFieldValue.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ImmutableFieldValue.java
new file mode 100644
index 0000000000..bc8844aee0
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/ImmutableFieldValue.java
@@ -0,0 +1,28 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.collect.Lists;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("immutablefieldvalue")
+public class ImmutableFieldValue extends AbstractClusteringFunction {
+
+ public ImmutableFieldValue(final Map<String, Integer> params) {
+ super(params);
+ }
+
+ @Override
+ protected Collection<String> doApply(final Config conf, final String s) {
+ final List<String> res = Lists.newArrayList();
+
+ res.add(s);
+
+ return res;
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/KeywordsClustering.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/KeywordsClustering.java
new file mode 100644
index 0000000000..38299adb43
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/KeywordsClustering.java
@@ -0,0 +1,54 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.StringUtils;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("keywordsclustering")
+public class KeywordsClustering extends AbstractClusteringFunction {
+
+ public KeywordsClustering(Map<String, Integer> params) {
+ super(params);
+ }
+
+ @Override
+ protected Collection<String> doApply(final Config conf, String s) {
+
+ // takes city codes and keyword codes, without duplicates
+ Set<String> keywords = getKeywords(s, conf.translationMap(), params.getOrDefault("windowSize", 4));
+ Set<String> cities = getCities(s, params.getOrDefault("windowSize", 4));
+
+ // list of combinations to return as result
+ final Collection<String> combinations = new LinkedHashSet<>();
+
+ for (String keyword : keywordsToCodes(keywords, conf.translationMap())) {
+ for (String city : citiesToCodes(cities)) {
+ combinations.add(keyword + "-" + city);
+ if (combinations.size() >= params.getOrDefault("max", 2)) {
+ return combinations;
+ }
+ }
+ }
+
+ return combinations;
+ }
+
+ @Override
+ public Collection<String> apply(final Config conf, List<String> fields) {
+ return fields
+ .stream()
+ .filter(f -> !f.isEmpty())
+ .map(this::cleanup)
+ .map(this::normalize)
+ .map(s -> filterAllStopWords(s))
+ .map(s -> doApply(conf, s))
+ .map(c -> filterBlacklisted(c, ngramBlacklist))
+ .flatMap(c -> c.stream())
+ .filter(StringUtils::isNotBlank)
+ .collect(Collectors.toCollection(HashSet::new));
+ }
+}
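
Note: unlike the base class, this override also runs cleanup() before normalization, and the keyword/city legends come from the configuration's translation map. A sketch only, since it needs a full dedup configuration (dedupConf is hypothetical and the emitted key format is illustrative):

    KeywordsClustering kc = new KeywordsClustering(Map.of("windowSize", 4, "max", 2));
    Collection<String> keys = kc.apply(dedupConf, List.of("University of Vienna"));
    // emits up to "max" keywordCode-cityCode combinations, e.g. "key::1-city::2"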
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/LastNameFirstInitial.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/LastNameFirstInitial.java
new file mode 100644
index 0000000000..5a385961a6
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/LastNameFirstInitial.java
@@ -0,0 +1,79 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.StringUtils;
+
+import com.google.common.collect.Lists;
+
+import eu.dnetlib.pace.config.Config;
+import eu.dnetlib.pace.model.Person;
+
+@ClusteringClass("lnfi")
+public class LastNameFirstInitial extends AbstractClusteringFunction {
+
+ private boolean DEFAULT_AGGRESSIVE = true;
+
+ public LastNameFirstInitial(final Map<String, Integer> params) {
+ super(params);
+ }
+
+ @Override
+ public Collection<String> apply(Config conf, List<String> fields) {
+ return fields
+ .stream()
+ .filter(f -> !f.isEmpty())
+ .map(this::normalize)
+ .map(s -> doApply(conf, s))
+ .map(c -> filterBlacklisted(c, ngramBlacklist))
+ .flatMap(c -> c.stream())
+ .filter(StringUtils::isNotBlank)
+ .collect(Collectors.toCollection(HashSet::new));
+ }
+
+ @Override
+ protected String normalize(final String s) {
+ return fixAliases(transliterate(nfd(unicodeNormalization(s))))
+ // do not compact the regexes into a single expression: doing so can cause a
+ // StackOverflowError on large input strings
+ .replaceAll("[^ \\w]+", "")
+ .replaceAll("(\\p{InCombiningDiacriticalMarks})+", "")
+ .replaceAll("(\\p{Punct})+", " ")
+ .replaceAll("(\\d)+", " ")
+ .replaceAll("(\\n)+", " ")
+ .trim();
+ }
+
+ @Override
+ protected Collection<String> doApply(final Config conf, final String s) {
+
+ final List<String> res = Lists.newArrayList();
+
+ final boolean aggressive = (Boolean) (getParams().containsKey("aggressive") ? getParams().get("aggressive")
+ : DEFAULT_AGGRESSIVE);
+
+ Person p = new Person(s, aggressive);
+
+ if (p.isAccurate()) {
+ String lastName = p.getNormalisedSurname().toLowerCase();
+ String firstInitial = p.getNormalisedFirstName().toLowerCase().substring(0, 1);
+
+ res.add(firstInitial.concat(lastName));
+ } else { // is not accurate, meaning it has no defined name and surname
+ List<String> fullname = Arrays.asList(p.getNormalisedFullname().split(" "));
+ if (fullname.size() == 1) {
+ res.add(p.getNormalisedFullname().toLowerCase());
+ } else if (fullname.size() == 2) {
+ res.add(fullname.get(0).substring(0, 1).concat(fullname.get(1)).toLowerCase());
+ res.add(fullname.get(1).substring(0, 1).concat(fullname.get(0)).toLowerCase());
+ } else {
+ res.add(fullname.get(0).substring(0, 1).concat(fullname.get(fullname.size() - 1)).toLowerCase());
+ res.add(fullname.get(fullname.size() - 1).substring(0, 1).concat(fullname.get(0)).toLowerCase());
+ }
+ }
+
+ return res;
+ }
+}
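
Note: "lnfi" keys are first initial plus surname when Person can parse the name, with fallbacks for one- and two-token full names. A sketch (the exact output depends on Person's parsing heuristics):

    LastNameFirstInitial lnfi = new LastNameFirstInitial(Map.of());
    Collection<String> keys = lnfi.apply(null, List.of("Turetschek, Karl"));
    // ~ ["kturetschek"]; for a bare two-token name both orderings are emitted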
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/LowercaseClustering.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/LowercaseClustering.java
new file mode 100644
index 0000000000..a3a6c48819
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/LowercaseClustering.java
@@ -0,0 +1,38 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.lang3.StringUtils;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("lowercase")
+public class LowercaseClustering extends AbstractClusteringFunction {
+
+ public LowercaseClustering(final Map<String, Integer> params) {
+ super(params);
+ }
+
+ @Override
+ public Collection<String> apply(Config conf, List<String> fields) {
+ Collection<String> c = Sets.newLinkedHashSet();
+ for (String f : fields) {
+ c.addAll(doApply(conf, f));
+ }
+ return c;
+ }
+
+ @Override
+ protected Collection<String> doApply(final Config conf, final String s) {
+ if (StringUtils.isBlank(s)) {
+ return Lists.newArrayList();
+ }
+ return Lists.newArrayList(s.toLowerCase().trim());
+ }
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/NGramUtils.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/NGramUtils.java
new file mode 100644
index 0000000000..6ee80b86e7
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/NGramUtils.java
@@ -0,0 +1,24 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Set;
+
+import org.apache.commons.lang3.StringUtils;
+
+import eu.dnetlib.pace.common.AbstractPaceFunctions;
+
+public class NGramUtils extends AbstractPaceFunctions {
+ static private final NGramUtils NGRAMUTILS = new NGramUtils();
+
+ private static final int SIZE = 100;
+
+ private static final Set<String> stopwords = AbstractPaceFunctions
+ .loadFromClasspath("/eu/dnetlib/pace/config/stopwords_en.txt");
+
+ public static String cleanupForOrdering(String s) {
+ return (NGRAMUTILS.filterStopWords(NGRAMUTILS.normalize(s), stopwords) + StringUtils.repeat(" ", SIZE))
+ .substring(0, SIZE)
+ .replaceAll(" ", "");
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/NgramPairs.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/NgramPairs.java
new file mode 100644
index 0000000000..aa06aa408e
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/NgramPairs.java
@@ -0,0 +1,41 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.collect.Lists;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("ngrampairs")
+public class NgramPairs extends Ngrams {
+
+ public NgramPairs(Map<String, Integer> params) {
+ super(params, false);
+ }
+
+ public NgramPairs(Map<String, Integer> params, boolean sorted) {
+ super(params, sorted);
+ }
+
+ @Override
+ protected Collection<String> doApply(Config conf, String s) {
+ return ngramPairs(Lists.newArrayList(getNgrams(s, param("ngramLen"), param("max") * 2, 1, 2)), param("max"));
+ }
+
+ protected Collection<String> ngramPairs(final List<String> ngrams, int maxNgrams) {
+ Collection<String> res = Lists.newArrayList();
+ int j = 0;
+ for (int i = 0; i < ngrams.size() && res.size() < maxNgrams; i++) {
+ if (++j >= ngrams.size()) {
+ break;
+ }
+ res.add(ngrams.get(i) + ngrams.get(j));
+ // System.out.println("-- " + concatNgrams);
+ }
+ return res;
+ }
+
+}
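
Note: ngram pairs concatenate consecutive ngrams, which tolerates small spelling variations better than whole-field keys. For example:

    NgramPairs np = new NgramPairs(Map.of("ngramLen", 3, "max", 2));
    Collection<String> keys = np.apply(null, List.of("search engine optimization"));
    // ngrams [sea, eng, opt] -> pairs ["seaeng", "engopt"]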
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/Ngrams.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/Ngrams.java
new file mode 100644
index 0000000000..96c305a16a
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/Ngrams.java
@@ -0,0 +1,52 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.*;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("ngrams")
+public class Ngrams extends AbstractClusteringFunction {
+
+ private final boolean sorted;
+
+ public Ngrams(Map<String, Integer> params) {
+ this(params, false);
+ }
+
+ public Ngrams(Map<String, Integer> params, boolean sorted) {
+ super(params);
+ this.sorted = sorted;
+ }
+
+ @Override
+ protected Collection<String> doApply(Config conf, String s) {
+ return getNgrams(s, param("ngramLen"), param("max"), param("maxPerToken"), param("minNgramLen"));
+ }
+
+ protected Collection<String> getNgrams(String s, int ngramLen, int max, int maxPerToken, int minNgramLen) {
+
+ final Collection<String> ngrams = sorted ? new TreeSet<>() : new LinkedHashSet<>();
+ final StringTokenizer st = new StringTokenizer(s);
+
+ while (st.hasMoreTokens()) {
+ final String token = st.nextToken();
+ if (!token.isEmpty()) {
+ for (int i = 0; i < maxPerToken && ngramLen + i <= token.length(); i++) {
+ String ngram = token.substring(i, Math.min(ngramLen + i, token.length())).trim();
+
+ if (ngram.length() >= minNgramLen) {
+ ngrams.add(ngram);
+
+ if (ngrams.size() >= max) {
+ return ngrams;
+ }
+ }
+ }
+ }
+ }
+ // System.out.println(ngrams + " n: " + ngrams.size());
+ return ngrams;
+ }
+
+}
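
Note: each token contributes up to maxPerToken sliding windows of length ngramLen, with a global cap of max. For example:

    Ngrams ng = new Ngrams(Map.of("ngramLen", 3, "max", 4, "maxPerToken", 2, "minNgramLen", 1));
    Collection<String> keys = ng.apply(null, List.of("model checking"));
    // -> [mod, ode, che, hec]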
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/PersonClustering.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/PersonClustering.java
new file mode 100644
index 0000000000..b4a04ce65f
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/PersonClustering.java
@@ -0,0 +1,84 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.lang3.StringUtils;
+
+import com.google.common.collect.Sets;
+
+import eu.dnetlib.pace.common.AbstractPaceFunctions;
+import eu.dnetlib.pace.config.Config;
+import eu.dnetlib.pace.model.Person;
+
+@ClusteringClass("personClustering")
+public class PersonClustering extends AbstractPaceFunctions implements ClusteringFunction {
+
+ private Map<String, Integer> params;
+
+ private static final int MAX_TOKENS = 5;
+
+ public PersonClustering(final Map<String, Integer> params) {
+ this.params = params;
+ }
+
+ @Override
+ public Collection<String> apply(final Config conf, final List<String> fields) {
+ final Set<String> hashes = Sets.newHashSet();
+
+ for (final String f : fields) {
+
+ final Person person = new Person(f, false);
+
+ if (StringUtils.isNotBlank(person.getNormalisedFirstName())
+ && StringUtils.isNotBlank(person.getNormalisedSurname())) {
+ hashes.add(firstLC(person.getNormalisedFirstName()) + person.getNormalisedSurname().toLowerCase());
+ } else {
+ for (final String token1 : tokens(f, MAX_TOKENS)) {
+ for (final String token2 : tokens(f, MAX_TOKENS)) {
+ if (!token1.equals(token2)) {
+ hashes.add(firstLC(token1) + token2);
+ }
+ }
+ }
+ }
+ }
+
+ return hashes;
+ }
+
+// @Override
+// public Collection<String> apply(final List<Field> fields) {
+// final Set hashes = Sets.newHashSet();
+//
+// for (final Field f : fields) {
+//
+// final GTAuthor gta = GTAuthor.fromOafJson(f.stringValue());
+//
+// final Author a = gta.getAuthor();
+//
+// if (StringUtils.isNotBlank(a.getFirstname()) && StringUtils.isNotBlank(a.getSecondnames())) {
+// hashes.add(firstLC(a.getFirstname()) + a.getSecondnames().toLowerCase());
+// } else {
+// for (final String token1 : tokens(f.stringValue(), MAX_TOKENS)) {
+// for (final String token2 : tokens(f.stringValue(), MAX_TOKENS)) {
+// if (!token1.equals(token2)) {
+// hashes.add(firstLC(token1) + token2);
+// }
+// }
+// }
+// }
+// }
+//
+// return hashes;
+// }
+
+ @Override
+ public Map<String, Integer> getParams() {
+ return params;
+ }
+
+}
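
Note: when Person can split the name, the single key is the first initial plus the lowercase surname; otherwise every ordered pair of the first MAX_TOKENS tokens is emitted. A sketch (assuming Person parses the comma form):

    PersonClustering pc = new PersonClustering(Map.of());
    Collection<String> keys = pc.apply(null, List.of("Fleischmann, Dominik"));
    // ~ ["dfleischmann"]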
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/PersonHash.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/PersonHash.java
new file mode 100644
index 0000000000..a3d58a9be3
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/PersonHash.java
@@ -0,0 +1,34 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.collect.Lists;
+
+import eu.dnetlib.pace.config.Config;
+import eu.dnetlib.pace.model.Person;
+
+@ClusteringClass("personHash")
+public class PersonHash extends AbstractClusteringFunction {
+
+ private boolean DEFAULT_AGGRESSIVE = false;
+
+ public PersonHash(final Map<String, Object> params) {
+ super(params);
+ }
+
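+ // the clustering key is the murmur3-128 hash of the normalised fullname (see Person.hash());
+ // the optional "aggressive" parameter also strips combining diacritical marks while parsing the name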
+ @Override
+ protected Collection<String> doApply(final Config conf, final String s) {
+ final List<String> res = Lists.newArrayList();
+
+ final boolean aggressive = (Boolean) (getParams().containsKey("aggressive") ? getParams().get("aggressive")
+ : DEFAULT_AGGRESSIVE);
+
+ res.add(new Person(s, aggressive).hash());
+
+ return res;
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/RandomClusteringFunction.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/RandomClusteringFunction.java
new file mode 100644
index 0000000000..2aab926da4
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/RandomClusteringFunction.java
@@ -0,0 +1,20 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.Map;
+
+import eu.dnetlib.pace.config.Config;
+
+public class RandomClusteringFunction extends AbstractClusteringFunction {
+
+ public RandomClusteringFunction(Map<String, Object> params) {
+ super(params);
+ }
+
+ @Override
+ protected Collection<String> doApply(final Config conf, String s) {
+ return null;
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SortedNgramPairs.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SortedNgramPairs.java
new file mode 100644
index 0000000000..b085ae26d0
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SortedNgramPairs.java
@@ -0,0 +1,31 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.*;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("sortedngrampairs")
+public class SortedNgramPairs extends NgramPairs {
+
+ public SortedNgramPairs(Map<String, Object> params) {
+ super(params, false);
+ }
+
+ @Override
+ protected Collection<String> doApply(Config conf, String s) {
+
+ final List<String> tokens = Lists.newArrayList(Splitter.on(" ").omitEmptyStrings().trimResults().split(s));
+
+ Collections.sort(tokens);
+
+ return ngramPairs(
+ Lists.newArrayList(getNgrams(Joiner.on(" ").join(tokens), param("ngramLen"), param("max") * 2, 1, 2)),
+ param("max"));
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SpaceTrimmingFieldValue.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SpaceTrimmingFieldValue.java
new file mode 100644
index 0000000000..392aecc794
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SpaceTrimmingFieldValue.java
@@ -0,0 +1,34 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.StringUtils;
+
+import com.google.common.collect.Lists;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("spacetrimmingfieldvalue")
+public class SpaceTrimmingFieldValue extends AbstractClusteringFunction {
+
+ public SpaceTrimmingFieldValue(final Map<String, Object> params) {
+ super(params);
+ }
+
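+ // the key is the lower-cased value with all whitespace removed; blank values
+ // get a random key so that records with empty fields do not cluster together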
+ @Override
+ protected Collection<String> doApply(final Config conf, final String s) {
+ final List<String> res = Lists.newArrayList();
+
+ res
+ .add(
+ StringUtils.isBlank(s) ? RandomStringUtils.random(param("randomLength"))
+ : s.toLowerCase().replaceAll("\\s+", ""));
+
+ return res;
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SuffixPrefix.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SuffixPrefix.java
new file mode 100644
index 0000000000..2a1c023a96
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/SuffixPrefix.java
@@ -0,0 +1,42 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("suffixprefix")
+public class SuffixPrefix extends AbstractClusteringFunction {
+
+ public SuffixPrefix(Map<String, Object> params) {
+ super(params);
+ }
+
+ @Override
+ protected Collection<String> doApply(Config conf, String s) {
+ return suffixPrefix(s, param("len"), param("max"));
+ }
+
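+ // joins the suffix of each word with the prefix of the following one, e.g.
+ // suffixPrefix("search engine", 3, 1) would produce "rcheng"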
+ private Collection<String> suffixPrefix(String s, int len, int max) {
+ final Set<String> bigrams = Sets.newLinkedHashSet();
+ int i = 0;
+ while (++i < s.length() && bigrams.size() < max) {
+ int j = s.indexOf(" ", i);
+
+ int offset = j + len + 1 < s.length() ? j + len + 1 : s.length();
+
+ if (j - len > 0) {
+ String bigram = s.substring(j - len, offset).replaceAll(" ", "").trim();
+ if (bigram.length() >= 4) {
+ bigrams.add(bigram);
+ }
+ }
+ }
+ return bigrams;
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/UrlClustering.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/UrlClustering.java
new file mode 100644
index 0000000000..5b267ad106
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/UrlClustering.java
@@ -0,0 +1,52 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import eu.dnetlib.pace.common.AbstractPaceFunctions;
+import eu.dnetlib.pace.config.Config;
+
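+/**
+ * Clusters records by the host part of a URL field, e.g.
+ * "https://www.example.com/article/123" yields the key "www.example.com".
+ */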
+@ClusteringClass("urlclustering")
+public class UrlClustering extends AbstractPaceFunctions implements ClusteringFunction {
+
+ protected Map<String, Object> params;
+
+ public UrlClustering(final Map<String, Object> params) {
+ this.params = params;
+ }
+
+ @Override
+ public Collection<String> apply(final Config conf, List<String> fields) {
+ try {
+ return fields
+ .stream()
+ .filter(f -> !f.isEmpty())
+ .map(this::asUrl)
+ .map(URL::getHost)
+ .collect(Collectors.toCollection(HashSet::new));
+ } catch (IllegalStateException e) {
+ return new HashSet<>();
+ }
+ }
+
+ @Override
+ public Map<String, Object> getParams() {
+ return params;
+ }
+
+ private URL asUrl(String value) {
+ try {
+ return new URL(value);
+ } catch (MalformedURLException e) {
+ // should not happen as checked by pace typing
+ throw new IllegalStateException("invalid URL: " + value);
+ }
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/WordsStatsSuffixPrefixChain.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/WordsStatsSuffixPrefixChain.java
new file mode 100644
index 0000000000..c8e02f8f03
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/WordsStatsSuffixPrefixChain.java
@@ -0,0 +1,91 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.Sets;
+
+import eu.dnetlib.pace.config.Config;
+
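+/**
+ * Builds keys by chaining suffixes and prefixes of the first words longer than 3
+ * characters, prefixed with the word count and the letter count divided by "mod",
+ * e.g. "biomedical research data" with mod=10 would produce the keys
+ * "3-2-calresata" and "3-2-biorchdat".
+ */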
+@ClusteringClass("wordsStatsSuffixPrefixChain")
+public class WordsStatsSuffixPrefixChain extends AbstractClusteringFunction {
+
+ public WordsStatsSuffixPrefixChain(Map<String, Object> params) {
+ super(params);
+ }
+
+ @Override
+ protected Collection<String> doApply(Config conf, String s) {
+ return suffixPrefixChain(s, param("mod"));
+ }
+
+ private Collection<String> suffixPrefixChain(String s, int mod) {
+
+ // create the list of words from the string (remove short words)
+ List<String> wordsList = Arrays
+ .stream(s.split(" "))
+ .filter(si -> si.length() > 3)
+ .collect(Collectors.toList());
+
+ final int words = wordsList.size();
+ final int letters = s.length();
+
+ // create the prefix: number of words + number of letters/mod
+ String prefix = words + "-" + letters / mod + "-";
+
+ return doSuffixPrefixChain(wordsList, prefix);
+
+ }
+
+ private Collection<String> doSuffixPrefixChain(List<String> wordsList, String prefix) {
+
+ Set<String> set = Sets.newLinkedHashSet();
+ switch (wordsList.size()) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ set
+ .add(
+ prefix +
+ suffix(wordsList.get(0), 3) +
+ prefix(wordsList.get(1), 3));
+
+ set
+ .add(
+ prefix +
+ prefix(wordsList.get(0), 3) +
+ suffix(wordsList.get(1), 3));
+
+ break;
+ default:
+ set
+ .add(
+ prefix +
+ suffix(wordsList.get(0), 3) +
+ prefix(wordsList.get(1), 3) +
+ suffix(wordsList.get(2), 3));
+
+ set
+ .add(
+ prefix +
+ prefix(wordsList.get(0), 3) +
+ suffix(wordsList.get(1), 3) +
+ prefix(wordsList.get(2), 3));
+ break;
+ }
+
+ return set;
+
+ }
+
+ private String suffix(String s, int len) {
+ return s.substring(s.length() - len);
+ }
+
+ private String prefix(String s, int len) {
+ return s.substring(0, len);
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/WordsSuffixPrefix.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/WordsSuffixPrefix.java
new file mode 100644
index 0000000000..e606590a53
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/clustering/WordsSuffixPrefix.java
@@ -0,0 +1,59 @@
+
+package eu.dnetlib.pace.clustering;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+
+import eu.dnetlib.pace.config.Config;
+
+@ClusteringClass("wordssuffixprefix")
+public class WordsSuffixPrefix extends AbstractClusteringFunction {
+
+ public WordsSuffixPrefix(Map<String, Object> params) {
+ super(params);
+ }
+
+ @Override
+ protected Collection<String> doApply(Config conf, String s) {
+ return suffixPrefix(s, param("len"), param("max"));
+ }
+
+ private Collection<String> suffixPrefix(String s, int len, int max) {
+
+ final int words = s.split(" ").length;
+
+ // adjust the token length according to the number of words
+ switch (words) {
+ case 1:
+ return Sets.newLinkedHashSet();
+ case 2:
+ return doSuffixPrefix(s, len + 2, max, words);
+ case 3:
+ return doSuffixPrefix(s, len + 1, max, words);
+ default:
+ return doSuffixPrefix(s, len, max, words);
+ }
+ }
+
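+ // e.g. doSuffixPrefix("university florence", 5, 1, 2) would produce "2rsityflore"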
+ private Collection<String> doSuffixPrefix(String s, int len, int max, int words) {
+ final Set<String> bigrams = Sets.newLinkedHashSet();
+ int i = 0;
+ while (++i < s.length() && bigrams.size() < max) {
+ int j = s.indexOf(" ", i);
+
+ int offset = j + len + 1 < s.length() ? j + len + 1 : s.length();
+
+ if (j - len > 0) {
+ String bigram = s.substring(j - len, offset).replaceAll(" ", "").trim();
+ if (bigram.length() >= 4) {
+ bigrams.add(words + bigram);
+ }
+ }
+ }
+ return bigrams;
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/common/AbstractPaceFunctions.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/common/AbstractPaceFunctions.java
new file mode 100644
index 0000000000..b440686ded
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/common/AbstractPaceFunctions.java
@@ -0,0 +1,357 @@
+
+package eu.dnetlib.pace.common;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.nio.charset.StandardCharsets;
+import java.text.Normalizer;
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.ibm.icu.text.Transliterator;
+
+import eu.dnetlib.pace.clustering.NGramUtils;
+
+/**
+ * Set of common functions for the framework
+ *
+ * @author claudio
+ */
+public abstract class AbstractPaceFunctions {
+
+ // city map to be used when translating the city names into codes
+ private static Map<String, String> cityMap = AbstractPaceFunctions
+ .loadMapFromClasspath("/eu/dnetlib/pace/config/city_map.csv");
+
+ // list of stopwords in different languages
+ protected static Set<String> stopwords_gr = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_gr.txt");
+ protected static Set<String> stopwords_en = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_en.txt");
+ protected static Set<String> stopwords_de = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_de.txt");
+ protected static Set<String> stopwords_es = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_es.txt");
+ protected static Set<String> stopwords_fr = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_fr.txt");
+ protected static Set<String> stopwords_it = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_it.txt");
+ protected static Set<String> stopwords_pt = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_pt.txt");
+
+ // transliterator
+ protected static Transliterator transliterator = Transliterator.getInstance("Any-Eng");
+
+ // blacklist of ngrams: to avoid generic keys
+ protected static Set<String> ngramBlacklist = loadFromClasspath("/eu/dnetlib/pace/config/ngram_blacklist.txt");
+
+ // html regex for normalization
+ public static final Pattern HTML_REGEX = Pattern.compile("<[^>]*>");
+
+ private static final String alpha = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ";
+ private static final String aliases_from = "⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾ⁿ₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎àáâäæãåāèéêëēėęəîïíīįìôöòóœøōõûüùúūßśšłžźżçćčñń";
+ private static final String aliases_to = "0123456789+-=()n0123456789+-=()aaaaaaaaeeeeeeeeiiiiiioooooooouuuuussslzzzcccnn";
+
+ // doi prefix for normalization
+ public static final Pattern DOI_PREFIX = Pattern.compile("(https?:\\/\\/dx\\.doi\\.org\\/)|(doi:)");
+
+ private static Pattern numberPattern = Pattern.compile("-?\\d+(\\.\\d+)?");
+
+ private static Pattern hexUnicodePattern = Pattern.compile("\\\\u(\\p{XDigit}{4})");
+
+ protected String concat(final List<String> l) {
+ return Joiner.on(" ").skipNulls().join(l);
+ }
+
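+ // normalisation pipeline: strip html tags, lowercase and resolve \uXXXX escapes,
+ // NFD-decompose, replace xml entities, isolate digit runs, transliterate,
+ // map aliased characters, drop non-ASCII, turn punctuation and newlines into
+ // spaces, then compact whitespace and trim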
+ protected String cleanup(final String s) {
+ final String s1 = HTML_REGEX.matcher(s).replaceAll("");
+ final String s2 = unicodeNormalization(s1.toLowerCase());
+ final String s3 = nfd(s2);
+ final String s4 = fixXML(s3);
+ final String s5 = s4.replaceAll("([0-9]+)", " $1 ");
+ final String s6 = transliterate(s5);
+ final String s7 = fixAliases(s6);
+ final String s8 = s7.replaceAll("[^\\p{ASCII}]", "");
+ final String s9 = s8.replaceAll("[\\p{Punct}]", " ");
+ final String s10 = s9.replaceAll("\\n", " ");
+ final String s11 = s10.replaceAll("(?m)\\s+", " ");
+ final String s12 = s11.trim();
+ return s12;
+ }
+
+ protected String fixXML(final String a) {
+
+ return a
+ .replaceAll("–", " ")
+ .replaceAll("&", " ")
+ .replaceAll(""", " ")
+ .replaceAll("−", " ");
+ }
+
+ protected boolean checkNumbers(final String a, final String b) {
+ final String numbersA = getNumbers(a);
+ final String numbersB = getNumbers(b);
+ final String romansA = getRomans(a);
+ final String romansB = getRomans(b);
+ return !numbersA.equals(numbersB) || !romansA.equals(romansB);
+ }
+
+ protected String getRomans(final String s) {
+ final StringBuilder sb = new StringBuilder();
+ for (final String t : s.split(" ")) {
+ sb.append(isRoman(t) ? t : "");
+ }
+ return sb.toString();
+ }
+
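+ // a token is a roman numeral when the whole-token roman-numeral regex rewrites
+ // it entirely to the sentinel value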
+ protected boolean isRoman(final String s) {
+ return s
+ .replaceAll("^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$", "qwertyuiop")
+ .equals("qwertyuiop");
+ }
+
+ protected String getNumbers(final String s) {
+ final StringBuilder sb = new StringBuilder();
+ for (final String t : s.split(" ")) {
+ sb.append(isNumber(t) ? t : "");
+ }
+ return sb.toString();
+ }
+
+ public boolean isNumber(String strNum) {
+ if (strNum == null) {
+ return false;
+ }
+ return numberPattern.matcher(strNum).matches();
+ }
+
+ protected static String fixAliases(final String s) {
+ final StringBuilder sb = new StringBuilder();
+
+ s.chars().forEach(ch -> {
+ final int i = StringUtils.indexOf(aliases_from, ch);
+ sb.append(i >= 0 ? aliases_to.charAt(i) : (char) ch);
+ });
+
+ return sb.toString();
+ }
+
+ protected static String transliterate(final String s) {
+ try {
+ return transliterator.transliterate(s);
+ } catch (Exception e) {
+ return s;
+ }
+ }
+
+ protected String removeSymbols(final String s) {
+ final StringBuilder sb = new StringBuilder();
+
+ s.chars().forEach(ch -> {
+ sb.append(StringUtils.contains(alpha, ch) ? (char) ch : ' ');
+ });
+
+ return sb.toString().replaceAll("\\s+", " ");
+ }
+
+ protected boolean notNull(final String s) {
+ return s != null;
+ }
+
+ protected String normalize(final String s) {
+ return fixAliases(transliterate(nfd(unicodeNormalization(s))))
+ .toLowerCase()
+ // do not compact the regexes into a single expression: it would cause a
+ // StackOverflowError on large input strings
+ .replaceAll("[^ \\w]+", "")
+ .replaceAll("(\\p{InCombiningDiacriticalMarks})+", "")
+ .replaceAll("(\\p{Punct})+", " ")
+ .replaceAll("(\\d)+", " ")
+ .replaceAll("(\\n)+", " ")
+ .trim();
+ }
+
+ public String nfd(final String s) {
+ return Normalizer.normalize(s, Normalizer.Form.NFD);
+ }
+
+ public String utf8(final String s) {
+ byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
+ return new String(bytes, StandardCharsets.UTF_8);
+ }
+
+ public String unicodeNormalization(final String s) {
+
+ Matcher m = hexUnicodePattern.matcher(s);
+ StringBuffer buf = new StringBuffer(s.length());
+ while (m.find()) {
+ String ch = String.valueOf((char) Integer.parseInt(m.group(1), 16));
+ m.appendReplacement(buf, Matcher.quoteReplacement(ch));
+ }
+ m.appendTail(buf);
+ return buf.toString();
+ }
+
+ protected String filterStopWords(final String s, final Set<String> stopwords) {
+ final StringTokenizer st = new StringTokenizer(s);
+ final StringBuilder sb = new StringBuilder();
+ while (st.hasMoreTokens()) {
+ final String token = st.nextToken();
+ if (!stopwords.contains(token)) {
+ sb.append(token);
+ sb.append(" ");
+ }
+ }
+ return sb.toString().trim();
+ }
+
+ public String filterAllStopWords(String s) {
+
+ s = filterStopWords(s, stopwords_en);
+ s = filterStopWords(s, stopwords_de);
+ s = filterStopWords(s, stopwords_it);
+ s = filterStopWords(s, stopwords_fr);
+ s = filterStopWords(s, stopwords_pt);
+ s = filterStopWords(s, stopwords_es);
+ s = filterStopWords(s, stopwords_gr);
+
+ return s;
+ }
+
+ protected Collection<String> filterBlacklisted(final Collection<String> set, final Set<String> ngramBlacklist) {
+ final Set<String> newset = Sets.newLinkedHashSet();
+ for (final String s : set) {
+ if (!ngramBlacklist.contains(s)) {
+ newset.add(s);
+ }
+ }
+ return newset;
+ }
+
+ public static Set<String> loadFromClasspath(final String classpath) {
+
+ Transliterator transliterator = Transliterator.getInstance("Any-Eng");
+
+ final Set<String> h = Sets.newHashSet();
+ try {
+ for (final String s : IOUtils
+ .readLines(NGramUtils.class.getResourceAsStream(classpath), StandardCharsets.UTF_8)) {
+ h.add(fixAliases(transliterator.transliterate(s))); // transliteration of the stopwords
+ }
+ } catch (final Throwable e) {
+ return Sets.newHashSet();
+ }
+ return h;
+ }
+
+ public static Map<String, String> loadMapFromClasspath(final String classpath) {
+
+ Transliterator transliterator = Transliterator.getInstance("Any-Eng");
+
+ final Map<String, String> m = new HashMap<>();
+ try {
+ for (final String s : IOUtils
+ .readLines(AbstractPaceFunctions.class.getResourceAsStream(classpath), StandardCharsets.UTF_8)) {
+ // each line has the form: code;word1;word2;word3
+ String[] line = s.split(";");
+ String value = line[0];
+ for (int i = 1; i < line.length; i++) {
+ m.put(fixAliases(transliterator.transliterate(line[i].toLowerCase())), value);
+ }
+ }
+ } catch (final Throwable e) {
+ return new HashMap<>();
+ }
+ return m;
+ }
+
+ public String removeKeywords(String s, Set keywords) {
+
+ s = " " + s + " ";
+ for (String k : keywords) {
+ s = s.replaceAll(k.toLowerCase(), "");
+ }
+
+ return s.trim();
+ }
+
+ public double commonElementsPercentage(Set<String> s1, Set<String> s2) {
+
+ double longer = Math.max(s1.size(), s2.size());
+ return (double) s1.stream().filter(s2::contains).count() / longer;
+ }
+
+ // convert the set of keywords to codes
+ public Set<String> toCodes(Set<String> keywords, Map<String, String> translationMap) {
+ return keywords.stream().map(s -> translationMap.get(s)).collect(Collectors.toSet());
+ }
+
+ public Set<String> keywordsToCodes(Set<String> keywords, Map<String, String> translationMap) {
+ return toCodes(keywords, translationMap);
+ }
+
+ public Set<String> citiesToCodes(Set<String> keywords) {
+ return toCodes(keywords, cityMap);
+ }
+
+ protected String firstLC(final String s) {
+ return StringUtils.substring(s, 0, 1).toLowerCase();
+ }
+
+ protected Iterable<String> tokens(final String s, final int maxTokens) {
+ return Iterables.limit(Splitter.on(" ").omitEmptyStrings().trimResults().split(s), maxTokens);
+ }
+
+ public String normalizePid(String pid) {
+ return DOI_PREFIX.matcher(pid.toLowerCase()).replaceAll("");
+ }
+
+ // extract the keywords contained in the input string, scanning it with a sliding window of decreasing size
+ public Set<String> getKeywords(String s1, Map<String, String> translationMap, int windowSize) {
+
+ String s = s1;
+
+ List tokens = Arrays.asList(s.toLowerCase().split(" "));
+
+ Set<String> codes = new HashSet<>();
+
+ if (tokens.size() < windowSize)
+ windowSize = tokens.size();
+
+ int length = windowSize;
+
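+ // try the longest phrases first; a matched phrase is removed from the string so
+ // that shorter windows cannot match again inside it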
+ while (length != 0) {
+
+ for (int i = 0; i <= tokens.size() - length; i++) {
+ String candidate = concat(tokens.subList(i, i + length));
+ if (translationMap.containsKey(candidate)) {
+ codes.add(candidate);
+ s = s.replace(candidate, "").trim();
+ }
+ }
+
+ tokens = Arrays.asList(s.split(" "));
+ length -= 1;
+ }
+
+ return codes;
+ }
+
+ public Set<String> getCities(String s1, int windowSize) {
+ return getKeywords(s1, cityMap, windowSize);
+ }
+
+ public static String readFromClasspath(final String filename, final Class<?> clazz) {
+ final StringWriter sw = new StringWriter();
+ try {
+ IOUtils.copy(clazz.getResourceAsStream(filename), sw, StandardCharsets.UTF_8);
+ return sw.toString();
+ } catch (final IOException e) {
+ throw new RuntimeException("cannot load resource from classpath: " + filename);
+ }
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/Config.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/Config.java
new file mode 100644
index 0000000000..4d823d1291
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/Config.java
@@ -0,0 +1,53 @@
+
+package eu.dnetlib.pace.config;
+
+import java.util.List;
+import java.util.Map;
+import java.util.function.Predicate;
+
+import eu.dnetlib.pace.model.ClusteringDef;
+import eu.dnetlib.pace.model.FieldDef;
+import eu.dnetlib.pace.tree.support.TreeNodeDef;
+
+/**
+ * Interface for PACE configuration bean.
+ *
+ * @author claudio
+ */
+public interface Config {
+
+ /**
+ * Field configuration definitions.
+ *
+ * @return the list of definitions
+ */
+ public List<FieldDef> model();
+
+ /**
+ * Decision Tree definition
+ *
+ * @return the map representing the decision tree
+ */
+ public Map<String, TreeNodeDef> decisionTree();
+
+ /**
+ * Clusterings.
+ *
+ * @return the list
+ */
+ public List<ClusteringDef> clusterings();
+
+ /**
+ * Blacklists.
+ *
+ * @return the map
+ */
+ public Map<String, Predicate<String>> blacklists();
+
+ /**
+ * Translation map.
+ *
+ * @return the map
+ */
+ public Map<String, String> translationMap();
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/DedupConfig.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/DedupConfig.java
new file mode 100644
index 0000000000..ac0ef08e46
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/DedupConfig.java
@@ -0,0 +1,178 @@
+
+package eu.dnetlib.pace.config;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.nio.charset.StandardCharsets;
+import java.util.AbstractMap;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.function.Predicate;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
+import java.util.stream.Collectors;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.Maps;
+
+import eu.dnetlib.pace.model.ClusteringDef;
+import eu.dnetlib.pace.model.FieldDef;
+import eu.dnetlib.pace.tree.support.TreeNodeDef;
+import eu.dnetlib.pace.util.PaceException;
+
+public class DedupConfig implements Config, Serializable {
+ private static final String CONFIG_TEMPLATE = "dedupConfig.st";
+
+ private PaceConfig pace;
+
+ private WfConfig wf;
+
+ @JsonIgnore
+ private Map<String, Predicate<String>> blacklists;
+
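+ // default values injected into the dedupConfig.st template by loadDefault();
+ // each entry can be overridden through the params map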
+ private static Map<String, String> defaults = Maps.newHashMap();
+
+ static {
+ defaults.put("dedupRun", "001");
+ defaults.put("entityType", "result");
+ defaults.put("subEntityType", "resulttype");
+ defaults.put("subEntityValue", "publication");
+ defaults.put("orderField", "title");
+ defaults.put("queueMaxSize", "2000");
+ defaults.put("groupMaxSize", "10");
+ defaults.put("slidingWindowSize", "200");
+ defaults.put("rootBuilder", "result");
+ defaults.put("includeChildren", "true");
+ defaults.put("maxIterations", "20");
+ defaults.put("idPath", "$.id");
+ }
+
+ public DedupConfig() {
+ }
+
+ public static DedupConfig load(final String json) {
+
+ final DedupConfig config;
+ try {
+ config = new ObjectMapper().readValue(json, DedupConfig.class);
+ config.getPace().initModel();
+ config.getPace().initTranslationMap();
+
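+ // compile each blacklist into a serializable predicate: a field value is
+ // blacklisted when it matches at least one of the configured regexes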
+ config.blacklists = config
+ .getPace()
+ .getBlacklists()
+ .entrySet()
+ .stream()
+ .map(
+ e -> new AbstractMap.SimpleEntry<String, List<Pattern>>(e.getKey(),
+ e
+ .getValue()
+ .stream()
+ .filter(s -> !StringUtils.isBlank(s))
+ .map(Pattern::compile)
+ .collect(Collectors.toList())))
+ .collect(
+ Collectors
+ .toMap(
+ e -> e.getKey(),
+ e -> (Predicate<String> & Serializable) s -> e
+ .getValue()
+ .stream()
+ .anyMatch(p -> p.matcher(s).matches())));
+
+ return config;
+ } catch (IOException | PatternSyntaxException e) {
+ throw new PaceException("Error in parsing configuration json", e);
+ }
+
+ }
+
+ public static DedupConfig loadDefault() throws IOException {
+ return loadDefault(new HashMap<>());
+ }
+
+ public static DedupConfig loadDefault(final Map<String, String> params) throws IOException {
+
+ final StringTemplate template = new StringTemplate(new DedupConfig().readFromClasspath(CONFIG_TEMPLATE));
+
+ for (final Entry<String, String> e : defaults.entrySet()) {
+ template.setAttribute(e.getKey(), e.getValue());
+ }
+ for (final Entry<String, String> e : params.entrySet()) {
+ if (template.getAttribute(e.getKey()) != null) {
+ template.getAttributes().computeIfPresent(e.getKey(), (o, o2) -> e.getValue());
+ } else {
+ template.setAttribute(e.getKey(), e.getValue());
+ }
+ }
+
+ final String json = template.toString();
+ return load(json);
+ }
+
+ private String readFromClasspath(final String resource) throws IOException {
+ return IOUtils.toString(getClass().getResource(resource), StandardCharsets.UTF_8);
+ }
+
+ public PaceConfig getPace() {
+ return pace;
+ }
+
+ public void setPace(final PaceConfig pace) {
+ this.pace = pace;
+ }
+
+ public WfConfig getWf() {
+ return wf;
+ }
+
+ public void setWf(final WfConfig wf) {
+ this.wf = wf;
+ }
+
+ @Override
+ public String toString() {
+ try {
+ return new ObjectMapper().writeValueAsString(this);
+ } catch (IOException e) {
+ throw new PaceException("unable to serialise configuration", e);
+ }
+ }
+
+ @Override
+ public Map<String, TreeNodeDef> decisionTree() {
+ return getPace().getDecisionTree();
+ }
+
+ @Override
+ public List<FieldDef> model() {
+ return getPace().getModel();
+ }
+
+ @Override
+ public List<ClusteringDef> clusterings() {
+ return getPace().getClustering();
+ }
+
+ @Override
+ public Map<String, Predicate<String>> blacklists() {
+ return blacklists;
+ }
+
+ @Override
+ public Map<String, String> translationMap() {
+ return getPace().translationMap();
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/PaceConfig.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/PaceConfig.java
new file mode 100644
index 0000000000..f1bc49f4af
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/PaceConfig.java
@@ -0,0 +1,108 @@
+
+package eu.dnetlib.pace.config;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.collect.Maps;
+import com.ibm.icu.text.Transliterator;
+
+import eu.dnetlib.pace.common.AbstractPaceFunctions;
+import eu.dnetlib.pace.model.ClusteringDef;
+import eu.dnetlib.pace.model.FieldDef;
+import eu.dnetlib.pace.tree.support.TreeNodeDef;
+import eu.dnetlib.pace.util.PaceResolver;
+
+public class PaceConfig extends AbstractPaceFunctions implements Serializable {
+
+ private List<FieldDef> model;
+
+ private List<ClusteringDef> clustering;
+ private Map<String, TreeNodeDef> decisionTree;
+
+ private Map<String, List<String>> blacklists;
+ private Map<String, List<String>> synonyms;
+
+ @JsonIgnore
+ private Map<String, String> translationMap;
+
+ public Map<String, FieldDef> getModelMap() {
+ return modelMap;
+ }
+
+ @JsonIgnore
+ private Map<String, FieldDef> modelMap;
+
+ @JsonIgnore
+ public static PaceResolver resolver = new PaceResolver();
+
+ public PaceConfig() {
+ }
+
+ public void initModel() {
+ modelMap = Maps.newHashMap();
+ for (FieldDef fd : getModel()) {
+ modelMap.put(fd.getName(), fd);
+ }
+ }
+
+ public void initTranslationMap() {
+ translationMap = Maps.newHashMap();
+
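+ // build the reverse lookup: every transliterated, lower-cased synonym points to
+ // its canonical key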
+ Transliterator transliterator = Transliterator.getInstance("Any-Eng");
+ for (String key : synonyms.keySet()) {
+ for (String term : synonyms.get(key)) {
+ translationMap
+ .put(
+ fixAliases(transliterator.transliterate(term.toLowerCase())),
+ key);
+ }
+ }
+ }
+
+ public Map<String, String> translationMap() {
+ return translationMap;
+ }
+
+ public List<FieldDef> getModel() {
+ return model;
+ }
+
+ public void setModel(final List<FieldDef> model) {
+ this.model = model;
+ }
+
+ public List<ClusteringDef> getClustering() {
+ return clustering;
+ }
+
+ public void setClustering(final List<ClusteringDef> clustering) {
+ this.clustering = clustering;
+ }
+
+ public Map<String, TreeNodeDef> getDecisionTree() {
+ return decisionTree;
+ }
+
+ public void setDecisionTree(Map<String, TreeNodeDef> decisionTree) {
+ this.decisionTree = decisionTree;
+ }
+
+ public Map<String, List<String>> getBlacklists() {
+ return blacklists;
+ }
+
+ public void setBlacklists(final Map<String, List<String>> blacklists) {
+ this.blacklists = blacklists;
+ }
+
+ public Map<String, List<String>> getSynonyms() {
+ return synonyms;
+ }
+
+ public void setSynonyms(Map<String, List<String>> synonyms) {
+ this.synonyms = synonyms;
+ }
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/Type.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/Type.java
new file mode 100644
index 0000000000..9f3323edc7
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/Type.java
@@ -0,0 +1,6 @@
+
+package eu.dnetlib.pace.config;
+
+public enum Type {
+ String, Int, List, JSON, URL, StringConcat, DoubleArray
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/WfConfig.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/WfConfig.java
new file mode 100644
index 0000000000..8dea042328
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/config/WfConfig.java
@@ -0,0 +1,294 @@
+
+package eu.dnetlib.pace.config;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.lang3.StringUtils;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+import eu.dnetlib.pace.util.PaceException;
+
+public class WfConfig implements Serializable {
+
+ /**
+ * Entity type.
+ */
+ private String entityType = "";
+
+ /**
+ * Sub-Entity type refers to one of the fields declared in the model. See eu.dnetlib.pace.config.PaceConfig.modelMap
+ */
+ private String subEntityType = "";
+
+ /**
+ * Sub-Entity value declares a value for subTypes to be considered.
+ */
+ private String subEntityValue = "";
+
+ /**
+ * Field name used to sort the values in the reducer phase.
+ */
+ private String orderField = "";
+
+ /**
+ * Column Families involved in the relations redirection.
+ */
+ private List<String> rootBuilder = Lists.newArrayList();
+
+ /**
+ * Set of datasource namespace prefixes that won't be deduplicated.
+ */
+ private Set<String> skipList = Sets.newHashSet();
+
+ /**
+ * Subprefix used to build the root id, allows multiple dedup runs.
+ */
+ private String dedupRun = "";
+
+ /**
+ * Similarity threshold.
+ */
+ private double threshold = 0;
+
+ /** The queue max size. */
+ private int queueMaxSize = 2000;
+
+ /** The group max size. */
+ private int groupMaxSize;
+
+ /** The sliding window size. */
+ private int slidingWindowSize;
+
+ /** The configuration id. */
+ private String configurationId;
+
+ /** The include children. */
+ private boolean includeChildren;
+
+ /** Default maximum number of allowed children. */
+ private final static int MAX_CHILDREN = 10;
+
+ /** Maximum number of allowed children. */
+ private int maxChildren = MAX_CHILDREN;
+
+ /** Default maximum number of iterations. */
+ private final static int MAX_ITERATIONS = 20;
+
+ /** Maximum number of iterations */
+ private int maxIterations = MAX_ITERATIONS;
+
+ /** The JsonPath expression used to retrieve the entity identifier */
+ private String idPath = "$.id";
+
+ public WfConfig() {
+ }
+
+ /**
+ * Instantiates a new dedup config.
+ *
+ * @param entityType
+ * the entity type
+ * @param orderField
+ * the order field
+ * @param rootBuilder
+ * the root builder families
+ * @param dedupRun
+ * the dedup run
+ * @param skipList
+ * the skip list
+ * @param queueMaxSize
+ * the queue max size
+ * @param groupMaxSize
+ * the group max size
+ * @param slidingWindowSize
+ * the sliding window size
+ * @param includeChildren
+ * allows the children to be included in the representative records or not.
+ * @param maxIterations
+ * the maximum number of iterations
+ * @param idPath
+ * the path for the id of the entity
+ */
+ public WfConfig(final String entityType, final String orderField, final List<String> rootBuilder,
+ final String dedupRun,
+ final Set<String> skipList, final int queueMaxSize, final int groupMaxSize, final int slidingWindowSize,
+ final boolean includeChildren, final int maxIterations, final String idPath) {
+ super();
+ this.entityType = entityType;
+ this.orderField = orderField;
+ this.rootBuilder = rootBuilder;
+ this.dedupRun = cleanupStringNumber(dedupRun);
+ this.skipList = skipList;
+ this.queueMaxSize = queueMaxSize;
+ this.groupMaxSize = groupMaxSize;
+ this.slidingWindowSize = slidingWindowSize;
+ this.includeChildren = includeChildren;
+ this.maxIterations = maxIterations;
+ this.idPath = idPath;
+ }
+
+ /**
+ * Cleanup string number.
+ *
+ * @param s the string to clean up
+ * @return the string
+ */
+ private String cleanupStringNumber(final String s) {
+ return s.contains("'") ? s.replaceAll("'", "") : s;
+ }
+
+ public boolean hasSubType() {
+ return StringUtils.isNotBlank(getSubEntityType()) && StringUtils.isNotBlank(getSubEntityValue());
+ }
+
+ public String getEntityType() {
+ return entityType;
+ }
+
+ public void setEntityType(final String entityType) {
+ this.entityType = entityType;
+ }
+
+ public String getSubEntityType() {
+ return subEntityType;
+ }
+
+ public void setSubEntityType(final String subEntityType) {
+ this.subEntityType = subEntityType;
+ }
+
+ public String getSubEntityValue() {
+ return subEntityValue;
+ }
+
+ public void setSubEntityValue(final String subEntityValue) {
+ this.subEntityValue = subEntityValue;
+ }
+
+ public String getOrderField() {
+ return orderField;
+ }
+
+ public void setOrderField(final String orderField) {
+ this.orderField = orderField;
+ }
+
+ public List<String> getRootBuilder() {
+ return rootBuilder;
+ }
+
+ public void setRootBuilder(final List<String> rootBuilder) {
+ this.rootBuilder = rootBuilder;
+ }
+
+ public Set<String> getSkipList() {
+ return skipList != null ? skipList : new HashSet<>();
+ }
+
+ public void setSkipList(final Set<String> skipList) {
+ this.skipList = skipList;
+ }
+
+ public String getDedupRun() {
+ return dedupRun;
+ }
+
+ public void setDedupRun(final String dedupRun) {
+ this.dedupRun = dedupRun;
+ }
+
+ public double getThreshold() {
+ return threshold;
+ }
+
+ public void setThreshold(final double threshold) {
+ this.threshold = threshold;
+ }
+
+ public int getQueueMaxSize() {
+ return queueMaxSize;
+ }
+
+ public void setQueueMaxSize(final int queueMaxSize) {
+ this.queueMaxSize = queueMaxSize;
+ }
+
+ public int getGroupMaxSize() {
+ return groupMaxSize;
+ }
+
+ public void setGroupMaxSize(final int groupMaxSize) {
+ this.groupMaxSize = groupMaxSize;
+ }
+
+ public int getSlidingWindowSize() {
+ return slidingWindowSize;
+ }
+
+ public void setSlidingWindowSize(final int slidingWindowSize) {
+ this.slidingWindowSize = slidingWindowSize;
+ }
+
+ public String getConfigurationId() {
+ return configurationId;
+ }
+
+ public void setConfigurationId(final String configurationId) {
+ this.configurationId = configurationId;
+ }
+
+ public boolean isIncludeChildren() {
+ return includeChildren;
+ }
+
+ public void setIncludeChildren(final boolean includeChildren) {
+ this.includeChildren = includeChildren;
+ }
+
+ public int getMaxChildren() {
+ return maxChildren;
+ }
+
+ public void setMaxChildren(final int maxChildren) {
+ this.maxChildren = maxChildren;
+ }
+
+ public int getMaxIterations() {
+ return maxIterations;
+ }
+
+ public void setMaxIterations(int maxIterations) {
+ this.maxIterations = maxIterations;
+ }
+
+ public String getIdPath() {
+ return idPath;
+ }
+
+ public void setIdPath(String idPath) {
+ this.idPath = idPath;
+ }
+
+ /*
+ * (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ try {
+ return new ObjectMapper().writeValueAsString(this);
+ } catch (IOException e) {
+ throw new PaceException("unable to serialise " + this.getClass().getName(), e);
+ }
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/ClusteringDef.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/ClusteringDef.java
new file mode 100644
index 0000000000..d9ad81d42b
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/ClusteringDef.java
@@ -0,0 +1,63 @@
+
+package eu.dnetlib.pace.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import eu.dnetlib.pace.clustering.ClusteringFunction;
+import eu.dnetlib.pace.config.PaceConfig;
+import eu.dnetlib.pace.util.PaceException;
+
+public class ClusteringDef implements Serializable {
+
+ private String name;
+
+ private List<String> fields;
+
+ private Map<String, Object> params;
+
+ public ClusteringDef() {
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(final String name) {
+ this.name = name;
+ }
+
+ public ClusteringFunction clusteringFunction() {
+ return PaceConfig.resolver.getClusteringFunction(getName(), params);
+ }
+
+ public List<String> getFields() {
+ return fields;
+ }
+
+ public void setFields(final List<String> fields) {
+ this.fields = fields;
+ }
+
+ public Map<String, Object> getParams() {
+ return params;
+ }
+
+ public void setParams(final Map<String, Object> params) {
+ this.params = params;
+ }
+
+ @Override
+ public String toString() {
+ try {
+ return new ObjectMapper().writeValueAsString(this);
+ } catch (IOException e) {
+ throw new PaceException("unable to serialise " + this.getClass().getName(), e);
+ }
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/FieldDef.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/FieldDef.java
new file mode 100644
index 0000000000..f34545e6df
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/FieldDef.java
@@ -0,0 +1,103 @@
+
+package eu.dnetlib.pace.model;
+
+import java.io.Serializable;
+import java.util.List;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+
+import eu.dnetlib.pace.config.Type;
+
+/**
+ * The schema is composed of field definitions (FieldDef). Each field has a type, a name, and an associated compare algorithm.
+ */
+public class FieldDef implements Serializable {
+
+ public final static String PATH_SEPARATOR = "/";
+
+ private String name;
+
+ private String path;
+
+ private Type type;
+
+ private boolean overrideMatch;
+
+ /**
+ * Sets maximum size for the repeatable fields in the model. -1 for unbounded size.
+ */
+ private int size = -1;
+
+ /**
+ * Sets maximum length for field values in the model. -1 for unbounded length.
+ */
+ private int length = -1;
+
+ public FieldDef() {
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ public List<String> getPathList() {
+ return Lists.newArrayList(Splitter.on(PATH_SEPARATOR).split(getPath()));
+ }
+
+ public Type getType() {
+ return type;
+ }
+
+ public void setType(final Type type) {
+ this.type = type;
+ }
+
+ public boolean isOverrideMatch() {
+ return overrideMatch;
+ }
+
+ public void setOverrideMatch(final boolean overrideMatch) {
+ this.overrideMatch = overrideMatch;
+ }
+
+ public int getSize() {
+ return size;
+ }
+
+ public void setSize(int size) {
+ this.size = size;
+ }
+
+ public int getLength() {
+ return length;
+ }
+
+ public void setLength(int length) {
+ this.length = length;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public void setPath(String path) {
+ this.path = path;
+ }
+
+ @Override
+ public String toString() {
+ try {
+ return new ObjectMapper().writeValueAsString(this);
+ } catch (JsonProcessingException e) {
+ return null;
+ }
+ }
+
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/Person.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/Person.java
new file mode 100644
index 0000000000..96120cf4da
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/Person.java
@@ -0,0 +1,156 @@
+
+package eu.dnetlib.pace.model;
+
+import java.nio.charset.Charset;
+import java.text.Normalizer;
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.hash.Hashing;
+
+import eu.dnetlib.pace.common.AbstractPaceFunctions;
+import eu.dnetlib.pace.util.Capitalise;
+import eu.dnetlib.pace.util.DotAbbreviations;
+
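+/**
+ * Parses a person name into name, surname and fullname token lists, supporting
+ * the "Surname, Name" form as well as heuristics based on initials and
+ * upper-case surnames; e.g. new Person("Artini, Michele", false).getNormalisedFullname()
+ * would return "Artini, Michele".
+ */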
+public class Person {
+
+ private static final String UTF8 = "UTF-8";
+ private List<String> name = Lists.newArrayList();
+ private List<String> surname = Lists.newArrayList();
+ private List<String> fullname = Lists.newArrayList();
+ private final String original;
+
+ private static Set<String> particles = null;
+
+ public Person(String s, final boolean aggressive) {
+ original = s;
+ s = Normalizer.normalize(s, Normalizer.Form.NFD);
+ s = s.replaceAll("\\(.+\\)", "");
+ s = s.replaceAll("\\[.+\\]", "");
+ s = s.replaceAll("\\{.+\\}", "");
+ s = s.replaceAll("\\s+-\\s+", "-");
+ s = s.replaceAll("[\\p{Punct}&&[^,-]]", " ");
+ s = s.replaceAll("\\d", " ");
+ s = s.replaceAll("\\n", " ");
+ s = s.replaceAll("\\.", " ");
+ s = s.replaceAll("\\s+", " ");
+
+ if (aggressive) {
+ s = s.replaceAll("[\\p{InCombiningDiacriticalMarks}&&[^,-]]", "");
+ // s = s.replaceAll("[\\W&&[^,-]]", "");
+ }
+
+ if (s.contains(",")) { // if the name contains a comma it is easy derivable the name and the surname
+ final String[] arr = s.split(",");
+ if (arr.length == 1) {
+ fullname = splitTerms(arr[0]);
+ } else if (arr.length > 1) {
+ surname = splitTerms(arr[0]);
+ name = splitTerms(arr[1]);
+ fullname.addAll(surname);
+ fullname.addAll(name);
+ }
+ } else {
+ fullname = splitTerms(s);
+
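+ // no comma available: use the position of the last single-letter token (an
+ // initial) or the presence of all-uppercase tokens to locate the surname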
+ int lastInitialPosition = fullname.size();
+ boolean hasSurnameInUpperCase = false;
+
+ for (int i = 0; i < fullname.size(); i++) {
+ final String term = fullname.get(i);
+ if (term.length() == 1) {
+ lastInitialPosition = i;
+ } else if (term.equals(term.toUpperCase())) {
+ hasSurnameInUpperCase = true;
+ }
+ }
+
+ if (lastInitialPosition < (fullname.size() - 1)) { // Case: Michele G. Artini
+ name = fullname.subList(0, lastInitialPosition + 1);
+ surname = fullname.subList(lastInitialPosition + 1, fullname.size());
+ } else if (hasSurnameInUpperCase) { // Case: Michele ARTINI
+ for (final String term : fullname) {
+ if ((term.length() > 1) && term.equals(term.toUpperCase())) {
+ surname.add(term);
+ } else {
+ name.add(term);
+ }
+ }
+ }
+ }
+ }
+
+ private List<String> splitTerms(final String s) {
+ if (particles == null) {
+ particles = AbstractPaceFunctions.loadFromClasspath("/eu/dnetlib/pace/config/name_particles.txt");
+ }
+
+ final List<String> list = Lists.newArrayList();
+ for (final String part : Splitter.on(" ").omitEmptyStrings().split(s)) {
+ if (!particles.contains(part.toLowerCase())) {
+ list.add(part);
+ }
+ }
+ return list;
+ }
+
+ public List<String> getName() {
+ return name;
+ }
+
+ public String getNameString() {
+ return Joiner.on(" ").join(getName());
+ }
+
+ public List<String> getSurname() {
+ return surname;
+ }
+
+ public List<String> getFullname() {
+ return fullname;
+ }
+
+ public String getOriginal() {
+ return original;
+ }
+
+ public String hash() {
+ return Hashing.murmur3_128().hashString(getNormalisedFullname(), Charset.forName(UTF8)).toString();
+ }
+
+ public String getNormalisedFirstName() {
+ return Joiner.on(" ").join(getCapitalFirstnames());
+ }
+
+ public String getNormalisedSurname() {
+ return Joiner.on(" ").join(getCapitalSurname());
+ }
+
+ public String getSurnameString() {
+ return Joiner.on(" ").join(getSurname());
+ }
+
+ public String getNormalisedFullname() {
+ return isAccurate() ? getNormalisedSurname() + ", " + getNormalisedFirstName() : Joiner.on(" ").join(fullname);
+ }
+
+ public List<String> getCapitalFirstnames() {
+ return Lists.newArrayList(Iterables.transform(getNameWithAbbreviations(), new Capitalise()));
+ }
+
+ public List<String> getCapitalSurname() {
+ return Lists.newArrayList(Iterables.transform(surname, new Capitalise()));
+ }
+
+ public List<String> getNameWithAbbreviations() {
+ return Lists.newArrayList(Iterables.transform(name, new DotAbbreviations()));
+ }
+
+ public boolean isAccurate() {
+ return ((name != null) && (surname != null) && !name.isEmpty() && !surname.isEmpty());
+ }
+}
diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/PersonComparatorUtils.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/PersonComparatorUtils.java
new file mode 100644
index 0000000000..1f8aab4bfe
--- /dev/null
+++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/PersonComparatorUtils.java
@@ -0,0 +1,119 @@
+
+package eu.dnetlib.pace.model;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+public class PersonComparatorUtils {
+
+ private static final int MAX_FULLNAME_LENGTH = 50;
+
+ public static Set