merged with beta and fixed conflicts

Commit 2fc458dbd4
@@ -28,3 +28,4 @@ spark-warehouse
/**/.scalafmt.conf
/.java-version
/dhp-shade-package/dependency-reduced-pom.xml
/**/job.properties

@@ -10,6 +10,11 @@ public class Constants {
public static final Map<String, String> accessRightsCoarMap = Maps.newHashMap();
public static final Map<String, String> coarCodeLabelMap = Maps.newHashMap();

public static final String RAID_NS_PREFIX = "raid________";

public static final String END_DATE = "endDate";
public static final String START_DATE = "startDate";

public static final String ROR_NS_PREFIX = "ror_________";

public static final String ROR_OPENAIRE_ID = "10|openaire____::993a7ae7a863813cf95028b50708e222";

@@ -212,11 +212,11 @@ public class HttpConnector2 {
.format(
"Unexpected status code: %s errors: %s", urlConn.getResponseCode(),
MAPPER.writeValueAsString(report)));
} catch (MalformedURLException | UnknownHostException e) {
} catch (MalformedURLException e) {
log.error(e.getMessage(), e);
report.put(e.getClass().getName(), e.getMessage());
throw new CollectorException(e.getMessage(), e);
} catch (SocketTimeoutException | SocketException e) {
} catch (SocketTimeoutException | SocketException | UnknownHostException e) {
log.error(e.getMessage(), e);
report.put(e.getClass().getName(), e.getMessage());
backoffAndSleep(getClientParams().getRetryDelay() * retryNumber * 1000);

@@ -0,0 +1,82 @@

package eu.dnetlib.dhp.common.person;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.Person;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
import eu.dnetlib.dhp.utils.DHPUtils;

public class CoAuthorshipIterator implements Iterator<Relation> {
private int firstIndex;
private int secondIndex;
private boolean firstRelation;
private List<String> authors;
private static final String PERSON_PREFIX = ModelSupport.getIdPrefix(Person.class) + "|orcid_______::";
private static final String OPENAIRE_PREFIX = "openaire____";
private static final String SEPARATOR = "::";
private static final String ORCID_KEY = "10|" + OPENAIRE_PREFIX + SEPARATOR
+ DHPUtils.md5(ModelConstants.ORCID.toLowerCase());
public static final String ORCID_AUTHORS_CLASSID = "sysimport:crosswalk:orcid";
public static final String ORCID_AUTHORS_CLASSNAME = "Imported from ORCID";

@Override
public boolean hasNext() {
return firstIndex < authors.size() - 1;
}

@Override
public Relation next() {
Relation rel = null;
if (firstRelation) {
rel = getRelation(authors.get(firstIndex), authors.get(secondIndex));
firstRelation = Boolean.FALSE;
} else {
rel = getRelation(authors.get(secondIndex), authors.get(firstIndex));
firstRelation = Boolean.TRUE;
secondIndex += 1;
if (secondIndex >= authors.size()) {
firstIndex += 1;
secondIndex = firstIndex + 1;
}
}

return rel;
}

public CoAuthorshipIterator(List<String> authors) {
this.authors = authors;
this.firstIndex = 0;
this.secondIndex = 1;
this.firstRelation = Boolean.TRUE;

}

private Relation getRelation(String orcid1, String orcid2) {
String source = PERSON_PREFIX + IdentifierFactory.md5(orcid1);
String target = PERSON_PREFIX + IdentifierFactory.md5(orcid2);
Relation relation = OafMapperUtils
.getRelation(
source, target, ModelConstants.PERSON_PERSON_RELTYPE,
ModelConstants.PERSON_PERSON_SUBRELTYPE,
ModelConstants.PERSON_PERSON_HASCOAUTHORED,
Arrays.asList(OafMapperUtils.keyValue(ORCID_KEY, ModelConstants.ORCID_DS)),
OafMapperUtils
.dataInfo(
false, null, false, false,
OafMapperUtils
.qualifier(
ORCID_AUTHORS_CLASSID, ORCID_AUTHORS_CLASSNAME,
ModelConstants.DNET_PROVENANCE_ACTIONS, ModelConstants.DNET_PROVENANCE_ACTIONS),
"0.91"),
null);
relation.setValidated(true);
return relation;
}
}

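Note: a minimal usage sketch of the new CoAuthorshipIterator (the ORCID iDs below are made up for illustration). For every unordered pair of authors it emits two hasCoAuthored relations, one per direction.

import java.util.Arrays;
import java.util.List;

import eu.dnetlib.dhp.common.person.CoAuthorshipIterator;
import eu.dnetlib.dhp.schema.oaf.Relation;

public class CoAuthorshipExample {
	public static void main(String[] args) {
		// hypothetical ORCID iDs
		List<String> orcids = Arrays
			.asList("0000-0001-0000-0001", "0000-0002-0000-0002", "0000-0003-0000-0003");
		CoAuthorshipIterator it = new CoAuthorshipIterator(orcids);
		while (it.hasNext()) {
			Relation r = it.next(); // 3 authors -> 6 directed relations
			System.out.println(r.getSource() + " -> " + r.getTarget() + " (" + r.getRelClass() + ")");
		}
	}
}
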
@@ -0,0 +1,17 @@

package eu.dnetlib.dhp.common.person;

import java.io.Serializable;
import java.util.List;

public class Coauthors implements Serializable {
private List<String> coauthors;

public List<String> getCoauthors() {
return coauthors;
}

public void setCoauthors(List<String> coauthors) {
this.coauthors = coauthors;
}
}

@@ -2,8 +2,7 @@
package eu.dnetlib.dhp.oa.merge;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.when;
import static org.apache.spark.sql.functions.*;

import java.util.Map;
import java.util.Optional;

@@ -135,7 +134,9 @@ public class GroupEntitiesSparkJob {
.applyCoarVocabularies(entity, vocs),
OAFENTITY_KRYO_ENC)
.groupByKey((MapFunction<OafEntity, String>) OafEntity::getId, Encoders.STRING())
.mapGroups((MapGroupsFunction<String, OafEntity, OafEntity>) MergeUtils::mergeById, OAFENTITY_KRYO_ENC)
.mapGroups(
(MapGroupsFunction<String, OafEntity, OafEntity>) (key, group) -> MergeUtils.mergeById(group, vocs),
OAFENTITY_KRYO_ENC)
.map(
(MapFunction<OafEntity, Tuple2<String, OafEntity>>) t -> new Tuple2<>(
t.getClass().getName(), t),

@@ -65,7 +65,13 @@ public class RunSQLSparkJob {
for (String statement : sql.split(";\\s*/\\*\\s*EOS\\s*\\*/\\s*")) {
log.info("executing: {}", statement);
long startTime = System.currentTimeMillis();
spark.sql(statement).show();
try {
spark.sql(statement).show();
} catch (Exception e) {
log.error("Error executing statement: {}", statement, e);
System.err.println("Error executing statement: " + statement + "\n" + e);
throw e;
}
log
.info(
"executed in {}",

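Note: a small sketch of how the EOS-marker split used above behaves; the SQL text is hypothetical, the regex is the one from the loop.

public class EosSplitExample {
	public static void main(String[] args) {
		// hypothetical script: statements are terminated by ";" followed by an /* EOS */ marker
		String sql = "CREATE TABLE t (id INT); /* EOS */ INSERT INTO t VALUES (1); /*EOS*/";
		for (String statement : sql.split(";\\s*/\\*\\s*EOS\\s*\\*/\\s*")) {
			System.out.println("executing: " + statement.trim());
		}
	}
}
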
@@ -0,0 +1,70 @@
/*
* Copyright (c) 2024.
* SPDX-FileCopyrightText: © 2023 Consiglio Nazionale delle Ricerche
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

package eu.dnetlib.dhp.schema.oaf;

import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;

public class HashableStructuredProperty extends StructuredProperty {

private static final long serialVersionUID = 8371670185221126045L;

public static HashableStructuredProperty newInstance(String value, Qualifier qualifier, DataInfo dataInfo) {
if (value == null) {
return null;
}
final HashableStructuredProperty sp = new HashableStructuredProperty();
sp.setValue(value);
sp.setQualifier(qualifier);
sp.setDataInfo(dataInfo);
return sp;
}

public static HashableStructuredProperty newInstance(StructuredProperty sp) {
HashableStructuredProperty hsp = new HashableStructuredProperty();
hsp.setQualifier(sp.getQualifier());
hsp.setValue(sp.getValue());
hsp.setQualifier(sp.getQualifier());
return hsp;
}

public static StructuredProperty toStructuredProperty(HashableStructuredProperty hsp) {
StructuredProperty sp = new StructuredProperty();
sp.setQualifier(hsp.getQualifier());
sp.setValue(hsp.getValue());
sp.setQualifier(hsp.getQualifier());
return sp;
}

@Override
public int hashCode() {
return new HashCodeBuilder(11, 91)
.append(getQualifier().getClassid())
.append(getQualifier().getSchemeid())
.append(getValue())
.hashCode();
}

@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj == this) {
return true;
}
if (obj.getClass() != getClass()) {
return false;
}
final HashableStructuredProperty rhs = (HashableStructuredProperty) obj;
return new EqualsBuilder()
.append(getQualifier().getClassid(), rhs.getQualifier().getClassid())
.append(getQualifier().getSchemeid(), rhs.getQualifier().getSchemeid())
.append(getValue(), rhs.getValue())
.isEquals();
}
}

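Note: the new class exists so that PIDs can be compared by (classid, schemeid, value) rather than by object identity, which is what the Sets.difference call in the cleaning code relies on. A minimal sketch, with illustrative qualifier and value:

import java.util.HashSet;
import java.util.Set;

import eu.dnetlib.dhp.schema.oaf.HashableStructuredProperty;
import eu.dnetlib.dhp.schema.oaf.Qualifier;

public class HashablePidExample {
	public static void main(String[] args) {
		Qualifier doi = new Qualifier();
		doi.setClassid("doi");
		doi.setSchemeid("dnet:pid_types");

		Set<HashableStructuredProperty> pids = new HashSet<>();
		pids.add(HashableStructuredProperty.newInstance("10.1234/abc", doi, null));
		pids.add(HashableStructuredProperty.newInstance("10.1234/abc", doi, null));

		System.out.println(pids.size()); // 1: duplicates collapse on classid/schemeid/value
	}
}
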
@@ -43,34 +43,4 @@ public class CleaningFunctions {
return !PidBlacklistProvider.getBlacklist(s.getQualifier().getClassid()).contains(pidValue);
}

/**
* Utility method that normalises PID values on a per-type basis.
* @param pid the PID whose value will be normalised.
* @return the PID containing the normalised value.
*/
public static StructuredProperty normalizePidValue(StructuredProperty pid) {
pid
.setValue(
normalizePidValue(
pid.getQualifier().getClassid(),
pid.getValue()));

return pid;
}

public static String normalizePidValue(String pidType, String pidValue) {
String value = Optional
.ofNullable(pidValue)
.map(String::trim)
.orElseThrow(() -> new IllegalArgumentException("PID value cannot be empty"));

switch (pidType) {

// TODO add cleaning for more PID types as needed
case "doi":
return value.toLowerCase().replaceFirst(DOI_PREFIX_REGEX, DOI_PREFIX);
}
return value;
}

}

@@ -6,18 +6,11 @@ import org.apache.commons.lang3.StringUtils;
public class DoiCleaningRule {

public static String clean(final String doi) {
return doi
.toLowerCase()
.replaceAll("\\s", "")
.replaceAll("^doi:", "")
.replaceFirst(CleaningFunctions.DOI_PREFIX_REGEX, CleaningFunctions.DOI_PREFIX);
}

public static String normalizeDoi(final String input) {
if (input == null)
if (doi == null)
return null;
final String replaced = input
final String replaced = doi
.replaceAll("\\n|\\r|\\t|\\s", "")
.replaceAll("^doi:", "")
.toLowerCase()
.replaceFirst(CleaningFunctions.DOI_PREFIX_REGEX, CleaningFunctions.DOI_PREFIX);
if (StringUtils.isEmpty(replaced))

@@ -32,7 +25,6 @@ public class DoiCleaningRule {
return null;

return ret;

}

}

@@ -2,7 +2,6 @@
package eu.dnetlib.dhp.schema.oaf.utils;

import static eu.dnetlib.dhp.schema.common.ModelConstants.*;
import static eu.dnetlib.dhp.schema.common.ModelConstants.OPENAIRE_META_RESOURCE_TYPE;
import static eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils.getProvenance;

import java.net.MalformedURLException;

@@ -363,6 +362,8 @@ public class GraphCleaningFunctions extends CleaningFunctions {
// nothing to clean here
} else if (value instanceof Project) {
// nothing to clean here
} else if (value instanceof Person) {
// nothing to clean here
} else if (value instanceof Organization) {
Organization o = (Organization) value;
if (Objects.isNull(o.getCountry()) || StringUtils.isBlank(o.getCountry().getClassid())) {

@@ -563,12 +564,24 @@ public class GraphCleaningFunctions extends CleaningFunctions {
Optional
.ofNullable(i.getPid())
.ifPresent(pid -> {
final Set<StructuredProperty> pids = Sets.newHashSet(pid);
final Set<HashableStructuredProperty> pids = pid
.stream()
.map(HashableStructuredProperty::newInstance)
.collect(Collectors.toCollection(HashSet::new));
Optional
.ofNullable(i.getAlternateIdentifier())
.ifPresent(altId -> {
final Set<StructuredProperty> altIds = Sets.newHashSet(altId);
i.setAlternateIdentifier(Lists.newArrayList(Sets.difference(altIds, pids)));
final Set<HashableStructuredProperty> altIds = altId
.stream()
.map(HashableStructuredProperty::newInstance)
.collect(Collectors.toCollection(HashSet::new));
i
.setAlternateIdentifier(
Sets
.difference(altIds, pids)
.stream()
.map(HashableStructuredProperty::toStructuredProperty)
.collect(Collectors.toList()));
});
});

@@ -682,6 +695,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
}
}

// set ORCID_PENDING to all orcid values that are not coming from ORCID provenance
for (Author a : r.getAuthor()) {
if (Objects.isNull(a.getPid())) {
a.setPid(Lists.newArrayList());

@@ -738,6 +752,40 @@ public class GraphCleaningFunctions extends CleaningFunctions {
.collect(Collectors.toList()));
}
}

// Identify clashing ORCIDS:that is same ORCID associated to multiple authors in this result
Map<String, Integer> clashing_orcid = new HashMap<>();

for (Author a : r.getAuthor()) {
a
.getPid()
.stream()
.filter(
p -> StringUtils
.contains(StringUtils.lowerCase(p.getQualifier().getClassid()), ORCID_PENDING))
.map(StructuredProperty::getValue)
.distinct()
.forEach(orcid -> clashing_orcid.compute(orcid, (k, v) -> (v == null) ? 1 : v + 1));
}

Set<String> clashing = clashing_orcid
.entrySet()
.stream()
.filter(ee -> ee.getValue() > 1)
.map(Map.Entry::getKey)
.collect(Collectors.toSet());

// filter out clashing orcids
for (Author a : r.getAuthor()) {
a
.setPid(
a
.getPid()
.stream()
.filter(p -> !clashing.contains(p.getValue()))
.collect(Collectors.toList()));
}

}
if (value instanceof Publication) {

@@ -796,7 +844,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
return author;
}

private static Optional<String> cleanDateField(Field<String> dateofacceptance) {
public static Optional<String> cleanDateField(Field<String> dateofacceptance) {
return Optional
.ofNullable(dateofacceptance)
.map(Field::getValue)

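Note: the clashing-ORCID step above counts how many distinct authors of the same result carry a given orcid_pending value and then drops the ambiguous ones. The counting idiom in isolation, with hypothetical ORCID values:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class ClashingOrcidExample {
	public static void main(String[] args) {
		Map<String, Integer> counts = new HashMap<>();
		// one ORCID attached to two different authors, another attached to only one
		for (String orcid : Arrays.asList("0000-0001-1111-1111", "0000-0001-1111-1111", "0000-0002-2222-2222")) {
			counts.compute(orcid, (k, v) -> (v == null) ? 1 : v + 1);
		}
		Set<String> clashing = counts
			.entrySet()
			.stream()
			.filter(e -> e.getValue() > 1)
			.map(Map.Entry::getKey)
			.collect(Collectors.toSet());
		System.out.println(clashing); // [0000-0001-1111-1111] -> its PIDs are removed from all authors
	}
}
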
@@ -175,7 +175,7 @@ public class IdentifierFactory implements Serializable {
return entity
.getPid()
.stream()
.map(CleaningFunctions::normalizePidValue)
.map(PidCleaner::normalizePidValue)
.filter(CleaningFunctions::pidFilter)
.collect(
Collectors

@@ -204,10 +204,11 @@ public class IdentifierFactory implements Serializable {
.map(
pp -> pp
.stream()
.filter(p -> StringUtils.isNotBlank(p.getValue()))
// filter away PIDs provided by a DS that is not considered an authority for the
// given PID Type
.filter(p -> shouldFilterPidByCriteria(collectedFrom, p, mapHandles))
.map(CleaningFunctions::normalizePidValue)
.map(PidCleaner::normalizePidValue)
.filter(p -> isNotFromDelegatedAuthority(collectedFrom, p))
.filter(CleaningFunctions::pidFilter))
.orElse(Stream.empty());

@@ -0,0 +1,106 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.*;

import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.Oaf;
import eu.dnetlib.dhp.schema.oaf.OafEntity;
import eu.dnetlib.dhp.schema.oaf.Result;

public class MergeEntitiesComparator implements Comparator<Oaf> {
static final List<String> PID_AUTHORITIES = Arrays
.asList(
ModelConstants.ARXIV_ID,
ModelConstants.PUBMED_CENTRAL_ID,
ModelConstants.EUROPE_PUBMED_CENTRAL_ID,
ModelConstants.DATACITE_ID,
ModelConstants.CROSSREF_ID);

static final List<String> RESULT_TYPES = Arrays
.asList(
ModelConstants.ORP_RESULTTYPE_CLASSID,
ModelConstants.SOFTWARE_RESULTTYPE_CLASSID,
ModelConstants.DATASET_RESULTTYPE_CLASSID,
ModelConstants.PUBLICATION_RESULTTYPE_CLASSID);

public static final Comparator<Oaf> INSTANCE = new MergeEntitiesComparator();

@Override
public int compare(Oaf left, Oaf right) {
if (left == null && right == null)
return 0;
if (left == null)
return -1;
if (right == null)
return 1;

int res = 0;

// pid authority
int cfp1 = Optional
.ofNullable(left.getCollectedfrom())
.map(
cf -> cf
.stream()
.map(kv -> PID_AUTHORITIES.indexOf(kv.getKey()))
.max(Integer::compare)
.orElse(-1))
.orElse(-1);
int cfp2 = Optional
.ofNullable(right.getCollectedfrom())
.map(
cf -> cf
.stream()
.map(kv -> PID_AUTHORITIES.indexOf(kv.getKey()))
.max(Integer::compare)
.orElse(-1))
.orElse(-1);

if (cfp1 >= 0 && cfp1 > cfp2) {
return 1;
} else if (cfp2 >= 0 && cfp2 > cfp1) {
return -1;
}

// trust
if (left.getDataInfo() != null && right.getDataInfo() != null) {
res = left.getDataInfo().getTrust().compareTo(right.getDataInfo().getTrust());
}

// result type
if (res == 0) {
if (left instanceof Result && right instanceof Result) {
Result r1 = (Result) left;
Result r2 = (Result) right;

if (r1.getResulttype() == null || r1.getResulttype().getClassid() == null) {
if (r2.getResulttype() != null && r2.getResulttype().getClassid() != null) {
return -1;
}
} else if (r2.getResulttype() == null || r2.getResulttype().getClassid() == null) {
return 1;
}

int rt1 = RESULT_TYPES.indexOf(r1.getResulttype().getClassid());
int rt2 = RESULT_TYPES.indexOf(r2.getResulttype().getClassid());

if (rt1 >= 0 && rt1 > rt2) {
return 1;
} else if (rt2 >= 0 && rt2 > rt1) {
return -1;
}
}
}

// id
if (res == 0) {
if (left instanceof OafEntity && right instanceof OafEntity) {
res = ((OafEntity) right).getId().compareTo(((OafEntity) left).getId());
}
}

return res;
}

}

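Note: a minimal sketch of how the new comparator ranks a group before merging (collected-from authority first, then trust, result type and id). The identifiers below are hypothetical.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.Oaf;
import eu.dnetlib.dhp.schema.oaf.Publication;
import eu.dnetlib.dhp.schema.oaf.utils.MergeEntitiesComparator;
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;

public class MergeOrderExample {
	public static void main(String[] args) {
		Publication fromCrossref = new Publication();
		fromCrossref.setId("50|doi_________::hypothetical1");
		fromCrossref.setCollectedfrom(Arrays.asList(OafMapperUtils.keyValue(ModelConstants.CROSSREF_ID, "Crossref")));

		Publication fromRepository = new Publication();
		fromRepository.setId("50|od______0000::hypothetical2");

		List<Oaf> group = new ArrayList<>(Arrays.asList(fromRepository, fromCrossref));
		group.sort(MergeEntitiesComparator.INSTANCE.reversed());
		System.out.println(((Publication) group.get(0)).getId()); // the Crossref record seeds the merge
	}
}
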
@@ -23,53 +23,87 @@ import org.apache.commons.lang3.tuple.Pair;
import com.github.sisyphsu.dateparser.DateParserUtils;
import com.google.common.base.Joiner;

import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup;
import eu.dnetlib.dhp.oa.merge.AuthorMerger;
import eu.dnetlib.dhp.schema.common.AccessRightComparator;
import eu.dnetlib.dhp.schema.common.EntityType;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.*;

public class MergeUtils {
public static <T extends Oaf> T mergeById(String s, Iterator<T> oafEntityIterator) {
return mergeGroup(s, oafEntityIterator, true);

public static <T extends Oaf> T mergeById(Iterator<T> oafEntityIterator, VocabularyGroup vocs) {
return mergeGroup(oafEntityIterator, true, vocs);
}

public static <T extends Oaf> T mergeGroup(String s, Iterator<T> oafEntityIterator) {
return mergeGroup(s, oafEntityIterator, false);
public static <T extends Oaf> T mergeGroup(Iterator<T> oafEntityIterator) {
return mergeGroup(oafEntityIterator, false);
}

public static <T extends Oaf> T mergeGroup(String s, Iterator<T> oafEntityIterator,
boolean checkDelegateAuthority) {
TreeSet<T> sortedEntities = new TreeSet<>((o1, o2) -> {
int res = 0;
public static <T extends Oaf> T mergeGroup(Iterator<T> oafEntityIterator, boolean checkDelegateAuthority) {
return mergeGroup(oafEntityIterator, checkDelegateAuthority, null);
}

if (o1.getDataInfo() != null && o2.getDataInfo() != null) {
res = o1.getDataInfo().getTrust().compareTo(o2.getDataInfo().getTrust());
}
public static <T extends Oaf> T mergeGroup(Iterator<T> oafEntityIterator,
boolean checkDelegateAuthority, VocabularyGroup vocs) {

if (res == 0) {
if (o1 instanceof Result && o2 instanceof Result) {
return ResultTypeComparator.INSTANCE.compare((Result) o1, (Result) o2);
}
}
ArrayList<T> sortedEntities = new ArrayList<>();
oafEntityIterator.forEachRemaining(sortedEntities::add);
sortedEntities.sort(MergeEntitiesComparator.INSTANCE.reversed());

return res;
});

while (oafEntityIterator.hasNext()) {
sortedEntities.add(oafEntityIterator.next());
}

Iterator<T> it = sortedEntities.descendingIterator();
Iterator<T> it = sortedEntities.iterator();
T merged = it.next();

while (it.hasNext()) {
merged = checkedMerge(merged, it.next(), checkDelegateAuthority);
if (!it.hasNext() && merged instanceof Result && vocs != null) {
return enforceResultType(vocs, (Result) merged);
} else {
while (it.hasNext()) {
merged = checkedMerge(merged, it.next(), checkDelegateAuthority);
}
}

return merged;
}

private static <T extends Oaf> T enforceResultType(VocabularyGroup vocs, Result mergedResult) {
if (Optional.ofNullable(mergedResult.getInstance()).map(List::isEmpty).orElse(true)) {
return (T) mergedResult;
} else {
final Instance i = mergedResult.getInstance().get(0);

if (!vocs.vocabularyExists(ModelConstants.DNET_RESULT_TYPOLOGIES)) {
return (T) mergedResult;
} else {
final String expectedResultType = Optional
.ofNullable(
vocs
.lookupTermBySynonym(
ModelConstants.DNET_RESULT_TYPOLOGIES, i.getInstancetype().getClassid()))
.orElse(ModelConstants.ORP_DEFAULT_RESULTTYPE)
.getClassid();

// there is a clash among the result types
if (!expectedResultType.equals(mergedResult.getResulttype().getClassid())) {

Result result = (Result) Optional
.ofNullable(ModelSupport.oafTypes.get(expectedResultType))
.map(r -> {
try {
return r.newInstance();
} catch (InstantiationException | IllegalAccessException e) {
throw new IllegalStateException(e);
}
})
.orElse(new OtherResearchProduct());
result.setId(mergedResult.getId());
return (T) mergeResultFields(result, mergedResult);
} else {
return (T) mergedResult;
}
}
}
}

public static <T extends Oaf> T checkedMerge(final T left, final T right, boolean checkDelegateAuthority) {
return (T) merge(left, right, checkDelegateAuthority);
}

@@ -103,7 +137,7 @@ public class MergeUtils {
private static Oaf mergeEntities(Oaf left, Oaf right, boolean checkDelegatedAuthority) {

if (sameClass(left, right, Result.class)) {
if (!left.getClass().equals(right.getClass()) || checkDelegatedAuthority) {
if (checkDelegatedAuthority) {
return mergeResultsOfDifferentTypes((Result) left, (Result) right);
}

@@ -120,7 +154,7 @@ public class MergeUtils {
return mergeSoftware((Software) left, (Software) right);
}

return mergeResultFields((Result) left, (Result) right);
return left;
} else if (sameClass(left, right, Datasource.class)) {
// TODO
final int trust = compareTrust(left, right);

@@ -143,7 +177,7 @@ public class MergeUtils {
* https://graph.openaire.eu/docs/data-model/pids-and-identifiers#delegated-authorities and in that case it prefers
* such version.
* <p>
* Otherwise, it considers a resulttype priority order implemented in {@link ResultTypeComparator}
* Otherwise, it considers a resulttype priority order implemented in {@link MergeEntitiesComparator}
* and proceeds with the canonical property merging.
*
* @param left

@@ -161,8 +195,9 @@ public class MergeUtils {
if (!leftFromDelegatedAuthority && rightFromDelegatedAuthority) {
return right;
}

// TODO: raise trust to have preferred fields from one or the other??
if (new ResultTypeComparator().compare(left, right) < 0) {
if (MergeEntitiesComparator.INSTANCE.compare(left, right) > 0) {
return mergeResultFields(left, right);
} else {
return mergeResultFields(right, left);

@@ -225,9 +260,9 @@ public class MergeUtils {

private static <T, K> List<T> mergeLists(final List<T> left, final List<T> right, int trust,
Function<T, K> keyExtractor, BinaryOperator<T> merger) {
if (left == null) {
return right;
} else if (right == null) {
if (left == null || left.isEmpty()) {
return right != null ? right : new ArrayList<>();
} else if (right == null || right.isEmpty()) {
return left;
}

@@ -342,7 +377,7 @@ public class MergeUtils {
final T merged = mergeOafFields(original, enrich, trust);

merged.setOriginalId(unionDistinctListOfString(merged.getOriginalId(), enrich.getOriginalId()));
merged.setPid(unionDistinctLists(merged.getPid(), enrich.getPid(), trust));
merged.setPid(mergeLists(merged.getPid(), enrich.getPid(), trust, MergeUtils::spKeyExtractor, (p1, p2) -> p1));
merged.setDateofcollection(LocalDateTime.now().toString());
merged
.setDateoftransformation(

@@ -405,7 +440,7 @@ public class MergeUtils {
}

// should be an instance attribute, get the first non-null value
merge.setLanguage(coalesce(merge.getLanguage(), enrich.getLanguage()));
merge.setLanguage(coalesceQualifier(merge.getLanguage(), enrich.getLanguage()));

// distinct countries, do not manage datainfo
merge.setCountry(mergeQualifiers(merge.getCountry(), enrich.getCountry(), trust));

@@ -478,6 +513,10 @@ public class MergeUtils {
merge.setIsInDiamondJournal(booleanOR(merge.getIsInDiamondJournal(), enrich.getIsInDiamondJournal()));
merge.setPubliclyFunded(booleanOR(merge.getPubliclyFunded(), enrich.getPubliclyFunded()));

if (StringUtils.isBlank(merge.getTransformativeAgreement())) {
merge.setTransformativeAgreement(enrich.getTransformativeAgreement());
}

return merge;
}

@@ -575,6 +614,13 @@ public class MergeUtils {
return m != null ? m : e;
}

private static Qualifier coalesceQualifier(Qualifier m, Qualifier e) {
if (m == null || m.getClassid() == null || StringUtils.isBlank(m.getClassid())) {
return e;
}
return m;
}

private static List<Author> mergeAuthors(List<Author> author, List<Author> author1, int trust) {
List<List<Author>> authors = new ArrayList<>();
if (author != null) {

@@ -587,6 +633,10 @@ public class MergeUtils {
}

private static String instanceKeyExtractor(Instance i) {
// three levels of concatenating:
// 1. ::
// 2. @@
// 3. ||
return String
.join(
"::",

@@ -594,10 +644,10 @@ public class MergeUtils {
kvKeyExtractor(i.getCollectedfrom()),
qualifierKeyExtractor(i.getAccessright()),
qualifierKeyExtractor(i.getInstancetype()),
Optional.ofNullable(i.getUrl()).map(u -> String.join("::", u)).orElse(null),
Optional.ofNullable(i.getUrl()).map(u -> String.join("@@", u)).orElse(null),
Optional
.ofNullable(i.getPid())
.map(pp -> pp.stream().map(MergeUtils::spKeyExtractor).collect(Collectors.joining("::")))
.map(pp -> pp.stream().map(MergeUtils::spKeyExtractor).collect(Collectors.joining("@@")))
.orElse(null));
}

@@ -652,9 +702,9 @@ public class MergeUtils {
}

private static Field<String> selectOldestDate(Field<String> d1, Field<String> d2) {
if (d1 == null || StringUtils.isBlank(d1.getValue())) {
if (!GraphCleaningFunctions.cleanDateField(d1).isPresent()) {
return d2;
} else if (d2 == null || StringUtils.isBlank(d2.getValue())) {
} else if (!GraphCleaningFunctions.cleanDateField(d2).isPresent()) {
return d1;
}

@@ -706,7 +756,11 @@ public class MergeUtils {
private static String spKeyExtractor(StructuredProperty sp) {
return Optional
.ofNullable(sp)
.map(s -> Joiner.on("::").join(s, qualifierKeyExtractor(s.getQualifier())))
.map(
s -> Joiner
.on("||")
.useForNull("")
.join(qualifierKeyExtractor(s.getQualifier()), s.getValue()))
.orElse(null);
}

@@ -963,7 +1017,7 @@ public class MergeUtils {
private static String extractKeyFromPid(final StructuredProperty pid) {
if (pid == null)
return null;
final StructuredProperty normalizedPid = CleaningFunctions.normalizePidValue(pid);
final StructuredProperty normalizedPid = PidCleaner.normalizePidValue(pid);

return String.format("%s::%s", normalizedPid.getQualifier().getClassid(), normalizedPid.getValue());
}

@@ -1,6 +1,12 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import java.util.Map;

import com.google.common.collect.Maps;

import eu.dnetlib.dhp.schema.common.ModelConstants;

public class ModelHardLimits {

private ModelHardLimits() {

@@ -12,6 +18,7 @@ public class ModelHardLimits {

public static final int MAX_EXTERNAL_ENTITIES = 50;
public static final int MAX_AUTHORS = 200;
public static final int MAX_RELATED_AUTHORS = 20;
public static final int MAX_AUTHOR_FULLNAME_LENGTH = 1000;
public static final int MAX_TITLE_LENGTH = 5000;
public static final int MAX_TITLES = 10;

@@ -19,6 +26,12 @@ public class ModelHardLimits {
public static final int MAX_ABSTRACT_LENGTH = 150000;
public static final int MAX_RELATED_ABSTRACT_LENGTH = 500;
public static final int MAX_INSTANCES = 10;
public static final Map<String, Long> MAX_RELATIONS_BY_RELCLASS = Maps.newHashMap();

static {
MAX_RELATIONS_BY_RELCLASS.put(ModelConstants.PERSON_PERSON_HASCOAUTHORED, 500L);
MAX_RELATIONS_BY_RELCLASS.put(ModelConstants.RESULT_PERSON_HASAUTHORED, 500L);
}

public static String getCollectionName(String format) {
return format + SEPARATOR + LAYOUT + SEPARATOR + INTERPRETATION;

@@ -26,7 +26,7 @@ public class PidCleaner {
String value = Optional
.ofNullable(pidValue)
.map(String::trim)
.orElseThrow(() -> new IllegalArgumentException("PID value cannot be empty"));
.orElseThrow(() -> new IllegalArgumentException("PID (" + pidType + ") value cannot be empty"));

switch (pidType) {

@@ -18,8 +18,8 @@ public class PidValueComparator implements Comparator<StructuredProperty> {
if (right == null)
return -1;

StructuredProperty l = CleaningFunctions.normalizePidValue(left);
StructuredProperty r = CleaningFunctions.normalizePidValue(right);
StructuredProperty l = PidCleaner.normalizePidValue(left);
StructuredProperty r = PidCleaner.normalizePidValue(right);

return Optional
.ofNullable(l.getValue())

@@ -1,87 +0,0 @@

package eu.dnetlib.dhp.schema.oaf.utils;

import static eu.dnetlib.dhp.schema.common.ModelConstants.CROSSREF_ID;

import java.util.Comparator;
import java.util.HashSet;
import java.util.Optional;
import java.util.stream.Collectors;

import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.KeyValue;
import eu.dnetlib.dhp.schema.oaf.Result;

public class ResultTypeComparator implements Comparator<Result> {

public static final ResultTypeComparator INSTANCE = new ResultTypeComparator();

@Override
public int compare(Result left, Result right) {

if (left == null && right == null)
return 0;
if (left == null)
return 1;
if (right == null)
return -1;

HashSet<String> lCf = getCollectedFromIds(left);
HashSet<String> rCf = getCollectedFromIds(right);

if (lCf.contains(CROSSREF_ID) && !rCf.contains(CROSSREF_ID)) {
return -1;
}
if (!lCf.contains(CROSSREF_ID) && rCf.contains(CROSSREF_ID)) {
return 1;
}

if (left.getResulttype() == null || left.getResulttype().getClassid() == null) {
if (right.getResulttype() == null || right.getResulttype().getClassid() == null) {
return 0;
}
return 1;
} else if (right.getResulttype() == null || right.getResulttype().getClassid() == null) {
return -1;
}

String lClass = left.getResulttype().getClassid();
String rClass = right.getResulttype().getClassid();

if (!lClass.equals(rClass)) {
if (lClass.equals(ModelConstants.PUBLICATION_RESULTTYPE_CLASSID))
return -1;
if (rClass.equals(ModelConstants.PUBLICATION_RESULTTYPE_CLASSID))
return 1;

if (lClass.equals(ModelConstants.DATASET_RESULTTYPE_CLASSID))
return -1;
if (rClass.equals(ModelConstants.DATASET_RESULTTYPE_CLASSID))
return 1;

if (lClass.equals(ModelConstants.SOFTWARE_RESULTTYPE_CLASSID))
return -1;
if (rClass.equals(ModelConstants.SOFTWARE_RESULTTYPE_CLASSID))
return 1;

if (lClass.equals(ModelConstants.ORP_RESULTTYPE_CLASSID))
return -1;
if (rClass.equals(ModelConstants.ORP_RESULTTYPE_CLASSID))
return 1;
}

// Else (but unlikely), lexicographical ordering will do.
return lClass.compareTo(rClass);
}

protected HashSet<String> getCollectedFromIds(Result left) {
return Optional
.ofNullable(left.getCollectedfrom())
.map(
cf -> cf
.stream()
.map(KeyValue::getKey)
.collect(Collectors.toCollection(HashSet::new)))
.orElse(new HashSet<>());
}
}

@@ -28,6 +28,7 @@ import com.jayway.jsonpath.JsonPath;

import eu.dnetlib.dhp.schema.mdstore.MDStoreWithInfo;
import eu.dnetlib.dhp.schema.oaf.utils.CleaningFunctions;
import eu.dnetlib.dhp.schema.oaf.utils.PidCleaner;
import net.minidev.json.JSONArray;
import scala.collection.JavaConverters;
import scala.collection.Seq;

@@ -104,7 +105,7 @@ public class DHPUtils {

public static String generateUnresolvedIdentifier(final String pid, final String pidType) {

final String cleanedPid = CleaningFunctions.normalizePidValue(pidType, pid);
final String cleanedPid = PidCleaner.normalizePidValue(pidType, pid);

return String.format("unresolved::%s::%s", cleanedPid, pidType.toLowerCase().trim());
}

@@ -29,7 +29,7 @@ class IdentifierFactoryTest {
"publication_doi2.json", "50|doi_________::79dbc7a2a56dc1532659f9038843256e", true);

verifyIdentifier(
"publication_doi3.json", "50|pmc_________::94e4cb08c93f8733b48e2445d04002ac", true);
"publication_doi3.json", "50|pmc_________::e2a339e0e11bfbf55462e14a07f1b304", true);

verifyIdentifier(
"publication_doi4.json", "50|od______2852::38861c44e6052a8d49f59a4c39ba5e66", true);

@@ -41,7 +41,7 @@ class IdentifierFactoryTest {
"publication_pmc1.json", "50|DansKnawCris::0829b5191605bdbea36d6502b8c1ce1f", true);

verifyIdentifier(
"publication_pmc2.json", "50|pmc_________::94e4cb08c93f8733b48e2445d04002ac", true);
"publication_pmc2.json", "50|pmc_________::e2a339e0e11bfbf55462e14a07f1b304", true);

verifyIdentifier(
"publication_openapc.json", "50|doi_________::79dbc7a2a56dc1532659f9038843256e", true);

@@ -179,7 +179,7 @@ class OafMapperUtilsTest {
assertEquals(
ModelConstants.DATASET_RESULTTYPE_CLASSID,
((Result) MergeUtils
.merge(p2, d1))
.merge(p2, d1, true))
.getResulttype()
.getClassid());
}

@@ -29,7 +29,7 @@
},
{
"qualifier": {"classid": "pmc"},
"value": "21459329"
"value": "PMC21459329"
}
]
}

@@ -13,7 +13,7 @@
},
{
"qualifier":{"classid":"pmc"},
"value":"21459329"
"value":"PMC21459329"
}
]
}

@@ -2,31 +2,41 @@
package eu.dnetlib.pace.clustering;

import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import org.apache.commons.lang3.StringUtils;

import eu.dnetlib.pace.config.Config;

@ClusteringClass("keywordsclustering")
public class KeywordsClustering extends AbstractClusteringFunction {
@ClusteringClass("legalnameclustering")
public class LegalnameClustering extends AbstractClusteringFunction {

public KeywordsClustering(Map<String, Object> params) {
private static final Pattern CITY_CODE_PATTERN = Pattern.compile("city::\\d+");
private static final Pattern KEYWORD_CODE_PATTERN = Pattern.compile("key::\\d+");

public LegalnameClustering(Map<String, Object> params) {
super(params);
}

public Set<String> getRegexList(String input, Pattern codeRegex) {
Matcher matcher = codeRegex.matcher(input);
Set<String> cities = new HashSet<>();
while (matcher.find()) {
cities.add(matcher.group());
}
return cities;
}

@Override
protected Collection<String> doApply(final Config conf, String s) {

// takes city codes and keywords codes without duplicates
Set<String> keywords = getKeywords(s, conf.translationMap(), paramOrDefault("windowSize", 4));
Set<String> cities = getCities(s, paramOrDefault("windowSize", 4));

// list of combination to return as result
final Collection<String> combinations = new LinkedHashSet<String>();

for (String keyword : keywordsToCodes(keywords, conf.translationMap())) {
for (String city : citiesToCodes(cities)) {
for (String keyword : getRegexList(s, KEYWORD_CODE_PATTERN)) {
for (String city : getRegexList(s, CITY_CODE_PATTERN)) {
combinations.add(keyword + "-" + city);
if (combinations.size() >= paramOrDefault("max", 2)) {
return combinations;

@@ -42,9 +52,6 @@ public class KeywordsClustering extends AbstractClusteringFunction {
return fields
.stream()
.filter(f -> !f.isEmpty())
.map(KeywordsClustering::cleanup)
.map(KeywordsClustering::normalize)
.map(s -> filterAllStopWords(s))
.map(s -> doApply(conf, s))
.map(c -> filterBlacklisted(c, ngramBlacklist))
.flatMap(c -> c.stream())

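Note: a small sketch of the new clustering function; it assumes the legal name has already been encoded into key::N / city::N codes (for example by cityKeywordInference), that an empty parameter map is acceptable for the constructor, and the codes below are made up.

import java.util.HashMap;
import java.util.regex.Pattern;

import eu.dnetlib.pace.clustering.LegalnameClustering;

public class LegalnameClusteringExample {
	public static void main(String[] args) {
		LegalnameClustering clustering = new LegalnameClustering(new HashMap<>());
		String encoded = "key::1 key::2 city::123"; // hypothetical pre-encoded legal name
		System.out.println(clustering.getRegexList(encoded, Pattern.compile("key::\\d+"))); // {key::1, key::2}
		System.out.println(clustering.getRegexList(encoded, Pattern.compile("city::\\d+"))); // {city::123}
	}
}
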
@@ -38,7 +38,7 @@ public class NumAuthorsTitleSuffixPrefixChain extends AbstractClusteringFunction

@Override
protected Collection<String> doApply(Config conf, String s) {
return suffixPrefixChain(cleanup(s), param("mod"));
return suffixPrefixChain(cleanup(s), paramOrDefault("mod", 10));
}

private Collection<String> suffixPrefixChain(String s, int mod) {

@@ -27,6 +27,14 @@ public class AbstractPaceFunctions extends PaceCommonUtils {
private static Map<String, String> cityMap = AbstractPaceFunctions
.loadMapFromClasspath("/eu/dnetlib/pace/config/city_map.csv");

// keywords map to be used when translating the keyword names into codes
private static Map<String, String> keywordMap = AbstractPaceFunctions
.loadMapFromClasspath("/eu/dnetlib/pace/config/translation_map.csv");

// country map to be used when inferring the country from the city name
private static Map<String, String> countryMap = AbstractPaceFunctions
.loadCountryMapFromClasspath("/eu/dnetlib/pace/config/country_map.csv");

// list of stopwords in different languages
protected static Set<String> stopwords_gr = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_gr.txt");
protected static Set<String> stopwords_en = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_en.txt");

@@ -74,6 +82,64 @@ public class AbstractPaceFunctions extends PaceCommonUtils {
return s12;
}

public static String countryInference(final String original, String inferFrom) {
if (!original.equalsIgnoreCase("unknown"))
return original;

inferFrom = cleanup(inferFrom);
inferFrom = normalize(inferFrom);
inferFrom = filterAllStopWords(inferFrom);
Set<String> cities = getCities(inferFrom, 4);
return citiesToCountry(cities).stream().filter(Objects::nonNull).findFirst().orElse("UNKNOWN");
}

public static String cityInference(String original) {
original = cleanup(original);
original = normalize(original);
original = filterAllStopWords(original);

Set<String> cities = getCities(original, 4);

for (String city : cities) {
original = original.replaceAll(city, cityMap.get(city));
}

return original;
}

public static String keywordInference(String original) {
original = cleanup(original);
original = normalize(original);
original = filterAllStopWords(original);

Set<String> keywords = getKeywords(original, keywordMap, 4);

for (String keyword : keywords) {
original = original.replaceAll(keyword, keywordMap.get(keyword));
}

return original;
}

public static String cityKeywordInference(String original) {
original = cleanup(original);
original = normalize(original);
original = filterAllStopWords(original);

Set<String> keywords = getKeywords(original, keywordMap, 4);
Set<String> cities = getCities(original, 4);

for (String keyword : keywords) {
original = original.replaceAll(keyword, keywordMap.get(keyword));
}

for (String city : cities) {
original = original.replaceAll(city, cityMap.get(city));
}

return original;
}

protected static String fixXML(final String a) {

return a

@@ -208,6 +274,30 @@ public class AbstractPaceFunctions extends PaceCommonUtils {
return m;
}

public static Map<String, String> loadCountryMapFromClasspath(final String classpath) {

Transliterator transliterator = Transliterator.getInstance("Any-Eng");

final Map<String, String> m = new HashMap<>();
try {
for (final String s : IOUtils
.readLines(AbstractPaceFunctions.class.getResourceAsStream(classpath), StandardCharsets.UTF_8)) {
// string is like this: country_code;city1;city2;city3
String[] line = s.split(";");
String value = line[0];
for (int i = 1; i < line.length; i++) {
String city = fixAliases(transliterator.transliterate(line[i].toLowerCase()));
String code = cityMap.get(city);
m.put(code, value);
}
}
} catch (final Throwable e) {
return new HashMap<>();
}
return m;

}

public static String removeKeywords(String s, Set<String> keywords) {

s = " " + s + " ";

@@ -237,6 +327,10 @@ public class AbstractPaceFunctions extends PaceCommonUtils {
return toCodes(keywords, cityMap);
}

public static Set<String> citiesToCountry(Set<String> cities) {
return toCodes(toCodes(cities, cityMap), countryMap);
}

protected static String firstLC(final String s) {
return StringUtils.substring(s, 0, 1).toLowerCase();
}

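Note: a short sketch of the new inference helpers. The organization name is invented, and the exact codes produced depend on the bundled city/keyword maps.

import eu.dnetlib.pace.common.AbstractPaceFunctions;

public class InferenceExample {
	public static void main(String[] args) {
		// the country is UNKNOWN, so it gets inferred from the city mentioned in the legal name
		String country = AbstractPaceFunctions.countryInference("UNKNOWN", "Universita di Bologna");
		// city and keyword tokens in the legal name are replaced by their codes (city::N / key::N)
		String encoded = AbstractPaceFunctions.cityKeywordInference("Universita di Bologna");
		System.out.println(country + " / " + encoded);
	}
}
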
@@ -47,9 +47,37 @@ public class FieldDef implements Serializable {

private String clean;

private String infer;

private String inferenceFrom;

public FieldDef() {
}

public FieldDef clone() {
FieldDef fieldDef = new FieldDef();
fieldDef.setName(this.name);
fieldDef.setPath(this.path);
fieldDef.setType(this.type);
fieldDef.setOverrideMatch(this.overrideMatch);
fieldDef.setSize(this.size);
fieldDef.setLength(this.length);
fieldDef.setFilter(this.filter);
fieldDef.setSorted(this.sorted);
fieldDef.setClean(this.clean);
fieldDef.setInfer(this.infer);
fieldDef.setInferenceFrom(this.inferenceFrom);
return fieldDef;
}

public String getInferenceFrom() {
return inferenceFrom;
}

public void setInferenceFrom(final String inferenceFrom) {
this.inferenceFrom = inferenceFrom;
}

public String getName() {
return name;
}

@@ -126,6 +154,14 @@ public class FieldDef implements Serializable {
this.clean = clean;
}

public String getInfer() {
return infer;
}

public void setInfer(String infer) {
this.infer = infer;
}

@Override
public String toString() {
try {

@@ -19,48 +19,10 @@ case class SparkDeduper(conf: DedupConfig) extends Serializable {
val model: SparkModel = SparkModel(conf)

val dedup: (Dataset[Row] => Dataset[Row]) = df => {
df.transform(filterAndCleanup)
.transform(generateClustersWithCollect)
df.transform(generateClustersWithCollect)
.transform(processBlocks)
}


val filterAndCleanup: (Dataset[Row] => Dataset[Row]) = df => {
val df_with_filters = conf.getPace.getModel.asScala.foldLeft(df)((res, fdef) => {
if (conf.blacklists.containsKey(fdef.getName)) {
res.withColumn(
fdef.getName + "_filtered",
filterColumnUDF(fdef).apply(new Column(fdef.getName))
)
} else {
res
}
})

df_with_filters
}

def filterColumnUDF(fdef: FieldDef): UserDefinedFunction = {
val blacklist: Predicate[String] = conf.blacklists().get(fdef.getName)

if (blacklist == null) {
throw new IllegalArgumentException("Column: " + fdef.getName + " does not have any filter")
} else {
fdef.getType match {
case Type.List | Type.JSON =>
udf[Array[String], Array[String]](values => {
values.filter((v: String) => !blacklist.test(v))
})

case _ =>
udf[String, String](v => {
if (blacklist.test(v)) ""
else v
})
}
}
}

val generateClustersWithCollect: (Dataset[Row] => Dataset[Row]) = df_with_filters => {
var df_with_clustering_keys: Dataset[Row] = null

@@ -5,12 +5,12 @@ import eu.dnetlib.pace.common.AbstractPaceFunctions
import eu.dnetlib.pace.config.{DedupConfig, Type}
import eu.dnetlib.pace.util.{MapDocumentUtil, SparkCompatUtils}
import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
import org.apache.spark.sql.types.{DataTypes, Metadata, StructField, StructType}
import org.apache.spark.sql.{Dataset, Row}

import java.util.Locale
import java.util.function.Predicate
import java.util.regex.Pattern
import scala.collection.JavaConverters._

@@ -29,8 +29,20 @@ case class SparkModel(conf: DedupConfig) {
identifier.setName(identifierFieldName)
identifier.setType(Type.String)

// create fields for blacklist
val filtered = conf.getPace.getModel.asScala.flatMap(fdef => {
if (conf.blacklists().containsKey(fdef.getName)) {
val fdef_filtered = fdef.clone()
fdef_filtered.setName(fdef.getName + "_filtered")
Seq(fdef, fdef_filtered)
}
else {
Seq(fdef)
}
})

// Construct a Spark StructType representing the schema of the model
(Seq(identifier) ++ conf.getPace.getModel.asScala)
(Seq(identifier) ++ filtered)
.foldLeft(
new StructType()
)((resType, fieldDef) => {

@@ -44,7 +56,6 @@ case class SparkModel(conf: DedupConfig) {
})
})


}

val identityFieldPosition: Int = schema.fieldIndex(identifierFieldName)

@@ -52,7 +63,8 @@ case class SparkModel(conf: DedupConfig) {
val orderingFieldPosition: Int = schema.fieldIndex(orderingFieldName)

val parseJsonDataset: (Dataset[String] => Dataset[Row]) = df => {
df.map(r => rowFromJson(r))(SparkCompatUtils.encoderFor(schema))
df
.map(r => rowFromJson(r))(SparkCompatUtils.encoderFor(schema))
}

def rowFromJson(json: String): Row = {

@@ -64,41 +76,63 @@ case class SparkModel(conf: DedupConfig) {

schema.fieldNames.zipWithIndex.foldLeft(values) {
case ((res, (fname, index))) =>
val fdef = conf.getPace.getModelMap.get(fname)

val fdef = conf.getPace.getModelMap.get(fname.split("_filtered")(0))

if (fdef != null) {
res(index) = fdef.getType match {
case Type.String | Type.Int =>
MapDocumentUtil.truncateValue(
MapDocumentUtil.getJPathString(fdef.getPath, documentContext),
fdef.getLength
)
if (!fname.contains("_filtered")) { //process fields with no blacklist
res(index) = fdef.getType match {
case Type.String | Type.Int =>
MapDocumentUtil.truncateValue(
MapDocumentUtil.getJPathString(fdef.getPath, documentContext),
fdef.getLength
)

case Type.URL =>
var uv = MapDocumentUtil.getJPathString(fdef.getPath, documentContext)
if (!URL_REGEX.matcher(uv).matches)
uv = ""
uv
case Type.URL =>
var uv = MapDocumentUtil.getJPathString(fdef.getPath, documentContext)
if (!URL_REGEX.matcher(uv).matches)
uv = ""
uv

case Type.List | Type.JSON =>
MapDocumentUtil.truncateList(
MapDocumentUtil.getJPathList(fdef.getPath, documentContext, fdef.getType),
fdef.getSize
).asScala
case Type.List | Type.JSON =>
MapDocumentUtil.truncateList(
MapDocumentUtil.getJPathList(fdef.getPath, documentContext, fdef.getType),
fdef.getSize
).asScala

case Type.StringConcat =>
val jpaths = CONCAT_REGEX.split(fdef.getPath)
case Type.StringConcat =>
val jpaths = CONCAT_REGEX.split(fdef.getPath)

MapDocumentUtil.truncateValue(
jpaths
.map(jpath => MapDocumentUtil.getJPathString(jpath, documentContext))
.mkString(" "),
fdef.getLength
)
MapDocumentUtil.truncateValue(
jpaths
.map(jpath => MapDocumentUtil.getJPathString(jpath, documentContext))
.mkString(" "),
fdef.getLength
)

case Type.DoubleArray =>
MapDocumentUtil.getJPathArray(fdef.getPath, json)
case Type.DoubleArray =>
MapDocumentUtil.getJPathArray(fdef.getPath, json)
}
}
else { //process fields with blacklist
val blacklist: Predicate[String] = conf.blacklists().get(fdef.getName)

res(index) = fdef.getType match {
case Type.List | Type.JSON =>
MapDocumentUtil.truncateList(
MapDocumentUtil.getJPathList(fdef.getPath, documentContext, fdef.getType),
fdef.getSize
).asScala.filter((v: String) => !blacklist.test(v))

case _ =>
val value: String = MapDocumentUtil.truncateValue(
MapDocumentUtil.getJPathString(fdef.getPath, documentContext),
fdef.getLength
)
if (blacklist.test(value)) "" else value
}
}

val filter = fdef.getFilter

@@ -123,12 +157,22 @@ case class SparkModel(conf: DedupConfig) {
case _ => res(index)
}
}

if (StringUtils.isNotBlank(fdef.getInfer)) {
val inferFrom: String = if (StringUtils.isNotBlank(fdef.getInferenceFrom)) fdef.getInferenceFrom else fdef.getPath
res(index) = res(index) match {
case x: Seq[String] => x.map(inference(_, MapDocumentUtil.getJPathString(inferFrom, documentContext), fdef.getInfer))
case _ => inference(res(index).toString, MapDocumentUtil.getJPathString(inferFrom, documentContext), fdef.getInfer)
}
}
}

res

}

new GenericRowWithSchema(values, schema)

}

def clean(value: String, cleantype: String) : String = {

@@ -146,5 +190,17 @@ case class SparkModel(conf: DedupConfig) {
res
}

def inference(value: String, inferfrom: String, infertype: String) : String = {
val res = infertype match {
case "country" => AbstractPaceFunctions.countryInference(value, inferfrom)
case "city" => AbstractPaceFunctions.cityInference(value)
case "keyword" => AbstractPaceFunctions.keywordInference(value)
case "city_keyword" => AbstractPaceFunctions.cityKeywordInference(value)
case _ => value
}

res
}

}

@@ -1,48 +0,0 @@

package eu.dnetlib.pace.tree;

import java.util.Map;
import java.util.Set;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("cityMatch")
public class CityMatch extends AbstractStringComparator {

private Map<String, String> params;

public CityMatch(Map<String, String> params) {
super(params);
this.params = params;
}

@Override
public double distance(final String a, final String b, final Config conf) {

String ca = cleanup(a);
String cb = cleanup(b);

ca = normalize(ca);
cb = normalize(cb);

ca = filterAllStopWords(ca);
cb = filterAllStopWords(cb);

Set<String> cities1 = getCities(ca, Integer.parseInt(params.getOrDefault("windowSize", "4")));
Set<String> cities2 = getCities(cb, Integer.parseInt(params.getOrDefault("windowSize", "4")));

Set<String> codes1 = citiesToCodes(cities1);
Set<String> codes2 = citiesToCodes(cities2);

// if no cities are detected, the comparator gives 1.0
if (codes1.isEmpty() && codes2.isEmpty())
return 1.0;
else {
if (codes1.isEmpty() ^ codes2.isEmpty())
return -1; // undefined if one of the two has no cities
return commonElementsPercentage(codes1, codes2);
}
}
}

@@ -0,0 +1,51 @@

package eu.dnetlib.pace.tree;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("codeMatch")
public class CodeMatch extends AbstractStringComparator {

private Map<String, String> params;

private Pattern CODE_REGEX;

public CodeMatch(Map<String, String> params) {
super(params);
this.params = params;
this.CODE_REGEX = Pattern.compile(params.getOrDefault("codeRegex", "[a-zA-Z]+::\\d+"));
}

public Set<String> getRegexList(String input) {
Matcher matcher = this.CODE_REGEX.matcher(input);
Set<String> cities = new HashSet<>();
while (matcher.find()) {
cities.add(matcher.group());
}
return cities;
}

@Override
public double distance(final String a, final String b, final Config conf) {

Set<String> codes1 = getRegexList(a);
Set<String> codes2 = getRegexList(b);

// if no codes are detected, the comparator gives 1.0
if (codes1.isEmpty() && codes2.isEmpty())
return 1.0;
else {
if (codes1.isEmpty() ^ codes2.isEmpty())
return -1; // undefined if one of the two has no codes
return commonElementsPercentage(codes1, codes2);
}
}
}

@@ -0,0 +1,54 @@

package eu.dnetlib.pace.tree;

import java.util.Map;
import java.util.Set;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("countryMatch")
public class CountryMatch extends AbstractStringComparator {

private Map<String, String> params;

public CountryMatch(Map<String, String> params) {
super(params, new com.wcohen.ss.JaroWinkler());
this.params = params;
}

public CountryMatch(final double weight) {
super(weight, new com.wcohen.ss.JaroWinkler());
}

protected CountryMatch(final double weight, final AbstractStringDistance ssalgo) {
super(weight, ssalgo);
}

@Override
public double distance(final String a, final String b, final Config conf) {

if (a.isEmpty() || b.isEmpty()) {
return -1.0; // return -1 if a field is missing
}
if (a.equalsIgnoreCase("unknown") || b.equalsIgnoreCase("unknown")) {
return -1.0; // return -1 if a country is UNKNOWN
}

return a.equals(b) ? 1.0 : 0;
}

@Override
public double getWeight() {
return super.weight;
}

@Override
protected double normalize(final double d) {
return d;
}

}
@@ -0,0 +1,67 @@

package eu.dnetlib.pace.tree;

import java.time.DateTimeException;
import java.time.LocalDate;
import java.time.Period;
import java.time.format.DateTimeFormatter;
import java.util.Locale;
import java.util.Map;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("dateRange")
public class DateRange extends AbstractStringComparator {

int YEAR_RANGE;

public DateRange(Map<String, String> params) {
super(params, new com.wcohen.ss.JaroWinkler());
YEAR_RANGE = Integer.parseInt(params.getOrDefault("year_range", "3"));
}

public DateRange(final double weight) {
super(weight, new com.wcohen.ss.JaroWinkler());
}

protected DateRange(final double weight, final AbstractStringDistance ssalgo) {
super(weight, ssalgo);
}

public static boolean isNumeric(String str) {
return str.matches("\\d+"); // match a number with optional '-' and decimal.
}

@Override
public double distance(final String a, final String b, final Config conf) {
if (a.isEmpty() || b.isEmpty()) {
return -1.0; // return -1 if a field is missing
}

try {
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ENGLISH);
LocalDate d1 = LocalDate.parse(a, formatter);
LocalDate d2 = LocalDate.parse(b, formatter);
Period period = Period.between(d1, d2);

return period.getYears() <= YEAR_RANGE ? 1.0 : 0.0;
} catch (DateTimeException e) {
return -1.0;
}

}

@Override
public double getWeight() {
return super.weight;
}

@Override
protected double normalize(final double d) {
return d;
}
}
@@ -0,0 +1,59 @@

package eu.dnetlib.pace.tree;

import java.util.Map;
import java.util.Set;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("jaroWinklerLegalname")
public class JaroWinklerLegalname extends AbstractStringComparator {

private Map<String, String> params;

private final String CITY_CODE_REGEX = "city::\\d+";
private final String KEYWORD_CODE_REGEX = "key::\\d+";

public JaroWinklerLegalname(Map<String, String> params) {
super(params, new com.wcohen.ss.JaroWinkler());
this.params = params;
}

public JaroWinklerLegalname(double weight) {
super(weight, new com.wcohen.ss.JaroWinkler());
}

protected JaroWinklerLegalname(double weight, AbstractStringDistance ssalgo) {
super(weight, ssalgo);
}

@Override
public double distance(String a, String b, final Config conf) {

String ca = a.replaceAll(CITY_CODE_REGEX, "").replaceAll(KEYWORD_CODE_REGEX, " ");
String cb = b.replaceAll(CITY_CODE_REGEX, "").replaceAll(KEYWORD_CODE_REGEX, " ");

ca = ca.replaceAll("[ ]{2,}", " ");
cb = cb.replaceAll("[ ]{2,}", " ");

if (ca.isEmpty() && cb.isEmpty())
return 1.0;
else
return normalize(ssalgo.score(ca, cb));
}

@Override
public double getWeight() {
return super.weight;
}

@Override
protected double normalize(double d) {
return d;
}

}
@@ -1,74 +0,0 @@

package eu.dnetlib.pace.tree;

import java.util.Map;
import java.util.Set;

import com.wcohen.ss.AbstractStringDistance;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("jaroWinklerNormalizedName")
public class JaroWinklerNormalizedName extends AbstractStringComparator {

private Map<String, String> params;

public JaroWinklerNormalizedName(Map<String, String> params) {
super(params, new com.wcohen.ss.JaroWinkler());
this.params = params;
}

public JaroWinklerNormalizedName(double weight) {
super(weight, new com.wcohen.ss.JaroWinkler());
}

protected JaroWinklerNormalizedName(double weight, AbstractStringDistance ssalgo) {
super(weight, ssalgo);
}

@Override
public double distance(String a, String b, final Config conf) {
String ca = cleanup(a);
String cb = cleanup(b);

ca = normalize(ca);
cb = normalize(cb);

ca = filterAllStopWords(ca);
cb = filterAllStopWords(cb);

Set<String> keywords1 = getKeywords(
ca, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));
Set<String> keywords2 = getKeywords(
cb, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));

Set<String> cities1 = getCities(ca, Integer.parseInt(params.getOrDefault("windowSize", "4")));
Set<String> cities2 = getCities(cb, Integer.parseInt(params.getOrDefault("windowSize", "4")));

ca = removeKeywords(ca, keywords1);
ca = removeKeywords(ca, cities1);
cb = removeKeywords(cb, keywords2);
cb = removeKeywords(cb, cities2);

ca = ca.replaceAll("[ ]{2,}", " ");
cb = cb.replaceAll("[ ]{2,}", " ");

if (ca.isEmpty() && cb.isEmpty())
return 1.0;
else
return normalize(ssalgo.score(ca, cb));
}

@Override
public double getWeight() {
return super.weight;
}

@Override
protected double normalize(double d) {
return d;
}

}
@@ -41,21 +41,38 @@ public class JsonListMatch extends AbstractListComparator {
return -1;
}

final Set<String> ca = sa.stream().map(this::toComparableString).collect(Collectors.toSet());
final Set<String> cb = sb.stream().map(this::toComparableString).collect(Collectors.toSet());
Set<String> ca = sa.stream().map(this::toComparableString).collect(Collectors.toSet());
Set<String> cb = sb.stream().map(this::toComparableString).collect(Collectors.toSet());

int incommon = Sets.intersection(ca, cb).size();
int simDiff = Sets.symmetricDifference(ca, cb).size();
switch (MODE) {
case "count":
return Sets.intersection(ca, cb).size();

if (incommon + simDiff == 0) {
return 0.0;
case "percentage":
int incommon = Sets.intersection(ca, cb).size();
int simDiff = Sets.symmetricDifference(ca, cb).size();
if (incommon + simDiff == 0) {
return 0.0;
}
return (double) incommon / (incommon + simDiff);

case "type":
Set<String> typesA = ca.stream().map(s -> s.split("::")[0]).collect(Collectors.toSet());
Set<String> typesB = cb.stream().map(s -> s.split("::")[0]).collect(Collectors.toSet());

Set<String> types = Sets.intersection(typesA, typesB);

if (types.isEmpty()) // if no common type, it is impossible to compare
return -1;

ca = ca.stream().filter(s -> types.contains(s.split("::")[0])).collect(Collectors.toSet());
cb = cb.stream().filter(s -> types.contains(s.split("::")[0])).collect(Collectors.toSet());

return (double) Sets.intersection(ca, cb).size() / types.size();

default:
return -1;
}

if (MODE.equals("percentage"))
return (double) incommon / (incommon + simDiff);
else
return incommon;

}

// converts every json into a comparable string basing on parameters

@@ -69,7 +86,7 @@ public class JsonListMatch extends AbstractListComparator {
// for each path in the param list
for (String key : params.keySet().stream().filter(k -> k.contains("jpath")).collect(Collectors.toList())) {
String path = params.get(key);
String value = MapDocumentUtil.getJPathString(path, documentContext);
String value = MapDocumentUtil.getJPathString(path, documentContext).toLowerCase();
if (value == null || value.isEmpty())
value = "";
st.append(value);
@@ -1,50 +0,0 @@

package eu.dnetlib.pace.tree;

import java.util.Map;
import java.util.Set;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;

@ComparatorClass("keywordMatch")
public class KeywordMatch extends AbstractStringComparator {

Map<String, String> params;

public KeywordMatch(Map<String, String> params) {
super(params);
this.params = params;
}

@Override
public double distance(final String a, final String b, final Config conf) {

String ca = cleanup(a);
String cb = cleanup(b);

ca = normalize(ca);
cb = normalize(cb);

ca = filterAllStopWords(ca);
cb = filterAllStopWords(cb);

Set<String> keywords1 = getKeywords(
ca, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));
Set<String> keywords2 = getKeywords(
cb, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));

Set<String> codes1 = toCodes(keywords1, conf.translationMap());
Set<String> codes2 = toCodes(keywords2, conf.translationMap());

// if no cities are detected, the comparator gives 1.0
if (codes1.isEmpty() && codes2.isEmpty())
return 1.0;
else {
if (codes1.isEmpty() ^ codes2.isEmpty())
return -1.0; // undefined if one of the two has no keywords
return commonElementsPercentage(codes1, codes2);
}
}
}
File diff suppressed because one or more lines are too long
@@ -8,6 +8,7 @@ import org.junit.jupiter.api.Test;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.mongodb.connection.Cluster;

import eu.dnetlib.pace.AbstractPaceTest;
import eu.dnetlib.pace.common.AbstractPaceFunctions;

@@ -177,41 +178,16 @@ public class ClusteringFunctionTest extends AbstractPaceTest {
}

@Test
public void testKeywordsClustering() {
public void legalnameClustering() {

final ClusteringFunction cf = new KeywordsClustering(params);
final String s = "Polytechnic University of Turin";
final ClusteringFunction cf = new LegalnameClustering(params);
String s = "key::1 key::2 city::1";
System.out.println(s);
System.out.println(cf.apply(conf, Lists.newArrayList(s)));

final String s1 = "POLITECNICO DI TORINO";
System.out.println(s1);
System.out.println(cf.apply(conf, Lists.newArrayList(s1)));

final String s2 = "Universita farmaceutica culturale di milano bergamo";
System.out.println("s2 = " + s2);
System.out.println(cf.apply(conf, Lists.newArrayList(s2)));

final String s3 = "universita universita milano milano";
System.out.println("s3 = " + s3);
System.out.println(cf.apply(conf, Lists.newArrayList(s3)));

final String s4 = "Politechniki Warszawskiej (Warsaw University of Technology)";
System.out.println("s4 = " + s4);
System.out.println(cf.apply(conf, Lists.newArrayList(s4)));

final String s5 = "İstanbul Ticarət Universiteti";
System.out.println("s5 = " + s5);
System.out.println(cf.apply(conf, Lists.newArrayList(s5)));

final String s6 = "National and Kapodistrian University of Athens";
System.out.println("s6 = " + s6);
System.out.println(cf.apply(conf, Lists.newArrayList(s6)));

final String s7 = "Εθνικό και Καποδιστριακό Πανεπιστήμιο Αθηνών";
System.out.println("s7 = " + s7);
System.out.println(cf.apply(conf, Lists.newArrayList(s7)));

s = "key::1 key::2 city::1 city::2";
System.out.println(s);
System.out.println(cf.apply(conf, Lists.newArrayList(s)));
}

@Test

@@ -251,4 +227,17 @@ public class ClusteringFunctionTest extends AbstractPaceTest {
System.out.println(cf.apply(conf, Lists.newArrayList(s)));
}

@Test
public void testNumAuthorsTitleSuffixPrefixChain() {

final ClusteringFunction cf = new NumAuthorsTitleSuffixPrefixChain(params);
params.put("mod", 10);

final String title = "PARP-2 Regulates SIRT1 Expression and Whole-Body Energy Expenditure";
final String num_authors = "10";
System.out.println("title = " + title);
System.out.println("num_authors = " + num_authors);
System.out.println(cf.apply(conf, Lists.newArrayList(num_authors, title)));
}

}
@@ -1,8 +1,7 @@

package eu.dnetlib.pace.common;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.*;

import org.junit.jupiter.api.*;

@@ -54,4 +53,56 @@ public class PaceFunctionTest extends AbstractPaceFunctions {
System.out.println("Fixed aliases : " + fixAliases(TEST_STRING));
}

@Test()
public void countryInferenceTest_NPE() {
assertThrows(
NullPointerException.class,
() -> countryInference("UNKNOWN", null),
"Expected countryInference() to throw an NPE");
}

@Test
public void countryInferenceTest() {
assertEquals("UNKNOWN", countryInference("UNKNOWN", ""));
assertEquals("IT", countryInference("UNKNOWN", "Università di Bologna"));
assertEquals("UK", countryInference("UK", "Università di Bologna"));
assertEquals("IT", countryInference("UNKNOWN", "Universiteé de Naples"));
assertEquals("UNKNOWN", countryInference("UNKNOWN", "Università del Lavoro"));
}

@Test
public void cityInferenceTest() {
assertEquals("universita city::3181928", cityInference("Università di Bologna"));
assertEquals("university city::3170647", cityInference("University of Pisa"));
assertEquals("universita", cityInference("Università del lavoro"));
assertEquals("universita city::3173331 city::3169522", cityInference("Università di Modena e Reggio Emilia"));
}

@Test
public void keywordInferenceTest() {
assertEquals("key::41 turin", keywordInference("Polytechnic University of Turin"));
assertEquals("key::41 torino", keywordInference("POLITECNICO DI TORINO"));
assertEquals(
"key::1 key::60 key::81 milano bergamo",
keywordInference("Universita farmaceutica culturale di milano bergamo"));
assertEquals("key::1 key::1 milano milano", keywordInference("universita universita milano milano"));
assertEquals(
"key::10 kapodistriako panepistemio athenon",
keywordInference("Εθνικό και Καποδιστριακό Πανεπιστήμιο Αθηνών"));
}

@Test
public void cityKeywordInferenceTest() {
assertEquals("key::41 city::3165524", cityKeywordInference("Polytechnic University of Turin"));
assertEquals("key::41 city::3165524", cityKeywordInference("POLITECNICO DI TORINO"));
assertEquals(
"key::1 key::60 key::81 city::3173435 city::3182164",
cityKeywordInference("Universita farmaceutica culturale di milano bergamo"));
assertEquals(
"key::1 key::1 city::3173435 city::3173435", cityKeywordInference("universita universita milano milano"));
assertEquals(
"key::10 kapodistriako panepistemio city::264371",
cityKeywordInference("Εθνικό και Καποδιστριακό Πανεπιστήμιο Αθηνών"));
}

}
@@ -35,6 +35,7 @@ public class ComparatorTest extends AbstractPaceTest {
params.put("name_th", "0.95");
params.put("jpath_value", "$.value");
params.put("jpath_classid", "$.qualifier.classid");
params.put("codeRegex", "key::\\d+");
}

@Test

@@ -44,53 +45,61 @@ public class ComparatorTest extends AbstractPaceTest {
}

@Test
public void cityMatchTest() {
final CityMatch cityMatch = new CityMatch(params);
public void codeMatchTest() {
CodeMatch codeMatch = new CodeMatch(params);

// both names with no cities
assertEquals(1.0, cityMatch.distance("Università", "Centro di ricerca", conf));
// both names with no codes
assertEquals(1.0, codeMatch.distance("testing1", "testing2", conf));

// one of the two names with no cities
assertEquals(-1.0, cityMatch.distance("Università di Bologna", "Centro di ricerca", conf));
// one of the two names with no codes
assertEquals(-1.0, codeMatch.distance("testing1 key::1", "testing", conf));

// both names with cities (same)
assertEquals(1.0, cityMatch.distance("Universita di Bologna", "Biblioteca di Bologna", conf));
// both names with codes (same)
assertEquals(1.0, codeMatch.distance("testing1 key::1", "testing2 key::1", conf));

// both names with cities (different)
assertEquals(0.0, cityMatch.distance("Universita di Bologna", "Universita di Torino", conf));
assertEquals(0.0, cityMatch.distance("Franklin College", "Concordia College", conf));
// both names with codes (different)
assertEquals(0.0, codeMatch.distance("testing1 key::1", "testing2 key::2", conf));

// particular cases
assertEquals(1.0, cityMatch.distance("Free University of Bozen-Bolzano", "Università di Bolzano", conf));
assertEquals(
1.0,
cityMatch
.distance(
"Politechniki Warszawskiej (Warsaw University of Technology)", "Warsaw University of Technology",
conf));
// both names with codes (1 same, 1 different)
assertEquals(0.5, codeMatch.distance("key::1 key::2 testing1", "key::1 testing", conf));

// failing becasuse 'Allen' is a transliterrated greek stopword
// assertEquals(-1.0, cityMatch.distance("Allen (United States)", "United States Military Academy", conf));
assertEquals(-1.0, cityMatch.distance("Washington (United States)", "United States Military Academy", conf));
}

@Test
public void keywordMatchTest() {
params.put("threshold", "0.5");
public void datasetVersionCodeMatchTest() {

final KeywordMatch keywordMatch = new KeywordMatch(params);
params.put("codeRegex", "(?=[\\w-]*[a-zA-Z])(?=[\\w-]*\\d)[\\w-]+");
CodeMatch codeMatch = new CodeMatch(params);

// names have different codes
assertEquals(
0.5, keywordMatch.distance("Biblioteca dell'Universita di Bologna", "Università di Bologna", conf));
assertEquals(1.0, keywordMatch.distance("Universita degli studi di Pisa", "Universita di Pisa", conf));
assertEquals(1.0, keywordMatch.distance("Polytechnic University of Turin", "POLITECNICO DI TORINO", conf));
assertEquals(1.0, keywordMatch.distance("Istanbul Commerce University", "İstanbul Ticarət Universiteti", conf));
assertEquals(1.0, keywordMatch.distance("Franklin College", "Concordia College", conf));
assertEquals(2.0 / 3.0, keywordMatch.distance("University of Georgia", "Georgia State University", conf));
assertEquals(0.5, keywordMatch.distance("University College London", "University of London", conf));
assertEquals(0.5, keywordMatch.distance("Washington State University", "University of Washington", conf));
assertEquals(-1.0, keywordMatch.distance("Allen (United States)", "United States Military Academy", conf));
0.0,
codeMatch
.distance(
"physical oceanography at ctd station june 1998 ev02a",
"physical oceanography at ctd station june 1998 ir02", conf));

// names have same code
assertEquals(
1.0,
codeMatch
.distance(
"physical oceanography at ctd station june 1998 ev02a",
"physical oceanography at ctd station june 1998 ev02a", conf));

// code is not in both names
assertEquals(
-1,
codeMatch
.distance(
"physical oceanography at ctd station june 1998",
"physical oceanography at ctd station june 1998 ev02a", conf));
assertEquals(
1.0,
codeMatch
.distance(
"physical oceanography at ctd station june 1998", "physical oceanography at ctd station june 1998",
conf));
}

@Test

@@ -155,15 +164,15 @@ public class ComparatorTest extends AbstractPaceTest {
}

@Test
public void jaroWinklerNormalizedNameTest() {
public void jaroWinklerLegalnameTest() {

final JaroWinklerNormalizedName jaroWinklerNormalizedName = new JaroWinklerNormalizedName(params);
final JaroWinklerLegalname jaroWinklerLegalname = new JaroWinklerLegalname(params);

double result = jaroWinklerNormalizedName
.distance("AT&T (United States)", "United States Military Academy", conf);
double result = jaroWinklerLegalname
.distance("AT&T (United States)", "United States key::2 key::1", conf);
System.out.println("result = " + result);

result = jaroWinklerNormalizedName.distance("NOAA - Servicio Meteorol\\u00f3gico Nacional", "NOAA - NWS", conf);
result = jaroWinklerLegalname.distance("NOAA - Servicio Meteorol\\u00f3gico Nacional", "NOAA - NWS", conf);
System.out.println("result = " + result);

}

@@ -285,15 +294,15 @@ public class ComparatorTest extends AbstractPaceTest {
List<String> a = createFieldList(
Arrays
.asList(
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":null,\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:actionset\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"doi\",\"classname\":\"Digital Object Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"10.1111/pbi.12655\"}"),
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":null,\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:actionset\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"grid\",\"classname\":\"GRID Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"grid_1\"}",
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":null,\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:actionset\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"ror\",\"classname\":\"Research Organization Registry\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"ror_1\"}"),
"authors");
List<String> b = createFieldList(
Arrays
.asList(
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"pmc\",\"classname\":\"PubMed Central ID\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"PMC5399005\"}",
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"pmid\",\"classname\":\"PubMed ID\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"27775869\"}",
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"user:claim\",\"classname\":\"Linked by user\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"doi\",\"classname\":\"Digital Object Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"10.1111/pbi.12655\"}",
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"handle\",\"classname\":\"Handle\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"1854/LU-8523529\"}"),
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"grid\",\"classname\":\"GRID Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"grid_1\"}",
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"ror\",\"classname\":\"Research Organization Registry\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"ror_2\"}",
"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"user:claim\",\"classname\":\"Linked by user\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"isni\",\"classname\":\"ISNI Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"isni_1\"}"),
"authors");
double result = jsonListMatch.compare(a, b, conf);

@@ -305,6 +314,13 @@ public class ComparatorTest extends AbstractPaceTest {
result = jsonListMatch.compare(a, b, conf);

assertEquals(1.0, result);

params.put("mode", "type");
jsonListMatch = new JsonListMatch(params);
result = jsonListMatch.compare(a, b, conf);

assertEquals(0.5, result);

}

@Test

@@ -336,4 +352,53 @@ public class ComparatorTest extends AbstractPaceTest {
System.out.println("compare = " + compare);
}

@Test
public void countryMatch() {

CountryMatch countryMatch = new CountryMatch(params);

double result = countryMatch.distance("UNKNOWN", "UNKNOWN", conf);
assertEquals(-1.0, result);

result = countryMatch.distance("CL", "UNKNOWN", conf);
assertEquals(-1.0, result);

result = countryMatch.distance("CL", "IT", conf);
assertEquals(0.0, result);

result = countryMatch.distance("CL", "CL", conf);
assertEquals(1.0, result);

}

@Test
public void dateMatch() {

DateRange dateRange = new DateRange(params);

double result = dateRange.distance("2021-05-13", "2023-05-13", conf);
assertEquals(1.0, result);

result = dateRange.distance("2021-05-13", "2025-05-13", conf);
assertEquals(0.0, result);

result = dateRange.distance("", "2020-05-05", conf);
assertEquals(-1.0, result);

result = dateRange.distance("invalid date", "2021-05-02", conf);
assertEquals(-1.0, result);
}

@Test
public void titleVersionMatchTest() {

TitleVersionMatch titleVersionMatch = new TitleVersionMatch(params);

double result = titleVersionMatch
.compare(
"parp 2 regulates sirt 1 expression and whole body energy expenditure",
"parp 2 regulates sirt 1 expression and whole body energy expenditure", conf);
assertEquals(1.0, result);
}

}
@@ -12,7 +12,6 @@ import org.junit.jupiter.api.Test;

import eu.dnetlib.pace.model.Person;

public class UtilTest {

static Map<String, String> params;
@@ -0,0 +1,113 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<parent>
<artifactId>dhp</artifactId>
<groupId>eu.dnetlib.dhp</groupId>
<version>1.2.5-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>dhp-shade-package</artifactId>
<description>This module create a jar of all module dependencies</description>
<build>
<plugins>
<plugin>
<artifactId>maven-shade-plugin</artifactId>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<transformers>
<transformer>
<mainClass>eu.dnetlib.dhp.oa.dedup.SparkCreateSimRels</mainClass>
</transformer>
<transformer />
<transformer>
<resource>META-INF/cxf/bus-extensions.txt</resource>
</transformer>
</transformers>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/maven/**</exclude>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
<relocations>
<relocation>
<pattern>com</pattern>
<shadedPattern>repackaged.com.google.common</shadedPattern>
<includes>
<include>com.google.common.**</include>
</includes>
</relocation>
</relocations>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.28</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter</artifactId>
<version>5.6.1</version>
<scope>test</scope>
<exclusions>
<exclusion>
<artifactId>junit-jupiter-api</artifactId>
<groupId>org.junit.jupiter</groupId>
</exclusion>
<exclusion>
<artifactId>junit-jupiter-params</artifactId>
<groupId>org.junit.jupiter</groupId>
</exclusion>
<exclusion>
<artifactId>junit-jupiter-engine</artifactId>
<groupId>org.junit.jupiter</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>3.3.3</version>
<scope>test</scope>
<exclusions>
<exclusion>
<artifactId>byte-buddy</artifactId>
<groupId>net.bytebuddy</groupId>
</exclusion>
<exclusion>
<artifactId>byte-buddy-agent</artifactId>
<groupId>net.bytebuddy</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-junit-jupiter</artifactId>
<version>3.3.3</version>
<scope>test</scope>
</dependency>
</dependencies>
<distributionManagement>
<site>
<id>DHPSite</id>
<url>${dhp.site.stage.path}/dhp-common</url>
</site>
</distributionManagement>
</project>
@@ -26,16 +26,16 @@

<dependencies>

<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-actionmanager</artifactId>
<version>${project.version}</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-aggregation</artifactId>-->
<!-- <artifactId>dhp-actionmanager</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-aggregation</artifactId>
<version>${project.version}</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-blacklist</artifactId>-->

@@ -56,61 +56,61 @@
<!-- <artifactId>dhp-enrichment</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-graph-mapper</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-graph-provision</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-impact-indicators</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-stats-actionsets</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-stats-hist-snaps</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-stats-monitor-irish</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-stats-promote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-stats-update</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-swh</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-usage-raw-data-update</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>eu.dnetlib.dhp</groupId>
<artifactId>dhp-usage-stats-build</artifactId>
<version>${project.version}</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-graph-mapper</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-graph-provision</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-impact-indicators</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-stats-actionsets</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-stats-hist-snaps</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-stats-monitor-irish</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-stats-promote</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-stats-update</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-swh</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-usage-raw-data-update</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>eu.dnetlib.dhp</groupId>-->
<!-- <artifactId>dhp-usage-stats-build</artifactId>-->
<!-- <version>${project.version}</version>-->
<!-- </dependency>-->
</dependencies>
@@ -51,48 +51,5 @@
<artifactId>hadoop-distcp</artifactId>
</dependency>

<dependency>
<groupId>eu.dnetlib</groupId>
<artifactId>dnet-actionmanager-api</artifactId>
</dependency>
<dependency>
<groupId>eu.dnetlib</groupId>
<artifactId>dnet-actionmanager-common</artifactId>
<exclusions>
<exclusion>
<groupId>eu.dnetlib</groupId>
<artifactId>dnet-openaireplus-mapping-utils</artifactId>
</exclusion>
<exclusion>
<groupId>saxonica</groupId>
<artifactId>saxon</artifactId>
</exclusion>
<exclusion>
<groupId>saxonica</groupId>
<artifactId>saxon-dom</artifactId>
</exclusion>
<exclusion>
<groupId>jgrapht</groupId>
<artifactId>jgrapht</artifactId>
</exclusion>
<exclusion>
<groupId>net.sf.ehcache</groupId>
<artifactId>ehcache</artifactId>
</exclusion>
<exclusion>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.*</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>apache</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>

</dependencies>
</project>
@@ -4,7 +4,6 @@ package eu.dnetlib.dhp.actionmanager;
import java.io.Serializable;
import java.io.StringReader;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

@@ -22,7 +21,6 @@ import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;

import eu.dnetlib.actionmanager.rmi.ActionManagerException;
import eu.dnetlib.dhp.utils.ISLookupClientFactory;
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpException;
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpService;

@@ -65,7 +63,7 @@ public class ISClient implements Serializable {
.map(t -> buildDirectory(basePath, t))
.collect(Collectors.toList()))
.orElseThrow(() -> new IllegalStateException("empty set list"));
} catch (ActionManagerException | ISLookUpException e) {
} catch (ISLookUpException e) {
throw new IllegalStateException("unable to query ActionSets info from the IS");
}
}

@@ -89,31 +87,18 @@ public class ISClient implements Serializable {
return Joiner.on("/").join(basePath, t.getMiddle(), t.getRight());
}

private String getBasePathHDFS(ISLookUpService isLookup) throws ActionManagerException {
private String getBasePathHDFS(ISLookUpService isLookup) throws ISLookUpException {
return queryServiceProperty(isLookup, "basePath");
}

private String queryServiceProperty(ISLookUpService isLookup, final String propertyName)
throws ActionManagerException {
throws ISLookUpException {
final String q = "for $x in /RESOURCE_PROFILE[.//RESOURCE_TYPE/@value='ActionManagerServiceResourceType'] return $x//SERVICE_PROPERTIES/PROPERTY[./@ key='"
+ propertyName
+ "']/@value/string()";
log.debug("quering for service property: {}", q);
try {
final List<String> value = isLookup.quickSearchProfile(q);
return Iterables.getOnlyElement(value);
} catch (ISLookUpException e) {
String msg = "Error accessing service profile, using query: " + q;
log.error(msg, e);
throw new ActionManagerException(msg, e);
} catch (NoSuchElementException e) {
String msg = "missing service property: " + propertyName;
log.error(msg, e);
throw new ActionManagerException(msg, e);
} catch (IllegalArgumentException e) {
String msg = "found more than one service property: " + propertyName;
log.error(msg, e);
throw new ActionManagerException(msg, e);
}

final List<String> value = isLookup.quickSearchProfile(q);
return Iterables.getOnlyElement(value);
}
}
@@ -151,12 +151,17 @@ public class PromoteActionPayloadForGraphTableJob {
SparkSession spark, String path, Class<G> rowClazz) {
logger.info("Reading graph table from path: {}", path);

return spark
.read()
.textFile(path)
.map(
(MapFunction<String, G>) value -> OBJECT_MAPPER.readValue(value, rowClazz),
Encoders.bean(rowClazz));
if (HdfsSupport.exists(path, spark.sparkContext().hadoopConfiguration())) {
return spark
.read()
.textFile(path)
.map(
(MapFunction<String, G>) value -> OBJECT_MAPPER.readValue(value, rowClazz),
Encoders.bean(rowClazz));
} else {
logger.info("Found empty graph table from path: {}", path);
return spark.emptyDataset(Encoders.bean(rowClazz));
}
}

private static <A extends Oaf> Dataset<A> readActionPayload(

@@ -223,7 +228,7 @@ public class PromoteActionPayloadForGraphTableJob {
rowClazz,
actionPayloadClazz);

if (shouldGroupById) {
if (Boolean.TRUE.equals(shouldGroupById)) {
return PromoteActionPayloadFunctions
.groupGraphTableByIdAndMerge(
joinedAndMerged, rowIdFn, mergeRowsAndGetFn, zeroFn, isNotZeroFn, rowClazz);

@@ -250,6 +255,8 @@ public class PromoteActionPayloadForGraphTableJob {
return () -> clazz.cast(new eu.dnetlib.dhp.schema.oaf.Relation());
case "eu.dnetlib.dhp.schema.oaf.Software":
return () -> clazz.cast(new eu.dnetlib.dhp.schema.oaf.Software());
case "eu.dnetlib.dhp.schema.oaf.Person":
return () -> clazz.cast(new eu.dnetlib.dhp.schema.oaf.Person());
default:
throw new RuntimeException("unknown class: " + clazz.getCanonicalName());
}
@@ -50,7 +50,7 @@ public class PromoteActionPayloadFunctions {
PromoteAction.Strategy promoteActionStrategy,
Class<G> rowClazz,
Class<A> actionPayloadClazz) {
if (!isSubClass(rowClazz, actionPayloadClazz)) {
if (Boolean.FALSE.equals(isSubClass(rowClazz, actionPayloadClazz))) {
throw new RuntimeException(
"action payload type must be the same or be a super type of table row type");
}
@@ -103,6 +103,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -156,6 +157,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -95,6 +95,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -7,3 +7,4 @@ promote_action_payload_for_project_table classpath eu/dnetlib/dhp/actionmanager/
promote_action_payload_for_publication_table classpath eu/dnetlib/dhp/actionmanager/wf/publication/oozie_app
promote_action_payload_for_relation_table classpath eu/dnetlib/dhp/actionmanager/wf/relation/oozie_app
promote_action_payload_for_software_table classpath eu/dnetlib/dhp/actionmanager/wf/software/oozie_app
promote_action_payload_for_person_table classpath eu/dnetlib/dhp/actionmanager/wf/person/oozie_app
@@ -125,6 +125,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -134,21 +135,10 @@
<arg>--outputPath</arg><arg>${workingDir}/action_payload_by_type</arg>
<arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg>
</spark>
<ok to="ForkPromote"/>
<ok to="PromoteActionPayloadForDatasetTable"/>
<error to="Kill"/>
</action>

<fork name="ForkPromote">
<path start="PromoteActionPayloadForDatasetTable"/>
<path start="PromoteActionPayloadForDatasourceTable"/>
<path start="PromoteActionPayloadForOrganizationTable"/>
<path start="PromoteActionPayloadForOtherResearchProductTable"/>
<path start="PromoteActionPayloadForProjectTable"/>
<path start="PromoteActionPayloadForPublicationTable"/>
<path start="PromoteActionPayloadForRelationTable"/>
<path start="PromoteActionPayloadForSoftwareTable"/>
</fork>

<action name="PromoteActionPayloadForDatasetTable">
<sub-workflow>
<app-path>${wf:appPath()}/promote_action_payload_for_dataset_table</app-path>

@@ -160,7 +150,7 @@
</property>
</configuration>
</sub-workflow>
<ok to="JoinPromote"/>
<ok to="PromoteActionPayloadForDatasourceTable"/>
<error to="Kill"/>
</action>

@@ -175,7 +165,7 @@
</property>
</configuration>
</sub-workflow>
<ok to="JoinPromote"/>
<ok to="PromoteActionPayloadForOrganizationTable"/>
<error to="Kill"/>
</action>

@@ -190,7 +180,7 @@
</property>
</configuration>
</sub-workflow>
<ok to="JoinPromote"/>
<ok to="PromoteActionPayloadForOtherResearchProductTable"/>
<error to="Kill"/>
</action>

@@ -205,7 +195,7 @@
</property>
</configuration>
</sub-workflow>
<ok to="JoinPromote"/>
<ok to="PromoteActionPayloadForProjectTable"/>
<error to="Kill"/>
</action>

@@ -220,7 +210,7 @@
</property>
</configuration>
</sub-workflow>
<ok to="JoinPromote"/>
<ok to="PromoteActionPayloadForPublicationTable"/>
<error to="Kill"/>
</action>

@@ -235,7 +225,7 @@
</property>
</configuration>
</sub-workflow>
<ok to="JoinPromote"/>
<ok to="PromoteActionPayloadForRelationTable"/>
<error to="Kill"/>
</action>

@@ -250,7 +240,7 @@
</property>
</configuration>
</sub-workflow>
<ok to="JoinPromote"/>
<ok to="PromoteActionPayloadForSoftwareTable"/>
<error to="Kill"/>
</action>

@@ -265,11 +255,9 @@
</property>
</configuration>
</sub-workflow>
<ok to="JoinPromote"/>
<ok to="End"/>
<error to="Kill"/>
</action>

<join name="JoinPromote" to="End"/>

<end name="End"/>
</workflow-app>
@@ -95,6 +95,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -103,6 +103,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -155,11 +156,12 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=2560
--conf spark.sql.shuffle.partitions=8000
</spark-opts>
<arg>--inputGraphTablePath</arg><arg>${workingDir}/otherresearchproduct</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
@@ -0,0 +1,129 @@
<workflow-app name="promote_action_payload_for_person_table" xmlns="uri:oozie:workflow:0.5">
<parameters>
<property>
<name>activePromotePersonActionPayload</name>
<description>when true will promote actions with eu.dnetlib.dhp.schema.oaf.Person payload</description>
</property>
<property>
<name>inputGraphRootPath</name>
<description>root location of input materialized graph</description>
</property>
<property>
<name>inputActionPayloadRootPath</name>
<description>root location of action payloads to promote</description>
</property>
<property>
<name>outputGraphRootPath</name>
<description>root location for output materialized graph</description>
</property>
<property>
<name>mergeAndGetStrategy</name>
<description>strategy for merging graph table objects with action payload instances, MERGE_FROM_AND_GET or SELECT_NEWER_AND_GET</description>
</property>
<property>
<name>sparkDriverMemory</name>
<description>memory for driver process</description>
</property>
<property>
<name>sparkExecutorMemory</name>
<description>memory for individual executor</description>
</property>
<property>
<name>sparkExecutorCores</name>
<description>number of cores used by single executor</description>
</property>
<property>
<name>oozieActionShareLibForSpark2</name>
<description>oozie action sharelib for spark 2.*</description>
</property>
<property>
<name>spark2ExtraListeners</name>
<value>com.cloudera.spark.lineage.NavigatorAppListener</value>
<description>spark 2.* extra listeners classname</description>
</property>
<property>
<name>spark2SqlQueryExecutionListeners</name>
<value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
<description>spark 2.* sql query execution listeners classname</description>
</property>
<property>
<name>spark2YarnHistoryServerAddress</name>
<description>spark 2.* yarn history server address</description>
</property>
<property>
<name>spark2EventLogDir</name>
<description>spark 2.* event log dir location</description>
</property>
</parameters>

<global>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>oozie.action.sharelib.for.spark</name>
<value>${oozieActionShareLibForSpark2}</value>
</property>
</configuration>
</global>

<start to="DecisionPromotePersonActionPayload"/>

<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>

<decision name="DecisionPromotePersonActionPayload">
<switch>
<case to="PromotePersonActionPayloadForPersonTable">
${(activePromotePersonActionPayload eq "true") and
(fs:exists(concat(concat(concat(concat(wf:conf('nameNode'),'/'),wf:conf('inputActionPayloadRootPath')),'/'),'clazz=eu.dnetlib.dhp.schema.oaf.Person')) eq "true")}
</case>
<default to="SkipPromotePersonActionPayloadForPersonTable"/>
</switch>
</decision>

<action name="PromotePersonActionPayloadForPersonTable">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn-cluster</master>
<mode>cluster</mode>
<name>PromotePersonActionPayloadForPersonTable</name>
<class>eu.dnetlib.dhp.actionmanager.promote.PromoteActionPayloadForGraphTableJob</class>
<jar>dhp-actionmanager-${projectVersion}.jar</jar>
<spark-opts>
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
</spark-opts>
<arg>--inputGraphTablePath</arg><arg>${inputGraphRootPath}/person</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Person</arg>
<arg>--inputActionPayloadPath</arg><arg>${inputActionPayloadRootPath}/clazz=eu.dnetlib.dhp.schema.oaf.Person</arg>
<arg>--actionPayloadClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Person</arg>
|
||||
<arg>--outputGraphTablePath</arg><arg>${outputGraphRootPath}/person</arg>
|
||||
<arg>--mergeAndGetStrategy</arg><arg>${mergeAndGetStrategy}</arg>
|
||||
<arg>--promoteActionStrategy</arg><arg>${promoteActionStrategy}</arg>
|
||||
</spark>
|
||||
<ok to="End"/>
|
||||
<error to="Kill"/>
|
||||
</action>
|
||||
|
||||
<action name="SkipPromotePersonActionPayloadForPersonTable">
|
||||
<distcp xmlns="uri:oozie:distcp-action:0.2">
|
||||
<prepare>
|
||||
<delete path="${outputGraphRootPath}/person"/>
|
||||
</prepare>
|
||||
<arg>-pb</arg>
|
||||
<arg>${inputGraphRootPath}/person</arg>
|
||||
<arg>${outputGraphRootPath}/person</arg>
|
||||
</distcp>
|
||||
<ok to="End"/>
|
||||
<error to="Kill"/>
|
||||
</action>
|
||||
|
||||
<end name="End"/>
|
||||
</workflow-app>
|
|
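Editorial note on the workflow above: the DecisionPromotePersonActionPayload node gates the Spark action on two conditions, the activePromotePersonActionPayload flag being "true" and the clazz=eu.dnetlib.dhp.schema.oaf.Person directory existing under the action payload root. The nested concat/fs:exists EL expression is hard to read, so the following is a minimal, illustrative Java sketch of the equivalent HDFS check; the class and method names are invented, and only the path layout is taken from the workflow.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: mirrors the fs:exists(...) test of the decision node.
public class PersonPayloadCheckSketch {

	public static boolean personPayloadExists(String nameNode, String inputActionPayloadRootPath)
		throws IOException {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", nameNode);
		FileSystem fs = FileSystem.get(conf);
		// same concatenation performed by the workflow EL:
		// <nameNode>/<inputActionPayloadRootPath>/clazz=eu.dnetlib.dhp.schema.oaf.Person
		Path payloadDir = new Path(inputActionPayloadRootPath + "/clazz=eu.dnetlib.dhp.schema.oaf.Person");
		return fs.exists(payloadDir);
	}
}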
@ -95,6 +95,7 @@
|
|||
--executor-memory=${sparkExecutorMemory}
|
||||
--executor-cores=${sparkExecutorCores}
|
||||
--driver-memory=${sparkDriverMemory}
|
||||
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
|
||||
--conf spark.extraListeners=${spark2ExtraListeners}
|
||||
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
|
||||
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
|
||||
|
|
|
@ -103,11 +103,12 @@
|
|||
--executor-memory=${sparkExecutorMemory}
|
||||
--executor-cores=${sparkExecutorCores}
|
||||
--driver-memory=${sparkDriverMemory}
|
||||
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
|
||||
--conf spark.extraListeners=${spark2ExtraListeners}
|
||||
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
|
||||
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
|
||||
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
|
||||
--conf spark.sql.shuffle.partitions=7000
|
||||
--conf spark.sql.shuffle.partitions=15000
|
||||
</spark-opts>
|
||||
<arg>--inputGraphTablePath</arg><arg>${inputGraphRootPath}/publication</arg>
|
||||
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
|
||||
|
@ -156,11 +157,12 @@
|
|||
--executor-memory=${sparkExecutorMemory}
|
||||
--executor-cores=${sparkExecutorCores}
|
||||
--driver-memory=${sparkDriverMemory}
|
||||
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
|
||||
--conf spark.extraListeners=${spark2ExtraListeners}
|
||||
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
|
||||
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
|
||||
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
|
||||
--conf spark.sql.shuffle.partitions=7000
|
||||
--conf spark.sql.shuffle.partitions=15000
|
||||
</spark-opts>
|
||||
<arg>--inputGraphTablePath</arg><arg>${workingDir}/publication</arg>
|
||||
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
|
||||
|
|
|
@ -95,11 +95,12 @@
|
|||
--executor-memory=${sparkExecutorMemory}
|
||||
--executor-cores=${sparkExecutorCores}
|
||||
--driver-memory=${sparkDriverMemory}
|
||||
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
|
||||
--conf spark.extraListeners=${spark2ExtraListeners}
|
||||
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
|
||||
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
|
||||
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
|
||||
--conf spark.sql.shuffle.partitions=10000
|
||||
--conf spark.sql.shuffle.partitions=15000
|
||||
</spark-opts>
|
||||
<arg>--inputGraphTablePath</arg><arg>${inputGraphRootPath}/relation</arg>
|
||||
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Relation</arg>
|
||||
|
|
|
@ -103,6 +103,7 @@
|
|||
--executor-memory=${sparkExecutorMemory}
|
||||
--executor-cores=${sparkExecutorCores}
|
||||
--driver-memory=${sparkDriverMemory}
|
||||
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
|
||||
--conf spark.extraListeners=${spark2ExtraListeners}
|
||||
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
|
||||
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
|
||||
|
@ -155,11 +156,12 @@
|
|||
--executor-memory=${sparkExecutorMemory}
|
||||
--executor-cores=${sparkExecutorCores}
|
||||
--driver-memory=${sparkDriverMemory}
|
||||
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
|
||||
--conf spark.extraListeners=${spark2ExtraListeners}
|
||||
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
|
||||
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
|
||||
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
|
||||
--conf spark.sql.shuffle.partitions=2560
|
||||
--conf spark.sql.shuffle.partitions=4000
|
||||
</spark-opts>
|
||||
<arg>--inputGraphTablePath</arg><arg>${workingDir}/software</arg>
|
||||
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
|
||||
|
|
|
@ -0,0 +1,210 @@
|
|||
/*
|
||||
* Copyright (c) 2024.
|
||||
* SPDX-FileCopyrightText: © 2023 Consiglio Nazionale delle Ricerche
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
package eu.dnetlib.dhp.actionmanager.promote;
|
||||
|
||||
import static eu.dnetlib.dhp.common.FunctionalInterfaceSupport.*;
|
||||
import static eu.dnetlib.dhp.schema.common.ModelSupport.isSubClass;
|
||||
import static org.apache.spark.sql.functions.*;
|
||||
import static org.junit.jupiter.api.Assertions.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.sql.*;
|
||||
import org.apache.spark.sql.Dataset;
|
||||
import org.junit.jupiter.api.AfterAll;
|
||||
import org.junit.jupiter.api.BeforeAll;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import eu.dnetlib.dhp.schema.common.ModelSupport;
|
||||
import eu.dnetlib.dhp.schema.oaf.*;
|
||||
|
||||
public class PromoteResultWithMeasuresTest {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(PromoteResultWithMeasuresTest.class);
|
||||
|
||||
private static SparkSession spark;
|
||||
|
||||
private static Path tempDir;
|
||||
|
||||
public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
|
||||
|
||||
@BeforeAll
|
||||
public static void beforeAll() throws IOException {
|
||||
tempDir = Files.createTempDirectory(PromoteResultWithMeasuresTest.class.getSimpleName());
|
||||
log.info("using work dir {}", tempDir);
|
||||
|
||||
SparkConf conf = new SparkConf();
|
||||
conf.setMaster("local[*]");
|
||||
conf.setAppName(PromoteResultWithMeasuresTest.class.getSimpleName());
|
||||
conf.set("spark.driver.host", "localhost");
|
||||
|
||||
conf.set("hive.metastore.local", "true");
|
||||
conf.set("spark.ui.enabled", "false");
|
||||
|
||||
conf.set("spark.sql.warehouse.dir", tempDir.toString());
|
||||
conf.set("hive.metastore.warehouse.dir", tempDir.resolve("warehouse").toString());
|
||||
|
||||
spark = SparkSession.builder().config(conf).getOrCreate();
|
||||
}
|
||||
|
||||
@AfterAll
|
||||
public static void afterAll() throws IOException {
|
||||
spark.stop();
|
||||
FileUtils.deleteDirectory(tempDir.toFile());
|
||||
}
|
||||
|
||||
@Test
|
||||
void testPromoteResultWithMeasures_job() throws Exception {
|
||||
|
||||
final String inputGraphTablePath = getClass()
|
||||
.getResource("/eu/dnetlib/dhp/actionmanager/promote/measures/graph")
|
||||
.getPath();
|
||||
|
||||
final String inputActionPayloadPath = getClass()
|
||||
.getResource("/eu/dnetlib/dhp/actionmanager/promote/measures/actionPayloads")
|
||||
.getPath();
|
||||
|
||||
final String actionPayloadsPath = tempDir.resolve("actionPayloads").toString();
|
||||
|
||||
spark
|
||||
.read()
|
||||
.text(inputActionPayloadPath)
|
||||
.withColumn("payload", col("value"))
|
||||
.select("payload")
|
||||
.write()
|
||||
.parquet(actionPayloadsPath);
|
||||
|
||||
final Path outputGraphTablePath = tempDir.resolve("outputGraphTablePath");
|
||||
|
||||
PromoteActionPayloadForGraphTableJob
|
||||
.main(new String[] {
|
||||
"--isSparkSessionManaged", Boolean.FALSE.toString(),
|
||||
"--graphTableClassName", Publication.class.getCanonicalName(),
|
||||
"--inputGraphTablePath", inputGraphTablePath,
|
||||
"--inputActionPayloadPath", actionPayloadsPath,
|
||||
"--actionPayloadClassName", Result.class.getCanonicalName(),
|
||||
"--outputGraphTablePath", outputGraphTablePath.toString(),
|
||||
"--mergeAndGetStrategy", MergeAndGet.Strategy.MERGE_FROM_AND_GET.toString(),
|
||||
"--promoteActionStrategy", PromoteAction.Strategy.ENRICH.toString(),
|
||||
"--shouldGroupById", "true"
|
||||
});
|
||||
|
||||
assertFalse(isDirEmpty(outputGraphTablePath));
|
||||
|
||||
final Encoder<Publication> pubEncoder = Encoders.bean(Publication.class);
|
||||
List<Publication> results = spark
|
||||
.read()
|
||||
.schema(pubEncoder.schema())
|
||||
.json(outputGraphTablePath.toString())
|
||||
.as(pubEncoder)
|
||||
.collectAsList();
|
||||
|
||||
verify(results);
|
||||
}
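// Descriptive note (added for clarity, not in the original test): the raw action payloads are
// line-delimited JSON, but the test first rewrites them into a parquet dataset with a single
// "payload" column before invoking PromoteActionPayloadForGraphTableJob with the ENRICH and
// MERGE_FROM_AND_GET strategies -- presumably because that is the layout the promote job expects
// for --inputActionPayloadPath.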
|
||||
|
||||
@Test
|
||||
void testPromoteResultWithMeasures_internal() throws JsonProcessingException {
|
||||
|
||||
Dataset<Publication> rowDS = spark
|
||||
.read()
|
||||
.schema(Encoders.bean(Publication.class).schema())
|
||||
.json("src/test/resources/eu/dnetlib/dhp/actionmanager/promote/measures/graph")
|
||||
.as(Encoders.bean(Publication.class));
|
||||
|
||||
Dataset<Result> actionPayloadDS = spark
|
||||
.read()
|
||||
.schema(Encoders.bean(Result.class).schema())
|
||||
.json("src/test/resources/eu/dnetlib/dhp/actionmanager/promote/measures/actionPayloads")
|
||||
.as(Encoders.bean(Result.class));
|
||||
|
||||
final MergeAndGet.Strategy mergeFromAndGet = MergeAndGet.Strategy.MERGE_FROM_AND_GET;
|
||||
|
||||
final SerializableSupplier<Function<Publication, String>> rowIdFn = ModelSupport::idFn;
|
||||
final SerializableSupplier<BiFunction<Publication, Result, Publication>> mergeAndGetFn = MergeAndGet
|
||||
.functionFor(mergeFromAndGet);
|
||||
final SerializableSupplier<Publication> zeroFn = () -> Publication.class
|
||||
.cast(new eu.dnetlib.dhp.schema.oaf.Publication());
|
||||
final SerializableSupplier<Function<Publication, Boolean>> isNotZeroFn = PromoteResultWithMeasuresTest::isNotZeroFnUsingIdOrSourceAndTarget;
|
||||
|
||||
Dataset<Publication> joinedResults = PromoteActionPayloadFunctions
|
||||
.joinGraphTableWithActionPayloadAndMerge(
|
||||
rowDS,
|
||||
actionPayloadDS,
|
||||
rowIdFn,
|
||||
ModelSupport::idFn,
|
||||
mergeAndGetFn,
|
||||
PromoteAction.Strategy.ENRICH,
|
||||
Publication.class,
|
||||
Result.class);
|
||||
|
||||
SerializableSupplier<BiFunction<Publication, Publication, Publication>> mergeRowsAndGetFn = MergeAndGet
|
||||
.functionFor(mergeFromAndGet);
|
||||
|
||||
Dataset<Publication> mergedResults = PromoteActionPayloadFunctions
|
||||
.groupGraphTableByIdAndMerge(
|
||||
joinedResults, rowIdFn, mergeRowsAndGetFn, zeroFn, isNotZeroFn, Publication.class);
|
||||
|
||||
verify(mergedResults.collectAsList());
|
||||
}
|
||||
|
||||
private static void verify(List<Publication> results) throws JsonProcessingException {
|
||||
assertNotNull(results);
|
||||
assertEquals(1, results.size());
|
||||
|
||||
Result r = results.get(0);
|
||||
|
||||
log.info(OBJECT_MAPPER.writeValueAsString(r));
|
||||
|
||||
assertNotNull(r.getMeasures());
|
||||
assertFalse(r.getMeasures().isEmpty());
|
||||
assertTrue(
|
||||
r
|
||||
.getMeasures()
|
||||
.stream()
|
||||
.map(Measure::getId)
|
||||
.collect(Collectors.toCollection(HashSet::new))
|
||||
.containsAll(
|
||||
Lists
|
||||
.newArrayList(
|
||||
"downloads", "views", "influence", "popularity", "influence_alt", "popularity_alt",
|
||||
"impulse")));
|
||||
}
|
||||
|
||||
private static <T extends Oaf> Function<T, Boolean> isNotZeroFnUsingIdOrSourceAndTarget() {
|
||||
return t -> {
|
||||
if (isSubClass(t, Relation.class)) {
|
||||
final Relation rel = (Relation) t;
|
||||
return StringUtils.isNotBlank(rel.getSource()) && StringUtils.isNotBlank(rel.getTarget());
|
||||
}
|
||||
return StringUtils.isNotBlank(((OafEntity) t).getId());
|
||||
};
|
||||
}
|
||||
|
||||
private static boolean isDirEmpty(final Path directory) throws IOException {
|
||||
try (DirectoryStream<Path> dirStream = Files.newDirectoryStream(directory)) {
|
||||
return !dirStream.iterator().hasNext();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
{"collectedfrom":null,"dataInfo":null,"lastupdatetimestamp":null,"id":"50|doi_dedup___::02317b7093277ec8aa0311d5c6a25b9b","originalId":null,"pid":null,"dateofcollection":null,"dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":[{"id":"downloads","unit":[{"key":"opendoar____::358aee4cc897452c00244351e4d91f69||ZENODO","value":"125","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:usage_counts","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}}]},{"id":"views","unit":[{"key":"opendoar____::358aee4cc897452c00244351e4d91f69||ZENODO","value":"35","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:usage_counts","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}}]}],"context":null,"processingchargeamount":null,"processingchargecurrency":null,"author":null,"resulttype":null,"metaResourceType":null,"language":null,"country":null,"subject":null,"title":null,"relevantdate":null,"description":null,"dateofacceptance":null,"publisher":null,"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"externalReference":null,"instance":null,"eoscifguidelines":null,"openAccessColor":null,"publiclyFunded":null,"transformativeAgreement":null,"isGreen":null,"isInDiamondJournal":null}
|
||||
{"collectedfrom":null,"dataInfo":null,"lastupdatetimestamp":null,"id":"50|doi_dedup___::02317b7093277ec8aa0311d5c6a25b9b","originalId":null,"pid":null,"dateofcollection":null,"dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":[{"id":"influence","unit":[{"key":"score","value":"3.1167566E-9","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}},{"key":"class","value":"C5","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}}]},{"id":"popularity","unit":[{"key":"score","value":"7.335433E-9","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}},{"key":"class","value":"C4","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}}]},{"id":"influence_alt","unit":[{"key":"score","value":"4","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}},{"key":"class","value":"C5","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}}]},{"id":"popularity_alt","unit":[{"key":"score","value":"2.96","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}},{"key":"class","value":"C4","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}}]},{"id":"impulse","unit":[{"key":"score","value":"4","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}},{"key":"class","value":"C5","dataInfo":{"invisible":false,"inferred":true,"deletedbyinference":false,"trust":"","inferenceprovenance":"update","provenanceaction":{"classid":"measure:bip","classname":"Inferred by 
OpenAIRE","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"}}}]}],"context":null,"processingchargeamount":null,"processingchargecurrency":null,"author":null,"resulttype":null,"metaResourceType":null,"language":null,"country":null,"subject":null,"title":null,"relevantdate":null,"description":null,"dateofacceptance":null,"publisher":null,"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"externalReference":null,"instance":null,"eoscifguidelines":null,"openAccessColor":null,"publiclyFunded":null,"transformativeAgreement":null,"isGreen":null,"isInDiamondJournal":null}
|
||||
{"collectedfrom":null,"dataInfo":null,"lastupdatetimestamp":null,"id":"50|doi_dedup___::02317b7093277ec8aa0311d5c6a25b9b","originalId":null,"pid":null,"dateofcollection":null,"dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"context":null,"processingchargeamount":null,"processingchargecurrency":null,"author":null,"resulttype":null,"metaResourceType":null,"language":null,"country":null,"subject":null,"title":null,"relevantdate":null,"description":null,"dateofacceptance":null,"publisher":null,"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"externalReference":null,"instance":null,"eoscifguidelines":null,"openAccessColor":"hybrid","publiclyFunded":false,"transformativeAgreement":null,"isGreen":true,"isInDiamondJournal":false}
|
File diff suppressed because one or more lines are too long
|
@ -13,6 +13,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
|
|||
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
||||
import eu.dnetlib.dhp.common.HdfsSupport;
|
||||
import eu.dnetlib.dhp.schema.common.ModelConstants;
|
||||
import eu.dnetlib.dhp.schema.oaf.Instance;
|
||||
import eu.dnetlib.dhp.schema.oaf.Qualifier;
|
||||
import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
|
||||
import eu.dnetlib.dhp.schema.oaf.Subject;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
|
||||
|
@ -42,6 +44,9 @@ public class Constants {
|
|||
public static final String NULL = "NULL";
|
||||
public static final String NA = "N/A";
|
||||
|
||||
public static final String WEB_CRAWL_ID = "10|openaire____::fb98a192f6a055ba495ef414c330834b";
|
||||
public static final String WEB_CRAWL_NAME = "Web Crawl";
|
||||
|
||||
public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
|
||||
|
||||
private Constants() {
|
||||
|
|
|
@ -10,7 +10,6 @@ import java.util.List;
|
|||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.compress.BZip2Codec;
|
||||
import org.apache.hadoop.io.compress.GzipCodec;
|
||||
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.api.java.JavaPairRDD;
|
||||
|
@ -29,21 +28,26 @@ import eu.dnetlib.dhp.schema.action.AtomicAction;
|
|||
import eu.dnetlib.dhp.schema.common.ModelConstants;
|
||||
import eu.dnetlib.dhp.schema.oaf.*;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.CleaningFunctions;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.DoiCleaningRule;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
|
||||
import scala.Tuple2;
|
||||
|
||||
/**
|
||||
* Creates action sets for Crossref affiliation relations inferred by BIP!
|
||||
* Creates action sets for Crossref affiliation relations inferred by OpenAIRE
|
||||
*/
|
||||
public class PrepareAffiliationRelations implements Serializable {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(PrepareAffiliationRelations.class);
|
||||
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
|
||||
private static final String ID_PREFIX = "50|doi_________::";
|
||||
public static final String BIP_AFFILIATIONS_CLASSID = "result:organization:bipinference";
|
||||
public static final String BIP_AFFILIATIONS_CLASSNAME = "Affiliation relation inferred by BIP!";
|
||||
public static final String BIP_INFERENCE_PROVENANCE = "bip:affiliation:crossref";
|
||||
public static final String BIP_AFFILIATIONS_CLASSID = "result:organization:openaireinference";
|
||||
public static final String BIP_AFFILIATIONS_CLASSNAME = "Affiliation relation inferred by OpenAIRE";
|
||||
public static final String BIP_INFERENCE_PROVENANCE = "openaire:affiliation";
|
||||
public static final String OPENAIRE_DATASOURCE_ID = "10|infrastruct_::f66f1bd369679b5b077dcdf006089556";
|
||||
public static final String OPENAIRE_DATASOURCE_NAME = "OpenAIRE";
|
||||
public static final String DOI_URL_PREFIX = "https://doi.org/";
|
||||
public static final int DOI_URL_PREFIX_LENGTH = 16;
|
||||
|
||||
public static <I extends Result> void main(String[] args) throws Exception {
|
||||
|
||||
|
@ -71,6 +75,12 @@ public class PrepareAffiliationRelations implements Serializable {
|
|||
final String dataciteInputPath = parser.get("dataciteInputPath");
|
||||
log.info("dataciteInputPath: {}", dataciteInputPath);
|
||||
|
||||
final String webcrawlInputPath = parser.get("webCrawlInputPath");
|
||||
log.info("webcrawlInputPath: {}", webcrawlInputPath);
|
||||
|
||||
final String publisherInputPath = parser.get("publisherInputPath");
|
||||
log.info("publisherInputPath: {}", publisherInputPath);
|
||||
|
||||
final String outputPath = parser.get("outputPath");
|
||||
log.info("outputPath: {}", outputPath);
|
||||
|
||||
|
@ -81,47 +91,107 @@ public class PrepareAffiliationRelations implements Serializable {
|
|||
isSparkSessionManaged,
|
||||
spark -> {
|
||||
Constants.removeOutputDir(spark, outputPath);
|
||||
|
||||
List<KeyValue> collectedFromCrossref = OafMapperUtils
|
||||
.listKeyValues(ModelConstants.CROSSREF_ID, "Crossref");
|
||||
JavaPairRDD<Text, Text> crossrefRelations = prepareAffiliationRelations(
|
||||
spark, crossrefInputPath, collectedFromCrossref);
|
||||
|
||||
List<KeyValue> collectedFromPubmed = OafMapperUtils
|
||||
.listKeyValues(ModelConstants.PUBMED_CENTRAL_ID, "Pubmed");
|
||||
JavaPairRDD<Text, Text> pubmedRelations = prepareAffiliationRelations(
|
||||
spark, pubmedInputPath, collectedFromPubmed);
|
||||
|
||||
List<KeyValue> collectedFromOpenAPC = OafMapperUtils
|
||||
.listKeyValues(ModelConstants.OPEN_APC_ID, "OpenAPC");
|
||||
JavaPairRDD<Text, Text> openAPCRelations = prepareAffiliationRelations(
|
||||
spark, openapcInputPath, collectedFromOpenAPC);
|
||||
|
||||
List<KeyValue> collectedFromDatacite = OafMapperUtils
|
||||
.listKeyValues(ModelConstants.DATACITE_ID, "Datacite");
|
||||
JavaPairRDD<Text, Text> dataciteRelations = prepareAffiliationRelations(
|
||||
spark, dataciteInputPath, collectedFromDatacite);
|
||||
|
||||
crossrefRelations
|
||||
.union(pubmedRelations)
|
||||
.union(openAPCRelations)
|
||||
.union(dataciteRelations)
|
||||
.saveAsHadoopFile(
|
||||
outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);
|
||||
|
||||
createActionSet(
|
||||
spark, crossrefInputPath, pubmedInputPath, openapcInputPath, dataciteInputPath, webcrawlInputPath,
|
||||
publisherInputPath, outputPath);
|
||||
});
|
||||
}
|
||||
|
||||
private static void createActionSet(SparkSession spark, String crossrefInputPath, String pubmedInputPath,
|
||||
String openapcInputPath, String dataciteInputPath, String webcrawlInputPath, String publisherlInputPath,
|
||||
String outputPath) {
|
||||
List<KeyValue> collectedfromOpenAIRE = OafMapperUtils
|
||||
.listKeyValues(OPENAIRE_DATASOURCE_ID, OPENAIRE_DATASOURCE_NAME);
|
||||
|
||||
JavaPairRDD<Text, Text> crossrefRelations = prepareAffiliationRelationsNewModel(
|
||||
spark, crossrefInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":crossref");
|
||||
|
||||
JavaPairRDD<Text, Text> pubmedRelations = prepareAffiliationRelations(
|
||||
spark, pubmedInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":pubmed");
|
||||
|
||||
JavaPairRDD<Text, Text> openAPCRelations = prepareAffiliationRelationsNewModel(
|
||||
spark, openapcInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":openapc");
|
||||
|
||||
JavaPairRDD<Text, Text> dataciteRelations = prepareAffiliationRelationsNewModel(
|
||||
spark, dataciteInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":datacite");
|
||||
|
||||
JavaPairRDD<Text, Text> webCrawlRelations = prepareAffiliationRelationsNewModel(
|
||||
spark, webcrawlInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":rawaff");
|
||||
|
||||
JavaPairRDD<Text, Text> publisherRelations = prepareAffiliationRelationFromPublisherNewModel(
|
||||
spark, publisherlInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":webcrawl");
|
||||
|
||||
crossrefRelations
|
||||
.union(pubmedRelations)
|
||||
.union(openAPCRelations)
|
||||
.union(dataciteRelations)
|
||||
.union(webCrawlRelations)
|
||||
.union(publisherRelations)
|
||||
.saveAsHadoopFile(
|
||||
outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);
|
||||
}
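// Descriptive note (added for clarity, not in the original source): each input source gets its own
// provenance string built from BIP_INFERENCE_PROVENANCE ("openaire:affiliation") plus a suffix
// (":crossref", ":pubmed", ":openapc", ":datacite", ":rawaff", ":webcrawl"), so the origin of every
// affiliation relation remains distinguishable after the union. The merged relations are wrapped as
// AtomicAction<Relation> and written as Text key/value pairs into a BZip2-compressed sequence file
// at outputPath.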
|
||||
|
||||
private static JavaPairRDD<Text, Text> prepareAffiliationRelationFromPublisherNewModel(SparkSession spark,
|
||||
String inputPath,
|
||||
List<KeyValue> collectedfrom,
|
||||
String dataprovenance) {
|
||||
|
||||
Dataset<Row> df = spark
|
||||
.read()
|
||||
.schema(
|
||||
"`DOI` STRING, `Organizations` ARRAY<STRUCT<`PID`:STRING, `Value`:STRING,`Confidence`:DOUBLE, `Status`:STRING>>")
|
||||
.json(inputPath)
|
||||
.where("DOI is not null");
|
||||
|
||||
return getTextTextJavaPairRDDNew(
|
||||
collectedfrom, df.selectExpr("DOI", "Organizations as Matchings"), dataprovenance);
|
||||
|
||||
}
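// Hypothetical input record for the publisher source (illustration only, shaped after the schema
// declared above; the DOI and organization identifier below are invented):
// {"DOI":"10.1234/example","Organizations":[{"PID":"ROR","Value":"https://ror.org/01example","Confidence":0.9,"Status":"active"}]}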
|
||||
|
||||
private static JavaPairRDD<Text, Text> prepareAffiliationRelationFromPublisher(SparkSession spark, String inputPath,
|
||||
List<KeyValue> collectedfrom, String dataprovenance) {
|
||||
|
||||
Dataset<Row> df = spark
|
||||
.read()
|
||||
.schema("`DOI` STRING, `Organizations` ARRAY<STRUCT<`RORid`:STRING,`Confidence`:DOUBLE>>")
|
||||
.json(inputPath)
|
||||
.where("DOI is not null");
|
||||
|
||||
return getTextTextJavaPairRDD(
|
||||
collectedfrom, df.selectExpr("DOI", "Organizations as Matchings"), dataprovenance);
|
||||
|
||||
}
|
||||
|
||||
private static <I extends Result> JavaPairRDD<Text, Text> prepareAffiliationRelations(SparkSession spark,
|
||||
String inputPath,
|
||||
List<KeyValue> collectedfrom) {
|
||||
List<KeyValue> collectedfrom, String dataprovenance) {
|
||||
|
||||
// load and parse affiliation relations from HDFS
|
||||
Dataset<Row> df = spark
|
||||
.read()
|
||||
.schema("`DOI` STRING, `Matchings` ARRAY<STRUCT<`RORid`:STRING,`Confidence`:DOUBLE>>")
|
||||
.json(inputPath);
|
||||
.json(inputPath)
|
||||
.where("DOI is not null");
|
||||
|
||||
return getTextTextJavaPairRDD(collectedfrom, df, dataprovenance);
|
||||
}
|
||||
|
||||
private static <I extends Result> JavaPairRDD<Text, Text> prepareAffiliationRelationsNewModel(SparkSession spark,
|
||||
String inputPath,
|
||||
List<KeyValue> collectedfrom, String dataprovenance) {
|
||||
// load and parse affiliation relations from HDFS
|
||||
Dataset<Row> df = spark
|
||||
.read()
|
||||
.schema(
|
||||
"`DOI` STRING, `Matchings` ARRAY<STRUCT<`PID`:STRING, `Value`:STRING,`Confidence`:DOUBLE, `Status`:STRING>>")
|
||||
.json(inputPath)
|
||||
.where("DOI is not null");
|
||||
|
||||
return getTextTextJavaPairRDDNew(collectedfrom, df, dataprovenance);
|
||||
}
|
||||
|
||||
private static JavaPairRDD<Text, Text> getTextTextJavaPairRDD(List<KeyValue> collectedfrom, Dataset<Row> df,
|
||||
String dataprovenance) {
|
||||
// unroll nested arrays
|
||||
df = df
|
||||
.withColumn("matching", functions.explode(new Column("Matchings")))
|
||||
|
@ -137,7 +207,7 @@ public class PrepareAffiliationRelations implements Serializable {
|
|||
|
||||
// DOI to OpenAIRE id
|
||||
final String paperId = ID_PREFIX
|
||||
+ IdentifierFactory.md5(CleaningFunctions.normalizePidValue("doi", row.getAs("doi")));
|
||||
+ IdentifierFactory.md5(DoiCleaningRule.clean(removePrefix(row.getAs("doi"))));
|
||||
|
||||
// ROR id to OpenAIRE id
|
||||
final String affId = GenerateRorActionSetJob.calculateOpenaireId(row.getAs("rorid"));
|
||||
|
@ -153,7 +223,7 @@ public class PrepareAffiliationRelations implements Serializable {
|
|||
DataInfo dataInfo = OafMapperUtils
|
||||
.dataInfo(
|
||||
false,
|
||||
BIP_INFERENCE_PROVENANCE,
|
||||
dataprovenance,
|
||||
true,
|
||||
false,
|
||||
qualifier,
|
||||
|
@ -169,6 +239,70 @@ public class PrepareAffiliationRelations implements Serializable {
|
|||
new Text(OBJECT_MAPPER.writeValueAsString(aa))));
|
||||
}
|
||||
|
||||
private static JavaPairRDD<Text, Text> getTextTextJavaPairRDDNew(List<KeyValue> collectedfrom, Dataset<Row> df,
|
||||
String dataprovenance) {
|
||||
// unroll nested arrays
|
||||
df = df
|
||||
.withColumn("matching", functions.explode(new Column("Matchings")))
|
||||
.select(
|
||||
new Column("DOI").as("doi"),
|
||||
new Column("matching.PID").as("pidtype"),
|
||||
new Column("matching.Value").as("pidvalue"),
|
||||
new Column("matching.Confidence").as("confidence"),
|
||||
new Column("matching.Status").as("status"))
|
||||
.where("status = 'active'");
|
||||
|
||||
// prepare action sets for affiliation relations
|
||||
return df
|
||||
.toJavaRDD()
|
||||
.flatMap((FlatMapFunction<Row, Relation>) row -> {
|
||||
|
||||
// DOI to OpenAIRE id
|
||||
final String paperId = ID_PREFIX
|
||||
+ IdentifierFactory.md5(DoiCleaningRule.clean(removePrefix(row.getAs("doi"))));
|
||||
|
||||
// Organization to OpenAIRE identifier
|
||||
String affId = null;
|
||||
if (row.getAs("pidtype").equals("ROR"))
|
||||
// ROR id to OpenAIRE id
|
||||
affId = GenerateRorActionSetJob.calculateOpenaireId(row.getAs("pidvalue"));
|
||||
else
|
||||
// getting the OpenOrgs identifier for the organization
|
||||
affId = row.getAs("pidvalue");
|
||||
|
||||
Qualifier qualifier = OafMapperUtils
|
||||
.qualifier(
|
||||
BIP_AFFILIATIONS_CLASSID,
|
||||
BIP_AFFILIATIONS_CLASSNAME,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS);
|
||||
|
||||
// format data info; setting `confidence` into relation's `trust`
|
||||
DataInfo dataInfo = OafMapperUtils
|
||||
.dataInfo(
|
||||
false,
|
||||
dataprovenance,
|
||||
true,
|
||||
false,
|
||||
qualifier,
|
||||
Double.toString(row.getAs("confidence")));
|
||||
|
||||
// return bi-directional relations
|
||||
return getAffiliationRelationPair(paperId, affId, collectedfrom, dataInfo).iterator();
|
||||
|
||||
})
|
||||
.map(p -> new AtomicAction(Relation.class, p))
|
||||
.mapToPair(
|
||||
aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
|
||||
new Text(OBJECT_MAPPER.writeValueAsString(aa))));
|
||||
}
|
||||
|
||||
private static String removePrefix(String doi) {
|
||||
if (doi.startsWith(DOI_URL_PREFIX))
|
||||
return doi.substring(DOI_URL_PREFIX_LENGTH);
|
||||
return doi;
|
||||
}
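// Worked example (illustrative): DOI_URL_PREFIX is "https://doi.org/", which is exactly 16
// characters (DOI_URL_PREFIX_LENGTH), so removePrefix("https://doi.org/10.1000/xyz123") returns
// "10.1000/xyz123", while a bare DOI such as "10.1000/xyz123" is returned unchanged.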
|
||||
|
||||
private static List<Relation> getAffiliationRelationPair(String paperId, String affId, List<KeyValue> collectedfrom,
|
||||
DataInfo dataInfo) {
|
||||
return Arrays
|
||||
|
|
|
@ -115,19 +115,7 @@ public class PrepareFOSSparkJob implements Serializable {
|
|||
.forEach(
|
||||
l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID, true)));
|
||||
r.setSubject(sbjs);
|
||||
r
|
||||
.setDataInfo(
|
||||
OafMapperUtils
|
||||
.dataInfo(
|
||||
false, null, true,
|
||||
false,
|
||||
OafMapperUtils
|
||||
.qualifier(
|
||||
ModelConstants.PROVENANCE_ENRICH,
|
||||
null,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS),
|
||||
null));
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
|
|
|
@ -6,26 +6,23 @@ import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
|
|||
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.api.java.function.MapFunction;
|
||||
import org.apache.spark.api.java.function.MapGroupsFunction;
|
||||
import org.apache.spark.sql.Dataset;
|
||||
import org.apache.spark.sql.Encoders;
|
||||
import org.apache.spark.sql.SaveMode;
|
||||
import org.apache.spark.sql.SparkSession;
|
||||
import org.apache.spark.sql.*;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import eu.dnetlib.dhp.actionmanager.createunresolvedentities.model.SDGDataModel;
|
||||
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
||||
import eu.dnetlib.dhp.schema.common.ModelConstants;
|
||||
import eu.dnetlib.dhp.schema.common.ModelSupport;
|
||||
import eu.dnetlib.dhp.schema.oaf.Result;
|
||||
import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
|
||||
import eu.dnetlib.dhp.schema.oaf.Subject;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
|
||||
import eu.dnetlib.dhp.utils.DHPUtils;
|
||||
|
||||
public class PrepareSDGSparkJob implements Serializable {
|
||||
|
@ -52,54 +49,91 @@ public class PrepareSDGSparkJob implements Serializable {
|
|||
final String outputPath = parser.get("outputPath");
|
||||
log.info("outputPath: {}", outputPath);
|
||||
|
||||
final Boolean distributeDOI = Optional
|
||||
.ofNullable(parser.get("distributeDoi"))
|
||||
.map(Boolean::valueOf)
|
||||
.orElse(Boolean.TRUE);
|
||||
|
||||
log.info("distribute doi {}", distributeDOI);
|
||||
|
||||
SparkConf conf = new SparkConf();
|
||||
runWithSparkSession(
|
||||
conf,
|
||||
isSparkSessionManaged,
|
||||
spark -> {
|
||||
doPrepare(
|
||||
spark,
|
||||
sourcePath,
|
||||
if (distributeDOI)
|
||||
doPrepare(
|
||||
spark,
|
||||
sourcePath,
|
||||
|
||||
outputPath);
|
||||
else
|
||||
doPrepareoaid(spark, sourcePath, outputPath);
|
||||
|
||||
outputPath);
|
||||
});
|
||||
}
|
||||
|
||||
private static void doPrepare(SparkSession spark, String sourcePath, String outputPath) {
|
||||
Dataset<SDGDataModel> sdgDataset = readPath(spark, sourcePath, SDGDataModel.class);
|
||||
Dataset<Row> sdgDataset = spark
|
||||
.read()
|
||||
.format("csv")
|
||||
.option("sep", DEFAULT_DELIMITER)
|
||||
.option("inferSchema", "true")
|
||||
.option("header", "true")
|
||||
.option("quotes", "\"")
|
||||
.load(sourcePath);
|
||||
|
||||
sdgDataset
|
||||
.groupByKey((MapFunction<SDGDataModel, String>) r -> r.getDoi().toLowerCase(), Encoders.STRING())
|
||||
.mapGroups((MapGroupsFunction<String, SDGDataModel, Result>) (k, it) -> {
|
||||
Result r = new Result();
|
||||
r.setId(DHPUtils.generateUnresolvedIdentifier(k, DOI));
|
||||
SDGDataModel first = it.next();
|
||||
List<Subject> sbjs = new ArrayList<>();
|
||||
sbjs.add(getSubject(first.getSbj(), SDG_CLASS_ID, SDG_CLASS_NAME, UPDATE_SUBJECT_SDG_CLASS_ID));
|
||||
it
|
||||
.forEachRemaining(
|
||||
s -> sbjs
|
||||
.add(getSubject(s.getSbj(), SDG_CLASS_ID, SDG_CLASS_NAME, UPDATE_SUBJECT_SDG_CLASS_ID)));
|
||||
r.setSubject(sbjs);
|
||||
r
|
||||
.setDataInfo(
|
||||
OafMapperUtils
|
||||
.dataInfo(
|
||||
false, null, true,
|
||||
false,
|
||||
OafMapperUtils
|
||||
.qualifier(
|
||||
ModelConstants.PROVENANCE_ENRICH,
|
||||
null,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS),
|
||||
null));
|
||||
return r;
|
||||
}, Encoders.bean(Result.class))
|
||||
.groupByKey((MapFunction<Row, String>) v -> ((String) v.getAs("doi")).toLowerCase(), Encoders.STRING())
|
||||
.mapGroups(
|
||||
(MapGroupsFunction<String, Row, Result>) (k,
|
||||
it) -> getResult(
|
||||
DHPUtils
|
||||
.generateUnresolvedIdentifier(
|
||||
ModelSupport.entityIdPrefix.get(Result.class.getSimpleName().toLowerCase()) + "|" + k,
|
||||
DOI),
|
||||
it),
|
||||
Encoders.bean(Result.class))
|
||||
.write()
|
||||
.mode(SaveMode.Overwrite)
|
||||
.option("compression", "gzip")
|
||||
.json(outputPath + "/sdg");
|
||||
}
|
||||
|
||||
private static void doPrepareoaid(SparkSession spark, String sourcePath, String outputPath) {
|
||||
Dataset<Row> sdgDataset = spark
|
||||
.read()
|
||||
.format("csv")
|
||||
.option("sep", DEFAULT_DELIMITER)
|
||||
.option("inferSchema", "true")
|
||||
.option("header", "true")
|
||||
.option("quotes", "\"")
|
||||
.load(sourcePath);
|
||||
|
||||
|
||||
sdgDataset
|
||||
.groupByKey((MapFunction<Row, String>) r -> "50|" + ((String) r.getAs("oaid")), Encoders.STRING())
|
||||
.mapGroups(
|
||||
(MapGroupsFunction<String, Row, Result>) PrepareSDGSparkJob::getResult, Encoders.bean(Result.class))
|
||||
.write()
|
||||
.mode(SaveMode.Overwrite)
|
||||
.option("compression", "gzip")
|
||||
.json(outputPath + "/sdg");
|
||||
}
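// Descriptive note (added for clarity): unlike doPrepare, which keys records by DOI and goes
// through DHPUtils.generateUnresolvedIdentifier, this variant builds the grouping key directly as
// "50|" + oaid, i.e. it keys the produced Result objects by an existing OpenAIRE result identifier
// rather than by an unresolved DOI-based identifier.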
|
||||
|
||||
private static @NotNull Result getResult(String id, Iterator<Row> it) {
|
||||
Result r = new Result();
|
||||
r.setId(id);
|
||||
Row first = it.next();
|
||||
List<Subject> sbjs = new ArrayList<>();
|
||||
sbjs.add(getSubject(first.getAs("sdg"), SDG_CLASS_ID, SDG_CLASS_NAME, UPDATE_SUBJECT_SDG_CLASS_ID));
|
||||
it
|
||||
.forEachRemaining(
|
||||
s -> sbjs
|
||||
.add(getSubject(s.getAs("sdg"), SDG_CLASS_ID, SDG_CLASS_NAME, UPDATE_SUBJECT_SDG_CLASS_ID)));
|
||||
r.setSubject(sbjs);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -13,9 +13,6 @@ import org.apache.hadoop.io.Text;
|
|||
import org.apache.hadoop.io.compress.GzipCodec;
|
||||
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.api.java.JavaPairRDD;
|
||||
import org.apache.spark.api.java.function.FilterFunction;
|
||||
import org.apache.spark.api.java.function.FlatMapFunction;
|
||||
import org.apache.spark.api.java.function.MapFunction;
|
||||
import org.apache.spark.sql.Encoders;
|
||||
import org.apache.spark.sql.SparkSession;
|
||||
|
@ -24,13 +21,9 @@ import org.slf4j.LoggerFactory;
|
|||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
||||
import eu.dnetlib.dhp.actionmanager.opencitations.model.COCI;
|
||||
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
||||
import eu.dnetlib.dhp.schema.action.AtomicAction;
|
||||
import eu.dnetlib.dhp.schema.common.ModelConstants;
|
||||
import eu.dnetlib.dhp.schema.oaf.Relation;
|
||||
import eu.dnetlib.dhp.schema.oaf.Result;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.*;
|
||||
import scala.Tuple2;
|
||||
|
||||
public class CreateActionSetSparkJob implements Serializable {
|
||||
|
|
|
@ -46,6 +46,9 @@ public class GetOpenCitationsRefs implements Serializable {
|
|||
final String outputPath = parser.get("outputPath");
|
||||
log.info("outputPath {}", outputPath);
|
||||
|
||||
final String backupPath = parser.get("backupPath");
|
||||
log.info("backupPath {}", backupPath);
|
||||
|
||||
Configuration conf = new Configuration();
|
||||
conf.set("fs.defaultFS", hdfsNameNode);
|
||||
|
||||
|
@ -53,11 +56,11 @@ public class GetOpenCitationsRefs implements Serializable {
|
|||
|
||||
GetOpenCitationsRefs ocr = new GetOpenCitationsRefs();
|
||||
|
||||
ocr.doExtract(inputPath, outputPath, fileSystem);
|
||||
ocr.doExtract(inputPath, outputPath, backupPath, fileSystem);
|
||||
|
||||
}
|
||||
|
||||
private void doExtract(String inputPath, String outputPath, FileSystem fileSystem)
|
||||
private void doExtract(String inputPath, String outputPath, String backupPath, FileSystem fileSystem)
|
||||
throws IOException {
|
||||
|
||||
RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
|
||||
|
@ -89,6 +92,7 @@ public class GetOpenCitationsRefs implements Serializable {
|
|||
}
|
||||
|
||||
}
|
||||
fileSystem.rename(fileStatus.getPath(), new Path(backupPath));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -107,7 +107,8 @@ public class ReadCOCI implements Serializable {
|
|||
.mode(SaveMode.Append)
|
||||
.option("compression", "gzip")
|
||||
.json(outputPath);
|
||||
fileSystem.rename(fileStatus.getPath(), new Path("/tmp/miriam/OC/DONE"));
|
||||
|
||||
fileSystem.delete(fileStatus.getPath());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
|
||||
package eu.dnetlib.dhp.actionmanager.personentity;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
import eu.dnetlib.dhp.schema.oaf.Person;
|
||||
import eu.dnetlib.dhp.schema.oaf.Relation;
|
||||
import scala.Tuple2;
|
||||
|
||||
public class Couples implements Serializable {
|
||||
Person p;
|
||||
Relation r;
|
||||
|
||||
public Couples() {
|
||||
|
||||
}
|
||||
|
||||
public Person getP() {
|
||||
return p;
|
||||
}
|
||||
|
||||
public void setP(Person p) {
|
||||
this.p = p;
|
||||
}
|
||||
|
||||
public Relation getR() {
|
||||
return r;
|
||||
}
|
||||
|
||||
public void setR(Relation r) {
|
||||
this.r = r;
|
||||
}
|
||||
|
||||
public static Couples newInstance(Tuple2<Person, Relation> couple) {
|
||||
Couples c = new Couples();
|
||||
c.p = couple._1();
|
||||
c.r = couple._2();
|
||||
return c;
|
||||
}
|
||||
}
|
|
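Editorial note: the Couples bean above is a serializable holder for a Person/Relation pair; presumably it exists so the pair can be handled with Spark bean encoders instead of Scala tuple encoders. A minimal, hypothetical usage sketch follows (the wrapper class name is invented; the Person and Relation instances are assumed to come from an upstream join):

import eu.dnetlib.dhp.schema.oaf.Person;
import eu.dnetlib.dhp.schema.oaf.Relation;
import scala.Tuple2;

// Hypothetical helper, for illustration only.
public class CouplesUsageSketch {

	public static Couples wrap(Person person, Relation relation) {
		// Couples.newInstance copies both sides of the tuple into the bean fields.
		return Couples.newInstance(new Tuple2<>(person, relation));
	}
}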
@ -0,0 +1,527 @@
|
|||
|
||||
package eu.dnetlib.dhp.actionmanager.personentity;
|
||||
|
||||
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.io.Serializable;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.cli.ParseException;
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.compress.BZip2Codec;
|
||||
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.api.java.function.*;
|
||||
import org.apache.spark.sql.*;
|
||||
import org.apache.spark.sql.Dataset;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
||||
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
||||
import eu.dnetlib.dhp.collection.orcid.model.Author;
|
||||
import eu.dnetlib.dhp.collection.orcid.model.Employment;
|
||||
import eu.dnetlib.dhp.collection.orcid.model.Work;
|
||||
import eu.dnetlib.dhp.common.DbClient;
|
||||
import eu.dnetlib.dhp.common.HdfsSupport;
|
||||
import eu.dnetlib.dhp.common.person.CoAuthorshipIterator;
|
||||
import eu.dnetlib.dhp.common.person.Coauthors;
|
||||
import eu.dnetlib.dhp.schema.action.AtomicAction;
|
||||
import eu.dnetlib.dhp.schema.common.ModelConstants;
|
||||
import eu.dnetlib.dhp.schema.common.ModelSupport;
|
||||
import eu.dnetlib.dhp.schema.oaf.*;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.PidCleaner;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.PidType;
|
||||
import eu.dnetlib.dhp.utils.DHPUtils;
|
||||
import scala.Tuple2;
|
||||
|
||||
public class ExtractPerson implements Serializable {
|
||||
private static final Logger log = LoggerFactory.getLogger(ExtractPerson.class);
|
||||
private static final String QUERY = "SELECT * FROM project_person WHERE pid_type = 'ORCID'";
|
||||
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
|
||||
private static final String OPENAIRE_PREFIX = "openaire____";
|
||||
private static final String SEPARATOR = "::";
|
||||
private static final String orcidKey = "10|" + OPENAIRE_PREFIX + SEPARATOR
|
||||
+ DHPUtils.md5(ModelConstants.ORCID.toLowerCase());
|
||||
|
||||
private static final String DOI_PREFIX = "50|doi_________::";
|
||||
|
||||
private static final String PMID_PREFIX = "50|pmid________::";
|
||||
private static final String ARXIV_PREFIX = "50|arXiv_______::";
|
||||
|
||||
private static final String PMCID_PREFIX = "50|pmcid_______::";
|
||||
private static final String ROR_PREFIX = "20|ror_________::";
|
||||
private static final String PERSON_PREFIX = ModelSupport.getIdPrefix(Person.class)
|
||||
+ IdentifierFactory.ID_PREFIX_SEPARATOR + ModelConstants.ORCID + "_______";
|
||||
private static final String PROJECT_ID_PREFIX = ModelSupport.getIdPrefix(Project.class)
|
||||
+ IdentifierFactory.ID_PREFIX_SEPARATOR;
|
||||
|
||||
public static final String ORCID_AUTHORS_CLASSID = "sysimport:crosswalk:orcid";
|
||||
public static final String ORCID_AUTHORS_CLASSNAME = "Imported from ORCID";
|
||||
public static final String FUNDER_AUTHORS_CLASSID = "sysimport:crosswalk:funderdatabase";
|
||||
public static final String FUNDER_AUTHORS_CLASSNAME = "Imported from Funder Database";
|
||||
public static final String OPENAIRE_DATASOURCE_ID = "10|infrastruct_::f66f1bd369679b5b077dcdf006089556";
|
||||
public static final String OPENAIRE_DATASOURCE_NAME = "OpenAIRE";
|
||||
|
||||
public static List<KeyValue> collectedfromOpenAIRE = OafMapperUtils
|
||||
.listKeyValues(OPENAIRE_DATASOURCE_ID, OPENAIRE_DATASOURCE_NAME);
|
||||
|
||||
public static final DataInfo ORCIDDATAINFO = OafMapperUtils
|
||||
.dataInfo(
|
||||
false,
|
||||
null,
|
||||
false,
|
||||
false,
|
||||
OafMapperUtils
|
||||
.qualifier(
|
||||
ORCID_AUTHORS_CLASSID,
|
||||
ORCID_AUTHORS_CLASSNAME,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS),
|
||||
"0.91");
|
||||
|
||||
public static final DataInfo FUNDERDATAINFO = OafMapperUtils
|
||||
.dataInfo(
|
||||
false,
|
||||
null,
|
||||
false,
|
||||
false,
|
||||
OafMapperUtils
|
||||
.qualifier(
|
||||
FUNDER_AUTHORS_CLASSID,
|
||||
FUNDER_AUTHORS_CLASSNAME,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS),
|
||||
"0.91");
|
||||
|
||||
public static void main(final String[] args) throws IOException, ParseException {
|
||||
|
||||
final ArgumentApplicationParser parser = new ArgumentApplicationParser(
|
||||
IOUtils
|
||||
.toString(
|
||||
Objects
|
||||
.requireNonNull(
|
||||
ExtractPerson.class
|
||||
.getResourceAsStream(
|
||||
"/eu/dnetlib/dhp/actionmanager/personentity/as_parameters.json"))));
|
||||
|
||||
parser.parseArgument(args);
|
||||
|
||||
Boolean isSparkSessionManaged = Optional
|
||||
.ofNullable(parser.get("isSparkSessionManaged"))
|
||||
.map(Boolean::valueOf)
|
||||
.orElse(Boolean.TRUE);
|
||||
|
||||
log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
|
||||
|
||||
final String inputPath = parser.get("inputPath");
|
||||
log.info("inputPath {}", inputPath);
|
||||
|
||||
final String outputPath = parser.get("outputPath");
|
||||
log.info("outputPath {}", outputPath);
|
||||
|
||||
final String workingDir = parser.get("workingDir");
|
||||
log.info("workingDir {}", workingDir);
|
||||
|
||||
final String dbUrl = parser.get("postgresUrl");
|
||||
final String dbUser = parser.get("postgresUser");
|
||||
final String dbPassword = parser.get("postgresPassword");
|
||||
|
||||
final String hdfsNameNode = parser.get("hdfsNameNode");
|
||||
|
||||
SparkConf conf = new SparkConf();
|
||||
runWithSparkSession(
|
||||
conf,
|
||||
isSparkSessionManaged,
|
||||
spark -> {
|
||||
HdfsSupport.remove(outputPath, spark.sparkContext().hadoopConfiguration());
|
||||
extractInfoForActionSetFromORCID(spark, inputPath, workingDir);
|
||||
extractInfoForActionSetFromProjects(
|
||||
spark, inputPath, workingDir, dbUrl, dbUser, dbPassword, workingDir + "/project", hdfsNameNode);
|
||||
createActionSet(spark, outputPath, workingDir);
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
private static void extractInfoForActionSetFromProjects(SparkSession spark, String inputPath, String workingDir,
|
||||
String dbUrl, String dbUser, String dbPassword, String hdfsPath, String hdfsNameNode) throws IOException {
|
||||
|
||||
Configuration conf = new Configuration();
|
||||
conf.set("fs.defaultFS", hdfsNameNode);
|
||||
|
||||
FileSystem fileSystem = FileSystem.get(conf);
|
||||
Path hdfsWritePath = new Path(hdfsPath);
|
||||
FSDataOutputStream fos = fileSystem.create(hdfsWritePath);
|
||||
try (DbClient dbClient = new DbClient(dbUrl, dbUser, dbPassword)) {
|
||||
try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(fos, StandardCharsets.UTF_8))) {
|
||||
dbClient.processResults(QUERY, rs -> writeRelation(getRelationWithProject(rs), writer));
|
||||
}
|
||||
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static Relation getRelationWithProject(ResultSet rs) {
|
||||
try {
|
||||
return getProjectRelation(
|
||||
rs.getString("project"), rs.getString("pid"),
|
||||
rs.getString("role"));
|
||||
} catch (final SQLException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private static Relation getProjectRelation(String project, String orcid, String role) {
|
||||
|
||||
String source = PERSON_PREFIX + "::" + IdentifierFactory.md5(orcid);
|
||||
String target = PROJECT_ID_PREFIX + StringUtils.substringBefore(project, "::") + "::"
|
||||
+ IdentifierFactory.md5(StringUtils.substringAfter(project, "::"));
|
||||
List<KeyValue> properties = new ArrayList<>();
|
||||
|
||||
Relation relation = OafMapperUtils
|
||||
.getRelation(
|
||||
source, target, ModelConstants.PROJECT_PERSON_RELTYPE, ModelConstants.PROJECT_PERSON_SUBRELTYPE,
|
||||
ModelConstants.PROJECT_PERSON_PARTICIPATES,
|
||||
collectedfromOpenAIRE,
|
||||
FUNDERDATAINFO,
|
||||
null);
|
||||
relation.setValidated(true);
|
||||
|
||||
if (StringUtils.isNotBlank(role)) {
|
||||
KeyValue kv = new KeyValue();
|
||||
kv.setKey("role");
|
||||
kv.setValue(role);
|
||||
properties.add(kv);
|
||||
}
|
||||
|
||||
if (!properties.isEmpty())
|
||||
relation.setProperties(properties);
|
||||
return relation;
|
||||
|
||||
}
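// Descriptive note (added for clarity): the person side of the relation is derived from the md5 of
// the ORCID, while the project side keeps the funder prefix (the part of `project` before "::")
// verbatim and hashes only the suffix. An optional funder-provided role is carried as a "role"
// property, and the relation is flagged as validated.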
|
||||
|
||||
protected static void writeRelation(final Relation relation, BufferedWriter writer) {
|
||||
try {
|
||||
writer.write(OBJECT_MAPPER.writeValueAsString(relation));
|
||||
writer.newLine();
|
||||
} catch (final IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private static void createActionSet(SparkSession spark, String outputPath, String workingDir) {
|
||||
|
||||
Dataset<Person> people;
|
||||
people = spark
|
||||
.read()
|
||||
.textFile(workingDir + "/people")
|
||||
.map(
|
||||
(MapFunction<String, Person>) value -> OBJECT_MAPPER
|
||||
.readValue(value, Person.class),
|
||||
Encoders.bean(Person.class));
|
||||
|
||||
people
|
||||
.toJavaRDD()
|
||||
.map(p -> new AtomicAction(p.getClass(), p))
|
||||
.union(
|
||||
getRelations(spark, workingDir + "/authorship").toJavaRDD().map(r -> new AtomicAction(r.getClass(), r)))
|
||||
.union(
|
||||
getRelations(spark, workingDir + "/coauthorship")
|
||||
.toJavaRDD()
|
||||
.map(r -> new AtomicAction(r.getClass(), r)))
|
||||
.union(
|
||||
getRelations(spark, workingDir + "/affiliation")
|
||||
.toJavaRDD()
|
||||
.map(r -> new AtomicAction(r.getClass(), r)))
|
||||
.union(
|
||||
getRelations(spark, workingDir + "/project")
|
||||
.toJavaRDD()
|
||||
.map(r -> new AtomicAction(r.getClass(), r)))
|
||||
.mapToPair(
|
||||
aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
|
||||
new Text(OBJECT_MAPPER.writeValueAsString(aa))))
|
||||
.saveAsHadoopFile(
|
||||
outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);
|
||||
}
|
||||
|
||||
private static void extractInfoForActionSetFromORCID(SparkSession spark, String inputPath, String workingDir) {
|
||||
Dataset<Author> authors = spark
|
||||
.read()
|
||||
.parquet(inputPath + "Authors")
|
||||
.as(Encoders.bean(Author.class));
|
||||
|
||||
Dataset<Work> works = spark
|
||||
.read()
|
||||
.parquet(inputPath + "Works")
|
||||
.as(Encoders.bean(Work.class))
|
||||
.filter(
|
||||
(FilterFunction<Work>) w -> Optional.ofNullable(w.getPids()).isPresent() &&
|
||||
w
|
||||
.getPids()
|
||||
.stream()
|
||||
.anyMatch(
|
||||
p -> p.getSchema().equalsIgnoreCase("doi") ||
|
||||
p.getSchema().equalsIgnoreCase("pmc") ||
|
||||
p.getSchema().equalsIgnoreCase("pmid") ||
|
||||
p.getSchema().equalsIgnoreCase("arxiv")));
|
||||
|
||||
Dataset<Employment> employmentDataset = spark
|
||||
.read()
|
||||
.parquet(inputPath + "Employments")
|
||||
.as(Encoders.bean(Employment.class));
|
||||
|
||||
Dataset<Employment> employment = employmentDataset
|
||||
.joinWith(authors, employmentDataset.col("orcid").equalTo(authors.col("orcid")))
|
||||
.map((MapFunction<Tuple2<Employment, Author>, Employment>) t2 -> t2._1(), Encoders.bean(Employment.class));
|
||||
|
||||
// Mapping all the orcid profiles even if the profile has no visible works
|
||||
|
||||
authors.map((MapFunction<Author, Person>) op -> {
|
||||
Person person = new Person();
|
||||
person.setId(DHPUtils.generateIdentifier(op.getOrcid(), PERSON_PREFIX));
|
||||
person
|
||||
.setBiography(
|
||||
Optional
|
||||
.ofNullable(op.getBiography())
|
||||
|
||||
.orElse(""));
|
||||
KeyValue kv = OafMapperUtils.keyValue(orcidKey, ModelConstants.ORCID_DS);
|
||||
kv.setDataInfo(null);
|
||||
person.setCollectedfrom(Arrays.asList(kv));
|
||||
person
|
||||
.setAlternativeNames(
|
||||
Optional
|
||||
.ofNullable(op.getOtherNames())
|
||||
|
||||
.orElse(new ArrayList<>()));
|
||||
person
|
||||
.setFamilyName(
|
||||
Optional
|
||||
.ofNullable(op.getFamilyName())
|
||||
|
||||
.orElse(""));
|
||||
person
|
||||
.setGivenName(
|
||||
Optional
|
||||
.ofNullable(op.getGivenName())
|
||||
|
||||
.orElse(""));
|
||||
person
|
||||
.setPid(
|
||||
Optional
|
||||
.ofNullable(op.getOtherPids())
|
||||
.map(
|
||||
v -> v
|
||||
.stream()
|
||||
.map(
|
||||
p -> OafMapperUtils
|
||||
.structuredProperty(
|
||||
p.getValue(), p.getSchema(), p.getSchema(), ModelConstants.DNET_PID_TYPES,
|
||||
ModelConstants.DNET_PID_TYPES, null))
|
||||
.collect(Collectors.toList()))
|
||||
.orElse(new ArrayList<>()));
|
||||
person
|
||||
.getPid()
|
||||
.add(
|
||||
OafMapperUtils
|
||||
.structuredProperty(
|
||||
op.getOrcid(), ModelConstants.ORCID, ModelConstants.ORCID_CLASSNAME,
|
||||
ModelConstants.DNET_PID_TYPES, ModelConstants.DNET_PID_TYPES,
|
||||
OafMapperUtils
|
||||
.dataInfo(
|
||||
false,
|
||||
null,
|
||||
false,
|
||||
false,
|
||||
OafMapperUtils
|
||||
.qualifier(
|
||||
ModelConstants.SYSIMPORT_CROSSWALK_ENTITYREGISTRY,
|
||||
ModelConstants.SYSIMPORT_CROSSWALK_ENTITYREGISTRY,
|
||||
ModelConstants.DNET_PID_TYPES,
|
||||
ModelConstants.DNET_PID_TYPES),
|
||||
"0.91")));
|
||||
person.setDateofcollection(op.getLastModifiedDate());
|
||||
person.setOriginalId(Arrays.asList(op.getOrcid()));
|
||||
person.setDataInfo(ORCIDDATAINFO);
|
||||
return person;
|
||||
}, Encoders.bean(Person.class))
|
||||
.write()
|
||||
.option("compression", "gzip")
|
||||
.mode(SaveMode.Overwrite)
|
||||
.json(workingDir + "/people");
|
||||
|
||||
works
|
||||
.flatMap(
|
||||
(FlatMapFunction<Work, Relation>) ExtractPerson::getAuthorshipRelationIterator,
|
||||
Encoders.bean(Relation.class))
|
||||
.write()
|
||||
.option("compression", "gzip")
|
||||
.mode(SaveMode.Overwrite)
|
||||
.json(workingDir + "/authorship");
|
||||
|
||||
Dataset<Relation> coauthorship = works
|
||||
.flatMap((FlatMapFunction<Work, Tuple2<String, String>>) w -> {
|
||||
List<Tuple2<String, String>> lista = new ArrayList<>();
|
||||
w.getPids().stream().forEach(p -> {
|
||||
if (p.getSchema().equalsIgnoreCase("doi") || p.getSchema().equalsIgnoreCase("pmc")
|
||||
|| p.getSchema().equalsIgnoreCase("pmid") || p.getSchema().equalsIgnoreCase("arxiv"))
|
||||
lista.add(new Tuple2<>(p.getValue(), w.getOrcid()));
|
||||
});
|
||||
return lista.iterator();
|
||||
}, Encoders.tuple(Encoders.STRING(), Encoders.STRING()))
|
||||
.groupByKey((MapFunction<Tuple2<String, String>, String>) Tuple2::_1, Encoders.STRING())
|
||||
.mapGroups(
|
||||
(MapGroupsFunction<String, Tuple2<String, String>, Coauthors>) (k, it) -> extractCoAuthors(it),
|
||||
Encoders.bean(Coauthors.class))
|
||||
.flatMap(
|
||||
(FlatMapFunction<Coauthors, Relation>) c -> new CoAuthorshipIterator(c.getCoauthors()),
|
||||
Encoders.bean(Relation.class))
|
||||
.groupByKey((MapFunction<Relation, String>) r -> r.getSource() + r.getTarget(), Encoders.STRING())
|
||||
.mapGroups(
|
||||
(MapGroupsFunction<String, Relation, Relation>) (k, it) -> it.next(), Encoders.bean(Relation.class));
|
||||
|
||||
coauthorship
|
||||
.write()
|
||||
.option("compression", "gzip")
|
||||
.mode(SaveMode.Overwrite)
|
||||
.json(workingDir + "/coauthorship");
|
||||
|
||||
employment
|
||||
.filter((FilterFunction<Employment>) e -> Optional.ofNullable(e.getAffiliationId()).isPresent())
|
||||
.filter((FilterFunction<Employment>) e -> e.getAffiliationId().getSchema().equalsIgnoreCase("ror"))
|
||||
.map(
|
||||
(MapFunction<Employment, Relation>) ExtractPerson::getAffiliationRelation,
|
||||
Encoders.bean(Relation.class))
|
||||
.write()
|
||||
.option("compression", "gzip")
|
||||
.mode(SaveMode.Overwrite)
|
||||
.json(workingDir + "/affiliation");
|
||||
}
|
||||
|
||||
private static Dataset<Relation> getRelations(SparkSession spark, String path) {
|
||||
return spark
|
||||
.read()
|
||||
.textFile(path)
|
||||
.map(
|
||||
(MapFunction<String, Relation>) value -> OBJECT_MAPPER
|
||||
.readValue(value, Relation.class),
|
||||
Encoders.bean(Relation.class));// spark.read().json(path).as(Encoders.bean(Relation.class));
|
||||
}
|
||||
|
||||
private static Coauthors extractCoAuthors(Iterator<Tuple2<String, String>> it) {
|
||||
Coauthors coauth = new Coauthors();
|
||||
List<String> coauthors = new ArrayList<>();
|
||||
while (it.hasNext())
|
||||
coauthors.add(it.next()._2());
|
||||
coauth.setCoauthors(coauthors);
|
||||
|
||||
return coauth;
|
||||
}
|
||||
|
||||
private static Relation getAffiliationRelation(Employment row) {
|
||||
String source = PERSON_PREFIX + "::" + IdentifierFactory.md5(row.getOrcid());
|
||||
String target = ROR_PREFIX
|
||||
+ IdentifierFactory.md5(PidCleaner.normalizePidValue("ROR", row.getAffiliationId().getValue()));
|
||||
List<KeyValue> properties = new ArrayList<>();
|
||||
|
||||
Relation relation = OafMapperUtils
|
||||
.getRelation(
|
||||
source, target, ModelConstants.ORG_PERSON_RELTYPE, ModelConstants.ORG_PERSON_SUBRELTYPE,
|
||||
ModelConstants.ORG_PERSON_PARTICIPATES,
|
||||
Arrays.asList(OafMapperUtils.keyValue(orcidKey, ModelConstants.ORCID_DS)),
|
||||
ORCIDDATAINFO,
|
||||
null);
|
||||
relation.setValidated(true);
|
||||
|
||||
if (Optional.ofNullable(row.getStartDate()).isPresent() && StringUtils.isNotBlank(row.getStartDate())) {
|
||||
KeyValue kv = new KeyValue();
|
||||
kv.setKey("startDate");
|
||||
kv.setValue(row.getStartDate());
|
||||
properties.add(kv);
|
||||
}
|
||||
if (Optional.ofNullable(row.getEndDate()).isPresent() && StringUtils.isNotBlank(row.getEndDate())) {
|
||||
KeyValue kv = new KeyValue();
|
||||
kv.setKey("endDate");
|
||||
kv.setValue(row.getEndDate());
|
||||
properties.add(kv);
|
||||
}
|
||||
|
||||
if (properties.size() > 0)
|
||||
relation.setProperties(properties);
|
||||
return relation;
|
||||
|
||||
}
|
||||
|
||||
private static @NotNull Iterator<Relation> getAuthorshipRelationIterator(Work w) {
|
||||
|
||||
if (Optional.ofNullable(w.getPids()).isPresent())
|
||||
return w
|
||||
.getPids()
|
||||
.stream()
|
||||
.map(pid -> getRelation(w.getOrcid(), pid))
|
||||
.filter(Objects::nonNull)
|
||||
.collect(Collectors.toList())
|
||||
.iterator();
|
||||
List<Relation> ret = new ArrayList<>();
|
||||
return ret.iterator();
|
||||
}
|
||||
|
||||
private static Relation getRelation(String orcid, eu.dnetlib.dhp.collection.orcid.model.Pid pid) {
|
||||
String target;
|
||||
String source = PERSON_PREFIX + "::" + IdentifierFactory.md5(orcid);
|
||||
switch (pid.getSchema()) {
|
||||
case "doi":
|
||||
target = DOI_PREFIX
|
||||
+ IdentifierFactory
|
||||
.md5(PidCleaner.normalizePidValue(PidType.doi.toString(), pid.getValue()));
|
||||
break;
|
||||
case "pmid":
|
||||
target = PMID_PREFIX
|
||||
+ IdentifierFactory
|
||||
.md5(PidCleaner.normalizePidValue(PidType.pmid.toString(), pid.getValue()));
|
||||
break;
|
||||
case "arxiv":
|
||||
target = ARXIV_PREFIX
|
||||
+ IdentifierFactory
|
||||
.md5(PidCleaner.normalizePidValue(PidType.arXiv.toString(), pid.getValue()));
|
||||
break;
|
||||
case "pmcid":
|
||||
target = PMCID_PREFIX
|
||||
+ IdentifierFactory
|
||||
.md5(PidCleaner.normalizePidValue(PidType.pmc.toString(), pid.getValue()));
|
||||
break;
|
||||
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
Relation relation = OafMapperUtils
|
||||
.getRelation(
|
||||
source, target, ModelConstants.RESULT_PERSON_RELTYPE,
|
||||
ModelConstants.RESULT_PERSON_SUBRELTYPE,
|
||||
ModelConstants.RESULT_PERSON_HASAUTHORED,
|
||||
Arrays.asList(OafMapperUtils.keyValue(orcidKey, ModelConstants.ORCID_DS)),
|
||||
ORCIDDATAINFO,
|
||||
null);
|
||||
relation.setValidated(true);
|
||||
return relation;
|
||||
}
|
||||
}
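The createActionSet step above serialises every AtomicAction as a (payload class name, JSON) Text pair into a BZip2-compressed SequenceFile. Below is a minimal inspection sketch, not part of the commit: the class name, the local master setting and the /tmp path are invented for illustration.

// Illustrative sketch (assumption: not part of this commit): count the emitted atomic actions
// per payload class by reading back the (class name, JSON) Text pairs written above.
package eu.dnetlib.dhp.actionmanager.personentity;

import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;

import scala.Tuple2;

public class InspectPersonActionSet {

	public static void main(String[] args) {
		SparkSession spark = SparkSession.builder().appName("inspect-person-actionset").master("local[*]").getOrCreate();
		JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());

		sc
			.sequenceFile("/tmp/person/actionset", Text.class, Text.class) // placeholder path
			.mapToPair(t -> new Tuple2<String, Long>(t._1().toString(), 1L)) // key is the AtomicAction payload class
			.reduceByKey(Long::sum)
			.collect()
			.forEach(t -> System.out.println(t._1() + " -> " + t._2()));

		spark.stop();
	}
}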
@ -0,0 +1,25 @@

package eu.dnetlib.dhp.actionmanager.personentity;

import java.io.Serializable;
import java.util.ArrayList;

import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;

import eu.dnetlib.dhp.collection.orcid.model.Work;

public class WorkList implements Serializable {
private ArrayList<Work> workArrayList;

public ArrayList<Work> getWorkArrayList() {
return workArrayList;
}

public void setWorkArrayList(ArrayList<Work> workArrayList) {
this.workArrayList = workArrayList;
}

public WorkList() {
workArrayList = new ArrayList<>();
}
}
@ -0,0 +1,203 @@
|
|||
|
||||
package eu.dnetlib.dhp.actionmanager.raid;
|
||||
|
||||
import static eu.dnetlib.dhp.actionmanager.personentity.ExtractPerson.OPENAIRE_DATASOURCE_ID;
|
||||
import static eu.dnetlib.dhp.actionmanager.personentity.ExtractPerson.OPENAIRE_DATASOURCE_NAME;
|
||||
import static eu.dnetlib.dhp.common.Constants.*;
|
||||
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
|
||||
import static eu.dnetlib.dhp.schema.common.ModelConstants.*;
|
||||
import static eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils.*;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.api.java.JavaRDD;
|
||||
import org.apache.spark.sql.Encoders;
|
||||
import org.apache.spark.sql.SparkSession;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
||||
import eu.dnetlib.dhp.actionmanager.raid.model.RAiDEntity;
|
||||
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
||||
import eu.dnetlib.dhp.common.HdfsSupport;
|
||||
import eu.dnetlib.dhp.schema.action.AtomicAction;
|
||||
import eu.dnetlib.dhp.schema.common.ModelConstants;
|
||||
import eu.dnetlib.dhp.schema.oaf.*;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
|
||||
import eu.dnetlib.dhp.utils.DHPUtils;
|
||||
import scala.Tuple2;
|
||||
|
||||
public class GenerateRAiDActionSetJob {
|
||||
|
||||
private static final Logger log = LoggerFactory
|
||||
.getLogger(eu.dnetlib.dhp.actionmanager.raid.GenerateRAiDActionSetJob.class);
|
||||
|
||||
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
|
||||
|
||||
private static final List<KeyValue> RAID_COLLECTED_FROM = listKeyValues(
|
||||
OPENAIRE_DATASOURCE_ID, OPENAIRE_DATASOURCE_NAME);
|
||||
|
||||
private static final Qualifier RAID_QUALIFIER = qualifier(
|
||||
"0049", "Research Activity Identifier", DNET_PUBLICATION_RESOURCE, DNET_PUBLICATION_RESOURCE);
|
||||
|
||||
private static final Qualifier RAID_INFERENCE_QUALIFIER = qualifier(
|
||||
"raid:openaireinference", "Inferred by OpenAIRE", DNET_PROVENANCE_ACTIONS, DNET_PROVENANCE_ACTIONS);
|
||||
|
||||
private static final DataInfo RAID_DATA_INFO = dataInfo(
|
||||
false, OPENAIRE_DATASOURCE_NAME, true, false, RAID_INFERENCE_QUALIFIER, "0.92");
|
||||
|
||||
public static void main(final String[] args) throws Exception {
|
||||
|
||||
final String jsonConfiguration = IOUtils
|
||||
.toString(
|
||||
eu.dnetlib.dhp.actionmanager.raid.GenerateRAiDActionSetJob.class
|
||||
.getResourceAsStream("/eu/dnetlib/dhp/actionmanager/raid/action_set_parameters.json"));
|
||||
|
||||
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
|
||||
|
||||
parser.parseArgument(args);
|
||||
|
||||
final Boolean isSparkSessionManaged = Optional
|
||||
.ofNullable(parser.get("isSparkSessionManaged"))
|
||||
.map(Boolean::valueOf)
|
||||
.orElse(Boolean.TRUE);
|
||||
|
||||
log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
|
||||
|
||||
final String inputPath = parser.get("inputPath");
|
||||
log.info("inputPath: {}", inputPath);
|
||||
|
||||
final String outputPath = parser.get("outputPath");
|
||||
log.info("outputPath {}: ", outputPath);
|
||||
|
||||
final SparkConf conf = new SparkConf();
|
||||
|
||||
runWithSparkSession(conf, isSparkSessionManaged, spark -> {
|
||||
removeOutputDir(spark, outputPath);
|
||||
processRAiDEntities(spark, inputPath, outputPath);
|
||||
});
|
||||
}
|
||||
|
||||
private static void removeOutputDir(final SparkSession spark, final String path) {
|
||||
HdfsSupport.remove(path, spark.sparkContext().hadoopConfiguration());
|
||||
}
|
||||
|
||||
static void processRAiDEntities(final SparkSession spark,
|
||||
final String inputPath,
|
||||
final String outputPath) {
|
||||
readInputPath(spark, inputPath)
|
||||
.map(GenerateRAiDActionSetJob::prepareRAiD)
|
||||
.flatMap(List::iterator)
|
||||
.mapToPair(
|
||||
aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
|
||||
new Text(OBJECT_MAPPER.writeValueAsString(aa))))
|
||||
.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class);
|
||||
|
||||
}
|
||||
|
||||
protected static List<AtomicAction<? extends Oaf>> prepareRAiD(final RAiDEntity r) {
|
||||
|
||||
final Date now = new Date();
|
||||
final OtherResearchProduct orp = new OtherResearchProduct();
|
||||
final List<AtomicAction<? extends Oaf>> res = new ArrayList<>();
|
||||
String raidId = calculateOpenaireId(r.getRaid());
|
||||
|
||||
orp.setId(raidId);
|
||||
orp.setCollectedfrom(RAID_COLLECTED_FROM);
|
||||
orp.setDataInfo(RAID_DATA_INFO);
|
||||
orp
|
||||
.setTitle(
|
||||
Collections
|
||||
.singletonList(
|
||||
structuredProperty(
|
||||
r.getTitle(),
|
||||
qualifier("main title", "main title", DNET_DATACITE_TITLE, DNET_DATACITE_TITLE),
|
||||
RAID_DATA_INFO)));
|
||||
orp.setDescription(listFields(RAID_DATA_INFO, r.getSummary()));
|
||||
|
||||
Instance instance = new Instance();
|
||||
instance.setInstancetype(RAID_QUALIFIER);
|
||||
orp.setInstance(Collections.singletonList(instance));
|
||||
orp
|
||||
.setSubject(
|
||||
r
|
||||
.getSubjects()
|
||||
.stream()
|
||||
.map(
|
||||
s -> subject(
|
||||
s,
|
||||
qualifier(
|
||||
DNET_SUBJECT_KEYWORD, DNET_SUBJECT_KEYWORD, DNET_SUBJECT_TYPOLOGIES,
|
||||
DNET_SUBJECT_TYPOLOGIES),
|
||||
RAID_DATA_INFO))
|
||||
.collect(Collectors.toList()));
|
||||
orp
|
||||
.setRelevantdate(
|
||||
Arrays
|
||||
.asList(
|
||||
structuredProperty(
|
||||
r.getEndDate(), qualifier(END_DATE, END_DATE, DNET_DATACITE_DATE, DNET_DATACITE_DATE),
|
||||
RAID_DATA_INFO),
|
||||
structuredProperty(
|
||||
r.getStartDate(),
|
||||
qualifier(START_DATE, START_DATE, DNET_DATACITE_DATE, DNET_DATACITE_DATE),
|
||||
RAID_DATA_INFO)));
|
||||
orp.setLastupdatetimestamp(now.getTime());
|
||||
orp.setDateofacceptance(field(r.getStartDate(), RAID_DATA_INFO));
|
||||
|
||||
res.add(new AtomicAction<>(OtherResearchProduct.class, orp));
|
||||
|
||||
for (String resultId : r.getIds()) {
|
||||
Relation rel1 = OafMapperUtils
|
||||
.getRelation(
|
||||
raidId,
|
||||
resultId,
|
||||
ModelConstants.RESULT_RESULT,
|
||||
PART,
|
||||
HAS_PART,
|
||||
orp);
|
||||
Relation rel2 = OafMapperUtils
|
||||
.getRelation(
|
||||
resultId,
|
||||
raidId,
|
||||
ModelConstants.RESULT_RESULT,
|
||||
PART,
|
||||
IS_PART_OF,
|
||||
orp);
|
||||
res.add(new AtomicAction<>(Relation.class, rel1));
|
||||
res.add(new AtomicAction<>(Relation.class, rel2));
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
public static String calculateOpenaireId(final String raid) {
|
||||
return String.format("50|%s::%s", RAID_NS_PREFIX, DHPUtils.md5(raid));
|
||||
}
|
||||
|
||||
public static List<Author> createAuthors(final List<String> author) {
|
||||
return author.stream().map(s -> {
|
||||
Author a = new Author();
|
||||
a.setFullname(s);
|
||||
return a;
|
||||
}).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
private static JavaRDD<RAiDEntity> readInputPath(
|
||||
final SparkSession spark,
|
||||
final String path) {
|
||||
|
||||
return spark
|
||||
.read()
|
||||
.json(path)
|
||||
.as(Encoders.bean(RAiDEntity.class))
|
||||
.toJavaRDD();
|
||||
}
|
||||
|
||||
}
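prepareRAiD(...) is pure in-memory logic, so it can be exercised without Spark. A minimal usage sketch, not part of the commit: the class name and all sample values are invented, and the class sits in the same package only to reach the protected method.

// Illustrative sketch (assumption: not part of this commit): one RAiDEntity becomes a single
// OtherResearchProduct plus a hasPart/isPartOf relation pair for each linked result id.
package eu.dnetlib.dhp.actionmanager.raid;

import java.util.Arrays;
import java.util.List;

import eu.dnetlib.dhp.actionmanager.raid.model.RAiDEntity;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.oaf.Oaf;

public class PrepareRAiDExample {

	public static void main(String[] args) {
		RAiDEntity raid = new RAiDEntity(
			"https://raid.org/10.1234/example", // raid identifier (placeholder)
			Arrays.asList("Jane Doe", "John Smith"), // authors
			"2021-01-01", // startDate
			"2023-12-31", // endDate
			Arrays.asList("climate"), // subjects
			Arrays.asList("Example research activity"), // titles
			Arrays.asList("50|doi_________::aaaa", "50|doi_________::bbbb"), // linked result ids (placeholders)
			"Example research activity", // title
			"Example summary"); // summary

		List<AtomicAction<? extends Oaf>> actions = GenerateRAiDActionSetJob.prepareRAiD(raid);

		// 1 OtherResearchProduct + 2 relations for each of the 2 linked results = 5 atomic actions
		System.out.println(actions.size());
		System.out.println(GenerateRAiDActionSetJob.calculateOpenaireId(raid.getRaid()));
	}
}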
@ -0,0 +1,5 @@

package eu.dnetlib.dhp.actionmanager.raid.model;

public class GenerateRAiDActionSetJob {
}
@ -0,0 +1,106 @@

package eu.dnetlib.dhp.actionmanager.raid.model;

import java.io.Serializable;
import java.util.List;

public class RAiDEntity implements Serializable {

String raid;
List<String> authors;
String startDate;
String endDate;
List<String> subjects;
List<String> titles;
List<String> ids;
String title;
String summary;

public RAiDEntity() {
}

public RAiDEntity(String raid, List<String> authors, String startDate, String endDate, List<String> subjects,
List<String> titles, List<String> ids, String title, String summary) {
this.raid = raid;
this.authors = authors;
this.startDate = startDate;
this.endDate = endDate;
this.subjects = subjects;
this.titles = titles;
this.ids = ids;
this.title = title;
this.summary = summary;
}

public String getRaid() {
return raid;
}

public void setRaid(String raid) {
this.raid = raid;
}

public List<String> getAuthors() {
return authors;
}

public void setAuthors(List<String> authors) {
this.authors = authors;
}

public String getStartDate() {
return startDate;
}

public void setStartDate(String startDate) {
this.startDate = startDate;
}

public String getEndDate() {
return endDate;
}

public void setEndDate(String endDate) {
this.endDate = endDate;
}

public List<String> getSubjects() {
return subjects;
}

public void setSubjects(List<String> subjects) {
this.subjects = subjects;
}

public List<String> getTitles() {
return titles;
}

public void setTitles(List<String> titles) {
this.titles = titles;
}

public List<String> getIds() {
return ids;
}

public void setIds(List<String> ids) {
this.ids = ids;
}

public String getTitle() {
return title;
}

public void setTitle(String title) {
this.title = title;
}

public String getSummary() {
return summary;
}

public void setSummary(String summary) {
this.summary = summary;
}
}
@ -44,13 +44,7 @@ import eu.dnetlib.dhp.common.Constants;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.DataInfo;
import eu.dnetlib.dhp.schema.oaf.Field;
import eu.dnetlib.dhp.schema.oaf.KeyValue;
import eu.dnetlib.dhp.schema.oaf.Oaf;
import eu.dnetlib.dhp.schema.oaf.Organization;
import eu.dnetlib.dhp.schema.oaf.Qualifier;
import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
import eu.dnetlib.dhp.schema.oaf.*;
import eu.dnetlib.dhp.utils.DHPUtils;
import scala.Tuple2;

@ -0,0 +1,91 @@

package eu.dnetlib.dhp.actionmanager.sdgnodoi;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.io.Serializable;
import java.util.Objects;
import java.util.Optional;

import org.apache.commons.cli.ParseException;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.Hdfs;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.oaf.Result;
import scala.Tuple2;

public class CreateActionSetSparkJob implements Serializable {

private static final Logger log = LoggerFactory.getLogger(CreateActionSetSparkJob.class);

private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

public static void main(final String[] args) throws IOException, ParseException {

final ArgumentApplicationParser parser = new ArgumentApplicationParser(
IOUtils
.toString(
Objects
.requireNonNull(
CreateActionSetSparkJob.class
.getResourceAsStream(
"/eu/dnetlib/dhp/actionmanager/fosnodoi/as_parameters.json"))));

parser.parseArgument(args);

Boolean isSparkSessionManaged = Optional
.ofNullable(parser.get("isSparkSessionManaged"))
.map(Boolean::valueOf)
.orElse(Boolean.TRUE);

log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

final String inputPath = parser.get("sourcePath");
log.info("inputPath {}", inputPath);

final String outputPath = parser.get("outputPath");
log.info("outputPath {}", outputPath);

SparkConf conf = new SparkConf();
runWithSparkSession(
conf,
isSparkSessionManaged,
spark -> {
HdfsSupport.remove(outputPath, spark.sparkContext().hadoopConfiguration());
createActionSet(spark, inputPath, outputPath);
});

}

private static void createActionSet(SparkSession spark, String inputPath, String outputPath) {
spark
.read()
.textFile(inputPath)
.map(
(MapFunction<String, Result>) value -> OBJECT_MAPPER.readValue(value, Result.class),
Encoders.bean(Result.class))
.toJavaRDD()
.map(p -> new AtomicAction(p.getClass(), p))
.mapToPair(
aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
new Text(OBJECT_MAPPER.writeValueAsString(aa))))
.saveAsHadoopFile(
outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
}

}
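For reference, the job expects sourcePath to contain JSON lines of eu.dnetlib.dhp.schema.oaf.Result, matching the readValue(value, Result.class) call above. A minimal sketch of producing one such line, not part of the commit; the class name and identifier are placeholders.

// Illustrative sketch (assumption: not part of this commit): print one JSON-lines record of the
// kind createActionSet(...) reads from sourcePath.
package eu.dnetlib.dhp.actionmanager.sdgnodoi;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.schema.oaf.Result;

public class PrintResultLineExample {

	public static void main(String[] args) throws Exception {
		Result r = new Result();
		r.setId("50|doi_________::0123456789abcdef0123456789abcdef"); // placeholder identifier
		System.out.println(new ObjectMapper().writeValueAsString(r));
	}
}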
|
|
@ -5,11 +5,10 @@ import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
|
|||
|
||||
import java.io.Serializable;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.compress.GzipCodec;
|
||||
import org.apache.hadoop.io.compress.BZip2Codec;
|
||||
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.api.java.function.FilterFunction;
|
||||
|
@ -21,6 +20,7 @@ import org.slf4j.LoggerFactory;
|
|||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
||||
import eu.dnetlib.dhp.actionmanager.Constants;
|
||||
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
||||
import eu.dnetlib.dhp.schema.action.AtomicAction;
|
||||
import eu.dnetlib.dhp.schema.common.ModelConstants;
|
||||
|
@ -44,8 +44,7 @@ public class CreateActionSetFromWebEntries implements Serializable {
|
|||
private static final String PMID_PREFIX = "50|pmid________::";
|
||||
|
||||
private static final String PMCID_PREFIX = "50|pmc_________::";
|
||||
private static final String WEB_CRAWL_ID = "10|openaire____::fb98a192f6a055ba495ef414c330834b";
|
||||
private static final String WEB_CRAWL_NAME = "Web Crawl";
|
||||
|
||||
public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
|
@ -104,8 +103,6 @@ public class CreateActionSetFromWebEntries implements Serializable {
|
|||
final String ror = ROR_PREFIX
|
||||
+ IdentifierFactory.md5(PidCleaner.normalizePidValue("ROR", row.getAs("ror")));
|
||||
ret.addAll(createAffiliationRelationPairDOI(row.getAs("doi"), ror));
|
||||
ret.addAll(createAffiliationRelationPairPMID(row.getAs("pmid"), ror));
|
||||
ret.addAll(createAffiliationRelationPairPMCID(row.getAs("pmcid"), ror));
|
||||
|
||||
return ret
|
||||
.iterator();
|
||||
|
@ -115,7 +112,7 @@ public class CreateActionSetFromWebEntries implements Serializable {
|
|||
.mapToPair(
|
||||
aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
|
||||
new Text(OBJECT_MAPPER.writeValueAsString(aa))))
|
||||
.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
|
||||
.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);
|
||||
|
||||
}
|
||||
|
||||
|
@ -139,8 +136,9 @@ public class CreateActionSetFromWebEntries implements Serializable {
|
|||
"institution", functions
|
||||
.explode(
|
||||
functions.col("institutions")))
|
||||
|
||||
.selectExpr(
|
||||
"id", "doi", "ids.pmcid as pmcid", "ids.pmid as pmid", "institution.ror as ror",
|
||||
"id", "doi", "institution.ror as ror",
|
||||
"institution.country_code as country_code", "publication_year")
|
||||
.distinct();
|
||||
|
||||
|
@ -150,8 +148,7 @@ public class CreateActionSetFromWebEntries implements Serializable {
|
|||
|
||||
return spark
|
||||
.read()
|
||||
.option("header", true)
|
||||
.csv(inputPath)
|
||||
.json(inputPath)
|
||||
.select("OpenAlexId");
|
||||
}
|
||||
|
||||
|
@ -214,7 +211,7 @@ public class CreateActionSetFromWebEntries implements Serializable {
|
|||
ModelConstants.IS_AUTHOR_INSTITUTION_OF,
|
||||
Arrays
|
||||
.asList(
|
||||
OafMapperUtils.keyValue(WEB_CRAWL_ID, WEB_CRAWL_NAME)),
|
||||
OafMapperUtils.keyValue(Constants.WEB_CRAWL_ID, Constants.WEB_CRAWL_NAME)),
|
||||
OafMapperUtils
|
||||
.dataInfo(
|
||||
false, null, false, false,
|
||||
|
@ -233,7 +230,7 @@ public class CreateActionSetFromWebEntries implements Serializable {
|
|||
ModelConstants.HAS_AUTHOR_INSTITUTION,
|
||||
Arrays
|
||||
.asList(
|
||||
OafMapperUtils.keyValue(WEB_CRAWL_ID, WEB_CRAWL_NAME)),
|
||||
OafMapperUtils.keyValue(Constants.WEB_CRAWL_ID, Constants.WEB_CRAWL_NAME)),
|
||||
OafMapperUtils
|
||||
.dataInfo(
|
||||
false, null, false, false,
|
||||
|
|
|
@ -0,0 +1,158 @@
|
|||
|
||||
package eu.dnetlib.dhp.actionmanager.webcrawl;
|
||||
|
||||
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
|
||||
import static org.apache.spark.sql.functions.*;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.Serializable;
|
||||
import java.util.Arrays;
|
||||
import java.util.Optional;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.commons.io.filefilter.DirectoryFileFilter;
|
||||
import org.apache.commons.io.filefilter.FileFileFilter;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.compress.BZip2Codec;
|
||||
import org.apache.hadoop.io.compress.GzipCodec;
|
||||
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.api.java.JavaPairRDD;
|
||||
import org.apache.spark.api.java.JavaRDD;
|
||||
import org.apache.spark.api.java.JavaSparkContext;
|
||||
import org.apache.spark.api.java.function.FilterFunction;
|
||||
import org.apache.spark.api.java.function.MapFunction;
|
||||
import org.apache.spark.sql.*;
|
||||
import org.apache.spark.sql.types.*;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
||||
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
||||
import eu.dnetlib.dhp.common.HdfsSupport;
|
||||
import eu.dnetlib.dhp.schema.action.AtomicAction;
|
||||
import eu.dnetlib.dhp.schema.oaf.Relation;
|
||||
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
|
||||
import scala.Tuple2;
|
||||
|
||||
public class RemoveRelationFromActionSet
|
||||
implements Serializable {
|
||||
private static final Logger log = LoggerFactory.getLogger(CreateActionSetFromWebEntries.class);
|
||||
|
||||
private static final ObjectMapper MAPPER = new ObjectMapper();
|
||||
private static final StructType KV_SCHEMA = StructType$.MODULE$
|
||||
.apply(
|
||||
Arrays
|
||||
.asList(
|
||||
StructField$.MODULE$.apply("key", DataTypes.StringType, false, Metadata.empty()),
|
||||
StructField$.MODULE$.apply("value", DataTypes.StringType, false, Metadata.empty())));
|
||||
|
||||
private static final StructType ATOMIC_ACTION_SCHEMA = StructType$.MODULE$
|
||||
.apply(
|
||||
Arrays
|
||||
.asList(
|
||||
StructField$.MODULE$.apply("clazz", DataTypes.StringType, false, Metadata.empty()),
|
||||
StructField$.MODULE$
|
||||
.apply(
|
||||
"payload", DataTypes.StringType, false, Metadata.empty())));
|
||||
|
||||
public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
String jsonConfiguration = IOUtils
|
||||
.toString(
|
||||
CreateActionSetFromWebEntries.class
|
||||
.getResourceAsStream(
|
||||
"/eu/dnetlib/dhp/actionmanager/webcrawl/as_parameters.json"));
|
||||
|
||||
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
|
||||
parser.parseArgument(args);
|
||||
|
||||
Boolean isSparkSessionManaged = Optional
|
||||
.ofNullable(parser.get("isSparkSessionManaged"))
|
||||
.map(Boolean::valueOf)
|
||||
.orElse(Boolean.TRUE);
|
||||
|
||||
log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
|
||||
|
||||
// the actionSet path
|
||||
final String inputPath = parser.get("sourcePath");
|
||||
log.info("inputPath: {}", inputPath);
|
||||
|
||||
final String outputPath = parser.get("outputPath");
|
||||
log.info("outputPath: {}", outputPath);
|
||||
|
||||
final String blackListInputPath = parser.get("blackListPath");
|
||||
log.info("blackListInputPath: {}", blackListInputPath);
|
||||
|
||||
SparkConf conf = new SparkConf();
|
||||
|
||||
runWithSparkSession(
|
||||
conf,
|
||||
isSparkSessionManaged,
|
||||
spark -> {
|
||||
|
||||
removeFromActionSet(spark, inputPath, outputPath, blackListInputPath);
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
private static void removeFromActionSet(SparkSession spark, String inputPath, String outputPath,
|
||||
String blackListInputPath) {
|
||||
// read the blacklist
|
||||
Dataset<String> blackList = readBlackList(spark, blackListInputPath)
|
||||
.map(
|
||||
(MapFunction<Row, String>) r -> IdentifierFactory
|
||||
.idFromPid("50", "doi", ((String) r.getAs("doi")).substring(16), true),
|
||||
Encoders.STRING());
|
||||
|
||||
// read the old actionset and get the relations in the payload
|
||||
JavaPairRDD<Text, Text> seq = JavaSparkContext
|
||||
.fromSparkContext(spark.sparkContext())
|
||||
.sequenceFile(inputPath, Text.class, Text.class);
|
||||
|
||||
JavaRDD<Row> rdd = seq
|
||||
.map(x -> RowFactory.create(x._1().toString(), x._2().toString()));
|
||||
|
||||
Dataset<Row> actionSet = spark
|
||||
.createDataFrame(rdd, KV_SCHEMA)
|
||||
.withColumn("atomic_action", from_json(col("value"), ATOMIC_ACTION_SCHEMA))
|
||||
.select(expr("atomic_action.*"));
|
||||
|
||||
Dataset<Relation> relation = actionSet
|
||||
.map(
|
||||
(MapFunction<Row, Relation>) r -> MAPPER.readValue((String) r.getAs("payload"), Relation.class),
|
||||
Encoders.bean(Relation.class));
|
||||
|
||||
// select only the relation not matching any pid in the blacklist as source for the relation
|
||||
Dataset<Relation> relNoSource = relation
|
||||
.joinWith(blackList, relation.col("source").equalTo(blackList.col("value")), "left")
|
||||
.filter((FilterFunction<Tuple2<Relation, String>>) t2 -> t2._2() == null)
|
||||
.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));
|
||||
|
||||
// select only the relation not matching any pid in the blacklist as target of the relation
|
||||
relNoSource
|
||||
.joinWith(blackList, relNoSource.col("target").equalTo(blackList.col("value")), "left")
|
||||
.filter((FilterFunction<Tuple2<Relation, String>>) t2 -> t2._2() == null)
|
||||
.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
|
||||
.toJavaRDD()
|
||||
.map(p -> new AtomicAction(p.getClass(), p))
|
||||
.mapToPair(
|
||||
aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
|
||||
new Text(OBJECT_MAPPER.writeValueAsString(aa))))
|
||||
.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);
|
||||
;
|
||||
|
||||
}
|
||||
|
||||
private static Dataset<Row> readBlackList(SparkSession spark, String inputPath) {
|
||||
|
||||
return spark
|
||||
.read()
|
||||
.json(inputPath)
|
||||
.select("doi");
|
||||
}
|
||||
|
||||
}
|
|
@ -22,10 +22,13 @@ import eu.dnetlib.dhp.collection.plugin.CollectorPlugin;
|
|||
import eu.dnetlib.dhp.collection.plugin.base.BaseCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.file.FileCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.file.FileGZipCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.gtr2.Gtr2PublicationsCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.mongodb.MDStoreCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.mongodb.MongoDbDumpCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.oai.OaiCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.osf.OsfPreprintsCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.rest.RestCollectorPlugin;
|
||||
import eu.dnetlib.dhp.collection.plugin.zenodo.CollectZenodoDumpCollectorPlugin;
|
||||
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
|
||||
import eu.dnetlib.dhp.common.collection.CollectorException;
|
||||
import eu.dnetlib.dhp.common.collection.HttpClientParams;
|
||||
|
@ -58,7 +61,7 @@ public class CollectorWorker extends ReportingJob {
|
|||
|
||||
public void collect() throws UnknownCollectorPluginException, CollectorException, IOException {
|
||||
|
||||
final String outputPath = mdStoreVersion.getHdfsPath() + SEQUENCE_FILE_NAME;
|
||||
final String outputPath = this.mdStoreVersion.getHdfsPath() + SEQUENCE_FILE_NAME;
|
||||
log.info("outputPath path is {}", outputPath);
|
||||
|
||||
final CollectorPlugin plugin = getCollectorPlugin();
|
||||
|
@ -68,36 +71,36 @@ public class CollectorWorker extends ReportingJob {
|
|||
|
||||
try (SequenceFile.Writer writer = SequenceFile
|
||||
.createWriter(
|
||||
fileSystem.getConf(),
|
||||
SequenceFile.Writer.file(new Path(outputPath)),
|
||||
SequenceFile.Writer.keyClass(IntWritable.class),
|
||||
SequenceFile.Writer.valueClass(Text.class),
|
||||
this.fileSystem.getConf(), SequenceFile.Writer.file(new Path(outputPath)), SequenceFile.Writer
|
||||
.keyClass(IntWritable.class),
|
||||
SequenceFile.Writer
|
||||
.valueClass(Text.class),
|
||||
SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK, new DeflateCodec()))) {
|
||||
final IntWritable key = new IntWritable(counter.get());
|
||||
final Text value = new Text();
|
||||
plugin
|
||||
.collect(api, report)
|
||||
.forEach(
|
||||
content -> {
|
||||
key.set(counter.getAndIncrement());
|
||||
value.set(content);
|
||||
try {
|
||||
writer.append(key, value);
|
||||
} catch (Throwable e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
} catch (Throwable e) {
|
||||
report.put(e.getClass().getName(), e.getMessage());
|
||||
.collect(this.api, this.report)
|
||||
.forEach(content -> {
|
||||
key.set(counter.getAndIncrement());
|
||||
value.set(content);
|
||||
try {
|
||||
writer.append(key, value);
|
||||
} catch (final Throwable e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
} catch (final Throwable e) {
|
||||
this.report.put(e.getClass().getName(), e.getMessage());
|
||||
throw new CollectorException(e);
|
||||
} finally {
|
||||
shutdown();
|
||||
report.ongoing(counter.longValue(), counter.longValue());
|
||||
this.report.ongoing(counter.longValue(), counter.longValue());
|
||||
}
|
||||
}
|
||||
|
||||
private void scheduleReport(AtomicInteger counter) {
|
||||
private void scheduleReport(final AtomicInteger counter) {
|
||||
schedule(new ReporterCallback() {
|
||||
|
||||
@Override
|
||||
public Long getCurrent() {
|
||||
return counter.longValue();
|
||||
|
@ -112,33 +115,39 @@ public class CollectorWorker extends ReportingJob {
|
|||
|
||||
private CollectorPlugin getCollectorPlugin() throws UnknownCollectorPluginException {
|
||||
|
||||
switch (CollectorPlugin.NAME.valueOf(api.getProtocol())) {
|
||||
switch (CollectorPlugin.NAME.valueOf(this.api.getProtocol())) {
|
||||
case oai:
|
||||
return new OaiCollectorPlugin(clientParams);
|
||||
return new OaiCollectorPlugin(this.clientParams);
|
||||
case rest_json2xml:
|
||||
return new RestCollectorPlugin(clientParams);
|
||||
return new RestCollectorPlugin(this.clientParams);
|
||||
case file:
|
||||
return new FileCollectorPlugin(fileSystem);
|
||||
return new FileCollectorPlugin(this.fileSystem);
|
||||
case fileGzip:
|
||||
return new FileGZipCollectorPlugin(fileSystem);
|
||||
return new FileGZipCollectorPlugin(this.fileSystem);
|
||||
case baseDump:
|
||||
return new BaseCollectorPlugin(this.fileSystem);
|
||||
case gtr2Publications:
|
||||
return new Gtr2PublicationsCollectorPlugin(this.clientParams);
|
||||
case osfPreprints:
|
||||
return new OsfPreprintsCollectorPlugin(this.clientParams);
|
||||
case zenodoDump:
|
||||
return new CollectZenodoDumpCollectorPlugin();
|
||||
case other:
|
||||
final CollectorPlugin.NAME.OTHER_NAME plugin = Optional
|
||||
.ofNullable(api.getParams().get("other_plugin_type"))
|
||||
.ofNullable(this.api.getParams().get("other_plugin_type"))
|
||||
.map(CollectorPlugin.NAME.OTHER_NAME::valueOf)
|
||||
.orElseThrow(() -> new IllegalArgumentException("invalid other_plugin_type"));
|
||||
|
||||
switch (plugin) {
|
||||
case mdstore_mongodb_dump:
|
||||
return new MongoDbDumpCollectorPlugin(fileSystem);
|
||||
return new MongoDbDumpCollectorPlugin(this.fileSystem);
|
||||
case mdstore_mongodb:
|
||||
return new MDStoreCollectorPlugin();
|
||||
default:
|
||||
throw new UnknownCollectorPluginException("plugin is not managed: " + plugin);
|
||||
}
|
||||
default:
|
||||
throw new UnknownCollectorPluginException("protocol is not managed: " + api.getProtocol());
|
||||
throw new UnknownCollectorPluginException("protocol is not managed: " + this.api.getProtocol());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -154,14 +154,13 @@ public class ORCIDExtractor extends Thread {
extractedItem++;
if (extractedItem % 100000 == 0) {
log.info("Thread {}: Extracted {} items", id, extractedItem);
break;
}
}
}
}
} finally {
for (SequenceFile.Writer k : fileMap.values()) {
log.info("Thread {}: Completed processed {} items", id, extractedItem);
log.info("Thread {}: Completed processed {} items", id, extractedItem);
k.hflush();
k.close();
}
@ -20,6 +20,9 @@ public class Author extends ORCIDItem {

private String lastModifiedDate;

public Author() {
}

public String getBiography() {
return biography;
}

@ -11,4 +11,7 @@ public class ORCIDItem {
public void setOrcid(String orcid) {
this.orcid = orcid;
}

public ORCIDItem() {
}
}

@ -32,4 +32,6 @@ public class Work extends ORCIDItem {
pids.add(pid);
}

public Work() {
}
}
@ -11,7 +11,7 @@ public interface CollectorPlugin {

enum NAME {

oai, other, rest_json2xml, file, fileGzip, baseDump;
oai, other, rest_json2xml, file, fileGzip, baseDump, gtr2Publications, osfPreprints, zenodoDump;

public enum OTHER_NAME {
mdstore_mongodb_dump, mdstore_mongodb
@ -0,0 +1,43 @@

package eu.dnetlib.dhp.collection.plugin.gtr2;

import java.util.Iterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.collection.plugin.CollectorPlugin;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.CollectorException;
import eu.dnetlib.dhp.common.collection.HttpClientParams;

public class Gtr2PublicationsCollectorPlugin implements CollectorPlugin {

private final HttpClientParams clientParams;

public Gtr2PublicationsCollectorPlugin(final HttpClientParams clientParams) {
this.clientParams = clientParams;
}

@Override
public Stream<String> collect(final ApiDescriptor api, final AggregatorReport report) throws CollectorException {

final String baseUrl = api.getBaseUrl();
final String startPage = api.getParams().get("startPage");
final String endPage = api.getParams().get("endPage");
final String fromDate = api.getParams().get("fromDate");

if ((fromDate != null) && !fromDate.matches("\\d{4}-\\d{2}-\\d{2}")) {
throw new CollectorException("Invalid date (YYYY-MM-DD): " + fromDate);
}

final Iterator<String> iterator = new Gtr2PublicationsIterator(baseUrl, fromDate, startPage, endPage,
this.clientParams);
final Spliterator<String> spliterator = Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED);

return StreamSupport.stream(spliterator, false);
}

}
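A usage sketch for the new plugin outside of the CollectorWorker, not part of the commit: the endpoint, dates and page bounds are placeholders, and it assumes the usual ApiDescriptor bean (no-arg constructor, setBaseUrl, a mutable params map) plus the no-arg AggregatorReport and HttpClientParams constructors.

// Illustrative sketch (assumption: not part of this commit, setters and constructors assumed as noted above).
package eu.dnetlib.dhp.collection.plugin.gtr2;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.HttpClientParams;

public class Gtr2CollectExample {

	public static void main(String[] args) throws Exception {
		ApiDescriptor api = new ApiDescriptor();
		api.setBaseUrl("https://gtr.ukri.org/gtr/api"); // placeholder endpoint
		api.getParams().put("fromDate", "2024-01-01"); // optional: incremental harvesting
		api.getParams().put("startPage", "1");
		api.getParams().put("endPage", "2"); // keep the test crawl small

		new Gtr2PublicationsCollectorPlugin(new HttpClientParams())
			.collect(api, new AggregatorReport())
			.limit(3)
			.forEach(xml -> System.out.println(xml.substring(0, Math.min(300, xml.length()))));
	}
}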
@ -0,0 +1,215 @@
|
|||
|
||||
package eu.dnetlib.dhp.collection.plugin.gtr2;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.time.LocalDate;
|
||||
import java.time.format.DateTimeFormatter;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Queue;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.commons.lang3.math.NumberUtils;
|
||||
import org.dom4j.Document;
|
||||
import org.dom4j.DocumentException;
|
||||
import org.dom4j.DocumentHelper;
|
||||
import org.dom4j.Element;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import eu.dnetlib.dhp.common.collection.CollectorException;
|
||||
import eu.dnetlib.dhp.common.collection.HttpClientParams;
|
||||
import eu.dnetlib.dhp.common.collection.HttpConnector2;
|
||||
|
||||
public class Gtr2PublicationsIterator implements Iterator<String> {
|
||||
|
||||
public static final int PAGE_SIZE = 20;
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(Gtr2PublicationsIterator.class);
|
||||
|
||||
private final HttpConnector2 connector;
|
||||
private static final DateTimeFormatter simpleDateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
|
||||
|
||||
private static final int MAX_ATTEMPTS = 10;
|
||||
|
||||
private final String baseUrl;
|
||||
private int currPage;
|
||||
private int endPage;
|
||||
private boolean incremental = false;
|
||||
private LocalDate fromDate;
|
||||
|
||||
private final Map<String, String> cache = new HashMap<>();
|
||||
|
||||
private final Queue<String> queue = new LinkedList<>();
|
||||
|
||||
private String nextElement;
|
||||
|
||||
public Gtr2PublicationsIterator(final String baseUrl, final String fromDate, final String startPage,
|
||||
final String endPage,
|
||||
final HttpClientParams clientParams)
|
||||
throws CollectorException {
|
||||
|
||||
this.baseUrl = baseUrl;
|
||||
this.currPage = NumberUtils.toInt(startPage, 1);
|
||||
this.endPage = NumberUtils.toInt(endPage, Integer.MAX_VALUE);
|
||||
this.incremental = StringUtils.isNotBlank(fromDate);
|
||||
this.connector = new HttpConnector2(clientParams);
|
||||
|
||||
if (this.incremental) {
|
||||
this.fromDate = parseDate(fromDate);
|
||||
}
|
||||
|
||||
prepareNextElement();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return this.nextElement != null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String next() {
|
||||
try {
|
||||
return this.nextElement;
|
||||
} finally {
|
||||
prepareNextElement();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
private void prepareNextElement() {
|
||||
while ((this.currPage <= this.endPage) && this.queue.isEmpty()) {
|
||||
log.debug("FETCHING PAGE + " + this.currPage + "/" + this.endPage);
|
||||
this.queue.addAll(fetchPage(this.currPage++));
|
||||
}
|
||||
this.nextElement = this.queue.poll();
|
||||
}
|
||||
|
||||
private List<String> fetchPage(final int pageNumber) {
|
||||
|
||||
final List<String> res = new ArrayList<>();
|
||||
try {
|
||||
final Document doc = loadURL(cleanURL(this.baseUrl + "/outcomes/publications?p=" + pageNumber), 0);
|
||||
|
||||
if (this.endPage == Integer.MAX_VALUE) {
|
||||
this.endPage = NumberUtils.toInt(doc.valueOf("/*/@*[local-name() = 'totalPages']"));
|
||||
}
|
||||
|
||||
for (final Object po : doc.selectNodes("//*[local-name() = 'publication']")) {
|
||||
final Element mainEntity = (Element) ((Element) po).detach();
|
||||
|
||||
if (filterIncremental(mainEntity)) {
|
||||
res.add(expandMainEntity(mainEntity));
|
||||
} else {
|
||||
log.debug("Skipped entity");
|
||||
}
|
||||
|
||||
}
|
||||
} catch (final Throwable e) {
|
||||
log.error("Exception fetching page " + pageNumber, e);
|
||||
throw new RuntimeException("Exception fetching page " + pageNumber, e);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
private void addLinkedEntities(final Element master, final String relType, final Element newRoot,
|
||||
final Function<Document, Element> mapper) {
|
||||
|
||||
for (final Object o : master.selectNodes(".//*[local-name()='link']")) {
|
||||
final String rel = ((Element) o).valueOf("@*[local-name()='rel']");
|
||||
final String href = ((Element) o).valueOf("@*[local-name()='href']");
|
||||
|
||||
if (relType.equals(rel) && StringUtils.isNotBlank(href)) {
|
||||
final String cacheKey = relType + "#" + href;
|
||||
if (this.cache.containsKey(cacheKey)) {
|
||||
try {
|
||||
log.debug(" * from cache (" + relType + "): " + href);
|
||||
newRoot.add(DocumentHelper.parseText(this.cache.get(cacheKey)).getRootElement());
|
||||
} catch (final DocumentException e) {
|
||||
log.error("Error retrieving cache element: " + cacheKey, e);
|
||||
throw new RuntimeException("Error retrieving cache element: " + cacheKey, e);
|
||||
}
|
||||
} else {
|
||||
final Document doc = loadURL(cleanURL(href), 0);
|
||||
final Element elem = mapper.apply(doc);
|
||||
newRoot.add(elem);
|
||||
this.cache.put(cacheKey, elem.asXML());
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private boolean filterIncremental(final Element e) {
|
||||
if (!this.incremental || isAfter(e.valueOf("@*[local-name() = 'created']"), this.fromDate)
|
||||
|| isAfter(e.valueOf("@*[local-name() = 'updated']"), this.fromDate)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private String expandMainEntity(final Element mainEntity) {
|
||||
final Element newRoot = DocumentHelper.createElement("doc");
|
||||
newRoot.add(mainEntity);
|
||||
addLinkedEntities(mainEntity, "PROJECT", newRoot, this::asProjectElement);
|
||||
return DocumentHelper.createDocument(newRoot).asXML();
|
||||
}
|
||||
|
||||
private Element asProjectElement(final Document doc) {
|
||||
final Element newOrg = DocumentHelper.createElement("project");
|
||||
newOrg.addElement("id").setText(doc.valueOf("/*/@*[local-name()='id']"));
|
||||
newOrg
|
||||
.addElement("code")
|
||||
.setText(doc.valueOf("//*[local-name()='identifier' and @*[local-name()='type'] = 'RCUK']"));
|
||||
newOrg.addElement("title").setText(doc.valueOf("//*[local-name()='title']"));
|
||||
return newOrg;
|
||||
}
|
||||
|
||||
private static String cleanURL(final String url) {
|
||||
String cleaned = url;
|
||||
if (cleaned.contains("gtr.gtr")) {
|
||||
cleaned = cleaned.replace("gtr.gtr", "gtr");
|
||||
}
|
||||
if (cleaned.startsWith("http://")) {
|
||||
cleaned = cleaned.replaceFirst("http://", "https://");
|
||||
}
|
||||
return cleaned;
|
||||
}
|
||||
|
||||
private Document loadURL(final String cleanUrl, final int attempt) {
|
||||
try {
|
||||
log.debug(" * Downloading Url: {}", cleanUrl);
|
||||
final byte[] bytes = this.connector.getInputSource(cleanUrl).getBytes(StandardCharsets.UTF_8);
|
||||
return DocumentHelper.parseText(new String(bytes));
|
||||
} catch (final Throwable e) {
|
||||
log.error("Error dowloading url: {}, attempt = {}", cleanUrl, attempt, e);
|
||||
if (attempt >= MAX_ATTEMPTS) {
|
||||
throw new RuntimeException("Error downloading url: " + cleanUrl, e);
|
||||
}
|
||||
try {
|
||||
Thread.sleep(60000); // wait a minute before retrying
|
||||
} catch (final InterruptedException e1) {
|
||||
throw new RuntimeException("Error downloading url: " + cleanUrl, e);
|
||||
}
|
||||
return loadURL(cleanUrl, attempt + 1);
|
||||
}
|
||||
}
|
||||
|
||||
private LocalDate parseDate(final String s) {
|
||||
return LocalDate.parse(s.contains("T") ? s.substring(0, s.indexOf("T")) : s, simpleDateTimeFormatter);
|
||||
}
|
||||
|
||||
private boolean isAfter(final String d, final LocalDate fromDate) {
|
||||
return StringUtils.isNotBlank(d) && parseDate(d).isAfter(fromDate);
|
||||
}
|
||||
}
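The incremental filter above keeps a record when its 'created' or 'updated' attribute is strictly after fromDate. A minimal sketch of the same date comparison, not part of the commit; the class name and timestamp are placeholders.

// Illustrative sketch (assumption: not part of this commit): the date handling used by filterIncremental.
package eu.dnetlib.dhp.collection.plugin.gtr2;

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

public class Gtr2DateFilterExample {

	public static void main(String[] args) {
		DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd");
		LocalDate fromDate = LocalDate.parse("2024-01-01", fmt);

		String updated = "2024-03-05T10:15:30Z"; // attribute value as exposed by the API (placeholder)
		String datePart = updated.contains("T") ? updated.substring(0, updated.indexOf("T")) : updated;

		System.out.println(LocalDate.parse(datePart, fmt).isAfter(fromDate)); // true -> record is kept
	}
}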
|
|
@ -0,0 +1,52 @@

package eu.dnetlib.dhp.collection.plugin.osf;

import java.util.Optional;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.collection.plugin.CollectorPlugin;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.CollectorException;
import eu.dnetlib.dhp.common.collection.HttpClientParams;

public class OsfPreprintsCollectorPlugin implements CollectorPlugin {

public static final int PAGE_SIZE_VALUE_DEFAULT = 100;

private final HttpClientParams clientParams;

public OsfPreprintsCollectorPlugin(final HttpClientParams clientParams) {
this.clientParams = clientParams;
}

@Override
public Stream<String> collect(final ApiDescriptor api, final AggregatorReport report) throws CollectorException {
final String baseUrl = api.getBaseUrl();

final int pageSize = Optional
.ofNullable(api.getParams().get("pageSize"))
.filter(StringUtils::isNotBlank)
.map(s -> NumberUtils.toInt(s, PAGE_SIZE_VALUE_DEFAULT))
.orElse(PAGE_SIZE_VALUE_DEFAULT);

if (StringUtils.isBlank(baseUrl)) {
throw new CollectorException("Param 'baseUrl' is null or empty");
}

final OsfPreprintsIterator it = new OsfPreprintsIterator(baseUrl, pageSize, getClientParams());

return StreamSupport
.stream(Spliterators.spliteratorUnknownSize(it, Spliterator.ORDERED), false);
}

public HttpClientParams getClientParams() {
return this.clientParams;
}
}
@ -0,0 +1,151 @@
|
|||
|
||||
package eu.dnetlib.dhp.collection.plugin.osf;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.Queue;
|
||||
import java.util.concurrent.PriorityBlockingQueue;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.dom4j.Document;
|
||||
import org.dom4j.DocumentHelper;
|
||||
import org.dom4j.Element;
|
||||
import org.dom4j.Node;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import eu.dnetlib.dhp.collection.plugin.utils.JsonUtils;
|
||||
import eu.dnetlib.dhp.common.collection.CollectorException;
|
||||
import eu.dnetlib.dhp.common.collection.HttpClientParams;
|
||||
import eu.dnetlib.dhp.common.collection.HttpConnector2;
|
||||
|
||||
public class OsfPreprintsIterator implements Iterator<String> {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(OsfPreprintsIterator.class);
|
||||
|
||||
private static final int MAX_ATTEMPTS = 5;
|
||||
|
||||
private final HttpClientParams clientParams;
|
||||
|
||||
private final String baseUrl;
|
||||
private final int pageSize;
|
||||
|
||||
private String currentUrl;
|
||||
|
||||
private final Queue<String> recordQueue = new PriorityBlockingQueue<>();
|
||||
|
||||
public OsfPreprintsIterator(
|
||||
final String baseUrl,
|
||||
final int pageSize,
|
||||
final HttpClientParams clientParams) {
|
||||
|
||||
this.clientParams = clientParams;
|
||||
this.baseUrl = baseUrl;
|
||||
this.pageSize = pageSize;
|
||||
|
||||
initQueue();
|
||||
}
|
||||
|
||||
private void initQueue() {
|
||||
this.currentUrl = this.baseUrl + "?filter:is_published:d=true&format=json&page[size]=" + this.pageSize;
|
||||
|
||||
log.info("REST calls starting with {}", this.currentUrl);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
synchronized (this.recordQueue) {
|
||||
while (this.recordQueue.isEmpty() && StringUtils.isNotBlank(this.currentUrl)
|
||||
&& this.currentUrl.startsWith("http")) {
|
||||
try {
|
||||
this.currentUrl = downloadPage(this.currentUrl);
|
||||
} catch (final CollectorException e) {
|
||||
log.debug("CollectorPlugin.next()-Exception: {}", e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
if (!this.recordQueue.isEmpty()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String next() {
|
||||
synchronized (this.recordQueue) {
|
||||
return this.recordQueue.poll();
|
||||
}
|
||||
}
|
||||
|
||||
private String downloadPage(final String url) throws CollectorException {
|
||||
|
||||
final Document doc = downloadUrl(url, 0);
|
||||
|
||||
for (final Object o : doc.selectNodes("/*/data")) {
|
||||
|
||||
final Element n = (Element) ((Element) o).detach();
|
||||
|
||||
final Element group = DocumentHelper.createElement("group");
|
||||
group.addAttribute("id", n.valueOf("./id"));
|
||||
|
||||
group.addElement("preprint").add(n);
|
||||
|
||||
for (final Object o1 : n.selectNodes(".//contributors//href")) {
|
||||
final String href = ((Node) o1).getText();
|
||||
if (StringUtils.isNotBlank(href) && href.startsWith("http")) {
|
||||
final Document doc1 = downloadUrl(href, 0);
|
||||
group.addElement("contributors").add(doc1.getRootElement().detach());
|
||||
}
|
||||
}
|
||||
for (final Object o1 : n.selectNodes(".//primary_file//href")) {
|
||||
final String href = ((Node) o1).getText();
|
||||
if (StringUtils.isNotBlank(href) && href.startsWith("http")) {
|
||||
final Document doc1 = downloadUrl(href, 0);
|
||||
group.addElement("primary_file").add(doc1.getRootElement().detach());
|
||||
}
|
||||
}
|
||||
|
||||
this.recordQueue.add(DocumentHelper.createDocument(group).asXML());
|
||||
}
|
||||
|
||||
return doc.valueOf("/*/links/next");
|
||||
|
||||
}
|
||||
|
||||
private Document downloadUrl(final String url, final int attempt) throws CollectorException {
|
||||
if (attempt > MAX_ATTEMPTS) {
|
||||
throw new CollectorException("Max Number of attempts reached, url:" + url);
|
||||
}
|
||||
|
||||
if (attempt > 0) {
|
||||
final int delay = (attempt * 5000);
|
||||
log.debug("Attempt {} with delay {}", attempt, delay);
|
||||
try {
|
||||
Thread.sleep(delay);
|
||||
            } catch (final InterruptedException e) {
                throw new CollectorException(e);
            }
        }

        try {
            log.info("requesting URL [{}]", url);

            final HttpConnector2 connector = new HttpConnector2(this.clientParams);

            final String json = connector.getInputSource(url);
            final String xml = JsonUtils.convertToXML(json);

            return DocumentHelper.parseText(xml);

        } catch (final Throwable e) {
            log.warn(e.getMessage(), e);
            if ((e instanceof CollectorException) && e.getMessage().contains("401")) {
                final Element root = DocumentHelper.createElement("error_401_authorization_required");
                return DocumentHelper.createDocument(root);
            }
            return downloadUrl(url, attempt + 1);
        }
    }
}
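// --- illustrative sketch, not part of this commit ---------------------------------------------
// Minimal dom4j version of the grouping done in downloadPage(...) above: a node is detached from
// the parsed page and re-attached under a new <group> wrapper before being enqueued as XML.
// The sample XML is invented; DocumentException handling is omitted for brevity.
final Document sample = DocumentHelper.parseText("<root><data><id>abc12</id></data></root>");
final Element data = (Element) sample.selectSingleNode("/root/data").detach();
final Element group = DocumentHelper.createElement("group");
group.addAttribute("id", data.valueOf("./id"));
group.addElement("preprint").add(data);
// group.asXML() -> <group id="abc12"><preprint><data><id>abc12</id></data></preprint></group>
// --- end sketch --------------------------------------------------------------------------------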
@@ -0,0 +1,76 @@

package eu.dnetlib.dhp.collection.plugin.researchfi;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.message.BasicNameValuePair;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.collection.plugin.CollectorPlugin;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.CollectorException;

public class ResearchFiCollectorPlugin implements CollectorPlugin {

    private static final Logger log = LoggerFactory.getLogger(ResearchFiCollectorPlugin.class);

    @Override
    public Stream<String> collect(final ApiDescriptor api, final AggregatorReport report)
        throws CollectorException {

        final String authUrl = api.getParams().get("auth_url");
        final String clientId = api.getParams().get("auth_client_id");
        final String clientSecret = api.getParams().get("auth_client_secret");

        final String authToken = authenticate(authUrl, clientId, clientSecret);

        final Iterator<String> iter = new ResearchFiIterator(api.getBaseUrl(), authToken);

        return StreamSupport.stream(Spliterators.spliteratorUnknownSize(iter, Spliterator.ORDERED), false);
    }

    private String authenticate(final String authUrl, final String clientId, final String clientSecret)
        throws CollectorException {
        try (final CloseableHttpClient client = HttpClients.createDefault()) {
            final HttpPost req = new HttpPost(authUrl);
            final List<NameValuePair> params = new ArrayList<>();
            params.add(new BasicNameValuePair("grant_type", "client_credentials"));
            params.add(new BasicNameValuePair("client_id", clientId));
            params.add(new BasicNameValuePair("client_secret", clientSecret));

            req.setEntity(new UrlEncodedFormEntity(params, "UTF-8"));

            try (final CloseableHttpResponse response = client.execute(req)) {
                final String content = IOUtils.toString(response.getEntity().getContent());
                final JSONObject obj = new JSONObject(content);
                final String token = obj.getString("access_token");
                if (StringUtils.isNotBlank(token)) {
                    return token;
                }
            }
        } catch (final Throwable e) {
            log.warn("Error obtaining access token", e);
            throw new CollectorException("Error obtaining access token", e);
        }
        throw new CollectorException("Access token is missing");

    }

}
@@ -0,0 +1,117 @@

package eu.dnetlib.dhp.collection.plugin.researchfi;

import java.util.Iterator;
import java.util.Queue;
import java.util.concurrent.PriorityBlockingQueue;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Header;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.json.JSONArray;

import eu.dnetlib.dhp.collection.plugin.utils.JsonUtils;
import eu.dnetlib.dhp.common.collection.CollectorException;

public class ResearchFiIterator implements Iterator<String> {

    private static final Log log = LogFactory.getLog(ResearchFiIterator.class);

    private static final int PAGE_SIZE = 100;

    private final String baseUrl;
    private final String authToken;
    private String nextUrl;
    private int nCalls = 0;

    private final Queue<String> queue = new PriorityBlockingQueue<>();

    public ResearchFiIterator(final String baseUrl, final String authToken) {
        this.baseUrl = baseUrl;
        this.authToken = authToken;
        this.nextUrl = null;
    }

    private void verifyStarted() {

        try {
            if (this.nCalls == 0) {
                this.nextUrl = invokeUrl(this.baseUrl);
            }
        } catch (final CollectorException e) {
            throw new IllegalStateException(e);
        }
    }

    @Override
    public boolean hasNext() {
        synchronized (this.queue) {
            verifyStarted();
            return !this.queue.isEmpty();
        }
    }

    @Override
    public String next() {
        synchronized (this.queue) {
            verifyStarted();
            final String res = this.queue.poll();
            while (this.queue.isEmpty() && StringUtils.isNotBlank(this.nextUrl)) {
                try {
                    this.nextUrl = invokeUrl(this.nextUrl);
                } catch (final CollectorException e) {
                    throw new IllegalStateException(e);
                }
            }
            return res;
        }
    }

    private String invokeUrl(final String url) throws CollectorException {

        this.nCalls += 1;
        String next = null;

        log.info("Calling url: " + url);

        try (final CloseableHttpClient client = HttpClients.createDefault()) {

            final HttpGet req = new HttpGet(url);
            req.addHeader("Authorization", "Bearer " + this.authToken);
            try (final CloseableHttpResponse response = client.execute(req)) {
                for (final Header header : response.getAllHeaders()) {
                    log.debug("HEADER: " + header.getName() + " = " + header.getValue());
                    if ("link".equals(header.getName())) {
                        final String s = StringUtils.substringBetween(header.getValue(), "<", ">");
                        final String token = StringUtils
                            .substringBefore(StringUtils.substringAfter(s, "NextPageToken="), "&");

                        if (this.baseUrl.contains("?")) {
                            next = this.baseUrl + "&NextPageToken=" + token;
                        } else {
                            next = this.baseUrl + "?NextPageToken=" + token;
                        }
                    }
                }

                final String content = IOUtils.toString(response.getEntity().getContent());
                final JSONArray jsonArray = new JSONArray(content);

                jsonArray.forEach(obj -> this.queue.add(JsonUtils.convertToXML(obj.toString())));
            }

            return next;

        } catch (final Throwable e) {
            log.warn("Error calling url: " + url, e);
            throw new CollectorException("Error calling url: " + url, e);
        }
    }
}
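// --- illustrative sketch, not part of this commit ---------------------------------------------
// The "link" header handling in invokeUrl(...) above, applied to an invented header value;
// only commons-lang3 StringUtils is used, as in the iterator itself.
final String linkHeader = "<https://api.example.org/funding-decisions?NextPageToken=abc123&foo=1>; rel=\"next\"";
final String bracketedUrl = StringUtils.substringBetween(linkHeader, "<", ">");
final String nextPageToken = StringUtils
    .substringBefore(StringUtils.substringAfter(bracketedUrl, "NextPageToken="), "&");
// nextPageToken == "abc123"; it is then appended to baseUrl with '?' or '&', as in the code above.
// --- end sketch --------------------------------------------------------------------------------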
@@ -12,6 +12,8 @@ import java.util.Iterator;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
@@ -19,16 +21,10 @@ import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import javax.xml.xpath.*;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpHeaders;
import org.apache.http.entity.ContentType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Node;
@@ -51,7 +47,6 @@ import eu.dnetlib.dhp.common.collection.HttpClientParams;
 *
 */
public class RestIterator implements Iterator<String> {

    private static final Logger log = LoggerFactory.getLogger(RestIterator.class);
    public static final String UTF_8 = "UTF-8";
    private static final int MAX_ATTEMPTS = 5;
@@ -60,11 +55,15 @@ public class RestIterator implements Iterator<String> {

    private final String AUTHBASIC = "basic";

    private static final String XML_HEADER = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
    private static final String EMPTY_XML = XML_HEADER + "<" + JsonUtils.XML_WRAP_TAG + "></" + JsonUtils.XML_WRAP_TAG
        + ">";

    private final String baseUrl;
    private final String resumptionType;
    private final String resumptionParam;
    private final String resultFormatValue;
    private String queryParams;
    private String queryParams = "";
    private final int resultSizeValue;
    private int resumptionInt = 0; // integer resumption token (first record to harvest)
    private int resultTotal = -1;
@@ -158,7 +157,12 @@ public class RestIterator implements Iterator<String> {
    }

    private void initQueue() {
        this.query = this.baseUrl + "?" + this.queryParams + this.querySize + this.queryFormat;
        if (queryParams.equals("") && querySize.equals("") && queryFormat.equals("")) {
            query = baseUrl;
        } else {
            query = baseUrl + "?" + queryParams + querySize + queryFormat;
        }

        log.info("REST calls starting with {}", this.query);
    }

@@ -172,19 +176,6 @@ public class RestIterator implements Iterator<String> {
     */
    @Override
    public boolean hasNext() {
        if (this.recordQueue.isEmpty() && this.query.isEmpty()) {
            disconnect();
            return false;
        }
        return true;
    }

    /*
     * (non-Javadoc)
     * @see java.util.Iterator#next()
     */
    @Override
    public String next() {
        synchronized (this.recordQueue) {
            while (this.recordQueue.isEmpty() && !this.query.isEmpty()) {
                try {
@@ -194,6 +185,23 @@ public class RestIterator implements Iterator<String> {
                    throw new RuntimeException(e);
                }
            }

            if (!this.recordQueue.isEmpty()) {
                return true;
            }

            disconnect();
            return false;
        }
    }

    /*
     * (non-Javadoc)
     * @see java.util.Iterator#next()
     */
    @Override
    public String next() {
        synchronized (this.recordQueue) {
            return this.recordQueue.poll();
        }
    }
@@ -219,9 +227,8 @@ public class RestIterator implements Iterator<String> {

        try {
            String resultJson;
            String resultXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
            String resultXml = XML_HEADER;
            String nextQuery = "";
            final String emptyXml = resultXml + "<" + JsonUtils.XML_WRAP_TAG + "></" + JsonUtils.XML_WRAP_TAG + ">";
            Node resultNode = null;
            NodeList nodeList = null;
            String qUrlArgument = "";
@@ -236,6 +243,22 @@ public class RestIterator implements Iterator<String> {
                }
            }

            // find pagination page start number in queryParam and remove before start the first query
            if ((resumptionType.toLowerCase().equals("pagination") || resumptionType.toLowerCase().equals("page"))
                && (query.contains("paginationStart="))) {

                final Matcher m = Pattern.compile("paginationStart=([0-9]+)").matcher(query);
                m.find(); // guaranteed to be true for this regex

                String[] pageVal = m.group(0).split("=");
                pagination = Integer.parseInt(pageVal[1]);

                // remove page start number from query and queryParams
                queryParams = queryParams.replaceFirst("&?paginationStart=[0-9]+", "");
                query = query.replaceFirst("&?paginationStart=[0-9]+", "");

}
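// --- illustrative sketch, not part of this commit ---------------------------------------------
// What the paginationStart handling just above does, on an invented query string: read the start
// page from the parameter, then strip the parameter before the first request is issued.
final String exampleQuery = "https://api.example.org/records?size=100&paginationStart=3";
final Matcher exampleMatcher = Pattern.compile("paginationStart=([0-9]+)").matcher(exampleQuery);
if (exampleMatcher.find()) {
    final int startPage = Integer.parseInt(exampleMatcher.group(1));                   // 3
    final String cleaned = exampleQuery.replaceFirst("&?paginationStart=[0-9]+", "");  // ...?size=100
}
// --- end sketch --------------------------------------------------------------------------------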

            try {
                log.info("requesting URL [{}]", query);

@@ -261,7 +284,7 @@ public class RestIterator implements Iterator<String> {
                    this.resultStream = IOUtils.toInputStream(resultXml, UTF_8);
                }

                if (!(emptyXml).equalsIgnoreCase(resultXml)) {
                if (!isEmptyXml(resultXml)) {
                    resultNode = (Node) this.xpath
                        .evaluate("/", new InputSource(this.resultStream), XPathConstants.NODE);
                    nodeList = (NodeList) this.xprEntity.evaluate(resultNode, XPathConstants.NODESET);
@@ -270,8 +293,7 @@ public class RestIterator implements Iterator<String> {
                        final StringWriter sw = new StringWriter();
                        this.transformer.transform(new DOMSource(nodeList.item(i)), new StreamResult(sw));
                        final String toEnqueue = sw.toString();
                        if ((toEnqueue == null) || StringUtils.isBlank(toEnqueue)
                            || emptyXml.equalsIgnoreCase(toEnqueue)) {
                        if ((toEnqueue == null) || StringUtils.isBlank(toEnqueue) || isEmptyXml(toEnqueue)) {
                            log
                                .warn(
                                    "The following record resulted in empty item for the feeding queue: {}", resultXml);
@@ -299,6 +321,7 @@ public class RestIterator implements Iterator<String> {
                    throw new CollectorException("Mode: discover, Param 'resultSizeValue' is less than 2");
                }
                qUrlArgument = qUrl.getQuery();

                final String[] arrayQUrlArgument = qUrlArgument.split("&");
                for (final String arrayUrlArgStr : arrayQUrlArgument) {
                    if (arrayUrlArgStr.startsWith(this.resumptionParam)) {
@@ -312,7 +335,7 @@ public class RestIterator implements Iterator<String> {
                    }
                }

                if (((emptyXml).equalsIgnoreCase(resultXml))
                if (isEmptyXml(resultXml)
                    || ((nodeList != null) && (nodeList.getLength() < this.resultSizeValue))) {
                    // resumptionStr = "";
                    if (nodeList != null) {
@@ -331,13 +354,13 @@ public class RestIterator implements Iterator<String> {

                case "pagination":
                case "page": // pagination, iterate over page numbers
                    this.pagination += 1;
                    if (nodeList != null) {
                    if (nodeList != null && nodeList.getLength() > 0) {
                        this.discoverResultSize += nodeList.getLength();
                    } else {
                        this.resultTotal = this.discoverResultSize;
                        this.pagination = this.discoverResultSize;
                    }
                    this.pagination += 1;
                    this.resumptionInt = this.pagination;
                    this.resumptionStr = Integer.toString(this.resumptionInt);
                    break;
@@ -415,6 +438,10 @@ public class RestIterator implements Iterator<String> {

    }

    private boolean isEmptyXml(String s) {
        return EMPTY_XML.equalsIgnoreCase(s);
    }

    private boolean isInteger(final String s) {
        boolean isValidInteger = false;
        try {
@@ -0,0 +1,96 @@

package eu.dnetlib.dhp.collection.plugin.zenodo;

import static eu.dnetlib.dhp.utils.DHPUtils.getHadoopConfiguration;

import java.io.IOException;
import java.io.InputStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.collection.plugin.CollectorPlugin;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.CollectorException;

public class CollectZenodoDumpCollectorPlugin implements CollectorPlugin {

    final private Logger log = LoggerFactory.getLogger(getClass());

    private void downloadItem(final String name, final String itemURL, final String basePath,
        final FileSystem fileSystem) {
        try {
            final Path hdfsWritePath = new Path(String.format("%s/%s", basePath, name));
            final FSDataOutputStream fsDataOutputStream = fileSystem.create(hdfsWritePath, true);
            final HttpGet request = new HttpGet(itemURL);
            final int timeout = 60; // seconds
            final RequestConfig config = RequestConfig
                .custom()
                .setConnectTimeout(timeout * 1000)
                .setConnectionRequestTimeout(timeout * 1000)
                .setSocketTimeout(timeout * 1000)
                .build();
            log.info("Downloading url {} into {}", itemURL, hdfsWritePath.getName());
            try (CloseableHttpClient client = HttpClientBuilder.create().setDefaultRequestConfig(config).build();
                CloseableHttpResponse response = client.execute(request)) {
                int responseCode = response.getStatusLine().getStatusCode();
                log.info("Response code is {}", responseCode);
                if (responseCode >= 200 && responseCode < 400) {
                    IOUtils.copy(response.getEntity().getContent(), fsDataOutputStream);
                }
            } catch (Throwable eu) {
                throw new RuntimeException(eu);
            }
        } catch (Throwable e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public Stream<String> collect(ApiDescriptor api, AggregatorReport report) throws CollectorException {
        try {
            final String zenodoURL = api.getBaseUrl();
            final String hdfsURI = api.getParams().get("hdfsURI");
            final FileSystem fileSystem = FileSystem.get(getHadoopConfiguration(hdfsURI));
            downloadItem("zenodoDump.tar.gz", zenodoURL, "/tmp", fileSystem);
            CompressionCodecFactory factory = new CompressionCodecFactory(fileSystem.getConf());

            Path sourcePath = new Path("/tmp/zenodoDump.tar.gz");
            CompressionCodec codec = factory.getCodec(sourcePath);
            InputStream gzipInputStream = null;
            try {
                gzipInputStream = codec.createInputStream(fileSystem.open(sourcePath));
                return iterateTar(gzipInputStream);

            } catch (IOException e) {
                throw new CollectorException(e);
            } finally {
                log.info("Closing gzip stream");
                org.apache.hadoop.io.IOUtils.closeStream(gzipInputStream);
            }
        } catch (Exception e) {
            throw new CollectorException(e);
        }
    }

    private Stream<String> iterateTar(InputStream gzipInputStream) throws Exception {

        Iterable<String> iterable = () -> new ZenodoTarIterator(gzipInputStream);
        return StreamSupport.stream(iterable.spliterator(), false);

    }
}
@@ -0,0 +1,59 @@

package eu.dnetlib.dhp.collection.plugin.zenodo;

import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Iterator;

import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.io.IOUtils;

public class ZenodoTarIterator implements Iterator<String>, Closeable {

    private final InputStream gzipInputStream;
    private final StringBuilder currentItem = new StringBuilder();
    private TarArchiveInputStream tais;
    private boolean hasNext;

    public ZenodoTarIterator(InputStream gzipInputStream) {
        this.gzipInputStream = gzipInputStream;
        tais = new TarArchiveInputStream(gzipInputStream);
        hasNext = getNextItem();
    }

    private boolean getNextItem() {
        try {
            TarArchiveEntry entry;
            while ((entry = tais.getNextTarEntry()) != null) {
                if (entry.isFile()) {
                    currentItem.setLength(0);
                    currentItem.append(IOUtils.toString(new InputStreamReader(tais)));
                    return true;
                }
            }
            return false;
        } catch (Throwable e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public boolean hasNext() {
        return hasNext;
    }

    @Override
    public String next() {
        final String data = currentItem.toString();
        hasNext = getNextItem();
        return data;
    }

    @Override
    public void close() throws IOException {
        gzipInputStream.close();
    }
}
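// --- illustrative sketch, not part of this commit ---------------------------------------------
// Local, non-HDFS usage of the iterator above: the gzip layer is handled here with
// commons-compress instead of the Hadoop CompressionCodec used by the collector plugin.
// The file path is hypothetical; each next() call returns the content of one tar entry.
try (InputStream in = new GzipCompressorInputStream(
    Files.newInputStream(Paths.get("/tmp/zenodoDump.tar.gz")));
    ZenodoTarIterator it = new ZenodoTarIterator(in)) {
    while (it.hasNext()) {
        final String record = it.next();
        System.out.println(record.length());
    }
} catch (IOException e) {
    throw new UncheckedIOException(e);
}
// Assumed imports: java.io.UncheckedIOException, java.nio.file.Files, java.nio.file.Paths,
// org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream.
// --- end sketch --------------------------------------------------------------------------------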
@@ -0,0 +1,39 @@

package eu.dnetlib.dhp.sx.bio.pubmed;

/**
 * The type Pubmed Affiliation.
 *
 * @author Sandro La Bruzzo
 */
public class PMAffiliation {

    private String name;

    private PMIdentifier identifier;

    public PMAffiliation() {

    }

    public PMAffiliation(String name, PMIdentifier identifier) {
        this.name = name;
        this.identifier = identifier;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public PMIdentifier getIdentifier() {
        return identifier;
    }

    public void setIdentifier(PMIdentifier identifier) {
        this.identifier = identifier;
    }
}
@@ -8,259 +8,115 @@ import java.util.List;
/**
 * This class represent an instance of Pubmed Article extracted from the native XML
 *
 * @author Sandro La Bruzzo
 */

public class PMArticle implements Serializable {

    /**
     * the Pubmed Identifier
     */
    private String pmid;

    private String pmcId;

    /**
     * the DOI
     */
    private String doi;
    /**
     * the Pubmed Date extracted from <PubmedPubDate> Specifies a date significant to either the article's history or the citation's processing.
     * All <History> dates will have a <Year>, <Month>, and <Day> elements. Some may have an <Hour>, <Minute>, and <Second> element(s).
     */
    private String date;
    /**
     * This is an 'envelop' element that contains various elements describing the journal cited; i.e., ISSN, Volume, Issue, and PubDate and author name(s), however, it does not contain data itself.
     */
    private PMJournal journal;
    /**
     * The full journal title (taken from NLM cataloging data following NLM rules for how to compile a serial name) is exported in this element. Some characters that are not part of the NLM MEDLINE/PubMed Character Set reside in a relatively small number of full journal titles. The NLM journal title abbreviation is exported in the <MedlineTA> element.
     */
    private String title;
    /**
     * English-language abstracts are taken directly from the published article.
     * If the article does not have a published abstract, the National Library of Medicine does not create one,
     * thus the record lacks the <Abstract> and <AbstractText> elements. However, in the absence of a formally
     * labeled abstract in the published article, text from a substantive "summary", "summary and conclusions" or "conclusions and summary" may be used.
     */
    private String description;
    /**
     * the language in which an article was published is recorded in <Language>.
     * All entries are three letter abbreviations stored in lower case, such as eng, fre, ger, jpn, etc. When a single
     * record contains more than one language value the XML export program extracts the languages in alphabetic order by the 3-letter language value.
     * Some records provided by collaborating data producers may contain the value und to identify articles whose language is undetermined.
     */
    private String language;

    /**
     * NLM controlled vocabulary, Medical Subject Headings (MeSH®), is used to characterize the content of the articles represented by MEDLINE citations. *
     */
    private final List<PMSubject> subjects = new ArrayList<>();
    /**
     * This element is used to identify the type of article indexed for MEDLINE;
     * it characterizes the nature of the information or the manner in which it is conveyed as well as the type of
     * research support received (e.g., Review, Letter, Retracted Publication, Clinical Conference, Research Support, N.I.H., Extramural).
     */
    private final List<PMSubject> publicationTypes = new ArrayList<>();
    /**
     * Personal and collective (corporate) author names published with the article are found in <AuthorList>.
     */
    private List<PMSubject> subjects;
    private List<PMSubject> publicationTypes = new ArrayList<>();
    private List<PMAuthor> authors = new ArrayList<>();
    private List<PMGrant> grants = new ArrayList<>();

    /**
     * <GrantID> contains the research grant or contract number (or both) that designates financial support by any agency of the United States Public Health Service
     * or any institute of the National Institutes of Health. Additionally, beginning in late 2005, grant numbers are included for many other US and non-US funding agencies and organizations.
     */
    private final List<PMGrant> grants = new ArrayList<>();

    /**
     * get the DOI
     * @return a DOI
     */
    public String getDoi() {
        return doi;
    }

    /**
     * Set the DOI
     * @param doi a DOI
     */
    public void setDoi(String doi) {
        this.doi = doi;
    }

    /**
     * get the Pubmed Identifier
     * @return the PMID
     */
    public String getPmid() {
        return pmid;
    }

    /**
     * set the Pubmed Identifier
     * @param pmid the Pubmed Identifier
     */
    public void setPmid(String pmid) {
        this.pmid = pmid;
    }

    /**
     * the Pubmed Date extracted from <PubmedPubDate> Specifies a date significant to either the article's history or the citation's processing.
     * All <History> dates will have a <Year>, <Month>, and <Day> elements. Some may have an <Hour>, <Minute>, and <Second> element(s).
     *
     * @return the Pubmed Date
     */
    public String getDate() {
        return date;
    }

    /**
     * Set the pubmed Date
     * @param date
     */
    public void setDate(String date) {
        this.date = date;
    }

    /**
     * The full journal title (taken from NLM cataloging data following NLM rules for how to compile a serial name) is exported in this element.
     * Some characters that are not part of the NLM MEDLINE/PubMed Character Set reside in a relatively small number of full journal titles.
     * The NLM journal title abbreviation is exported in the <MedlineTA> element.
     *
     * @return the pubmed Journal Extracted
     */
    public PMJournal getJournal() {
        return journal;
    }

    /**
     * Set the mapped pubmed Journal
     * @param journal
     */
    public void setJournal(PMJournal journal) {
        this.journal = journal;
    }

    /**
     * <ArticleTitle> contains the entire title of the journal article. <ArticleTitle> is always in English;
     * those titles originally published in a non-English language and translated for <ArticleTitle> are enclosed in square brackets.
     * All titles end with a period unless another punctuation mark such as a question mark or bracket is present.
     * Explanatory information about the title itself is enclosed in parentheses, e.g.: (author's transl).
     * Corporate/collective authors may appear at the end of <ArticleTitle> for citations up to about the year 2000.
     *
     * @return the extracted pubmed Title
     */
    public String getTitle() {
        return title;
    }

    /**
     * set the pubmed title
     * @param title
     */
    public void setTitle(String title) {
        this.title = title;
    }

    /**
     * English-language abstracts are taken directly from the published article.
     * If the article does not have a published abstract, the National Library of Medicine does not create one,
     * thus the record lacks the <Abstract> and <AbstractText> elements. However, in the absence of a formally
     * labeled abstract in the published article, text from a substantive "summary", "summary and conclusions" or "conclusions and summary" may be used.
     *
     * @return the Mapped Pubmed Article Abstracts
     */
    public String getDescription() {
        return description;
    }

    /**
     * Set the Mapped Pubmed Article Abstracts
     * @param description
     */
    public void setDescription(String description) {
        this.description = description;
    }

    /**
     * Personal and collective (corporate) author names published with the article are found in <AuthorList>.
     *
     * @return get the Mapped Authors lists
     */
    public List<PMAuthor> getAuthors() {
        return authors;
    }

    /**
     * Set the Mapped Authors lists
     * @param authors
     */
    public void setAuthors(List<PMAuthor> authors) {
        this.authors = authors;
    }

    /**
     * This element is used to identify the type of article indexed for MEDLINE;
     * it characterizes the nature of the information or the manner in which it is conveyed as well as the type of
     * research support received (e.g., Review, Letter, Retracted Publication, Clinical Conference, Research Support, N.I.H., Extramural).
     *
     * @return the mapped Subjects
     */
    public List<PMSubject> getSubjects() {
        return subjects;
    }

    /**
     *
     * the language in which an article was published is recorded in <Language>.
     * All entries are three letter abbreviations stored in lower case, such as eng, fre, ger, jpn, etc. When a single
     * record contains more than one language value the XML export program extracts the languages in alphabetic order by the 3-letter language value.
     * Some records provided by collaborating data producers may contain the value und to identify articles whose language is undetermined.
     *
     * @return The mapped Language
     */
    public String getLanguage() {
        return language;
    }

    /**
     *
     * Set The mapped Language
     *
     * @param language the mapped Language
     */
    public void setLanguage(String language) {
        this.language = language;
    }

    /**
     * This element is used to identify the type of article indexed for MEDLINE;
     * it characterizes the nature of the information or the manner in which it is conveyed as well as the type of
     * research support received (e.g., Review, Letter, Retracted Publication, Clinical Conference, Research Support, N.I.H., Extramural).
     *
     * @return the mapped Publication Type
     */
    public List<PMSubject> getPublicationTypes() {
        return publicationTypes;
    }

    /**
     * <GrantID> contains the research grant or contract number (or both) that designates financial support by any agency of the United States Public Health Service
     * or any institute of the National Institutes of Health. Additionally, beginning in late 2005, grant numbers are included for many other US and non-US funding agencies and organizations.
     * @return the mapped grants
     */

    public List<PMGrant> getGrants() {
        return grants;
    }

    public String getPmcId() {
        return pmcId;
    }

    public PMArticle setPmcId(String pmcId) {
    public void setPmcId(String pmcId) {
        this.pmcId = pmcId;
        return this;
    }

    public String getDoi() {
        return doi;
    }

    public void setDoi(String doi) {
        this.doi = doi;
    }

    public String getDate() {
        return date;
    }

    public void setDate(String date) {
        this.date = date;
    }

    public PMJournal getJournal() {
        return journal;
    }

    public void setJournal(PMJournal journal) {
        this.journal = journal;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public String getLanguage() {
        return language;
    }

    public void setLanguage(String language) {
        this.language = language;
    }

    public List<PMSubject> getSubjects() {
        return subjects;
    }

    public void setSubjects(List<PMSubject> subjects) {
        this.subjects = subjects;
    }

    public List<PMSubject> getPublicationTypes() {
        return publicationTypes;
    }

    public void setPublicationTypes(List<PMSubject> publicationTypes) {
        this.publicationTypes = publicationTypes;
    }

    public List<PMAuthor> getAuthors() {
        return authors;
    }

    public void setAuthors(List<PMAuthor> authors) {
        this.authors = authors;
    }

    public List<PMGrant> getGrants() {
        return grants;
    }

    public void setGrants(List<PMGrant> grants) {
        this.grants = grants;
    }
}
@@ -12,6 +12,8 @@ public class PMAuthor implements Serializable {

    private String lastName;
    private String foreName;
    private PMIdentifier identifier;
    private PMAffiliation affiliation;

    /**
     * Gets last name.
@@ -59,4 +61,40 @@ public class PMAuthor implements Serializable {
            .format("%s, %s", this.foreName != null ? this.foreName : "", this.lastName != null ? this.lastName : "");
    }

    /**
     * Gets identifier.
     *
     * @return the identifier
     */
    public PMIdentifier getIdentifier() {
        return identifier;
    }

    /**
     * Sets identifier.
     *
     * @param identifier the identifier
     */
    public void setIdentifier(PMIdentifier identifier) {
        this.identifier = identifier;
    }

    /**
     * Gets affiliation.
     *
     * @return the affiliation
     */
    public PMAffiliation getAffiliation() {
        return affiliation;
    }

    /**
     * Sets affiliation.
     *
     * @param affiliation the affiliation
     */
    public void setAffiliation(PMAffiliation affiliation) {
        this.affiliation = affiliation;
    }

}
@@ -0,0 +1,53 @@

package eu.dnetlib.dhp.sx.bio.pubmed;

public class PMIdentifier {

    private String pid;
    private String type;

    public PMIdentifier(String pid, String type) {
        this.pid = cleanPid(pid);
        this.type = type;
    }

    public PMIdentifier() {

    }

    private String cleanPid(String pid) {

        if (pid == null) {
            return null;
        }

        // clean ORCID ID in the form 0000000163025705 to 0000-0001-6302-5705
        if (pid.matches("[0-9]{15}[0-9X]")) {
            return pid.replaceAll("(.{4})(.{4})(.{4})(.{4})", "$1-$2-$3-$4");
        }

        // clean ORCID in the form http://orcid.org/0000-0001-8567-3543 to 0000-0001-8567-3543
        if (pid.matches("http://orcid.org/[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}")) {
            return pid.replaceAll("http://orcid.org/", "");
        }
        return pid;
    }

    public String getPid() {
        return pid;
    }

    public PMIdentifier setPid(String pid) {
        this.pid = cleanPid(pid);
        return this;
    }

    public String getType() {
        return type;
    }

    public PMIdentifier setType(String type) {
        this.type = type;
        return this;
    }
}
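// --- illustrative sketch, not part of this commit ---------------------------------------------
// The two normalisations applied by cleanPid(...) above, using the identifiers quoted in its own
// comments:
final String compact = "0000000163025705".replaceAll("(.{4})(.{4})(.{4})(.{4})", "$1-$2-$3-$4");
// compact == "0000-0001-6302-5705"
final String fromUrl = "http://orcid.org/0000-0001-8567-3543".replaceAll("http://orcid.org/", "");
// fromUrl == "0000-0001-8567-3543"
// --- end sketch --------------------------------------------------------------------------------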
@@ -29,6 +29,18 @@
    "paramDescription": "the path to get the input data from Datacite",
    "paramRequired": true
  },
  {
    "paramName": "wip",
    "paramLongName": "webCrawlInputPath",
    "paramDescription": "the path to get the input data from Web Crawl",
    "paramRequired": true
  },
  {
    "paramName": "pub",
    "paramLongName": "publisherInputPath",
    "paramDescription": "the path to get the input data from publishers",
    "paramRequired": true
  },
  {
    "paramName": "o",
    "paramLongName": "outputPath",
@@ -31,9 +31,11 @@ spark2SqlQueryExecutionListeners=com.cloudera.spark.lineage.NavigatorQueryListen
# The following is needed as a property of a workflow
oozie.wf.application.path=${oozieTopWfApplicationPath}

crossrefInputPath=/data/bip-affiliations/crossref-data.json
pubmedInputPath=/data/bip-affiliations/pubmed-data.json
openapcInputPath=/data/bip-affiliations/openapc-data.json
dataciteInputPath=/data/bip-affiliations/datacite-data.json
crossrefInputPath=/data/openaire-affiliations/crossref-data.json
pubmedInputPath=/data/openaire-affiliations/pubmed-data-v4.json
openapcInputPath=/data/openaire-affiliations/openapc-data.json
dataciteInputPath=/data/openaire-affiliations/datacite-data.json
webCrawlInputPath=/data/openaire-affiliations/webCrawl
publisherInputPath=/data/openaire-affiliations/publishers

outputPath=/tmp/crossref-affiliations-output-v5
outputPath=/tmp/affRoAS