Merge remote-tracking branch 'origin/propagateorcid' into propagateorcid

# Conflicts:
#	dhp-common/src/main/java/eu/dnetlib/dhp/common/enrichment/Constants.java
#	dhp-common/src/main/scala/eu/dnetlib/dhp/common/author/SparkEnrichWithOrcidAuthors.scala
#	dhp-common/src/main/scala/eu/dnetlib/dhp/utils/ORCIDAuthorEnricher.scala
#	dhp-workflows/dhp-enrichment/src/main/java/eu/dnetlib/dhp/PropagationConstant.java
#	dhp-workflows/dhp-enrichment/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/OrcidAuthors.java
#	dhp-workflows/dhp-enrichment/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/SparkPropagateOrcidAuthor.java
#	dhp-workflows/dhp-graph-mapper/src/main/scala/eu/dnetlib/dhp/enrich/orcid/SparkEnrichGraphWithOrcidAuthors.scala
Miriam Baglioni 2024-12-18 09:23:21 +01:00
commit 40c002b112
106 changed files with 3061 additions and 761 deletions

.gitignore

@@ -28,3 +28,4 @@ spark-warehouse
 /**/.scalafmt.conf
 /.java-version
 /dhp-shade-package/dependency-reduced-pom.xml
+/**/job.properties


@@ -10,6 +10,11 @@ public class Constants {
 	public static final Map<String, String> accessRightsCoarMap = Maps.newHashMap();
 	public static final Map<String, String> coarCodeLabelMap = Maps.newHashMap();

+	public static final String RAID_NS_PREFIX = "raid________";
+	public static final String END_DATE = "endDate";
+	public static final String START_DATE = "startDate";
+
 	public static final String ROR_NS_PREFIX = "ror_________";
 	public static final String ROR_OPENAIRE_ID = "10|openaire____::993a7ae7a863813cf95028b50708e222";


@@ -1,3 +1,4 @@
 package eu.dnetlib.dhp.common.enrichment;

 public class Constants {


@@ -2,8 +2,7 @@
 package eu.dnetlib.dhp.oa.merge;

 import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
-import static org.apache.spark.sql.functions.col;
-import static org.apache.spark.sql.functions.when;
+import static org.apache.spark.sql.functions.*;

 import java.util.Map;
 import java.util.Optional;
@@ -135,7 +134,9 @@ public class GroupEntitiesSparkJob {
 					.applyCoarVocabularies(entity, vocs),
 				OAFENTITY_KRYO_ENC)
 			.groupByKey((MapFunction<OafEntity, String>) OafEntity::getId, Encoders.STRING())
-			.mapGroups((MapGroupsFunction<String, OafEntity, OafEntity>) MergeUtils::mergeById, OAFENTITY_KRYO_ENC)
+			.mapGroups(
+				(MapGroupsFunction<String, OafEntity, OafEntity>) (key, group) -> MergeUtils.mergeById(group, vocs),
+				OAFENTITY_KRYO_ENC)
			.map(
				(MapFunction<OafEntity, Tuple2<String, OafEntity>>) t -> new Tuple2<>(
					t.getClass().getName(), t),
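Note on the hunk above: once mergeById takes a VocabularyGroup, the method reference MergeUtils::mergeById no longer fits Spark's MapGroupsFunction, so a lambda that drops the grouping key and captures vocs is used instead. A minimal standalone sketch of that adapter pattern, not the dnet-hadoop code (MapGroupsFn and Vocab are illustrative stand-ins for MapGroupsFunction and VocabularyGroup):

import java.util.Arrays;
import java.util.Iterator;

public class MergeByIdAdapterSketch {

	// stand-in for Spark's MapGroupsFunction<K, V, R>
	interface MapGroupsFn<K, V, R> {
		R call(K key, Iterator<V> values);
	}

	// stand-in for VocabularyGroup
	static class Vocab {
	}

	// new-style merge: needs the vocabularies, ignores the grouping key
	static String mergeById(Iterator<String> group, Vocab vocs) {
		StringBuilder merged = new StringBuilder();
		group.forEachRemaining(merged::append);
		return merged.toString();
	}

	public static void main(String[] args) {
		Vocab vocs = new Vocab();

		// a method reference to mergeById no longer matches the two-argument
		// functional interface; the lambda captures vocs and drops the key
		MapGroupsFn<String, String, String> fn = (key, group) -> mergeById(group, vocs);

		System.out.println(fn.call("id-1", Arrays.asList("a", "b", "c").iterator())); // prints abc
	}
}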


@@ -2,7 +2,6 @@
 package eu.dnetlib.dhp.schema.oaf.utils;

 import static eu.dnetlib.dhp.schema.common.ModelConstants.*;
-import static eu.dnetlib.dhp.schema.common.ModelConstants.OPENAIRE_META_RESOURCE_TYPE;
 import static eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils.getProvenance;

 import java.net.MalformedURLException;
@@ -696,6 +695,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 				}
 			}

+			// set ORCID_PENDING to all orcid values that are not coming from ORCID provenance
 			for (Author a : r.getAuthor()) {
 				if (Objects.isNull(a.getPid())) {
 					a.setPid(Lists.newArrayList());
@@ -752,6 +752,40 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 							.collect(Collectors.toList()));
 				}
 			}

+			// Identify clashing ORCIDs: that is, the same ORCID associated to multiple authors in this result
+			Map<String, Integer> clashing_orcid = new HashMap<>();
+
+			for (Author a : r.getAuthor()) {
+				a
+					.getPid()
+					.stream()
+					.filter(
+						p -> StringUtils
+							.contains(StringUtils.lowerCase(p.getQualifier().getClassid()), ORCID_PENDING))
+					.map(StructuredProperty::getValue)
+					.distinct()
+					.forEach(orcid -> clashing_orcid.compute(orcid, (k, v) -> (v == null) ? 1 : v + 1));
+			}
+
+			Set<String> clashing = clashing_orcid
+				.entrySet()
+				.stream()
+				.filter(ee -> ee.getValue() > 1)
+				.map(Map.Entry::getKey)
+				.collect(Collectors.toSet());
+
+			// filter out clashing orcids
+			for (Author a : r.getAuthor()) {
+				a
+					.setPid(
+						a
+							.getPid()
+							.stream()
+							.filter(p -> !clashing.contains(p.getValue()))
+							.collect(Collectors.toList()));
+			}
 		}

 		if (value instanceof Publication) {
@@ -810,7 +844,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 		return author;
 	}

-	private static Optional<String> cleanDateField(Field<String> dateofacceptance) {
+	public static Optional<String> cleanDateField(Field<String> dateofacceptance) {
 		return Optional
 			.ofNullable(dateofacceptance)
 			.map(Field::getValue)
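The added block only drops an ORCID_PENDING value when the same ORCID is attached to more than one author of the same result. A minimal standalone sketch of that de-clashing logic, with plain strings standing in for the Author/StructuredProperty model (names and data shape are illustrative assumptions):

import java.util.*;
import java.util.stream.Collectors;

public class ClashingOrcidSketch {

	public static void main(String[] args) {
		Map<String, List<String>> authorOrcids = new LinkedHashMap<>();
		authorOrcids.put("Author A", Arrays.asList("0000-0001-0000-0001"));
		authorOrcids.put("Author B", Arrays.asList("0000-0001-0000-0001")); // same ORCID: clash
		authorOrcids.put("Author C", Arrays.asList("0000-0002-0000-0002"));

		// count, per ORCID, how many distinct authors of this result carry it
		Map<String, Integer> counts = new HashMap<>();
		authorOrcids.values()
			.forEach(pids -> pids.stream().distinct()
				.forEach(orcid -> counts.merge(orcid, 1, Integer::sum)));

		// ORCIDs attached to more than one author are considered unreliable
		Set<String> clashing = counts.entrySet().stream()
			.filter(e -> e.getValue() > 1)
			.map(Map.Entry::getKey)
			.collect(Collectors.toSet());

		// drop the clashing values from every author
		authorOrcids.replaceAll((author, pids) -> pids.stream()
			.filter(orcid -> !clashing.contains(orcid))
			.collect(Collectors.toList()));

		System.out.println(authorOrcids);
		// {Author A=[], Author B=[], Author C=[0000-0002-0000-0002]}
	}
}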


@@ -23,24 +23,30 @@ import org.apache.commons.lang3.tuple.Pair;
 import com.github.sisyphsu.dateparser.DateParserUtils;
 import com.google.common.base.Joiner;

+import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup;
 import eu.dnetlib.dhp.oa.merge.AuthorMerger;
 import eu.dnetlib.dhp.schema.common.AccessRightComparator;
+import eu.dnetlib.dhp.schema.common.EntityType;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
 import eu.dnetlib.dhp.schema.common.ModelSupport;
 import eu.dnetlib.dhp.schema.oaf.*;

 public class MergeUtils {

-	public static <T extends Oaf> T mergeById(String s, Iterator<T> oafEntityIterator) {
-		return mergeGroup(s, oafEntityIterator, true);
+	public static <T extends Oaf> T mergeById(Iterator<T> oafEntityIterator, VocabularyGroup vocs) {
+		return mergeGroup(oafEntityIterator, true, vocs);
 	}

-	public static <T extends Oaf> T mergeGroup(String s, Iterator<T> oafEntityIterator) {
-		return mergeGroup(s, oafEntityIterator, false);
+	public static <T extends Oaf> T mergeGroup(Iterator<T> oafEntityIterator) {
+		return mergeGroup(oafEntityIterator, false);
 	}

-	public static <T extends Oaf> T mergeGroup(String s, Iterator<T> oafEntityIterator,
-		boolean checkDelegateAuthority) {
+	public static <T extends Oaf> T mergeGroup(Iterator<T> oafEntityIterator, boolean checkDelegateAuthority) {
+		return mergeGroup(oafEntityIterator, checkDelegateAuthority, null);
+	}
+
+	public static <T extends Oaf> T mergeGroup(Iterator<T> oafEntityIterator,
+		boolean checkDelegateAuthority, VocabularyGroup vocs) {

 		ArrayList<T> sortedEntities = new ArrayList<>();
 		oafEntityIterator.forEachRemaining(sortedEntities::add);
@@ -49,13 +55,55 @@ public class MergeUtils {
 		Iterator<T> it = sortedEntities.iterator();
 		T merged = it.next();

+		if (!it.hasNext() && merged instanceof Result && vocs != null) {
+			return enforceResultType(vocs, (Result) merged);
+		} else {
 			while (it.hasNext()) {
 				merged = checkedMerge(merged, it.next(), checkDelegateAuthority);
 			}
+		}

 		return merged;
 	}

+	private static <T extends Oaf> T enforceResultType(VocabularyGroup vocs, Result mergedResult) {
+		if (Optional.ofNullable(mergedResult.getInstance()).map(List::isEmpty).orElse(true)) {
+			return (T) mergedResult;
+		} else {
+			final Instance i = mergedResult.getInstance().get(0);
+			if (!vocs.vocabularyExists(ModelConstants.DNET_RESULT_TYPOLOGIES)) {
+				return (T) mergedResult;
+			} else {
+				final String expectedResultType = Optional
+					.ofNullable(
+						vocs
+							.lookupTermBySynonym(
+								ModelConstants.DNET_RESULT_TYPOLOGIES, i.getInstancetype().getClassid()))
+					.orElse(ModelConstants.ORP_DEFAULT_RESULTTYPE)
+					.getClassid();
+
+				// there is a clash among the result types
+				if (!expectedResultType.equals(mergedResult.getResulttype().getClassid())) {
+					Result result = (Result) Optional
+						.ofNullable(ModelSupport.oafTypes.get(expectedResultType))
+						.map(r -> {
+							try {
+								return r.newInstance();
+							} catch (InstantiationException | IllegalAccessException e) {
+								throw new IllegalStateException(e);
+							}
+						})
+						.orElse(new OtherResearchProduct());
+					result.setId(mergedResult.getId());
+					return (T) mergeResultFields(result, mergedResult);
+				} else {
+					return (T) mergedResult;
+				}
+			}
+		}
+	}
+
 	public static <T extends Oaf> T checkedMerge(final T left, final T right, boolean checkDelegateAuthority) {
 		return (T) merge(left, right, checkDelegateAuthority);
 	}
@@ -106,7 +154,7 @@ public class MergeUtils {
 				return mergeSoftware((Software) left, (Software) right);
 			}

-			return mergeResultFields((Result) left, (Result) right);
+			return left;
 		} else if (sameClass(left, right, Datasource.class)) {
 			// TODO
 			final int trust = compareTrust(left, right);
@@ -654,16 +702,9 @@ public class MergeUtils {
 	}

 	private static Field<String> selectOldestDate(Field<String> d1, Field<String> d2) {
-		if (d1 == null || StringUtils.isBlank(d1.getValue())) {
+		if (!GraphCleaningFunctions.cleanDateField(d1).isPresent()) {
 			return d2;
-		} else if (d2 == null || StringUtils.isBlank(d2.getValue())) {
+		} else if (!GraphCleaningFunctions.cleanDateField(d2).isPresent()) {
-			return d1;
-		}
-
-		if (StringUtils.contains(d1.getValue(), "null")) {
-			return d2;
-		}
-
-		if (StringUtils.contains(d2.getValue(), "null")) {
 			return d1;
 		}
@@ -715,7 +756,9 @@ public class MergeUtils {
 	private static String spKeyExtractor(StructuredProperty sp) {
 		return Optional
 			.ofNullable(sp)
-			.map(s -> Joiner.on("||")
+			.map(
+				s -> Joiner
+					.on("||")
 				.useForNull("")
 				.join(qualifierKeyExtractor(s.getQualifier()), s.getValue()))
 			.orElse(null);
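The enforceResultType method added above maps the classid of the first instance, through the dnet:result_typologies vocabulary, to the result type it implies and rebuilds the record when that disagrees with the stored resulttype. A minimal sketch of that decision, with a tiny synonym map standing in for the real VocabularyGroup lookup (the instance-type codes below are only illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class EnforceResultTypeSketch {

	static final Map<String, String> INSTANCE_TYPE_TO_RESULT_TYPE = new HashMap<>();
	static {
		INSTANCE_TYPE_TO_RESULT_TYPE.put("0001", "publication"); // e.g. Article
		INSTANCE_TYPE_TO_RESULT_TYPE.put("0021", "dataset");     // e.g. Dataset
		INSTANCE_TYPE_TO_RESULT_TYPE.put("0029", "software");    // e.g. Software
	}

	static String enforce(String instanceTypeClassid, String currentResultType) {
		String expected = Optional
			.ofNullable(INSTANCE_TYPE_TO_RESULT_TYPE.get(instanceTypeClassid))
			.orElse("other"); // unknown instance types fall back to OtherResearchProduct
		// keep the record as-is unless the declared result type clashes with the
		// one implied by the instance type
		return expected.equals(currentResultType) ? currentResultType : expected;
	}

	public static void main(String[] args) {
		System.out.println(enforce("0021", "publication")); // dataset: the record would be rebuilt
		System.out.println(enforce("0001", "publication")); // publication: kept as is
	}
}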


@@ -25,10 +25,12 @@ abstract class SparkEnrichWithOrcidAuthors(propertyPath: String, args: Array[Str
     log.info(s"targetPath is '$targetPath'")
     val workingDir = parser.get("workingDir")
     log.info(s"targetPath is '$workingDir'")
-    val classid = Option(parser.get("matchingSource")).map(_=>ModelConstants.ORCID_PENDING).getOrElse(ModelConstants.ORCID)
+    val classid =
+      Option(parser.get("matchingSource")).map(_ => ModelConstants.ORCID_PENDING).getOrElse(ModelConstants.ORCID)
     log.info(s"classid is '$classid'")
-    val provenance = Option(parser.get("matchingSource")).map(_=>PROPAGATION_DATA_INFO_TYPE).getOrElse("ORCID_ENRICHMENT")
+    val provenance =
+      Option(parser.get("matchingSource")).map(_ => PROPAGATION_DATA_INFO_TYPE).getOrElse("ORCID_ENRICHMENT")
     log.info(s"targetPath is '$workingDir'")

     createTemporaryData(spark, graphPath, orcidPath, workingDir)
@@ -75,13 +77,15 @@ abstract class SparkEnrichWithOrcidAuthors(propertyPath: String, args: Array[Str
       .filter(e => ModelSupport.isResult(e._1))
       .foreach(e => {
         val resultType = e._1.name()
+        val c = classid
+        val p = provenance

         spark.read
           .parquet(s"$targetPath/${resultType}_unmatched")
           .where("size(graph_authors) > 0")
           .as[MatchData](Encoders.bean(classOf[MatchData]))
           .map(md => {
-            ORCIDAuthorEnricher.enrichOrcid(md.id, md.graph_authors, md.orcid_authors, classid, provenance)
+            ORCIDAuthorEnricher.enrichOrcid(md.id, md.graph_authors, md.orcid_authors, c, p)
           })(Encoders.bean(classOf[ORCIDAuthorEnricherResult]))
           .write
           .option("compression", "gzip")
@@ -90,4 +94,3 @@ abstract class SparkEnrichWithOrcidAuthors(propertyPath: String, args: Array[Str
       })
   }
 }


@@ -1,11 +1,12 @@
 package eu.dnetlib.dhp.utils

+import java.text.Normalizer
 import java.util.Locale
 import java.util.regex.Pattern
 import scala.util.control.Breaks.{break, breakable}

 object AuthorMatchers {
-  val SPLIT_REGEX = Pattern.compile("[\\s,\\.]+")
+  val SPLIT_REGEX = Pattern.compile("[\\s\\p{Punct}\\p{Pd}]+")

   val WORD_DIFF = 2
@@ -24,9 +25,16 @@ object AuthorMatchers {
     }
   }

+  def normalize(s: String): Array[String] = {
+    SPLIT_REGEX
+      .split(Normalizer.normalize(s, Normalizer.Form.NFC).toLowerCase(Locale.ROOT))
+      .filter(_.nonEmpty)
+      .sorted
+  }
+
   def matchOrderedTokenAndAbbreviations(a1: String, a2: String): Boolean = {
-    val p1: Array[String] = SPLIT_REGEX.split(a1.trim.toLowerCase(Locale.ROOT)).filter(_.nonEmpty).sorted
-    val p2: Array[String] = SPLIT_REGEX.split(a2.trim.toLowerCase(Locale.ROOT)).filter(_.nonEmpty).sorted
+    val p1: Array[String] = normalize(a1)
+    val p2: Array[String] = normalize(a2)

     if (p1.length < 2 || p2.length < 2) return false
     if (Math.abs(p1.length - p2.length) > WORD_DIFF) return false // use alternative comparison algo
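The new tokenizer splits on any whitespace, punctuation or dash and Unicode-normalizes the name before lowercasing. A Java rendering of that normalize step (the original is Scala), showing that differently punctuated forms of the same name collapse to the same sorted token array:

import java.text.Normalizer;
import java.util.Arrays;
import java.util.Locale;
import java.util.regex.Pattern;

public class AuthorTokenSketch {

	static final Pattern SPLIT_REGEX = Pattern.compile("[\\s\\p{Punct}\\p{Pd}]+");

	// NFC-normalize, lowercase, split on whitespace/punctuation/dashes, drop empties, sort
	static String[] normalize(String s) {
		return Arrays.stream(SPLIT_REGEX.split(
				Normalizer.normalize(s, Normalizer.Form.NFC).toLowerCase(Locale.ROOT)))
			.filter(t -> !t.isEmpty())
			.sorted()
			.toArray(String[]::new);
	}

	public static void main(String[] args) {
		System.out.println(Arrays.toString(normalize("Baglioni, Miriam")));    // [baglioni, miriam]
		System.out.println(Arrays.toString(normalize("Miriam Baglioni")));     // [baglioni, miriam]
		System.out.println(Arrays.toString(normalize("O'Connor-Smith, J."))); // [connor, j, o, smith]
	}
}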


@@ -7,6 +7,7 @@ import eu.dnetlib.dhp.schema.sx.OafUtils
 import java.util
 import scala.beans.BeanProperty
 import scala.collection.JavaConverters._
+import scala.collection.mutable.ArrayBuffer
 import scala.util.control.Breaks.{break, breakable}

 case class OrcidAuthor(
@@ -53,7 +54,8 @@ object ORCIDAuthorEnricher extends Serializable {
   // Author enriching strategy:
   // 1) create a copy of graph author list in unmatched_authors
   // 2) find best match in unmatched_authors, remove it from unmatched_authors and enrich it so
-  //    that the enrichment is reflected in graph_authors (they share author instances)
+  //    that the enrichment is reflected in graph_authors (they share author instances).
+  //    Do not match in case of ambiguity: two authors match and at least one of them has affiliation string
   // 3) repeat (2) till the end of the list and then with different matching algorithms that have decreasing
   //    trust in their output
   // At the end unmatched_authors will contain authors not matched with any of the matching algos
@@ -87,7 +89,19 @@ object ORCIDAuthorEnricher extends Serializable {
         (author, orcid) =>
           AuthorMatchers
             .matchOrderedTokenAndAbbreviations(author.getFullname, orcid.givenName + " " + orcid.familyName),
-        "orderedTokens",
+        "orderedTokens-1",
+        classid,
+        provenance,
+        skipAmbiguities = true
+      ) ++
+      // split author names in tokens, order the tokens, then check for matches of full tokens or abbreviations
+      extractAndEnrichMatches(
+        unmatched_authors,
+        orcid_authors,
+        (author, orcid) =>
+          AuthorMatchers
+            .matchOrderedTokenAndAbbreviations(author.getFullname, orcid.givenName + " " + orcid.familyName),
+        "orderedTokens-2",
         classid,
         provenance
       ) ++
@@ -116,28 +130,38 @@ object ORCIDAuthorEnricher extends Serializable {
   }

   private def extractAndEnrichMatches(
-    graph_authors: java.util.List[Author],
+    unmatched_authors: java.util.List[Author],
     orcid_authors: java.util.List[OrcidAuthor],
     matchingFunc: (Author, OrcidAuthor) => Boolean,
     matchName: String,
     classid: String,
-    provenance : String
-  ) = {
-    val matched = scala.collection.mutable.ArrayBuffer.empty[MatchedAuthors]
+    provenance: String,
+    skipAmbiguities: Boolean = false
+  ): ArrayBuffer[MatchedAuthors] = {
+    val matched = ArrayBuffer.empty[MatchedAuthors]

-    if (graph_authors != null && !graph_authors.isEmpty) {
-      val ait = graph_authors.iterator
+    if (unmatched_authors == null || unmatched_authors.isEmpty) {
+      return matched
+    }

-      while (ait.hasNext) {
-        val author = ait.next()
     val oit = orcid_authors.iterator

-        breakable {
     while (oit.hasNext) {
       val orcid = oit.next()

+      val candidates = unmatched_authors.asScala.foldLeft(ArrayBuffer[Author]())((res, author) => {
         if (matchingFunc(author, orcid)) {
-          ait.remove()
+          res += author
+        }
+        res
+      })
+
+      if (
+        candidates.size == 1 ||
+        (candidates.size > 1 && !skipAmbiguities && !candidates
+          .exists(a => a.getRawAffiliationString != null && !a.getRawAffiliationString.isEmpty))
+      ) {
+        val author = candidates(0)
+        unmatched_authors.remove(author)
         oit.remove()
         matched += MatchedAuthors(author, orcid, matchName)
@@ -146,20 +170,14 @@ object ORCIDAuthorEnricher extends Serializable {
         }

         val orcidPID = OafUtils.createSP(orcid.orcid, classid, classid)
-        //val orcidPID = OafUtils.createSP(orcid.orcid, ModelConstants.ORCID, ModelConstants.ORCID)
         orcidPID.setDataInfo(OafUtils.generateDataInfo())
         orcidPID.getDataInfo.setProvenanceaction(
-          //OafUtils.createQualifier("ORCID_ENRICHMENT", "ORCID_ENRICHMENT")
           OafUtils.createQualifier(provenance, provenance)
         )

         author.getPid.add(orcidPID)
-            }
-            break()
-          }
-        }
-      }
-    }
       }
     }

     matched
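The refactored extractAndEnrichMatches now collects, for each ORCID record, every still-unmatched author accepted by the matcher and enriches only when the candidate is unique, or when there are several candidates, skipAmbiguities is off and none of them carries a raw affiliation string. A minimal Java sketch of that acceptance rule (GraphAuthor and the surname matcher below are illustrative stand-ins, not the real model):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.BiPredicate;

public class AmbiguousMatchSketch {

	static class GraphAuthor {
		final String fullname;
		final String rawAffiliation;

		GraphAuthor(String fullname, String rawAffiliation) {
			this.fullname = fullname;
			this.rawAffiliation = rawAffiliation;
		}
	}

	static GraphAuthor pickMatch(List<GraphAuthor> unmatched, String orcidName,
		BiPredicate<GraphAuthor, String> matcher, boolean skipAmbiguities) {

		// collect every unmatched author the matching function accepts
		List<GraphAuthor> candidates = new ArrayList<>();
		for (GraphAuthor a : unmatched) {
			if (matcher.test(a, orcidName)) {
				candidates.add(a);
			}
		}

		// accept a unique candidate; accept an ambiguous one only when ambiguity
		// skipping is off and no candidate has an affiliation string to distinguish it
		boolean accept = candidates.size() == 1
			|| (candidates.size() > 1 && !skipAmbiguities
				&& candidates.stream().noneMatch(a -> a.rawAffiliation != null && !a.rawAffiliation.isEmpty()));

		return accept ? candidates.get(0) : null;
	}

	public static void main(String[] args) {
		BiPredicate<GraphAuthor, String> sameSurname =
			(a, name) -> a.fullname.split(" ")[1].equalsIgnoreCase(name.split(" ")[1]);

		List<GraphAuthor> authors = Arrays.asList(
			new GraphAuthor("Li Wang", "Univ. A"),
			new GraphAuthor("Wei Wang", null));

		// two candidates and one carries an affiliation string: ambiguous, so no enrichment
		System.out.println(pickMatch(authors, "Mei Wang", sameSurname, false)); // null
	}
}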


@@ -21,7 +21,7 @@ public class CodeMatch extends AbstractStringComparator {
 	public CodeMatch(Map<String, String> params) {
 		super(params);
 		this.params = params;
-		this.CODE_REGEX = Pattern.compile(params.getOrDefault("codeRegex", "[a-zA-Z]::\\d+"));
+		this.CODE_REGEX = Pattern.compile(params.getOrDefault("codeRegex", "[a-zA-Z]+::\\d+"));
 	}

 	public Set<String> getRegexList(String input) {
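A quick illustration of the default codeRegex change: the old pattern accepted a single letter before "::", the new one accepts one or more letters, so multi-letter codes are now extracted in full. The sample title is made up for illustration:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CodeRegexSketch {

	static void dump(String regex, String input) {
		Matcher m = Pattern.compile(regex).matcher(input);
		StringBuilder found = new StringBuilder();
		while (m.find()) {
			found.append(m.group()).append(' ');
		}
		System.out.println(regex + " -> " + found);
	}

	public static void main(String[] args) {
		String title = "survey dataset smsh::1234 release b::7";
		dump("[a-zA-Z]::\\d+", title);  // old default: matches only h::1234 and b::7 (one letter kept)
		dump("[a-zA-Z]+::\\d+", title); // new default: matches smsh::1234 and b::7
	}
}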


@@ -0,0 +1,67 @@
package eu.dnetlib.pace.tree;
import java.time.DateTimeException;
import java.time.LocalDate;
import java.time.Period;
import java.time.format.DateTimeFormatter;
import java.util.Locale;
import java.util.Map;
import com.wcohen.ss.AbstractStringDistance;
import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.support.AbstractStringComparator;
import eu.dnetlib.pace.tree.support.ComparatorClass;
@ComparatorClass("dateRange")
public class DateRange extends AbstractStringComparator {
int YEAR_RANGE;
public DateRange(Map<String, String> params) {
super(params, new com.wcohen.ss.JaroWinkler());
YEAR_RANGE = Integer.parseInt(params.getOrDefault("year_range", "3"));
}
public DateRange(final double weight) {
super(weight, new com.wcohen.ss.JaroWinkler());
}
protected DateRange(final double weight, final AbstractStringDistance ssalgo) {
super(weight, ssalgo);
}
public static boolean isNumeric(String str) {
return str.matches("\\d+"); // matches digits only
}
@Override
public double distance(final String a, final String b, final Config conf) {
if (a.isEmpty() || b.isEmpty()) {
return -1.0; // return -1 if a field is missing
}
try {
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ENGLISH);
LocalDate d1 = LocalDate.parse(a, formatter);
LocalDate d2 = LocalDate.parse(b, formatter);
Period period = Period.between(d1, d2);
return period.getYears() <= YEAR_RANGE ? 1.0 : 0.0;
} catch (DateTimeException e) {
return -1.0;
}
}
@Override
public double getWeight() {
return super.weight;
}
@Override
protected double normalize(final double d) {
return d;
}
}
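A standalone sketch of the comparison the new dateRange comparator performs, without the pace Config/AbstractStringComparator plumbing: dates within yearRange years of each other score 1.0, farther apart 0.0, and missing or unparsable values score -1.0 (the "field missing" convention used above). Unlike the class above, this sketch takes the absolute value so the order of the two dates does not matter:

import java.time.DateTimeException;
import java.time.LocalDate;
import java.time.Period;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

public class DateRangeSketch {

	static double score(String a, String b, int yearRange) {
		if (a.isEmpty() || b.isEmpty()) {
			return -1.0; // missing field
		}
		try {
			DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ENGLISH);
			Period period = Period.between(LocalDate.parse(a, fmt), LocalDate.parse(b, fmt));
			return Math.abs(period.getYears()) <= yearRange ? 1.0 : 0.0;
		} catch (DateTimeException e) {
			return -1.0; // unparsable date
		}
	}

	public static void main(String[] args) {
		System.out.println(score("2021-05-13", "2023-05-13", 3));  // 1.0
		System.out.println(score("2021-05-13", "2025-05-13", 3));  // 0.0
		System.out.println(score("invalid date", "2021-05-02", 3)); // -1.0
	}
}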


@@ -41,21 +41,38 @@ public class JsonListMatch extends AbstractListComparator {
 			return -1;
 		}

-		final Set<String> ca = sa.stream().map(this::toComparableString).collect(Collectors.toSet());
-		final Set<String> cb = sb.stream().map(this::toComparableString).collect(Collectors.toSet());
+		Set<String> ca = sa.stream().map(this::toComparableString).collect(Collectors.toSet());
+		Set<String> cb = sb.stream().map(this::toComparableString).collect(Collectors.toSet());

+		switch (MODE) {
+			case "count":
+				return Sets.intersection(ca, cb).size();
+			case "percentage":
 				int incommon = Sets.intersection(ca, cb).size();
 				int simDiff = Sets.symmetricDifference(ca, cb).size();
 				if (incommon + simDiff == 0) {
 					return 0.0;
 				}
-		if (MODE.equals("percentage"))
 				return (double) incommon / (incommon + simDiff);
-		else
-			return incommon;
+			case "type":
+				Set<String> typesA = ca.stream().map(s -> s.split("::")[0]).collect(Collectors.toSet());
+				Set<String> typesB = cb.stream().map(s -> s.split("::")[0]).collect(Collectors.toSet());
+
+				Set<String> types = Sets.intersection(typesA, typesB);
+
+				if (types.isEmpty()) // if no common type, it is impossible to compare
+					return -1;
+
+				ca = ca.stream().filter(s -> types.contains(s.split("::")[0])).collect(Collectors.toSet());
+				cb = cb.stream().filter(s -> types.contains(s.split("::")[0])).collect(Collectors.toSet());
+
+				return (double) Sets.intersection(ca, cb).size() / types.size();
+			default:
+				return -1;
+		}
 	}

 	// converts every json into a comparable string basing on parameters
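A worked example of the new "type" mode, using plain java.util sets instead of Guava and simplifying the comparable strings to classid::value: only pid types present on both sides are compared, and the score is the number of shared pids divided by the number of shared types. With the pid lists used in the updated ComparatorTest this yields 0.5:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

public class JsonListTypeModeSketch {

	public static void main(String[] args) {
		Set<String> ca = new HashSet<>(Arrays.asList("grid::grid_1", "ror::ror_1"));
		Set<String> cb = new HashSet<>(Arrays.asList("grid::grid_1", "ror::ror_2", "isni::isni_1"));

		Set<String> typesA = ca.stream().map(s -> s.split("::")[0]).collect(Collectors.toSet());
		Set<String> typesB = cb.stream().map(s -> s.split("::")[0]).collect(Collectors.toSet());

		Set<String> types = new HashSet<>(typesA);
		types.retainAll(typesB); // {grid, ror}; isni exists on one side only, so it is ignored

		Set<String> caFiltered = ca.stream().filter(s -> types.contains(s.split("::")[0])).collect(Collectors.toSet());
		Set<String> cbFiltered = cb.stream().filter(s -> types.contains(s.split("::")[0])).collect(Collectors.toSet());

		Set<String> common = new HashSet<>(caFiltered);
		common.retainAll(cbFiltered); // {grid::grid_1}

		System.out.println((double) common.size() / types.size()); // 1 / 2 = 0.5
	}
}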


@@ -65,6 +65,43 @@ public class ComparatorTest extends AbstractPaceTest {
 	}

+	@Test
+	public void datasetVersionCodeMatchTest() {
+		params.put("codeRegex", "(?=[\\w-]*[a-zA-Z])(?=[\\w-]*\\d)[\\w-]+");
+		CodeMatch codeMatch = new CodeMatch(params);
+
+		// names have different codes
+		assertEquals(
+			0.0,
+			codeMatch
+				.distance(
+					"physical oceanography at ctd station june 1998 ev02a",
+					"physical oceanography at ctd station june 1998 ir02", conf));
+
+		// names have same code
+		assertEquals(
+			1.0,
+			codeMatch
+				.distance(
+					"physical oceanography at ctd station june 1998 ev02a",
+					"physical oceanography at ctd station june 1998 ev02a", conf));
+
+		// code is not in both names
+		assertEquals(
+			-1,
+			codeMatch
+				.distance(
+					"physical oceanography at ctd station june 1998",
+					"physical oceanography at ctd station june 1998 ev02a", conf));
+
+		assertEquals(
+			1.0,
+			codeMatch
+				.distance(
+					"physical oceanography at ctd station june 1998", "physical oceanography at ctd station june 1998",
+					conf));
+	}
+
 	@Test
 	public void listContainsMatchTest() {
@@ -257,15 +294,15 @@ public class ComparatorTest extends AbstractPaceTest {
 		List<String> a = createFieldList(
 			Arrays
 				.asList(
-					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":null,\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:actionset\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"doi\",\"classname\":\"Digital Object Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"10.1111/pbi.12655\"}"),
+					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":null,\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:actionset\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"grid\",\"classname\":\"GRID Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"grid_1\"}",
+					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":null,\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:actionset\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"ror\",\"classname\":\"Research Organization Registry\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"ror_1\"}"),
 			"authors");
 		List<String> b = createFieldList(
 			Arrays
 				.asList(
-					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"pmc\",\"classname\":\"PubMed Central ID\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"PMC5399005\"}",
-					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"pmid\",\"classname\":\"PubMed ID\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"27775869\"}",
-					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"user:claim\",\"classname\":\"Linked by user\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"doi\",\"classname\":\"Digital Object Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"10.1111/pbi.12655\"}",
-					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"handle\",\"classname\":\"Handle\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"1854/LU-8523529\"}"),
+					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"grid\",\"classname\":\"GRID Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"grid_1\"}",
+					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"sysimport:crosswalk:repository\",\"classname\":\"Harvested\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"ror\",\"classname\":\"Research Organization Registry\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"ror_2\"}",
+					"{\"datainfo\":{\"deletedbyinference\":false,\"inferenceprovenance\":\"\",\"inferred\":false,\"invisible\":false,\"provenanceaction\":{\"classid\":\"user:claim\",\"classname\":\"Linked by user\",\"schemeid\":\"dnet:provenanceActions\",\"schemename\":\"dnet:provenanceActions\"},\"trust\":\"0.9\"},\"qualifier\":{\"classid\":\"isni\",\"classname\":\"ISNI Identifier\",\"schemeid\":\"dnet:pid_types\",\"schemename\":\"dnet:pid_types\"},\"value\":\"isni_1\"}"),
 			"authors");
 		double result = jsonListMatch.compare(a, b, conf);
@@ -277,6 +314,13 @@ public class ComparatorTest extends AbstractPaceTest {
 		result = jsonListMatch.compare(a, b, conf);
 		assertEquals(1.0, result);

+		params.put("mode", "type");
+		jsonListMatch = new JsonListMatch(params);
+		result = jsonListMatch.compare(a, b, conf);
+		assertEquals(0.5, result);
 	}

 	@Test
@@ -327,6 +371,24 @@ public class ComparatorTest extends AbstractPaceTest {
 	}

+	@Test
+	public void dateMatch() {
+
+		DateRange dateRange = new DateRange(params);
+
+		double result = dateRange.distance("2021-05-13", "2023-05-13", conf);
+		assertEquals(1.0, result);
+
+		result = dateRange.distance("2021-05-13", "2025-05-13", conf);
+		assertEquals(0.0, result);
+
+		result = dateRange.distance("", "2020-05-05", conf);
+		assertEquals(-1.0, result);
+
+		result = dateRange.distance("invalid date", "2021-05-02", conf);
+		assertEquals(-1.0, result);
+	}
+
 	@Test
 	public void titleVersionMatchTest() {


@@ -26,16 +26,16 @@
 	<dependencies>

-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-actionmanager</artifactId>
-			<version>${project.version}</version>
-		</dependency>
 <!--		<dependency>-->
 <!--			<groupId>eu.dnetlib.dhp</groupId>-->
-<!--			<artifactId>dhp-aggregation</artifactId>-->
+<!--			<artifactId>dhp-actionmanager</artifactId>-->
 <!--			<version>${project.version}</version>-->
 <!--		</dependency>-->
+		<dependency>
+			<groupId>eu.dnetlib.dhp</groupId>
+			<artifactId>dhp-aggregation</artifactId>
+			<version>${project.version}</version>
+		</dependency>
 <!--		<dependency>-->
 <!--			<groupId>eu.dnetlib.dhp</groupId>-->
 <!--			<artifactId>dhp-blacklist</artifactId>-->
@@ -56,61 +56,61 @@
 <!--			<artifactId>dhp-enrichment</artifactId>-->
 <!--			<version>${project.version}</version>-->
 <!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-graph-mapper</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-graph-mapper</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-graph-provision</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-graph-provision</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-impact-indicators</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-impact-indicators</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-stats-actionsets</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-stats-actionsets</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-stats-hist-snaps</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-stats-hist-snaps</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-stats-monitor-irish</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-stats-monitor-irish</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-stats-promote</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-stats-promote</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-stats-update</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-stats-update</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-swh</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-swh</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-usage-raw-data-update</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-usage-raw-data-update</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->
-		<dependency>
-			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>dhp-usage-stats-build</artifactId>
-			<version>${project.version}</version>
-		</dependency>
+<!--		<dependency>-->
+<!--			<groupId>eu.dnetlib.dhp</groupId>-->
+<!--			<artifactId>dhp-usage-stats-build</artifactId>-->
+<!--			<version>${project.version}</version>-->
+<!--		</dependency>-->

 	</dependencies>


@@ -135,22 +135,10 @@
 			<arg>--outputPath</arg><arg>${workingDir}/action_payload_by_type</arg>
 			<arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg>
 		</spark>
-		<ok to="ForkPromote"/>
+		<ok to="PromoteActionPayloadForDatasetTable"/>
 		<error to="Kill"/>
 	</action>

-	<fork name="ForkPromote">
-		<path start="PromoteActionPayloadForDatasetTable"/>
-		<path start="PromoteActionPayloadForDatasourceTable"/>
-		<path start="PromoteActionPayloadForOrganizationTable"/>
-		<path start="PromoteActionPayloadForOtherResearchProductTable"/>
-		<path start="PromoteActionPayloadForProjectTable"/>
-		<path start="PromoteActionPayloadForPublicationTable"/>
-		<path start="PromoteActionPayloadForRelationTable"/>
-		<path start="PromoteActionPayloadForSoftwareTable"/>
-		<path start="PromoteActionPayloadForPersonTable"/>
-	</fork>
-
 	<action name="PromoteActionPayloadForDatasetTable">
 		<sub-workflow>
 			<app-path>${wf:appPath()}/promote_action_payload_for_dataset_table</app-path>
@@ -162,7 +150,7 @@
 				</property>
 			</configuration>
 		</sub-workflow>
-		<ok to="JoinPromote"/>
+		<ok to="PromoteActionPayloadForDatasourceTable"/>
 		<error to="Kill"/>
 	</action>

@@ -177,7 +165,7 @@
 				</property>
 			</configuration>
 		</sub-workflow>
-		<ok to="JoinPromote"/>
+		<ok to="PromoteActionPayloadForOrganizationTable"/>
 		<error to="Kill"/>
 	</action>

@@ -192,7 +180,7 @@
 				</property>
 			</configuration>
 		</sub-workflow>
-		<ok to="JoinPromote"/>
+		<ok to="PromoteActionPayloadForOtherResearchProductTable"/>
 		<error to="Kill"/>
 	</action>

@@ -207,7 +195,7 @@
 				</property>
 			</configuration>
 		</sub-workflow>
-		<ok to="JoinPromote"/>
+		<ok to="PromoteActionPayloadForProjectTable"/>
 		<error to="Kill"/>
 	</action>

@@ -222,7 +210,7 @@
 				</property>
 			</configuration>
 		</sub-workflow>
-		<ok to="JoinPromote"/>
+		<ok to="PromoteActionPayloadForPublicationTable"/>
 		<error to="Kill"/>
 	</action>

@@ -237,7 +225,7 @@
 				</property>
 			</configuration>
 		</sub-workflow>
-		<ok to="JoinPromote"/>
+		<ok to="PromoteActionPayloadForRelationTable"/>
 		<error to="Kill"/>
 	</action>

@@ -252,7 +240,7 @@
 				</property>
 			</configuration>
 		</sub-workflow>
-		<ok to="JoinPromote"/>
+		<ok to="PromoteActionPayloadForSoftwareTable"/>
 		<error to="Kill"/>
 	</action>

@@ -267,26 +255,9 @@
 				</property>
 			</configuration>
 		</sub-workflow>
-		<ok to="JoinPromote"/>
+		<ok to="End"/>
 		<error to="Kill"/>
 	</action>

-	<action name="PromoteActionPayloadForPersonTable">
-		<sub-workflow>
-			<app-path>${wf:appPath()}/promote_action_payload_for_person_table</app-path>
-			<propagate-configuration/>
-			<configuration>
-				<property>
-					<name>inputActionPayloadRootPath</name>
-					<value>${workingDir}/action_payload_by_type</value>
-				</property>
-			</configuration>
-		</sub-workflow>
-		<ok to="JoinPromote"/>
-		<error to="Kill"/>
-	</action>
-
-	<join name="JoinPromote" to="End"/>
-
 	<end name="End"/>

 </workflow-app>


@@ -13,6 +13,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.common.HdfsSupport;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
+import eu.dnetlib.dhp.schema.oaf.Instance;
+import eu.dnetlib.dhp.schema.oaf.Qualifier;
 import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
 import eu.dnetlib.dhp.schema.oaf.Subject;
 import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;


@@ -104,22 +104,22 @@ public class PrepareAffiliationRelations implements Serializable {
 			.listKeyValues(OPENAIRE_DATASOURCE_ID, OPENAIRE_DATASOURCE_NAME);

 		JavaPairRDD<Text, Text> crossrefRelations = prepareAffiliationRelationsNewModel(
-			spark, crossrefInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + "::crossref");
+			spark, crossrefInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":crossref");

 		JavaPairRDD<Text, Text> pubmedRelations = prepareAffiliationRelations(
-			spark, pubmedInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + "::pubmed");
+			spark, pubmedInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":pubmed");

 		JavaPairRDD<Text, Text> openAPCRelations = prepareAffiliationRelationsNewModel(
-			spark, openapcInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + "::openapc");
+			spark, openapcInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":openapc");

 		JavaPairRDD<Text, Text> dataciteRelations = prepareAffiliationRelationsNewModel(
-			spark, dataciteInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + "::datacite");
+			spark, dataciteInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":datacite");

 		JavaPairRDD<Text, Text> webCrawlRelations = prepareAffiliationRelationsNewModel(
-			spark, webcrawlInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + "::rawaff");
+			spark, webcrawlInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":rawaff");

 		JavaPairRDD<Text, Text> publisherRelations = prepareAffiliationRelationFromPublisherNewModel(
-			spark, publisherlInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + "::webcrawl");
+			spark, publisherlInputPath, collectedfromOpenAIRE, BIP_INFERENCE_PROVENANCE + ":webcrawl");

 		crossrefRelations
 			.union(pubmedRelations)


@@ -15,6 +15,7 @@ import java.util.stream.Collectors;

 import org.apache.commons.cli.ParseException;
 import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -29,7 +30,6 @@ import org.apache.spark.sql.Dataset;
 import org.jetbrains.annotations.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.spark_project.jetty.util.StringUtil;

 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -193,8 +193,8 @@ public class ExtractPerson implements Serializable {
 	private static Relation getProjectRelation(String project, String orcid, String role) {

 		String source = PERSON_PREFIX + "::" + IdentifierFactory.md5(orcid);
-		String target = PROJECT_ID_PREFIX + project.substring(0, 14)
-			+ IdentifierFactory.md5(project.substring(15));
+		String target = PROJECT_ID_PREFIX + StringUtils.substringBefore(project, "::") + "::"
+			+ IdentifierFactory.md5(StringUtils.substringAfter(project, "::"));
 		List<KeyValue> properties = new ArrayList<>();

 		Relation relation = OafMapperUtils
@@ -206,7 +206,7 @@ public class ExtractPerson implements Serializable {
 				null);
 		relation.setValidated(true);

-		if (StringUtil.isNotBlank(role)) {
+		if (StringUtils.isNotBlank(role)) {
 			KeyValue kv = new KeyValue();
 			kv.setKey("role");
 			kv.setValue(role);
@@ -345,7 +345,20 @@ public class ExtractPerson implements Serializable {
 					OafMapperUtils
 						.structuredProperty(
 							op.getOrcid(), ModelConstants.ORCID, ModelConstants.ORCID_CLASSNAME,
-							ModelConstants.DNET_PID_TYPES, ModelConstants.DNET_PID_TYPES, null));
+							ModelConstants.DNET_PID_TYPES, ModelConstants.DNET_PID_TYPES,
+							OafMapperUtils
+								.dataInfo(
+									false,
+									null,
+									false,
+									false,
+									OafMapperUtils
+										.qualifier(
+											ModelConstants.SYSIMPORT_CROSSWALK_ENTITYREGISTRY,
+											ModelConstants.SYSIMPORT_CROSSWALK_ENTITYREGISTRY,
+											ModelConstants.DNET_PID_TYPES,
+											ModelConstants.DNET_PID_TYPES),
+									"0.91")));
 		person.setDateofcollection(op.getLastModifiedDate());
 		person.setOriginalId(Arrays.asList(op.getOrcid()));
 		person.setDataInfo(ORCIDDATAINFO);
@@ -439,13 +452,13 @@ public class ExtractPerson implements Serializable {
 				null);
 		relation.setValidated(true);

-		if (Optional.ofNullable(row.getStartDate()).isPresent() && StringUtil.isNotBlank(row.getStartDate())) {
+		if (Optional.ofNullable(row.getStartDate()).isPresent() && StringUtils.isNotBlank(row.getStartDate())) {
 			KeyValue kv = new KeyValue();
 			kv.setKey("startDate");
 			kv.setValue(row.getStartDate());
 			properties.add(kv);
 		}
-		if (Optional.ofNullable(row.getEndDate()).isPresent() && StringUtil.isNotBlank(row.getEndDate())) {
+		if (Optional.ofNullable(row.getEndDate()).isPresent() && StringUtils.isNotBlank(row.getEndDate())) {
 			KeyValue kv = new KeyValue();
 			kv.setKey("endDate");
 			kv.setValue(row.getEndDate());
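The project-target change above replaces fixed substring offsets with a split on the "::" separator, which also works when the namespace prefix is not exactly 12 characters long. A sketch of that id construction (the sample identifier and prefixes are made up; md5 mirrors what IdentifierFactory.md5 produces):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class ProjectIdSketch {

	static String md5(String s) throws Exception {
		MessageDigest md = MessageDigest.getInstance("MD5");
		StringBuilder sb = new StringBuilder();
		for (byte b : md.digest(s.getBytes(StandardCharsets.UTF_8))) {
			sb.append(String.format("%02x", b));
		}
		return sb.toString();
	}

	public static void main(String[] args) throws Exception {
		String projectIdPrefix = "40|";               // stand-in for PROJECT_ID_PREFIX
		String project = "corda_______::101000000";  // nsPrefix::originalId, made up

		String nsPrefix = project.substring(0, project.indexOf("::"));    // substringBefore(project, "::")
		String originalId = project.substring(project.indexOf("::") + 2); // substringAfter(project, "::")

		String target = projectIdPrefix + nsPrefix + "::" + md5(originalId);
		System.out.println(target); // 40|corda_______::<md5 of the original id>
	}
}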


@@ -0,0 +1,203 @@
package eu.dnetlib.dhp.actionmanager.raid;
import static eu.dnetlib.dhp.actionmanager.personentity.ExtractPerson.OPENAIRE_DATASOURCE_ID;
import static eu.dnetlib.dhp.actionmanager.personentity.ExtractPerson.OPENAIRE_DATASOURCE_NAME;
import static eu.dnetlib.dhp.common.Constants.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import static eu.dnetlib.dhp.schema.common.ModelConstants.*;
import static eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils.*;
import java.util.*;
import java.util.stream.Collectors;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.actionmanager.raid.model.RAiDEntity;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.*;
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
import eu.dnetlib.dhp.utils.DHPUtils;
import scala.Tuple2;
public class GenerateRAiDActionSetJob {
private static final Logger log = LoggerFactory
.getLogger(eu.dnetlib.dhp.actionmanager.raid.GenerateRAiDActionSetJob.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final List<KeyValue> RAID_COLLECTED_FROM = listKeyValues(
OPENAIRE_DATASOURCE_ID, OPENAIRE_DATASOURCE_NAME);
private static final Qualifier RAID_QUALIFIER = qualifier(
"0049", "Research Activity Identifier", DNET_PUBLICATION_RESOURCE, DNET_PUBLICATION_RESOURCE);
private static final Qualifier RAID_INFERENCE_QUALIFIER = qualifier(
"raid:openaireinference", "Inferred by OpenAIRE", DNET_PROVENANCE_ACTIONS, DNET_PROVENANCE_ACTIONS);
private static final DataInfo RAID_DATA_INFO = dataInfo(
false, OPENAIRE_DATASOURCE_NAME, true, false, RAID_INFERENCE_QUALIFIER, "0.92");
public static void main(final String[] args) throws Exception {
final String jsonConfiguration = IOUtils
.toString(
eu.dnetlib.dhp.actionmanager.raid.GenerateRAiDActionSetJob.class
.getResourceAsStream("/eu/dnetlib/dhp/actionmanager/raid/action_set_parameters.json"));
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
parser.parseArgument(args);
final Boolean isSparkSessionManaged = Optional
.ofNullable(parser.get("isSparkSessionManaged"))
.map(Boolean::valueOf)
.orElse(Boolean.TRUE);
log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
final String inputPath = parser.get("inputPath");
log.info("inputPath: {}", inputPath);
final String outputPath = parser.get("outputPath");
log.info("outputPath {}: ", outputPath);
final SparkConf conf = new SparkConf();
runWithSparkSession(conf, isSparkSessionManaged, spark -> {
removeOutputDir(spark, outputPath);
processRAiDEntities(spark, inputPath, outputPath);
});
}
private static void removeOutputDir(final SparkSession spark, final String path) {
HdfsSupport.remove(path, spark.sparkContext().hadoopConfiguration());
}
static void processRAiDEntities(final SparkSession spark,
final String inputPath,
final String outputPath) {
readInputPath(spark, inputPath)
.map(GenerateRAiDActionSetJob::prepareRAiD)
.flatMap(List::iterator)
.mapToPair(
aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
new Text(OBJECT_MAPPER.writeValueAsString(aa))))
.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class);
}
protected static List<AtomicAction<? extends Oaf>> prepareRAiD(final RAiDEntity r) {
final Date now = new Date();
final OtherResearchProduct orp = new OtherResearchProduct();
final List<AtomicAction<? extends Oaf>> res = new ArrayList<>();
String raidId = calculateOpenaireId(r.getRaid());
orp.setId(raidId);
orp.setCollectedfrom(RAID_COLLECTED_FROM);
orp.setDataInfo(RAID_DATA_INFO);
orp
.setTitle(
Collections
.singletonList(
structuredProperty(
r.getTitle(),
qualifier("main title", "main title", DNET_DATACITE_TITLE, DNET_DATACITE_TITLE),
RAID_DATA_INFO)));
orp.setDescription(listFields(RAID_DATA_INFO, r.getSummary()));
Instance instance = new Instance();
instance.setInstancetype(RAID_QUALIFIER);
orp.setInstance(Collections.singletonList(instance));
orp
.setSubject(
r
.getSubjects()
.stream()
.map(
s -> subject(
s,
qualifier(
DNET_SUBJECT_KEYWORD, DNET_SUBJECT_KEYWORD, DNET_SUBJECT_TYPOLOGIES,
DNET_SUBJECT_TYPOLOGIES),
RAID_DATA_INFO))
.collect(Collectors.toList()));
orp
.setRelevantdate(
Arrays
.asList(
structuredProperty(
r.getEndDate(), qualifier(END_DATE, END_DATE, DNET_DATACITE_DATE, DNET_DATACITE_DATE),
RAID_DATA_INFO),
structuredProperty(
r.getStartDate(),
qualifier(START_DATE, START_DATE, DNET_DATACITE_DATE, DNET_DATACITE_DATE),
RAID_DATA_INFO)));
orp.setLastupdatetimestamp(now.getTime());
orp.setDateofacceptance(field(r.getStartDate(), RAID_DATA_INFO));
res.add(new AtomicAction<>(OtherResearchProduct.class, orp));
for (String resultId : r.getIds()) {
Relation rel1 = OafMapperUtils
.getRelation(
raidId,
resultId,
ModelConstants.RESULT_RESULT,
PART,
HAS_PART,
orp);
Relation rel2 = OafMapperUtils
.getRelation(
resultId,
raidId,
ModelConstants.RESULT_RESULT,
PART,
IS_PART_OF,
orp);
res.add(new AtomicAction<>(Relation.class, rel1));
res.add(new AtomicAction<>(Relation.class, rel2));
}
return res;
}
public static String calculateOpenaireId(final String raid) {
return String.format("50|%s::%s", RAID_NS_PREFIX, DHPUtils.md5(raid));
}
public static List<Author> createAuthors(final List<String> author) {
return author.stream().map(s -> {
Author a = new Author();
a.setFullname(s);
return a;
}).collect(Collectors.toList());
}
private static JavaRDD<RAiDEntity> readInputPath(
final SparkSession spark,
final String path) {
return spark
.read()
.json(path)
.as(Encoders.bean(RAiDEntity.class))
.toJavaRDD();
}
}


@@ -0,0 +1,5 @@
package eu.dnetlib.dhp.actionmanager.raid.model;
public class GenerateRAiDActionSetJob {
}


@@ -0,0 +1,106 @@
package eu.dnetlib.dhp.actionmanager.raid.model;
import java.io.Serializable;
import java.util.List;
public class RAiDEntity implements Serializable {
String raid;
List<String> authors;
String startDate;
String endDate;
List<String> subjects;
List<String> titles;
List<String> ids;
String title;
String summary;
public RAiDEntity() {
}
public RAiDEntity(String raid, List<String> authors, String startDate, String endDate, List<String> subjects,
List<String> titles, List<String> ids, String title, String summary) {
this.raid = raid;
this.authors = authors;
this.startDate = startDate;
this.endDate = endDate;
this.subjects = subjects;
this.titles = titles;
this.ids = ids;
this.title = title;
this.summary = summary;
}
public String getRaid() {
return raid;
}
public void setRaid(String raid) {
this.raid = raid;
}
public List<String> getAuthors() {
return authors;
}
public void setAuthors(List<String> authors) {
this.authors = authors;
}
public String getStartDate() {
return startDate;
}
public void setStartDate(String startDate) {
this.startDate = startDate;
}
public String getEndDate() {
return endDate;
}
public void setEndDate(String endDate) {
this.endDate = endDate;
}
public List<String> getSubjects() {
return subjects;
}
public void setSubjects(List<String> subjects) {
this.subjects = subjects;
}
public List<String> getTitles() {
return titles;
}
public void setTitles(List<String> titles) {
this.titles = titles;
}
public List<String> getIds() {
return ids;
}
public void setIds(List<String> ids) {
this.ids = ids;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public String getSummary() {
return summary;
}
public void setSummary(String summary) {
this.summary = summary;
}
}


@@ -44,13 +44,7 @@ import eu.dnetlib.dhp.common.Constants;
 import eu.dnetlib.dhp.common.HdfsSupport;
 import eu.dnetlib.dhp.schema.action.AtomicAction;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
-import eu.dnetlib.dhp.schema.oaf.DataInfo;
-import eu.dnetlib.dhp.schema.oaf.Field;
-import eu.dnetlib.dhp.schema.oaf.KeyValue;
-import eu.dnetlib.dhp.schema.oaf.Oaf;
-import eu.dnetlib.dhp.schema.oaf.Organization;
-import eu.dnetlib.dhp.schema.oaf.Qualifier;
-import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
+import eu.dnetlib.dhp.schema.oaf.*;
 import eu.dnetlib.dhp.utils.DHPUtils;
 import scala.Tuple2;


@@ -28,6 +28,7 @@ import eu.dnetlib.dhp.collection.plugin.mongodb.MongoDbDumpCollectorPlugin;
 import eu.dnetlib.dhp.collection.plugin.oai.OaiCollectorPlugin;
 import eu.dnetlib.dhp.collection.plugin.osf.OsfPreprintsCollectorPlugin;
 import eu.dnetlib.dhp.collection.plugin.rest.RestCollectorPlugin;
+import eu.dnetlib.dhp.collection.plugin.zenodo.CollectZenodoDumpCollectorPlugin;
 import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
 import eu.dnetlib.dhp.common.collection.CollectorException;
 import eu.dnetlib.dhp.common.collection.HttpClientParams;
@@ -129,6 +130,8 @@ public class CollectorWorker extends ReportingJob {
 				return new Gtr2PublicationsCollectorPlugin(this.clientParams);
 			case osfPreprints:
 				return new OsfPreprintsCollectorPlugin(this.clientParams);
+			case zenodoDump:
+				return new CollectZenodoDumpCollectorPlugin();
 			case other:
 				final CollectorPlugin.NAME.OTHER_NAME plugin = Optional
 					.ofNullable(this.api.getParams().get("other_plugin_type"))

View File

@ -154,7 +154,6 @@ public class ORCIDExtractor extends Thread {
extractedItem++; extractedItem++;
if (extractedItem % 100000 == 0) { if (extractedItem % 100000 == 0) {
log.info("Thread {}: Extracted {} items", id, extractedItem); log.info("Thread {}: Extracted {} items", id, extractedItem);
break;
} }
} }
} }

View File

@ -11,7 +11,7 @@ public interface CollectorPlugin {
enum NAME { enum NAME {
oai, other, rest_json2xml, file, fileGzip, baseDump, gtr2Publications, osfPreprints; oai, other, rest_json2xml, file, fileGzip, baseDump, gtr2Publications, osfPreprints, zenodoDump, research_fi;
public enum OTHER_NAME { public enum OTHER_NAME {
mdstore_mongodb_dump, mdstore_mongodb mdstore_mongodb_dump, mdstore_mongodb

View File

@ -1,6 +1,9 @@
package eu.dnetlib.dhp.collection.plugin.gtr2; package eu.dnetlib.dhp.collection.plugin.gtr2;
import java.nio.charset.StandardCharsets;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.Iterator; import java.util.Iterator;
@ -16,9 +19,6 @@ import org.dom4j.Document;
import org.dom4j.DocumentException; import org.dom4j.DocumentException;
import org.dom4j.DocumentHelper; import org.dom4j.DocumentHelper;
import org.dom4j.Element; import org.dom4j.Element;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -33,7 +33,7 @@ public class Gtr2PublicationsIterator implements Iterator<String> {
private static final Logger log = LoggerFactory.getLogger(Gtr2PublicationsIterator.class); private static final Logger log = LoggerFactory.getLogger(Gtr2PublicationsIterator.class);
private final HttpConnector2 connector; private final HttpConnector2 connector;
private static final DateTimeFormatter simpleDateTimeFormatter = DateTimeFormat.forPattern("yyyy-MM-dd"); private static final DateTimeFormatter simpleDateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
private static final int MAX_ATTEMPTS = 10; private static final int MAX_ATTEMPTS = 10;
@ -41,7 +41,7 @@ public class Gtr2PublicationsIterator implements Iterator<String> {
private int currPage; private int currPage;
private int endPage; private int endPage;
private boolean incremental = false; private boolean incremental = false;
private DateTime fromDate; private LocalDate fromDate;
private final Map<String, String> cache = new HashMap<>(); private final Map<String, String> cache = new HashMap<>();
@ -188,28 +188,28 @@ public class Gtr2PublicationsIterator implements Iterator<String> {
private Document loadURL(final String cleanUrl, final int attempt) { private Document loadURL(final String cleanUrl, final int attempt) {
try { try {
log.debug(" * Downloading Url: " + cleanUrl); log.debug(" * Downloading Url: {}", cleanUrl);
final byte[] bytes = this.connector.getInputSource(cleanUrl).getBytes("UTF-8"); final byte[] bytes = this.connector.getInputSource(cleanUrl).getBytes(StandardCharsets.UTF_8);
return DocumentHelper.parseText(new String(bytes)); return DocumentHelper.parseText(new String(bytes));
} catch (final Throwable e) { } catch (final Throwable e) {
log.error("Error dowloading url: " + cleanUrl + ", attempt = " + attempt, e); log.error("Error dowloading url: {}, attempt = {}", cleanUrl, attempt, e);
if (attempt >= MAX_ATTEMPTS) { if (attempt >= MAX_ATTEMPTS) {
throw new RuntimeException("Error dowloading url: " + cleanUrl, e); throw new RuntimeException("Error downloading url: " + cleanUrl, e);
} }
try { try {
Thread.sleep(60000); // I wait for a minute Thread.sleep(60000); // I wait for a minute
} catch (final InterruptedException e1) { } catch (final InterruptedException e1) {
throw new RuntimeException("Error dowloading url: " + cleanUrl, e); throw new RuntimeException("Error downloading url: " + cleanUrl, e);
} }
return loadURL(cleanUrl, attempt + 1); return loadURL(cleanUrl, attempt + 1);
} }
} }
private DateTime parseDate(final String s) { private LocalDate parseDate(final String s) {
return DateTime.parse(s.contains("T") ? s.substring(0, s.indexOf("T")) : s, simpleDateTimeFormatter); return LocalDate.parse(s.contains("T") ? s.substring(0, s.indexOf("T")) : s, simpleDateTimeFormatter);
} }
private boolean isAfter(final String d, final DateTime fromDate) { private boolean isAfter(final String d, final LocalDate fromDate) {
return StringUtils.isNotBlank(d) && parseDate(d).isAfter(fromDate); return StringUtils.isNotBlank(d) && parseDate(d).isAfter(fromDate);
} }
} }
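The change above swaps joda-time for java.time; a self-contained sketch of the migrated date handling, with an illustrative class name and sample dates:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

public class Gtr2DateCheckSketch {
    private static final DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd");

    // mirrors the migrated parseDate: keep only the date part of an ISO timestamp
    static LocalDate parseDate(String s) {
        return LocalDate.parse(s.contains("T") ? s.substring(0, s.indexOf("T")) : s, FMT);
    }

    public static void main(String[] args) {
        LocalDate fromDate = parseDate("2024-01-01");
        System.out.println(parseDate("2024-03-05T10:15:30Z").isAfter(fromDate)); // true
        System.out.println(parseDate("2023-12-31").isAfter(fromDate));           // false
    }
}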

View File

@ -6,7 +6,7 @@ import java.util.Queue;
import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.PriorityBlockingQueue;
import org.apache.commons.io.IOUtils; import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.math.NumberUtils; import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.http.Header; import org.apache.http.Header;
@ -27,27 +27,27 @@ public class ResearchFiIterator implements Iterator<String> {
private final String baseUrl; private final String baseUrl;
private final String authToken; private final String authToken;
private int currPage; private String nextUrl;
private int nPages; private int nCalls = 0;
private final Queue<String> queue = new PriorityBlockingQueue<>(); private final Queue<String> queue = new PriorityBlockingQueue<>();
public ResearchFiIterator(final String baseUrl, final String authToken) { public ResearchFiIterator(final String baseUrl, final String authToken) {
this.baseUrl = baseUrl; this.baseUrl = baseUrl;
this.authToken = authToken; this.authToken = authToken;
this.currPage = 0; this.nextUrl = null;
this.nPages = 0;
} }
private void verifyStarted() { private void verifyStarted() {
if (this.currPage == 0) {
try { try {
nextCall(); if (this.nCalls == 0) {
this.nextUrl = invokeUrl(this.baseUrl);
}
} catch (final CollectorException e) { } catch (final CollectorException e) {
throw new IllegalStateException(e); throw new IllegalStateException(e);
} }
} }
}
@Override @Override
public boolean hasNext() { public boolean hasNext() {
@ -62,9 +62,9 @@ public class ResearchFiIterator implements Iterator<String> {
synchronized (this.queue) { synchronized (this.queue) {
verifyStarted(); verifyStarted();
final String res = this.queue.poll(); final String res = this.queue.poll();
while (this.queue.isEmpty() && (this.currPage < this.nPages)) { while (this.queue.isEmpty() && StringUtils.isNotBlank(this.nextUrl)) {
try { try {
nextCall(); this.nextUrl = invokeUrl(this.nextUrl);
} catch (final CollectorException e) { } catch (final CollectorException e) {
throw new IllegalStateException(e); throw new IllegalStateException(e);
} }
@ -73,18 +73,11 @@ public class ResearchFiIterator implements Iterator<String> {
} }
} }
private void nextCall() throws CollectorException { private String invokeUrl(final String url) throws CollectorException {
this.currPage += 1; this.nCalls += 1;
String next = null;
final String url;
if (!this.baseUrl.contains("?")) {
url = String.format("%s?PageNumber=%d&PageSize=%d", this.baseUrl, this.currPage, PAGE_SIZE);
} else if (!this.baseUrl.contains("PageSize=")) {
url = String.format("%s&PageNumber=%d&PageSize=%d", this.baseUrl, this.currPage, PAGE_SIZE);
} else {
url = String.format("%s&PageNumber=%d", this.baseUrl, this.currPage);
}
log.info("Calling url: " + url); log.info("Calling url: " + url);
try (final CloseableHttpClient client = HttpClients.createDefault()) { try (final CloseableHttpClient client = HttpClients.createDefault()) {
@ -94,11 +87,15 @@ public class ResearchFiIterator implements Iterator<String> {
try (final CloseableHttpResponse response = client.execute(req)) { try (final CloseableHttpResponse response = client.execute(req)) {
for (final Header header : response.getAllHeaders()) { for (final Header header : response.getAllHeaders()) {
log.debug("HEADER: " + header.getName() + " = " + header.getValue()); log.debug("HEADER: " + header.getName() + " = " + header.getValue());
if ("x-page-count".equals(header.getName())) { if ("link".equals(header.getName())) {
final int totalPages = NumberUtils.toInt(header.getValue()); final String s = StringUtils.substringBetween(header.getValue(), "<", ">");
if (this.nPages != totalPages) { final String token = StringUtils
this.nPages = NumberUtils.toInt(header.getValue()); .substringBefore(StringUtils.substringAfter(s, "NextPageToken="), "&");
log.info("Total pages: " + totalPages);
if (this.baseUrl.contains("?")) {
next = this.baseUrl + "&NextPageToken=" + token;
} else {
next = this.baseUrl + "?NextPageToken=" + token;
} }
} }
} }
@ -108,6 +105,9 @@ public class ResearchFiIterator implements Iterator<String> {
jsonArray.forEach(obj -> this.queue.add(JsonUtils.convertToXML(obj.toString()))); jsonArray.forEach(obj -> this.queue.add(JsonUtils.convertToXML(obj.toString())));
} }
return next;
} catch (final Throwable e) { } catch (final Throwable e) {
log.warn("Error calling url: " + url, e); log.warn("Error calling url: " + url, e);
throw new CollectorException("Error calling url: " + url, e); throw new CollectorException("Error calling url: " + url, e);
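The iterator now follows the NextPageToken advertised in the response's link header instead of counting pages; a standalone sketch of that token extraction, assuming commons-lang3 on the classpath and using an illustrative URL:

import org.apache.commons.lang3.StringUtils;

public class NextPageTokenSketch {
    // pulls the NextPageToken out of a header such as:
    // <https://example.org/api?NextPageToken=abc123&PageSize=100>; rel="next"
    static String nextUrl(String baseUrl, String linkHeaderValue) {
        String target = StringUtils.substringBetween(linkHeaderValue, "<", ">");
        String token = StringUtils.substringBefore(StringUtils.substringAfter(target, "NextPageToken="), "&");
        if (StringUtils.isBlank(token)) {
            return null; // no further page advertised
        }
        return baseUrl + (baseUrl.contains("?") ? "&" : "?") + "NextPageToken=" + token;
    }

    public static void main(String[] args) {
        String header = "<https://example.org/api?NextPageToken=abc123&PageSize=100>; rel=\"next\"";
        System.out.println(nextUrl("https://example.org/api", header)); // .../api?NextPageToken=abc123
    }
}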

View File

@ -0,0 +1,109 @@
package eu.dnetlib.dhp.collection.plugin.zenodo;
import static eu.dnetlib.dhp.utils.DHPUtils.getHadoopConfiguration;
import java.io.IOException;
import java.io.InputStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.collection.plugin.CollectorPlugin;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.CollectorException;
public class CollectZenodoDumpCollectorPlugin implements CollectorPlugin {
final private Logger log = LoggerFactory.getLogger(getClass());
private void downloadItem(final String name, final String itemURL, final String basePath,
final FileSystem fileSystem) {
try {
final Path hdfsWritePath = new Path(String.format("%s/%s", basePath, name));
final FSDataOutputStream fsDataOutputStream = fileSystem.create(hdfsWritePath, true);
final HttpGet request = new HttpGet(itemURL);
final int timeout = 60; // seconds
final RequestConfig config = RequestConfig
.custom()
.setConnectTimeout(timeout * 1000)
.setConnectionRequestTimeout(timeout * 1000)
.setSocketTimeout(timeout * 1000)
.build();
log.info("Downloading url {} into {}", itemURL, hdfsWritePath.getName());
try (CloseableHttpClient client = HttpClientBuilder.create().setDefaultRequestConfig(config).build();
CloseableHttpResponse response = client.execute(request)) {
int responseCode = response.getStatusLine().getStatusCode();
log.info("Response code is {}", responseCode);
if (responseCode >= 200 && responseCode < 400) {
IOUtils.copy(response.getEntity().getContent(), fsDataOutputStream);
fsDataOutputStream.flush();
fsDataOutputStream.hflush();
fsDataOutputStream.close();
}
} catch (Throwable eu) {
throw new RuntimeException(eu);
}
} catch (Throwable e) {
throw new RuntimeException(e);
}
}
public FileSystem initializeFileSystem(final String hdfsURI) {
try {
return FileSystem.get(getHadoopConfiguration(hdfsURI));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public Stream<String> collect(ApiDescriptor api, AggregatorReport report) throws CollectorException {
final String zenodoURL = api.getBaseUrl();
final String hdfsURI = api.getParams().get("hdfsURI");
final FileSystem fileSystem = initializeFileSystem(hdfsURI);
return doStream(fileSystem, zenodoURL, "/tmp");
}
public Stream<String> doStream(FileSystem fileSystem, String zenodoURL, String basePath) throws CollectorException {
try {
downloadItem("zenodoDump.tar.gz", zenodoURL, basePath, fileSystem);
CompressionCodecFactory factory = new CompressionCodecFactory(fileSystem.getConf());
Path sourcePath = new Path(basePath + "/zenodoDump.tar.gz");
CompressionCodec codec = factory.getCodec(sourcePath);
InputStream gzipInputStream = null;
try {
gzipInputStream = codec.createInputStream(fileSystem.open(sourcePath));
return iterateTar(gzipInputStream);
} catch (IOException e) {
throw new CollectorException(e);
}
} catch (Exception e) {
throw new CollectorException(e);
}
}
private Stream<String> iterateTar(InputStream gzipInputStream) throws Exception {
Iterable<String> iterable = () -> new ZenodoTarIterator(gzipInputStream);
return StreamSupport.stream(iterable.spliterator(), false);
}
}

View File

@ -0,0 +1,59 @@
package eu.dnetlib.dhp.collection.plugin.zenodo;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Iterator;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.io.IOUtils;
public class ZenodoTarIterator implements Iterator<String>, Closeable {
private final InputStream gzipInputStream;
private final StringBuilder currentItem = new StringBuilder();
private TarArchiveInputStream tais;
private boolean hasNext;
public ZenodoTarIterator(InputStream gzipInputStream) {
this.gzipInputStream = gzipInputStream;
tais = new TarArchiveInputStream(gzipInputStream);
hasNext = getNextItem();
}
private boolean getNextItem() {
try {
TarArchiveEntry entry;
while ((entry = tais.getNextTarEntry()) != null) {
if (entry.isFile()) {
currentItem.setLength(0);
currentItem.append(IOUtils.toString(new InputStreamReader(tais)));
return true;
}
}
return false;
} catch (Throwable e) {
throw new RuntimeException(e);
}
}
@Override
public boolean hasNext() {
return hasNext;
}
@Override
public String next() {
final String data = currentItem.toString();
hasNext = getNextItem();
return data;
}
@Override
public void close() throws IOException {
gzipInputStream.close();
}
}
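A minimal local usage sketch of the iterator, replacing the HDFS CompressionCodec used by the plugin with a plain commons-compress GZIP stream; the dump path is an assumption for illustration:

import java.io.FileInputStream;
import java.io.InputStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import eu.dnetlib.dhp.collection.plugin.zenodo.ZenodoTarIterator;

public class ZenodoTarIteratorLocalSketch {
    public static void main(String[] args) throws Exception {
        // locally, a plain GZIP stream stands in for the HDFS codec used by the plugin
        try (InputStream gz = new GzipCompressorInputStream(new FileInputStream("/tmp/zenodoDump.tar.gz"));
            ZenodoTarIterator it = new ZenodoTarIterator(gz)) {
            Iterable<String> iterable = () -> it;
            Stream<String> records = StreamSupport.stream(iterable.spliterator(), false);
            // print the first two records (truncated) just to show the streaming works
            records.limit(2).forEach(r -> System.out.println(r.substring(0, Math.min(200, r.length()))));
        }
    }
}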

View File

@ -0,0 +1,39 @@
package eu.dnetlib.dhp.sx.bio.pubmed;
/**
* The type Pubmed Affiliation.
*
* @author Sandro La Bruzzo
*/
public class PMAffiliation {
private String name;
private PMIdentifier identifier;
public PMAffiliation() {
}
public PMAffiliation(String name, PMIdentifier identifier) {
this.name = name;
this.identifier = identifier;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public PMIdentifier getIdentifier() {
return identifier;
}
public void setIdentifier(PMIdentifier identifier) {
this.identifier = identifier;
}
}

View File

@ -8,259 +8,115 @@ import java.util.List;
/** /**
* This class represent an instance of Pubmed Article extracted from the native XML * This class represent an instance of Pubmed Article extracted from the native XML
* *
* @author Sandro La Bruzzo
*/ */
public class PMArticle implements Serializable { public class PMArticle implements Serializable {
/**
* the Pubmed Identifier
*/
private String pmid; private String pmid;
private String pmcId; private String pmcId;
/**
* the DOI
*/
private String doi; private String doi;
/**
* the Pubmed Date extracted from <PubmedPubDate> Specifies a date significant to either the article's history or the citation's processing.
* All <History> dates will have a <Year>, <Month>, and <Day> elements. Some may have an <Hour>, <Minute>, and <Second> element(s).
*/
private String date; private String date;
/**
* This is an 'envelop' element that contains various elements describing the journal cited; i.e., ISSN, Volume, Issue, and PubDate and author name(s), however, it does not contain data itself.
*/
private PMJournal journal; private PMJournal journal;
/**
* The full journal title (taken from NLM cataloging data following NLM rules for how to compile a serial name) is exported in this element. Some characters that are not part of the NLM MEDLINE/PubMed Character Set reside in a relatively small number of full journal titles. The NLM journal title abbreviation is exported in the <MedlineTA> element.
*/
private String title; private String title;
/**
* English-language abstracts are taken directly from the published article.
* If the article does not have a published abstract, the National Library of Medicine does not create one,
* thus the record lacks the <Abstract> and <AbstractText> elements. However, in the absence of a formally
* labeled abstract in the published article, text from a substantive "summary", "summary and conclusions" or "conclusions and summary" may be used.
*/
private String description; private String description;
/**
* the language in which an article was published is recorded in <Language>.
* All entries are three letter abbreviations stored in lower case, such as eng, fre, ger, jpn, etc. When a single
* record contains more than one language value the XML export program extracts the languages in alphabetic order by the 3-letter language value.
* Some records provided by collaborating data producers may contain the value und to identify articles whose language is undetermined.
*/
private String language; private String language;
private List<PMSubject> subjects;
private List<PMSubject> publicationTypes = new ArrayList<>();
/**
* NLM controlled vocabulary, Medical Subject Headings (MeSH®), is used to characterize the content of the articles represented by MEDLINE citations.
*/
private final List<PMSubject> subjects = new ArrayList<>();
/**
* This element is used to identify the type of article indexed for MEDLINE;
* it characterizes the nature of the information or the manner in which it is conveyed as well as the type of
* research support received (e.g., Review, Letter, Retracted Publication, Clinical Conference, Research Support, N.I.H., Extramural).
*/
private final List<PMSubject> publicationTypes = new ArrayList<>();
/**
* Personal and collective (corporate) author names published with the article are found in <AuthorList>.
*/
private List<PMAuthor> authors = new ArrayList<>(); private List<PMAuthor> authors = new ArrayList<>();
private List<PMGrant> grants = new ArrayList<>();
/**
* <GrantID> contains the research grant or contract number (or both) that designates financial support by any agency of the United States Public Health Service
* or any institute of the National Institutes of Health. Additionally, beginning in late 2005, grant numbers are included for many other US and non-US funding agencies and organizations.
*/
private final List<PMGrant> grants = new ArrayList<>();
/**
* get the DOI
* @return a DOI
*/
public String getDoi() {
return doi;
}
/**
* Set the DOI
* @param doi a DOI
*/
public void setDoi(String doi) {
this.doi = doi;
}
/**
* get the Pubmed Identifier
* @return the PMID
*/
public String getPmid() { public String getPmid() {
return pmid; return pmid;
} }
/**
* set the Pubmed Identifier
* @param pmid the Pubmed Identifier
*/
public void setPmid(String pmid) { public void setPmid(String pmid) {
this.pmid = pmid; this.pmid = pmid;
} }
/**
* the Pubmed Date extracted from <PubmedPubDate> Specifies a date significant to either the article's history or the citation's processing.
* All <History> dates will have a <Year>, <Month>, and <Day> elements. Some may have an <Hour>, <Minute>, and <Second> element(s).
*
* @return the Pubmed Date
*/
public String getDate() {
return date;
}
/**
* Set the pubmed Date
* @param date
*/
public void setDate(String date) {
this.date = date;
}
/**
* The full journal title (taken from NLM cataloging data following NLM rules for how to compile a serial name) is exported in this element.
* Some characters that are not part of the NLM MEDLINE/PubMed Character Set reside in a relatively small number of full journal titles.
* The NLM journal title abbreviation is exported in the <MedlineTA> element.
*
* @return the pubmed Journal Extracted
*/
public PMJournal getJournal() {
return journal;
}
/**
* Set the mapped pubmed Journal
* @param journal
*/
public void setJournal(PMJournal journal) {
this.journal = journal;
}
/**
* <ArticleTitle> contains the entire title of the journal article. <ArticleTitle> is always in English;
* those titles originally published in a non-English language and translated for <ArticleTitle> are enclosed in square brackets.
* All titles end with a period unless another punctuation mark such as a question mark or bracket is present.
* Explanatory information about the title itself is enclosed in parentheses, e.g.: (author's transl).
* Corporate/collective authors may appear at the end of <ArticleTitle> for citations up to about the year 2000.
*
* @return the extracted pubmed Title
*/
public String getTitle() {
return title;
}
/**
* set the pubmed title
* @param title
*/
public void setTitle(String title) {
this.title = title;
}
/**
* English-language abstracts are taken directly from the published article.
* If the article does not have a published abstract, the National Library of Medicine does not create one,
* thus the record lacks the <Abstract> and <AbstractText> elements. However, in the absence of a formally
* labeled abstract in the published article, text from a substantive "summary", "summary and conclusions" or "conclusions and summary" may be used.
*
* @return the Mapped Pubmed Article Abstracts
*/
public String getDescription() {
return description;
}
/**
* Set the Mapped Pubmed Article Abstracts
* @param description
*/
public void setDescription(String description) {
this.description = description;
}
/**
* Personal and collective (corporate) author names published with the article are found in <AuthorList>.
*
* @return get the Mapped Authors lists
*/
public List<PMAuthor> getAuthors() {
return authors;
}
/**
* Set the Mapped Authors lists
* @param authors
*/
public void setAuthors(List<PMAuthor> authors) {
this.authors = authors;
}
/**
* This element is used to identify the type of article indexed for MEDLINE;
* it characterizes the nature of the information or the manner in which it is conveyed as well as the type of
* research support received (e.g., Review, Letter, Retracted Publication, Clinical Conference, Research Support, N.I.H., Extramural).
*
* @return the mapped Subjects
*/
public List<PMSubject> getSubjects() {
return subjects;
}
/**
*
* the language in which an article was published is recorded in <Language>.
* All entries are three letter abbreviations stored in lower case, such as eng, fre, ger, jpn, etc. When a single
* record contains more than one language value the XML export program extracts the languages in alphabetic order by the 3-letter language value.
* Some records provided by collaborating data producers may contain the value und to identify articles whose language is undetermined.
*
* @return The mapped Language
*/
public String getLanguage() {
return language;
}
/**
*
* Set The mapped Language
*
* @param language the mapped Language
*/
public void setLanguage(String language) {
this.language = language;
}
/**
* This element is used to identify the type of article indexed for MEDLINE;
* it characterizes the nature of the information or the manner in which it is conveyed as well as the type of
* research support received (e.g., Review, Letter, Retracted Publication, Clinical Conference, Research Support, N.I.H., Extramural).
*
* @return the mapped Publication Type
*/
public List<PMSubject> getPublicationTypes() {
return publicationTypes;
}
/**
* <GrantID> contains the research grant or contract number (or both) that designates financial support by any agency of the United States Public Health Service
* or any institute of the National Institutes of Health. Additionally, beginning in late 2005, grant numbers are included for many other US and non-US funding agencies and organizations.
* @return the mapped grants
*/
public List<PMGrant> getGrants() {
return grants;
}
public String getPmcId() { public String getPmcId() {
return pmcId; return pmcId;
} }
public PMArticle setPmcId(String pmcId) { public void setPmcId(String pmcId) {
this.pmcId = pmcId; this.pmcId = pmcId;
return this; }
public String getDoi() {
return doi;
}
public void setDoi(String doi) {
this.doi = doi;
}
public String getDate() {
return date;
}
public void setDate(String date) {
this.date = date;
}
public PMJournal getJournal() {
return journal;
}
public void setJournal(PMJournal journal) {
this.journal = journal;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getLanguage() {
return language;
}
public void setLanguage(String language) {
this.language = language;
}
public List<PMSubject> getSubjects() {
return subjects;
}
public void setSubjects(List<PMSubject> subjects) {
this.subjects = subjects;
}
public List<PMSubject> getPublicationTypes() {
return publicationTypes;
}
public void setPublicationTypes(List<PMSubject> publicationTypes) {
this.publicationTypes = publicationTypes;
}
public List<PMAuthor> getAuthors() {
return authors;
}
public void setAuthors(List<PMAuthor> authors) {
this.authors = authors;
}
public List<PMGrant> getGrants() {
return grants;
}
public void setGrants(List<PMGrant> grants) {
this.grants = grants;
} }
} }
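The refactoring turns PMArticle into a plain mutable bean (conventional getter/setter pairs, non-final collections), which is what Spark's bean encoder in the new Pubmed dump job further below relies on; a minimal sketch printing the inferred schema, assuming spark-sql and the dhp classes on the classpath:

import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import eu.dnetlib.dhp.sx.bio.pubmed.PMArticle;

public class PMArticleBeanEncoderSketch {
    public static void main(String[] args) {
        // the bean-style properties drive the inferred struct schema, one column per property
        Encoder<PMArticle> enc = Encoders.bean(PMArticle.class);
        System.out.println(enc.schema().treeString()); // authors, date, description, doi, grants, journal, ...
    }
}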

View File

@ -12,6 +12,8 @@ public class PMAuthor implements Serializable {
private String lastName; private String lastName;
private String foreName; private String foreName;
private PMIdentifier identifier;
private PMAffiliation affiliation;
/** /**
* Gets last name. * Gets last name.
@ -59,4 +61,40 @@ public class PMAuthor implements Serializable {
.format("%s, %s", this.foreName != null ? this.foreName : "", this.lastName != null ? this.lastName : ""); .format("%s, %s", this.foreName != null ? this.foreName : "", this.lastName != null ? this.lastName : "");
} }
/**
* Gets identifier.
*
* @return the identifier
*/
public PMIdentifier getIdentifier() {
return identifier;
}
/**
* Sets identifier.
*
* @param identifier the identifier
*/
public void setIdentifier(PMIdentifier identifier) {
this.identifier = identifier;
}
/**
* Gets affiliation.
*
* @return the affiliation
*/
public PMAffiliation getAffiliation() {
return affiliation;
}
/**
* Sets affiliation.
*
* @param affiliation the affiliation
*/
public void setAffiliation(PMAffiliation affiliation) {
this.affiliation = affiliation;
}
} }
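A small sketch of how the new identifier and affiliation fields can be attached to an author; values are illustrative and the PMIdentifier/PMAffiliation constructors shown elsewhere in this change are assumed:

import eu.dnetlib.dhp.sx.bio.pubmed.PMAffiliation;
import eu.dnetlib.dhp.sx.bio.pubmed.PMAuthor;
import eu.dnetlib.dhp.sx.bio.pubmed.PMIdentifier;

public class PMAuthorEnrichmentSketch {
    public static void main(String[] args) {
        PMAuthor author = new PMAuthor();
        author.setLastName("Doe");
        author.setForeName("Jane");
        // new in this change: an optional ORCID-style identifier and a raw affiliation
        author.setIdentifier(new PMIdentifier("0000000163025705", "ORCID"));
        author.setAffiliation(new PMAffiliation("University of Example", null));

        System.out.println(author.getFullName());            // "Jane, Doe"
        System.out.println(author.getIdentifier().getPid());  // 0000-0001-6302-5705 (cleaned by PMIdentifier)
        System.out.println(author.getAffiliation().getName());
    }
}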

View File

@ -0,0 +1,53 @@
package eu.dnetlib.dhp.sx.bio.pubmed;
public class PMIdentifier {
private String pid;
private String type;
public PMIdentifier(String pid, String type) {
this.pid = cleanPid(pid);
this.type = type;
}
public PMIdentifier() {
}
private String cleanPid(String pid) {
if (pid == null) {
return null;
}
// clean ORCID ID in the form 0000000163025705 to 0000-0001-6302-5705
if (pid.matches("[0-9]{15}[0-9X]")) {
return pid.replaceAll("(.{4})(.{4})(.{4})(.{4})", "$1-$2-$3-$4");
}
// clean ORCID in the form http://orcid.org/0000-0001-8567-3543 to 0000-0001-8567-3543
if (pid.matches("http://orcid.org/[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}")) {
return pid.replaceAll("http://orcid.org/", "");
}
return pid;
}
public String getPid() {
return pid;
}
public PMIdentifier setPid(String pid) {
this.pid = cleanPid(pid);
return this;
}
public String getType() {
return type;
}
public PMIdentifier setType(String type) {
this.type = type;
return this;
}
}
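The cleanPid normalisation above can be exercised in isolation; a minimal sketch reproducing the same two regex rules, with illustrative inputs:

public class OrcidCleaningSketch {
    // same normalisation applied by PMIdentifier.cleanPid above
    static String clean(String pid) {
        if (pid == null) {
            return null;
        }
        if (pid.matches("[0-9]{15}[0-9X]")) {
            return pid.replaceAll("(.{4})(.{4})(.{4})(.{4})", "$1-$2-$3-$4");
        }
        if (pid.matches("http://orcid.org/[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}")) {
            return pid.replaceAll("http://orcid.org/", "");
        }
        return pid;
    }

    public static void main(String[] args) {
        System.out.println(clean("0000000163025705"));                     // 0000-0001-6302-5705
        System.out.println(clean("http://orcid.org/0000-0001-8567-3543")); // 0000-0001-8567-3543
        System.out.println(clean("0000-0002-1825-0097"));                  // already clean, returned as-is
    }
}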

View File

@ -0,0 +1,14 @@
[
{
"paramName": "i",
"paramLongName": "inputPath",
"paramDescription": "the path of the input json",
"paramRequired": true
},
{
"paramName": "o",
"paramLongName": "outputPath",
"paramDescription": "the path of the new ActionSet",
"paramRequired": true
}
]

View File

@ -0,0 +1,58 @@
<configuration>
<property>
<name>jobTracker</name>
<value>yarnRM</value>
</property>
<property>
<name>nameNode</name>
<value>hdfs://nameservice1</value>
</property>
<property>
<name>oozie.use.system.libpath</name>
<value>true</value>
</property>
<property>
<name>oozie.action.sharelib.for.spark</name>
<value>spark2</value>
</property>
<property>
<name>hive_metastore_uris</name>
<value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
</property>
<property>
<name>spark2YarnHistoryServerAddress</name>
<value>http://iis-cdh5-test-gw.ocean.icm.edu.pl:18089</value>
</property>
<property>
<name>spark2ExtraListeners</name>
<value>com.cloudera.spark.lineage.NavigatorAppListener</value>
</property>
<property>
<name>spark2SqlQueryExecutionListeners</name>
<value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
</property>
<property>
<name>oozie.launcher.mapreduce.user.classpath.first</name>
<value>true</value>
</property>
<property>
<name>sparkExecutorNumber</name>
<value>4</value>
</property>
<property>
<name>spark2EventLogDir</name>
<value>/user/spark/spark2ApplicationHistory</value>
</property>
<property>
<name>sparkDriverMemory</name>
<value>15G</value>
</property>
<property>
<name>sparkExecutorMemory</name>
<value>6G</value>
</property>
<property>
<name>sparkExecutorCores</name>
<value>1</value>
</property>
</configuration>

View File

@ -0,0 +1,53 @@
<workflow-app name="Update_RAiD_action_set" xmlns="uri:oozie:workflow:0.5">
<parameters>
<property>
<name>raidJsonInputPath</name>
<description>the path of the json</description>
</property>
<property>
<name>raidActionSetPath</name>
<description>path where to store the action set</description>
</property>
</parameters>
<start to="deleteoutputpath"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="deleteoutputpath">
<fs>
<delete path='${raidActionSetPath}'/>
<mkdir path='${raidActionSetPath}'/>
</fs>
<ok to="processRAiDFile"/>
<error to="Kill"/>
</action>
<action name="processRAiDFile">
<spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master>
<mode>cluster</mode>
<name>ProcessRAiDFile</name>
<class>eu.dnetlib.dhp.actionmanager.raid.GenerateRAiDActionSetJob</class>
<jar>dhp-aggregation-${projectVersion}.jar</jar>
<spark-opts>
--executor-cores=${sparkExecutorCores}
--executor-memory=${sparkExecutorMemory}
--driver-memory=${sparkDriverMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=3840
</spark-opts>
<arg>--inputPath</arg><arg>${raidJsonInputPath}</arg>
<arg>--outputPath</arg><arg>${raidActionSetPath}</arg>
</spark>
<ok to="End"/>
<error to="Kill"/>
</action>
<end name="End"/>
</workflow-app>

View File

@ -1,8 +1,7 @@
[ [
{"paramName":"mt", "paramLongName":"master", "paramDescription": "should be local or yarn", "paramRequired": true}, {"paramName":"mt", "paramLongName":"master", "paramDescription": "should be local or yarn", "paramRequired": true},
{"paramName":"i", "paramLongName":"isLookupUrl", "paramDescription": "isLookupUrl", "paramRequired": true}, {"paramName":"i", "paramLongName":"isLookupUrl", "paramDescription": "isLookupUrl", "paramRequired": true},
{"paramName":"w", "paramLongName":"workingPath", "paramDescription": "the path of the sequencial file to read", "paramRequired": true}, {"paramName":"s", "paramLongName":"sourcePath", "paramDescription": "the baseline path", "paramRequired": true},
{"paramName":"mo", "paramLongName":"mdstoreOutputVersion", "paramDescription": "the oaf path ", "paramRequired": true}, {"paramName":"mo", "paramLongName":"mdstoreOutputVersion", "paramDescription": "the mdstore path to save", "paramRequired": true}
{"paramName":"s", "paramLongName":"skipUpdate", "paramDescription": "skip update ", "paramRequired": false},
{"paramName":"h", "paramLongName":"hdfsServerUri", "paramDescription": "the working path ", "paramRequired": true}
] ]

View File

@ -1,4 +1,4 @@
<workflow-app name="Download_Transform_Pubmed_Workflow" xmlns="uri:oozie:workflow:0.5"> <workflow-app name="Transform_Pubmed_Workflow" xmlns="uri:oozie:workflow:0.5">
<parameters> <parameters>
<property> <property>
<name>baselineWorkingPath</name> <name>baselineWorkingPath</name>
@ -16,11 +16,6 @@
<name>mdStoreManagerURI</name> <name>mdStoreManagerURI</name>
<description>the path of the cleaned mdstore</description> <description>the path of the cleaned mdstore</description>
</property> </property>
<property>
<name>skipUpdate</name>
<value>false</value>
<description>The request block size</description>
</property>
</parameters> </parameters>
<start to="StartTransaction"/> <start to="StartTransaction"/>
@ -44,16 +39,16 @@
<arg>--mdStoreManagerURI</arg><arg>${mdStoreManagerURI}</arg> <arg>--mdStoreManagerURI</arg><arg>${mdStoreManagerURI}</arg>
<capture-output/> <capture-output/>
</java> </java>
<ok to="ConvertDataset"/> <ok to="TransformPubMed"/>
<error to="RollBack"/> <error to="RollBack"/>
</action> </action>
<action name="ConvertDataset"> <action name="TransformPubMed">
<spark xmlns="uri:oozie:spark-action:0.2"> <spark xmlns="uri:oozie:spark-action:0.2">
<master>yarn</master> <master>yarn</master>
<mode>cluster</mode> <mode>cluster</mode>
<name>Convert Baseline to OAF Dataset</name> <name>Convert Baseline Pubmed to OAF Dataset</name>
<class>eu.dnetlib.dhp.sx.bio.ebi.SparkCreateBaselineDataFrame</class> <class>eu.dnetlib.dhp.sx.bio.ebi.SparkCreatePubmedDump</class>
<jar>dhp-aggregation-${projectVersion}.jar</jar> <jar>dhp-aggregation-${projectVersion}.jar</jar>
<spark-opts> <spark-opts>
--executor-memory=${sparkExecutorMemory} --executor-memory=${sparkExecutorMemory}
@ -65,12 +60,10 @@
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
</spark-opts> </spark-opts>
<arg>--workingPath</arg><arg>${baselineWorkingPath}</arg> <arg>--sourcePath</arg><arg>${baselineWorkingPath}</arg>
<arg>--mdstoreOutputVersion</arg><arg>${wf:actionData('StartTransaction')['mdStoreVersion']}</arg> <arg>--mdstoreOutputVersion</arg><arg>${wf:actionData('StartTransaction')['mdStoreVersion']}</arg>
<arg>--master</arg><arg>yarn</arg> <arg>--master</arg><arg>yarn</arg>
<arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg> <arg>--isLookupUrl</arg><arg>${isLookupUrl}</arg>
<arg>--hdfsServerUri</arg><arg>${nameNode}</arg>
<arg>--skipUpdate</arg><arg>${skipUpdate}</arg>
</spark> </spark>
<ok to="CommitVersion"/> <ok to="CommitVersion"/>
<error to="RollBack"/> <error to="RollBack"/>

View File

@ -37,7 +37,7 @@ case class mappingAuthor(
family: Option[String], family: Option[String],
sequence: Option[String], sequence: Option[String],
ORCID: Option[String], ORCID: Option[String],
affiliation: Option[mappingAffiliation] affiliation: Option[List[mappingAffiliation]]
) {} ) {}
case class funderInfo(id: String, uri: String, name: String, synonym: List[String]) {} case class funderInfo(id: String, uri: String, name: String, synonym: List[String]) {}
@ -457,15 +457,14 @@ case object Crossref2Oaf {
} }
//Mapping Author //Mapping Author
val authorList: List[mappingAuthor] = val authorList: List[mappingAuthor] = (json \ "author").extract[List[mappingAuthor]].filter(a => a.family.isDefined)
(json \ "author").extract[List[mappingAuthor]].filter(a => a.family.isDefined)
val sorted_list = authorList.sortWith((a: mappingAuthor, b: mappingAuthor) => val sorted_list = authorList.sortWith((a: mappingAuthor, b: mappingAuthor) =>
a.sequence.isDefined && a.sequence.get.equalsIgnoreCase("first") a.sequence.isDefined && a.sequence.get.equalsIgnoreCase("first")
) )
result.setAuthor(sorted_list.zipWithIndex.map { case (a, index) => result.setAuthor(sorted_list.zipWithIndex.map { case (a, index) =>
generateAuhtor(a.given.orNull, a.family.get, a.ORCID.orNull, index) generateAuthor(a.given.orNull, a.family.get, a.ORCID.orNull, index, a.affiliation)
}.asJava) }.asJava)
// Mapping instance // Mapping instance
@ -504,19 +503,6 @@ case object Crossref2Oaf {
) )
} }
val is_review = json \ "relation" \ "is-review-of" \ "id"
if (is_review != JNothing) {
instance.setInstancetype(
OafMapperUtils.qualifier(
"0015",
"peerReviewed",
ModelConstants.DNET_REVIEW_LEVELS,
ModelConstants.DNET_REVIEW_LEVELS
)
)
}
if (doi.startsWith("10.3410") || doi.startsWith("10.12703")) if (doi.startsWith("10.3410") || doi.startsWith("10.12703"))
instance.setHostedby( instance.setHostedby(
OafMapperUtils.keyValue(OafMapperUtils.createOpenaireId(10, "openaire____::H1Connect", true), "H1Connect") OafMapperUtils.keyValue(OafMapperUtils.createOpenaireId(10, "openaire____::H1Connect", true), "H1Connect")
@ -574,12 +560,23 @@ case object Crossref2Oaf {
s"50|doiboost____|$id" s"50|doiboost____|$id"
} }
def generateAuhtor(given: String, family: String, orcid: String, index: Int): Author = { private def generateAuthor(
given: String,
family: String,
orcid: String,
index: Int,
affiliation: Option[List[mappingAffiliation]]
): Author = {
val a = new Author val a = new Author
a.setName(given) a.setName(given)
a.setSurname(family) a.setSurname(family)
a.setFullname(s"$given $family") a.setFullname(s"$given $family")
a.setRank(index + 1) a.setRank(index + 1)
// Adding Raw affiliation if it's defined
if (affiliation.isDefined) {
a.setRawAffiliationString(affiliation.get.map(a => a.name).asJava)
}
if (StringUtils.isNotBlank(orcid)) if (StringUtils.isNotBlank(orcid))
a.setPid( a.setPid(
List( List(
@ -673,11 +670,11 @@ case object Crossref2Oaf {
val doi = input.getString(0) val doi = input.getString(0)
val rorId = input.getString(1) val rorId = input.getString(1)
val pubId = s"50|${PidType.doi.toString.padTo(12, "_")}::${DoiCleaningRule.clean(doi)}" val pubId = IdentifierFactory.idFromPid("50", "doi", DoiCleaningRule.clean(doi), true)
val affId = GenerateRorActionSetJob.calculateOpenaireId(rorId) val affId = GenerateRorActionSetJob.calculateOpenaireId(rorId)
val r: Relation = new Relation val r: Relation = new Relation
DoiCleaningRule.clean(doi)
r.setSource(pubId) r.setSource(pubId)
r.setTarget(affId) r.setTarget(affId)
r.setRelType(ModelConstants.RESULT_ORGANIZATION) r.setRelType(ModelConstants.RESULT_ORGANIZATION)
@ -705,7 +702,15 @@ case object Crossref2Oaf {
val objectType = (json \ "type").extractOrElse[String](null) val objectType = (json \ "type").extractOrElse[String](null)
if (objectType == null) if (objectType == null)
return resultList return resultList
val typology = getTypeQualifier(objectType, vocabularies)
// If the item has a relations is-review-of, then we force it to a peer-review
val is_review = json \ "relation" \ "is-review-of" \ "id"
var force_to_review = false
if (is_review != JNothing) {
force_to_review = true
}
val typology = getTypeQualifier(if (force_to_review) "peer-review" else objectType, vocabularies)
if (typology == null) if (typology == null)
return List() return List()
@ -757,33 +762,6 @@ case object Crossref2Oaf {
else else
resultList resultList
} }
// if (uw != null) {
// result.getCollectedfrom.add(createUnpayWallCollectedFrom())
// val i: Instance = new Instance()
// i.setCollectedfrom(createUnpayWallCollectedFrom())
// if (uw.best_oa_location != null) {
//
// i.setUrl(List(uw.best_oa_location.url).asJava)
// if (uw.best_oa_location.license.isDefined) {
// i.setLicense(field[String](uw.best_oa_location.license.get, null))
// }
//
// val colour = get_unpaywall_color(uw.oa_status)
// if (colour.isDefined) {
// val a = new AccessRight
// a.setClassid(ModelConstants.ACCESS_RIGHT_OPEN)
// a.setClassname(ModelConstants.ACCESS_RIGHT_OPEN)
// a.setSchemeid(ModelConstants.DNET_ACCESS_MODES)
// a.setSchemename(ModelConstants.DNET_ACCESS_MODES)
// a.setOpenAccessRoute(colour.get)
// i.setAccessright(a)
// }
// i.setPid(result.getPid)
// result.getInstance().add(i)
// }
// }
} }
private def createCiteRelation(source: Result, targetPid: String, targetPidType: String): List[Relation] = { private def createCiteRelation(source: Result, targetPid: String, targetPidType: String): List[Relation] = {
@ -978,7 +956,26 @@ case object Crossref2Oaf {
case "10.13039/501100010790" => case "10.13039/501100010790" =>
generateSimpleRelationFromAward(funder, "erasmusplus_", a => a) generateSimpleRelationFromAward(funder, "erasmusplus_", a => a)
case _ => logger.debug("no match for " + funder.DOI.get) case _ => logger.debug("no match for " + funder.DOI.get)
//Add for Danish funders
//Independent Research Fund Denmark (IRFD)
case "10.13039/501100004836" =>
generateSimpleRelationFromAward(funder, "irfd________", a => a)
val targetId = getProjectId("irfd________", "1e5e62235d094afd01cd56e65112fc63")
queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)
//Carlsberg Foundation (CF)
case "10.13039/501100002808" =>
generateSimpleRelationFromAward(funder, "cf__________", a => a)
val targetId = getProjectId("cf__________", "1e5e62235d094afd01cd56e65112fc63")
queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)
//Novo Nordisk Foundation (NNF)
case "10.13039/501100009708" =>
generateSimpleRelationFromAward(funder, "nnf___________", a => a)
val targetId = getProjectId("nnf_________", "1e5e62235d094afd01cd56e65112fc63")
queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)
case _ => logger.debug("no match for " + funder.DOI.get)
} }
} else { } else {
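The new generateAuthor keeps only the affiliation names, attaching them as rawAffiliationString on the OAF Author; a minimal Java sketch of that mapping, with illustrative names and assuming the dhp-schemas Author bean:

import java.util.Arrays;
import java.util.List;
import eu.dnetlib.dhp.schema.oaf.Author;

public class CrossrefAffiliationSketch {
    public static void main(String[] args) {
        // raw affiliation names as they would arrive from the Crossref "affiliation" array (illustrative)
        List<String> affiliationNames = Arrays.asList("University of Example", "Example Research Lab");

        Author a = new Author();
        a.setName("Ada");
        a.setSurname("Lovelace");
        a.setFullname("Ada Lovelace");
        a.setRank(1);
        // same call used by generateAuthor above: only the names are kept, no disambiguation attempted
        a.setRawAffiliationString(affiliationNames);

        System.out.println(a.getFullname() + " -> " + affiliationNames);
    }
}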

View File

@ -0,0 +1,104 @@
package eu.dnetlib.dhp.sx.bio.ebi
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.AbstractScalaApplication
import eu.dnetlib.dhp.common.Constants
import eu.dnetlib.dhp.common.Constants.{MDSTORE_DATA_PATH, MDSTORE_SIZE_PATH}
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup
import eu.dnetlib.dhp.schema.mdstore.MDStoreVersion
import eu.dnetlib.dhp.sx.bio.pubmed.{PMArticle, PMParser2, PubMedToOaf}
import eu.dnetlib.dhp.transformation.TransformSparkJobNode
import eu.dnetlib.dhp.utils.DHPUtils.writeHdfsFile
import eu.dnetlib.dhp.utils.ISLookupClientFactory
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
class SparkCreatePubmedDump(propertyPath: String, args: Array[String], log: Logger)
extends AbstractScalaApplication(propertyPath, args, log: Logger) {
/** Here all the spark applications runs this method
* where the whole logic of the spark node is defined
*/
override def run(): Unit = {
val isLookupUrl: String = parser.get("isLookupUrl")
log.info("isLookupUrl: {}", isLookupUrl)
val sourcePath = parser.get("sourcePath")
log.info(s"SourcePath is '$sourcePath'")
val mdstoreOutputVersion = parser.get("mdstoreOutputVersion")
log.info(s"mdstoreOutputVersion is '$mdstoreOutputVersion'")
val mapper = new ObjectMapper()
val cleanedMdStoreVersion = mapper.readValue(mdstoreOutputVersion, classOf[MDStoreVersion])
val outputBasePath = cleanedMdStoreVersion.getHdfsPath
log.info(s"outputBasePath is '$outputBasePath'")
val isLookupService = ISLookupClientFactory.getLookUpService(isLookupUrl)
val vocabularies = VocabularyGroup.loadVocsFromIS(isLookupService)
createPubmedDump(spark, sourcePath, outputBasePath, vocabularies)
}
/** This method creates a dump of the pubmed articles
* @param spark the spark session
* @param sourcePath the path of the source file
* @param targetPath the path of the target file
* @param vocabularies the vocabularies
*/
def createPubmedDump(
spark: SparkSession,
sourcePath: String,
targetPath: String,
vocabularies: VocabularyGroup
): Unit = {
require(spark != null)
implicit val PMEncoder: Encoder[PMArticle] = Encoders.bean(classOf[PMArticle])
import spark.implicits._
val df = spark.read.option("lineSep", "</PubmedArticle>").text(sourcePath)
val mapper = new ObjectMapper()
df.as[String]
.map(s => {
val id = s.indexOf("<PubmedArticle>")
if (id >= 0) s"${s.substring(id)}</PubmedArticle>" else null
})
.filter(s => s != null)
.map { i =>
//remove try catch
try {
new PMParser2().parse(i)
} catch {
case _: Exception => {
throw new RuntimeException(s"Error parsing article: $i")
}
}
}
.dropDuplicates("pmid")
.map { a =>
val oaf = PubMedToOaf.convert(a, vocabularies)
if (oaf != null)
mapper.writeValueAsString(oaf)
else
null
}
.as[String]
.filter(s => s != null)
.write
.option("compression", "gzip")
.mode("overwrite")
.text(targetPath + MDSTORE_DATA_PATH)
val mdStoreSize = spark.read.text(targetPath + MDSTORE_DATA_PATH).count
writeHdfsFile(spark.sparkContext.hadoopConfiguration, "" + mdStoreSize, targetPath + MDSTORE_SIZE_PATH)
}
}
object SparkCreatePubmedDump {
def main(args: Array[String]): Unit = {
val log: Logger = LoggerFactory.getLogger(getClass)
new SparkCreatePubmedDump("/eu/dnetlib/dhp/sx/bio/ebi/baseline_to_oaf_params.json", args, log).initialize().run()
}
}
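The dump job relies on the text source's lineSep option to cut the baseline into one record per </PubmedArticle>; a minimal Java sketch of that splitting step, with an illustrative local input path:

import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

public class PubmedRecordSplitSketch {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().master("local[*]").appName("pubmed-split").getOrCreate();

        // lineSep turns every </PubmedArticle> into a record boundary, as in createPubmedDump above
        Dataset<String> records = spark.read()
            .option("lineSep", "</PubmedArticle>")
            .textFile("/tmp/pubmed/baseline") // hypothetical input path
            .map((MapFunction<String, String>) s -> {
                int id = s.indexOf("<PubmedArticle>");
                return id >= 0 ? s.substring(id) + "</PubmedArticle>" : null;
            }, Encoders.STRING())
            .filter((FilterFunction<String>) s -> s != null);

        System.out.println("records: " + records.count());
        spark.stop();
    }
}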

View File

@ -0,0 +1,277 @@
package eu.dnetlib.dhp.sx.bio.pubmed
import org.apache.commons.lang3.StringUtils
import javax.xml.stream.XMLEventReader
import scala.collection.JavaConverters._
import scala.xml.{MetaData, NodeSeq}
import scala.xml.pull.{EvElemEnd, EvElemStart, EvText}
class PMParser2 {
/** Extracts the value of an attribute from a MetaData object.
* @param attrs the MetaData object
* @param key the key of the attribute
* @return the value of the attribute or null if the attribute is not found
*/
private def extractAttributes(attrs: MetaData, key: String): String = {
val res = attrs.get(key)
if (res.isDefined) {
val s = res.get
if (s != null && s.nonEmpty)
s.head.text
else
null
} else null
}
/** Validates and formats a date given the year, month, and day as strings.
*
* @param year the year as a string
* @param month the month as a string
* @param day the day as a string
* @return the formatted date as "YYYY-MM-DD" or null if the date is invalid
*/
private def validate_Date(year: String, month: String, day: String): String = {
try {
f"${year.toInt}-${month.toInt}%02d-${day.toInt}%02d"
} catch {
case _: Throwable => null
}
}
/** Extracts the grant information from a NodeSeq object.
*
* @param gNode the NodeSeq object
* @return the grant information or an empty list if the grant information is not found
*/
private def extractGrant(gNode: NodeSeq): List[PMGrant] = {
gNode
.map(node => {
val grantId = (node \ "GrantID").text
val agency = (node \ "Agency").text
val country = (node \ "Country").text
new PMGrant(grantId, agency, country)
})
.toList
}
/** Extracts the journal information from a NodeSeq object.
*
* @param jNode the NodeSeq object
* @return the journal information or null if the journal information is not found
*/
private def extractJournal(jNode: NodeSeq): PMJournal = {
val journal = new PMJournal
journal.setTitle((jNode \ "Title").text)
journal.setIssn((jNode \ "ISSN").text)
journal.setVolume((jNode \ "JournalIssue" \ "Volume").text)
journal.setIssue((jNode \ "JournalIssue" \ "Issue").text)
if (journal.getTitle != null && StringUtils.isNotEmpty(journal.getTitle))
journal
else
null
}
private def extractAuthors(aNode: NodeSeq): List[PMAuthor] = {
aNode
.map(author => {
val a = new PMAuthor
a.setLastName((author \ "LastName").text)
a.setForeName((author \ "ForeName").text)
val id = (author \ "Identifier").text
val idType = (author \ "Identifier" \ "@Source").text
if (id != null && id.nonEmpty && idType != null && idType.nonEmpty) {
a.setIdentifier(new PMIdentifier(id, idType))
}
val affiliation = (author \ "AffiliationInfo" \ "Affiliation").text
val affiliationId = (author \ "AffiliationInfo" \ "Identifier").text
val affiliationIdType = (author \ "AffiliationInfo" \ "Identifier" \ "@Source").text
if (affiliation != null && affiliation.nonEmpty) {
val aff = new PMAffiliation()
aff.setName(affiliation)
if (
affiliationId != null && affiliationId.nonEmpty && affiliationIdType != null && affiliationIdType.nonEmpty
) {
aff.setIdentifier(new PMIdentifier(affiliationId, affiliationIdType))
}
a.setAffiliation(aff)
}
a
})
.toList
}
def parse(input: String): PMArticle = {
val xml = scala.xml.XML.loadString(input)
val article = new PMArticle
val grantNodes = xml \ "MedlineCitation" \\ "Grant"
article.setGrants(extractGrant(grantNodes).asJava)
val journal = xml \ "MedlineCitation" \ "Article" \ "Journal"
article.setJournal(extractJournal(journal))
val authors = xml \ "MedlineCitation" \ "Article" \ "AuthorList" \ "Author"
article.setAuthors(
extractAuthors(authors).asJava
)
val pmId = xml \ "MedlineCitation" \ "PMID"
val articleIds = xml \ "PubmedData" \ "ArticleIdList" \ "ArticleId"
articleIds.foreach(articleId => {
val idType = (articleId \ "@IdType").text
val id = articleId.text
if ("doi".equalsIgnoreCase(idType)) article.setDoi(id)
if ("pmc".equalsIgnoreCase(idType)) article.setPmcId(id)
})
article.setPmid(pmId.text)
val pubMedPubDate = xml \ "MedlineCitation" \ "DateCompleted"
val currentDate =
validate_Date((pubMedPubDate \ "Year").text, (pubMedPubDate \ "Month").text, (pubMedPubDate \ "Day").text)
if (currentDate != null) article.setDate(currentDate)
val articleTitle = xml \ "MedlineCitation" \ "Article" \ "ArticleTitle"
article.setTitle(articleTitle.text)
val abstractText = xml \ "MedlineCitation" \ "Article" \ "Abstract" \ "AbstractText"
if (abstractText != null && abstractText.text != null && abstractText.text.nonEmpty)
article.setDescription(abstractText.text.split("\n").map(s => s.trim).mkString(" ").trim)
val language = xml \ "MedlineCitation" \ "Article" \ "Language"
article.setLanguage(language.text)
val subjects = xml \ "MedlineCitation" \ "MeshHeadingList" \ "MeshHeading"
article.setSubjects(
subjects
.take(20)
.map(subject => {
val descriptorName = (subject \ "DescriptorName").text
val ui = (subject \ "DescriptorName" \ "@UI").text
val s = new PMSubject
s.setValue(descriptorName)
s.setMeshId(ui)
s
})
.toList
.asJava
)
val publicationTypes = xml \ "MedlineCitation" \ "Article" \ "PublicationTypeList" \ "PublicationType"
article.setPublicationTypes(
publicationTypes
.map(pt => {
val s = new PMSubject
s.setValue(pt.text)
s
})
.toList
.asJava
)
article
}
def parse2(xml: XMLEventReader): PMArticle = {
var currentArticle: PMArticle = null
var currentSubject: PMSubject = null
var currentAuthor: PMAuthor = null
var currentJournal: PMJournal = null
var currentGrant: PMGrant = null
var currNode: String = null
var currentYear = "0"
var currentMonth = "01"
var currentDay = "01"
var currentArticleType: String = null
while (xml.hasNext) {
val ne = xml.next
ne match {
case EvElemStart(_, label, attrs, _) =>
currNode = label
label match {
case "PubmedArticle" => currentArticle = new PMArticle
case "Author" => currentAuthor = new PMAuthor
case "Journal" => currentJournal = new PMJournal
case "Grant" => currentGrant = new PMGrant
case "PublicationType" | "DescriptorName" =>
currentSubject = new PMSubject
currentSubject.setMeshId(extractAttributes(attrs, "UI"))
case "ArticleId" => currentArticleType = extractAttributes(attrs, "IdType")
case _ =>
}
case EvElemEnd(_, label) =>
label match {
case "PubmedArticle" => return currentArticle
case "Author" => currentArticle.getAuthors.add(currentAuthor)
case "Journal" => currentArticle.setJournal(currentJournal)
case "Grant" => currentArticle.getGrants.add(currentGrant)
case "PubMedPubDate" =>
if (currentArticle.getDate == null)
currentArticle.setDate(validate_Date(currentYear, currentMonth, currentDay))
case "PubDate" => currentJournal.setDate(s"$currentYear-$currentMonth-$currentDay")
case "DescriptorName" => currentArticle.getSubjects.add(currentSubject)
case "PublicationType" => currentArticle.getPublicationTypes.add(currentSubject)
case _ =>
}
case EvText(text) =>
if (currNode != null && text.trim.nonEmpty)
currNode match {
case "ArticleTitle" => {
if (currentArticle.getTitle == null)
currentArticle.setTitle(text.trim)
else
currentArticle.setTitle(currentArticle.getTitle + text.trim)
}
case "AbstractText" => {
if (currentArticle.getDescription == null)
currentArticle.setDescription(text.trim)
else
currentArticle.setDescription(currentArticle.getDescription + text.trim)
}
case "PMID" => currentArticle.setPmid(text.trim)
case "ArticleId" =>
if ("doi".equalsIgnoreCase(currentArticleType)) currentArticle.setDoi(text.trim)
if ("pmc".equalsIgnoreCase(currentArticleType)) currentArticle.setPmcId(text.trim)
case "Language" => currentArticle.setLanguage(text.trim)
case "ISSN" => currentJournal.setIssn(text.trim)
case "GrantID" => currentGrant.setGrantID(text.trim)
case "Agency" => currentGrant.setAgency(text.trim)
case "Country" => if (currentGrant != null) currentGrant.setCountry(text.trim)
case "Year" => currentYear = text.trim
case "Month" => currentMonth = text.trim
case "Day" => currentDay = text.trim
case "Volume" => currentJournal.setVolume(text.trim)
case "Issue" => currentJournal.setIssue(text.trim)
case "PublicationType" | "DescriptorName" => currentSubject.setValue(text.trim)
case "LastName" => {
if (currentAuthor != null)
currentAuthor.setLastName(text.trim)
}
case "ForeName" =>
if (currentAuthor != null)
currentAuthor.setForeName(text.trim)
case "Title" =>
if (currentJournal.getTitle == null)
currentJournal.setTitle(text.trim)
else
currentJournal.setTitle(currentJournal.getTitle + text.trim)
case _ =>
}
case _ =>
}
}
null
}
}
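A minimal usage sketch of the parser on a synthetic record, assuming the compiled Scala class is on the classpath; the XML values are illustrative:

import eu.dnetlib.dhp.sx.bio.pubmed.PMArticle;
import eu.dnetlib.dhp.sx.bio.pubmed.PMParser2;

public class PMParser2Sketch {
    public static void main(String[] args) {
        String xml =
            "<PubmedArticle>" +
            "  <MedlineCitation>" +
            "    <PMID>12345678</PMID>" +
            "    <Article>" +
            "      <Journal><Title>Example Journal</Title><ISSN>1234-5678</ISSN>" +
            "        <JournalIssue><Volume>1</Volume><Issue>2</Issue></JournalIssue></Journal>" +
            "      <ArticleTitle>An example title</ArticleTitle>" +
            "      <Language>eng</Language>" +
            "      <AuthorList><Author><LastName>Doe</LastName><ForeName>Jane</ForeName>" +
            "        <Identifier Source=\"ORCID\">0000000163025705</Identifier></Author></AuthorList>" +
            "    </Article>" +
            "  </MedlineCitation>" +
            "  <PubmedData><ArticleIdList>" +
            "    <ArticleId IdType=\"doi\">10.1000/example</ArticleId>" +
            "  </ArticleIdList></PubmedData>" +
            "</PubmedArticle>";

        PMArticle article = new PMParser2().parse(xml);
        System.out.println(article.getPmid() + " / " + article.getDoi() + " / " + article.getTitle());
        // the ORCID attached to the author comes back normalised by PMIdentifier
        System.out.println(article.getAuthors().get(0).getIdentifier().getPid()); // 0000-0001-6302-5705
    }
}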

View File

@ -294,6 +294,24 @@ object PubMedToOaf {
author.setName(a.getForeName) author.setName(a.getForeName)
author.setSurname(a.getLastName) author.setSurname(a.getLastName)
author.setFullname(a.getFullName) author.setFullname(a.getFullName)
if (a.getIdentifier != null) {
author.setPid(
List(
OafMapperUtils.structuredProperty(
a.getIdentifier.getPid,
OafMapperUtils.qualifier(
a.getIdentifier.getType,
a.getIdentifier.getType,
ModelConstants.DNET_PID_TYPES,
ModelConstants.DNET_PID_TYPES
),
dataInfo
)
).asJava
)
}
if (a.getAffiliation != null)
author.setRawAffiliationString(List(a.getAffiliation.getName).asJava)
author.setRank(index + 1) author.setRank(index + 1)
author author
}(collection.breakOut) }(collection.breakOut)

View File

@ -0,0 +1,165 @@
package eu.dnetlib.dhp.actionmanager.raid;
import static java.nio.file.Files.createTempDirectory;
import static eu.dnetlib.dhp.actionmanager.Constants.OBJECT_MAPPER;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.File;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import eu.dnetlib.dhp.actionmanager.opencitations.CreateOpenCitationsASTest;
import eu.dnetlib.dhp.actionmanager.raid.model.RAiDEntity;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.oaf.Oaf;
import eu.dnetlib.dhp.schema.oaf.OtherResearchProduct;
import eu.dnetlib.dhp.schema.oaf.Relation;
import scala.Tuple2;
public class GenerateRAiDActionSetJobTest {
private static String input_path;
private static String output_path;
static SparkSession spark;
@BeforeEach
void setUp() throws Exception {
input_path = Paths
.get(
GenerateRAiDActionSetJobTest.class
.getResource("/eu/dnetlib/dhp/actionmanager/raid/raid_example.json")
.toURI())
.toFile()
.getAbsolutePath();
output_path = createTempDirectory(GenerateRAiDActionSetJobTest.class.getSimpleName() + "-")
.toAbsolutePath()
.toString();
SparkConf conf = new SparkConf();
conf.setAppName(GenerateRAiDActionSetJobTest.class.getSimpleName());
conf.setMaster("local[*]");
conf.set("spark.driver.host", "localhost");
conf.set("hive.metastore.local", "true");
conf.set("spark.ui.enabled", "false");
conf.set("spark.sql.warehouse.dir", output_path);
conf.set("hive.metastore.warehouse.dir", output_path);
spark = SparkSession
.builder()
.appName(GenerateRAiDActionSetJobTest.class.getSimpleName())
.config(conf)
.getOrCreate();
}
@AfterAll
static void cleanUp() throws Exception {
FileUtils.deleteDirectory(new File(output_path));
}
@Test
@Disabled
void testProcessRAiDEntities() {
GenerateRAiDActionSetJob.processRAiDEntities(spark, input_path, output_path + "/test_raid_action_set");
JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
JavaRDD<? extends Oaf> result = sc
.sequenceFile(output_path + "/test_raid_action_set", Text.class, Text.class)
.map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
.map(AtomicAction::getPayload);
assertEquals(80, result.count());
}
@Test
void testPrepareRAiD() {
List<AtomicAction<? extends Oaf>> atomicActions = GenerateRAiDActionSetJob
.prepareRAiD(
new RAiDEntity(
"-92190526",
Arrays
.asList(
"Berli, Justin", "Le Mao, Bérénice", "Guillaume Touya", "Wenclik, Laura",
"Courtial, Azelle", "Muehlenhaus, Ian", "Justin Berli", "Touya, Guillaume",
"Gruget, Maïeul", "Azelle Courtial", "Ian Muhlenhaus", "Maïeul Gruget", "Marion Dumont",
"Maïeul GRUGET", "Cécile Duchêne"),
"2021-09-10",
"2024-02-16",
Arrays
.asList(
"cartography, zoom, pan, desert fog", "Road network", "zooming", "Pan-scalar maps",
"pan-scalar map", "Python library", "QGIS", "map design", "landmarks",
"Cartes transscalaires", "anchor", "disorientation", "[INFO]Computer Science [cs]",
"[SHS.GEO]Humanities and Social Sciences/Geography", "cognitive cartography",
"eye-tracking", "Computers in Earth Sciences", "Topographic map", "National Mapping Agency",
"General Medicine", "Geography, Planning and Development", "multi-scales",
"pan-scalar maps", "Selection", "cartography", "General Earth and Planetary Sciences",
"progressiveness", "map generalisation", "Eye-tracker", "zoom", "algorithms", "Map Design",
"cartography, map generalisation, zoom, multi-scale map", "Interactive maps",
"Map generalisation", "Earth and Planetary Sciences (miscellaneous)",
"Cartographic generalization", "rivers", "Benchmark", "General Environmental Science",
"open source", "drawing", "Constraint", "Multi-scale maps"),
Arrays
.asList(
"Where do people look at during multi-scale map tasks?", "FogDetector survey raw data",
"Collection of cartographic disorientation stories", "Anchorwhat dataset",
"BasqueRoads: A Benchmark for Road Network Selection",
"Progressive river network selection for pan-scalar maps",
"BasqueRoads, a dataset to benchmark road selection algorithms",
"Missing the city for buildings? A critical review of pan-scalar map generalization and design in contemporary zoomable maps",
"Empirical approach to advance the generalisation of multi-scale maps",
"L'Alpe d'Huez: a dataset to benchmark topographic map generalisation",
"eye-tracking data from a survey on zooming in a pan-scalar map",
"Material of the experiment 'More is Less' from the MapMuxing project",
"Cartagen4py, an open source Python library for map generalisation",
"LAlpe dHuez: A Benchmark for Topographic Map Generalisation"),
Arrays
.asList(
"50|doi_dedup___::6915135e0aa39f913394513f809ae58a",
"50|doi_dedup___::754e3c283639bc6e104c925ff3e34007",
"50|doi_dedup___::13517477f3c1261d57a3364363ce6ce0",
"50|doi_dedup___::675b16c73accc4e7242bbb4ed9b3724a",
"50|doi_dedup___::94ce09906b2d7d37eb2206cea8a50153",
"50|dedup_wf_002::cc575d5ca5651ff8c3029a3a76e7e70a",
"50|doi_dedup___::c5e52baddda17c755d1bae012a97dc13",
"50|doi_dedup___::4f5f38c9e08fe995f7278963183f8ad4",
"50|doi_dedup___::a9bc4453273b2d02648a5cb453195042",
"50|doi_dedup___::5e893dc0cb7624a33f41c9b428bd59f7",
"50|doi_dedup___::c1ecdef48fd9be811a291deed950e1c5",
"50|doi_dedup___::9e93c8f2d97c35de8a6a57a5b53ef283",
"50|dedup_wf_002::d08be0ed27b13d8a880e891e08d093ea",
"50|doi_dedup___::f8d8b3b9eddeca2fc0e3bc9e63996555"),
"Exploring Multi-Scale Map Generalization and Design",
"This project aims to advance the generalization of multi-scale maps by investigating the impact of different design elements on user experience. The research involves collecting and analyzing data from various sources, including surveys, eye-tracking studies, and user experiments. The goal is to identify best practices for map generalization and design, with a focus on reducing disorientation and improving information retrieval during exploration. The project has led to the development of several datasets, including BasqueRoads, AnchorWhat, and L'Alpe d'Huez, which can be used to benchmark road selection algorithms and topographic map generalization techniques. The research has also resulted in the creation of a Python library, Cartagen4py, for map generalization. The findings of this project have the potential to improve the design and usability of multi-scale maps, making them more effective tools for navigation and information retrieval."));
OtherResearchProduct orp = (OtherResearchProduct) atomicActions.get(0).getPayload();
Relation rel = (Relation) atomicActions.get(1).getPayload();
assertEquals("Exploring Multi-Scale Map Generalization and Design", orp.getTitle().get(0).getValue());
assertEquals("50|raid________::759a564ce5cc7360cab030c517c7366b", rel.getSource());
assertEquals("50|doi_dedup___::6915135e0aa39f913394513f809ae58a", rel.getTarget());
}
}
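
Note: the expected relation source asserted above follows the usual OpenAIRE identifier layout, "50|" + namespace prefix + "::" + md5 of the original identifier. The snippet below is only a rough sketch of how such an id could be derived for a RAiD record; the "raid________" prefix is taken from the assertion above, while the helper name and the use of commons-codec MD5 hashing are assumptions, not necessarily how GenerateRAiDActionSetJob builds its identifiers.

import org.apache.commons.codec.digest.DigestUtils;

public class RaidIdSketch {

	// Assumption: result ids look like "50|" + 12-char namespace prefix + "::" + md5(original id)
	public static String calculateOpenaireId(String raidIdentifier) {
		return String.format("50|%s::%s", "raid________", DigestUtils.md5Hex(raidIdentifier));
	}

	public static void main(String[] args) {
		// prints an OpenAIRE-style id for a sample RAiD value
		System.out.println(calculateOpenaireId("-9219052635741785098"));
	}
}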

View File

@ -0,0 +1,28 @@
package eu.dnetlib.dhp.collection.plugin.zenodo;
import java.util.zip.GZIPInputStream;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
public class ZenodoPluginCollectionTest {
@Test
public void testZenodoIterator() throws Exception {
final GZIPInputStream gis = new GZIPInputStream(
getClass().getResourceAsStream("/eu/dnetlib/dhp/collection/zenodo/zenodo.tar.gz"));
try (ZenodoTarIterator it = new ZenodoTarIterator(gis)) {
Assertions.assertTrue(it.hasNext());
int i = 0;
while (it.hasNext()) {
Assertions.assertNotNull(it.next());
i++;
}
Assertions.assertEquals(10, i);
}
}
}
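
Note: the ZenodoTarIterator implementation itself is not part of this hunk, so the snippet below is only a rough sketch of how an iterator over the entries of a gzipped tar stream could be written with Apache Commons Compress; the class name and structure are assumptions, not the plugin's actual code.

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.NoSuchElementException;

import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.io.IOUtils;

/** Minimal sketch: iterates the textual content of each regular file found in a tar stream. */
public class TarContentIterator implements Iterator<String>, AutoCloseable {

	private final TarArchiveInputStream tar;
	private String next;

	public TarContentIterator(InputStream in) {
		this.tar = new TarArchiveInputStream(in);
		advance();
	}

	private void advance() {
		try {
			TarArchiveEntry entry;
			while ((entry = tar.getNextTarEntry()) != null) {
				if (entry.isFile()) {
					// after getNextTarEntry() the stream is positioned on the current entry
					next = IOUtils.toString(tar, StandardCharsets.UTF_8);
					return;
				}
			}
			next = null;
		} catch (IOException e) {
			throw new RuntimeException(e);
		}
	}

	@Override
	public boolean hasNext() {
		return next != null;
	}

	@Override
	public String next() {
		if (next == null)
			throw new NoSuchElementException();
		final String current = next;
		advance();
		return current;
	}

	@Override
	public void close() throws IOException {
		tar.close();
	}
}

Used the same way as in the test above: wrap the GZIPInputStream, call hasNext()/next() until exhausted, then close the iterator to release the underlying stream.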

View File

@ -0,0 +1,6 @@
{"raid": "-9222092103004099540", "authors": ["Department of Archaeology & Museums", "Department of Archaeology and Museums", "Department Of Archaeology & Museums"], "subjects": ["Begamganj", "Raisen", "Bhopal", "Budhni", "Malwa site survey", "सीहोर", "Gauharganj", "बुधनी", "Budni", "Berasia"], "titles": ["Malwa site survey : Raisen District, Begamganj Tahsīl, photographic documentation", "Malwa site survey : Bhopal District, photographic documentation (version 1, TIFF files)", "Malwa site survey : Raisen District, Gauharganj Tahsīl, village finds", "Malwa site survey : Sehore सीहोर District, Budni Tahsīl, photographic documentation (part 1)", "Malwa site survey: Bhopal District, Berasia Tahsīl, photographic documentation (with villages named)", "Malwa site survey : Sehore सीहोर District, Budni Tahsīl, photographic documentation (part 2)", "Malwa site survey : Bhopal District, photographic documentation (version 2, JPEG files)"], "ids": ["50|doi_dedup___::7523d165970830dd857e6cbea4302adf", "50|doi_dedup___::02309ae8a9fae291df321e317f5c5330", "50|doi_dedup___::95347ba2c4264414fab39712ee7fe481", "50|doi_dedup___::970aa708fe667596754fd02a708780f5", "50|doi_dedup___::b7cd9128cc53b1257a4f000347f339b0", "50|doi_dedup___::c7d65da0ecedef4d2c702b9db197d90c", "50|doi_dedup___::addbb67cf5046e340f342ba091bcebfa"], "title": "Documentation of Malwa Region", "summary": "This project involves the documentation of the Malwa region through photographic surveys. The surveys were conducted by the Department of Archaeology and Museums, Madhya Pradesh, and cover various districts and tahsils. The documentation includes photographic records of sites, villages, and other relevant features. The project aims to provide a comprehensive understanding of the region's cultural and historical significance.", "startDate": "2019-03-06", "endDate": "2019-03-08"}
{"raid": "-9221424331076109424", "authors": ["Hutchings, Judy", "Ward, Catherine", "Baban, Adriana", "D<><44>nil<69><6C>, Ingrid", "Frantz, Inga", "Gardner, Frances", "Lachman, Jamie", "Lachman, Jamie M.", "Foran, Heather", "Heinrichs, Nina", "Murphy, Hugh", "B<><42>ban, Adriana", "Raleva, Marija", "Fang, Xiangming", "Jansen, Elena", "Taut, Diana", "Foran, Heather M.", "T<><54>ut, Diana", "Ward, Catherine L.", "Williams, Margiad", "Lesco, Galina", "Brühl, Antonia"], "subjects": ["3. Good health", "5. Gender equality", "Criminology not elsewhere classified", "1. No poverty", "2. Zero hunger"], "titles": ["sj-docx-1-vaw-10.1177_10778012231188090 - Supplemental material for Co-Occurrence of Intimate Partner Violence Against Mothers and Maltreatment of Their Children With Behavioral Problems in Eastern Europe", "Hunger in vulnerable families in Southeastern Europe: Associations with health and violence", "Prevention of child mental health problems through parenting interventions in Southeastern Europe (RISE): study protocol for a multi-site randomised controlled trial"], "ids": ["50|doi_dedup___::a70015063e5400dae2e097ee10b4a589", "50|doi_dedup___::6e1d12026fcde9087724622ccdeed430", "50|doi_dedup___::5b7bd5d46c5d95e2ef5b36663504a67e"], "title": "Exploring the Impact of Hunger and Violence on Child Health in Southeastern Europe", "summary": "This study aims to investigate the relationship between hunger, violence, and child health in vulnerable families in Southeastern Europe. The research will explore the experiences of families in FYR Macedonia, Republic of Moldova, and Romania, and examine the associations between hunger, maltreatment, and other health indicators. The study will also test the efficacy of a parenting intervention targeting child behavioral problems in alleviating these issues. The findings of this research will contribute to the development of effective interventions to address the complex needs of vulnerable families in the region.", "startDate": "2019-06-04", "endDate": "2023-01-01"}
{"raid": "-9219052635741785098", "authors": ["Berli, Justin", "Le Mao, Bérénice", "Guillaume Touya", "Wenclik, Laura", "Courtial, Azelle", "Muehlenhaus, Ian", "Justin Berli", "Touya, Guillaume", "Gruget, Maïeul", "Azelle Courtial", "Ian Muhlenhaus", "Maïeul Gruget", "Marion Dumont", "Maïeul GRUGET", "Cécile Duchêne"], "subjects": ["cartography, zoom, pan, desert fog", "Road network", "zooming", "Pan-scalar maps", "pan-scalar map", "Python library", "QGIS", "map design", "landmarks", "Cartes transscalaires", "anchor", "disorientation", "[INFO]Computer Science [cs]", "[SHS.GEO]Humanities and Social Sciences/Geography", "cognitive cartography", "eye-tracking", "Computers in Earth Sciences", "Topographic map", "National Mapping Agency", "General Medicine", "Geography, Planning and Development", "multi-scales", "pan-scalar maps", "Selection", "cartography", "General Earth and Planetary Sciences", "progressiveness", "map generalisation", "Eye-tracker", "zoom", "algorithms", "Map Design", "cartography, map generalisation, zoom, multi-scale map", "Interactive maps", "Map generalisation", "Earth and Planetary Sciences (miscellaneous)", "Cartographic generalization", "rivers", "Benchmark", "General Environmental Science", "open source", "drawing", "Constraint", "Multi-scale maps"], "titles": ["Where do people look at during multi-scale map tasks?", "FogDetector survey raw data", "Collection of cartographic disorientation stories", "Anchorwhat dataset", "BasqueRoads: A Benchmark for Road Network Selection", "Progressive river network selection for pan-scalar maps", "BasqueRoads, a dataset to benchmark road selection algorithms", "Missing the city for buildings? A critical review of pan-scalar map generalization and design in contemporary zoomable maps", "Empirical approach to advance the generalisation of multi-scale maps", "L'Alpe d'Huez: a dataset to benchmark topographic map generalisation", "eye-tracking data from a survey on zooming in a pan-scalar map", "Material of the experiment \"More is Less\" from the MapMuxing project", "Cartagen4py, an open source Python library for map generalisation", "LAlpe dHuez: A Benchmark for Topographic Map Generalisation"], "ids": ["50|doi_dedup___::6915135e0aa39f913394513f809ae58a", "50|doi_dedup___::754e3c283639bc6e104c925ff3e34007", "50|doi_dedup___::13517477f3c1261d57a3364363ce6ce0", "50|doi_dedup___::675b16c73accc4e7242bbb4ed9b3724a", "50|doi_dedup___::94ce09906b2d7d37eb2206cea8a50153", "50|dedup_wf_002::cc575d5ca5651ff8c3029a3a76e7e70a", "50|doi_dedup___::c5e52baddda17c755d1bae012a97dc13", "50|doi_dedup___::4f5f38c9e08fe995f7278963183f8ad4", "50|doi_dedup___::a9bc4453273b2d02648a5cb453195042", "50|doi_dedup___::5e893dc0cb7624a33f41c9b428bd59f7", "50|doi_dedup___::c1ecdef48fd9be811a291deed950e1c5", "50|doi_dedup___::9e93c8f2d97c35de8a6a57a5b53ef283", "50|dedup_wf_002::d08be0ed27b13d8a880e891e08d093ea", "50|doi_dedup___::f8d8b3b9eddeca2fc0e3bc9e63996555"], "title": "Exploring Multi-Scale Map Generalization and Design", "summary": "This project aims to advance the generalization of multi-scale maps by investigating the impact of different design elements on user experience. The research involves collecting and analyzing data from various sources, including surveys, eye-tracking studies, and user experiments. The goal is to identify best practices for map generalization and design, with a focus on reducing disorientation and improving information retrieval during exploration. 
The project has led to the development of several datasets, including BasqueRoads, AnchorWhat, and L'Alpe d'Huez, which can be used to benchmark road selection algorithms and topographic map generalization techniques. The research has also resulted in the creation of a Python library, Cartagen4py, for map generalization. The findings of this project have the potential to improve the design and usability of multi-scale maps, making them more effective tools for navigation and information retrieval.", "startDate": "2021-09-10", "endDate": "2024-02-16"}
{"raid": "-9216828847055450272", "authors": ["Grey, Alan", "Gorelov, Sergey", "Pall, Szilard", "Merz, Pascal", "Justin A., Lemkul", "Szilárd Páll", "Pasquadibisceglie, Andrea", "Kutzner, Carsten", "Schulz, Roland", "Nabet, Julien", "Abraham, Mark", "Jalalypour, Farzaneh", "Lundborg, Magnus", "Gray, Alan", "Villa, Alessandra", "Berk Hess", "Santuz, Hubert", "Irrgang, M. Eric", "Wingbermuehle, Sebastian", "Lemkul, Justin A.", "Jordan, Joe", "Pellegrino, Michele", "Doijade, Mahesh", "Shvetsov, Alexey", "Hess, Berk", "Behera, Sudarshan", "Andrey Alekseenko", "Shugaeva, Tatiana", "Fleischmann, Stefan", "Bergh, Cathrine", "Morozov, Dmitry", "Adam Hospital", "Briand, Eliane", "Lindahl, Erik", "Brown, Ania", "Marta Lloret Llinares", "Miletic, Vedran", "Alekseenko, Andrey", "Gouaillardet, Gilles", "Fiorin, Giacomo", "Basov, Vladimir"], "subjects": ["webinar"], "titles": ["Introduction to HPC: molecular dynamics simulations with GROMACS: log files", "BioExcel webinar #73: Competency frameworks to support training design and professional development", "Introduction to HPC: molecular dynamics simulations with GROMACS: output files - Devana", "GROMACS 2024.0 Manual", "BioExcel Webinar #71: GROMACS-PMX for accurate estimation of free energy differences", "Introduction to HPC: molecular dynamics simulations with GROMACS: input files", "BioExcel Webinar #68: What's new in GROMACS 2023", "BioExcel Webinar #69: BioBB-Wfs and BioBB-API, integrated web-based platform and programmatic interface for biomolecular simulations workflows using the BioExcel Building Blocks library", "GROMACS 2024-beta Source code"], "ids": ["50|doi_dedup___::8318fbc815ee1943c3269be7567f220b", "50|doi_dedup___::9530e03fb2aac63e82b18a40dc09e32c", "50|doi_dedup___::30174ab31075e76a428ca5b4f4d236b8", "50|doi_________::70b7c6dce09ae6f1361d22913fdf95eb", "50|doi_dedup___::337dd48600618f3c06257edd750d6201", "50|doi_dedup___::d622992ba9077617f37ebd268b3e806d", "50|doi_dedup___::0b0bcc6825d6c052c37882fd5cfc1e8c", "50|doi_dedup___::4b1541a7cee32527c65ace5d1ed57335", "50|doi_dedup___::1379861df59bd755e4fb39b9f95ffbd3"], "title": "Exploring High-Performance Computing and Biomolecular Simulations", "summary": "This project involves exploring high-performance computing (HPC) and biomolecular simulations using GROMACS. The objectives include understanding molecular dynamics simulations, log files, input files, and output files. Additionally, the project aims to explore competency frameworks for professional development, specifically in the field of computational biomolecular research. The tools and techniques used will include GROMACS, BioExcel Building Blocks, and competency frameworks. The expected outcomes include a deeper understanding of HPC and biomolecular simulations, as well as the development of skills in using GROMACS and BioExcel Building Blocks. The project will also contribute to the development of competency frameworks for professional development in the field of computational biomolecular research.", "startDate": "2023-04-25", "endDate": "2024-01-30"}
{"raid": "-9210544816395499758", "authors": ["Bateson, Melissa", "Andrews, Clare", "Verhulst, Simon", "Nettle, Daniel", "Zuidersma, Erica"], "subjects": ["2. Zero hunger"], "titles": ["Exposure to food insecurity increases energy storage and reduces somatic maintenance in European starlings", "Data and code archive for Andrews et al. 'Exposure to food insecurity increases energy storage and reduces somatic maintenance in European starlings'"], "ids": ["50|doi_dedup___::176117239be06189523c253e0ca9c5ec", "50|doi_dedup___::343e0b0ddf0d54763a89a62af1f7a379"], "title": "Investigating the Effects of Food Insecurity on Energy Storage and Somatic Maintenance in European Starlings", "summary": "This study examines the impact of food insecurity on energy storage and somatic maintenance in European starlings. The research involved exposing juvenile starlings to either uninterrupted food availability or a regime of unpredictable food unavailability. The results show that birds exposed to food insecurity stored more energy, but at the expense of somatic maintenance and repair. The study provides insights into the adaptive responses of birds to food scarcity and the trade-offs involved in energy storage and maintenance.", "startDate": "2021-06-28", "endDate": "2021-06-28"}
{"raid": "-9208499171224730388", "authors": ["Maniati, Eleni", "Bakker, Bjorn", "McClelland, Sarah E.", "Shaikh, Nadeem", "De Angelis, Simone", "Johnson, Sarah C.", "Wang, Jun", "Foijer, Floris", "Spierings, Diana C. J.", "Boemo, Michael A.", "Wardenaar, René", "Mazzagatti, Alice"], "subjects": [], "titles": ["Additional file 2 of Replication stress generates distinctive landscapes of DNA copy number alterations and chromosome scale losses", "Additional file 5 of Replication stress generates distinctive landscapes of DNA copy number alterations and chromosome scale losses"], "ids": ["50|doi_dedup___::a1bfeb173971f74a274fab8bdd78a4bc", "50|doi_dedup___::3d6e151aaeb2f7c40a320207fdd80ade"], "title": "Analysis of DNA Copy Number Alterations and Chromosome Scale Losses", "summary": "This study analyzed the effects of replication stress on DNA copy number alterations and chromosome scale losses. The results show distinctive landscapes of these alterations and losses, which were further investigated in additional files. The study provides valuable insights into the mechanisms of replication stress and its impact on genomic stability.", "startDate": "2022-01-01", "endDate": "2022-01-01"}

View File

@ -0,0 +1,232 @@
{
"indexed": {
"date-parts": [
[
2022,
4,
3
]
],
"date-time": "2022-04-03T01:45:59Z",
"timestamp": 1648950359167
},
"reference-count": 0,
"publisher": "American Society of Clinical Oncology (ASCO)",
"issue": "18_suppl",
"content-domain": {
"domain": [],
"crossmark-restriction": false
},
"short-container-title": [
"JCO"
],
"published-print": {
"date-parts": [
[
2007,
6,
20
]
]
},
"abstract": "<jats:p> 3507 </jats:p><jats:p> Purpose: To detect IGF-1R on circulating tumor cells (CTCs) as a biomarker in the clinical development of a monoclonal human antibody, CP-751,871, targeting IGF-1R. Experimental Design: An automated sample preparation and analysis system for enumerating CTCs (Celltracks) was adapted for detecting IGF-1R positive CTCs with a diagnostic antibody targeting a different IGF-1R epitope to CP-751,871. This assay was utilized in three phase I trials of CP-751,871 as a single agent or with chemotherapy and was validated using cell lines and blood samples from healthy volunteers and patients with metastatic carcinoma. Results: There was no interference between the analytical and therapeutic antibodies. CP-751,871 was well tolerated as a single agent, and in combination with docetaxel or carboplatin and paclitaxel, at doses ranging from 0.05 mg/kg to 20 mg/kg. Eighty patients were enrolled on phase 1 studies of CP-751,871, with 47 (59%) patients having CTCs detected during the study. Prior to treatment 26 patients (33%) had CTCs, with 23 having detectable IGF-1R positive CTCs. CP-751,871 alone, and CP-751,871 with cytotoxic chemotherapy, decreased CTCs and IGF-1R positive CTCs; these increased towards the end of the 21-day cycle in some patients, falling again with retreatment. CTCs were commonest in advanced hormone refractory prostate cancer (11/20). Detectable IGF-1R expression on CTCs before treatment with CP-751,871 and docetaxel was associated with a higher frequency of PSA decline by more than 50% (6/10 vs 2/8 patients). A relationship was observed between sustained falls in CTCs counts and PSA declines by more than 50%. Conclusions: IGF-1R expression is detectable by immunofluorescence on CTCs. These data support the further evaluation of CTCs in pharmacodynamic studies and patient selection, particularly in advanced prostate cancer. </jats:p><jats:p> No significant financial relationships to disclose. </jats:p>",
"DOI": "10.1200/jco.2007.25.18_suppl.3507",
"type": "journal-article",
"created": {
"date-parts": [
[
2020,
3,
6
]
],
"date-time": "2020-03-06T20:50:42Z",
"timestamp": 1583527842000
},
"page": "3507-3507",
"source": "Crossref",
"is-referenced-by-count": 0,
"title": [
"Circulating tumor cells expressing the insulin growth factor-1 receptor (IGF-1R): Method of detection, incidence and potential applications"
],
"prefix": "10.1200",
"volume": "25",
"author": [
{
"given": "J. S.",
"family": "de Bono",
"sequence": "first",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "A.",
"family": "Adjei",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "G.",
"family": "Attard",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "M.",
"family": "Pollak",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "P.",
"family": "Fong",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "P.",
"family": "Haluska",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "L.",
"family": "Roberts",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "D.",
"family": "Chainese",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "L.",
"family": "Terstappen",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
},
{
"given": "A.",
"family": "Gualberto",
"sequence": "additional",
"affiliation": [
{
"name": "Royal Marsden Hospital, Surrey, United Kingdom; Mayo Clinic, Rochester, MN; McGill University & Lady Davis Research Institute, Montreal, PQ, Canada; Pfizer Global Research & Development, New London, CT; Immunicon Corporation, Huntingdon Valley, PA"
}
]
}
],
"member": "233",
"container-title": [
"Journal of Clinical Oncology"
],
"original-title": [],
"language": "en",
"deposited": {
"date-parts": [
[
2020,
3,
6
]
],
"date-time": "2020-03-06T20:51:03Z",
"timestamp": 1583527863000
},
"score": 1,
"resource": {
"primary": {
"URL": "http://ascopubs.org/doi/10.1200/jco.2007.25.18_suppl.3507"
}
},
"subtitle": [],
"short-title": [],
"issued": {
"date-parts": [
[
2007,
6,
20
]
]
},
"references-count": 0,
"journal-issue": {
"issue": "18_suppl",
"published-print": {
"date-parts": [
[
2007,
6,
20
]
]
}
},
"alternative-id": [
"10.1200/jco.2007.25.18_suppl.3507"
],
"URL": "http://dx.doi.org/10.1200/jco.2007.25.18_suppl.3507",
"relation": {},
"ISSN": [
"0732-183X",
"1527-7755"
],
"issn-type": [
{
"value": "0732-183X",
"type": "print"
},
{
"value": "1527-7755",
"type": "electronic"
}
],
"subject": [],
"published": {
"date-parts": [
[
2007,
6,
20
]
]
}
}

View File

@ -0,0 +1,157 @@
<PubmedArticle>
<MedlineCitation Status="MEDLINE" IndexingMethod="Curated" Owner="NLM">
<PMID Version="1">37318999</PMID>
<DateCompleted>
<Year>2024</Year>
<Month>02</Month>
<Day>09</Day>
</DateCompleted>
<DateRevised>
<Year>2024</Year>
<Month>02</Month>
<Day>09</Day>
</DateRevised>
<Article PubModel="Print-Electronic">
<Journal>
<ISSN IssnType="Electronic">1522-1229</ISSN>
<JournalIssue CitedMedium="Internet">
<Volume>47</Volume>
<Issue>3</Issue>
<PubDate>
<Year>2023</Year>
<Month>Sep</Month>
<Day>01</Day>
</PubDate>
</JournalIssue>
<Title>Advances in physiology education</Title>
<ISOAbbreviation>Adv Physiol Educ</ISOAbbreviation>
</Journal>
<ArticleTitle>Providing the choice of in-person or videoconference attendance in a clinical physiology course may harm learning outcomes for the entire cohort.</ArticleTitle>
<Pagination>
<MedlinePgn>548-556</MedlinePgn>
</Pagination>
<ELocationID EIdType="doi" ValidYN="Y">10.1152/advan.00160.2022</ELocationID>
<Abstract>
<AbstractText>Clinical Physiology 1 and 2 are flipped classes in which students watch prerecorded videos before class. During the 3-h class, students take practice assessments, work in groups on critical thinking exercises, work through case studies, and engage in drawing exercises. Due to the COVID pandemic, these courses were transitioned from in-person classes to online classes. Despite the university's return-to-class policy, some students were reluctant to return to in-person classes; therefore during the 2021-2022 academic year, Clinical Physiology 1 and 2 were offered as flipped, hybrid courses. In a hybrid format, students either attended the synchronous class in person or online. Here we evaluate the learning outcomes and the perceptions of the learning experience for students who attended Clinical Physiology 1 and 2 either online (2020-2021) or in a hybrid format (2021-2022). In addition to exam scores, in-class surveys and end of course evaluations were compiled to describe the student experience in the flipped hybrid setting. Retrospective linear mixed-model regression analysis of exam scores revealed that a hybrid modality (2021-2022) was associated with lower exam scores when controlling for sex, graduate/undergraduate status, delivery method, and the order in which the courses were taken (<i>F</i> test: <i>F</i> = 8.65, df1 = 2, df2 = 179.28, <i>P</i> = 0.0003). In addition, being a Black Indigenous Person of Color (BIPOC) student is associated with a lower exam score, controlling for the same previous factors (<i>F</i> test: <i>F</i> = 4.23, df1 = 1, df2 = 130.28, <i>P</i> = 0.04), albeit with lower confidence; the BIPOC representation in this sample is small (BIPOC: <i>n</i> = 144; total: <i>n</i> = 504). There is no significant interaction between the hybrid modality and race, meaning that BIPOC and White students are both negatively affected in a hybrid flipped course. Instructors should consider carefully about offering hybrid courses and build in extra student support.<b>NEW &amp; NOTEWORTHY</b> The transition from online to in-person teaching has been as challenging as the original transition to remote teaching with the onset of the pandemic. Since not all students were ready to return to the classroom, students could choose to take this course in person or online. This arrangement provided flexibility and opportunities for innovative class activities for students but introduced tradeoffs in lower test scores from the hybrid modality than fully online or fully in-person modalities.</AbstractText>
</Abstract>
<AuthorList CompleteYN="Y">
<Author ValidYN="Y">
<LastName>Anderson</LastName>
<ForeName>Lisa Carney</ForeName>
<Initials>LC</Initials>
<Identifier Source="ORCID">0000-0003-2261-1921</Identifier>
<AffiliationInfo>
<Affiliation>Department of Integrative Biology and Physiology, University of Minnesota, Minneapolis, Minnesota, United States.</Affiliation>
<Identifier Source="ROR">https://ror.org/017zqws13</Identifier>
</AffiliationInfo>
</Author>
<Author ValidYN="Y">
<LastName>Jacobson</LastName>
<ForeName>Tate</ForeName>
<Initials>T</Initials>
<AffiliationInfo>
<Affiliation>Department of Statistics, University of Minnesota, Minneapolis, Minnesota, United States.</Affiliation>
</AffiliationInfo>
</Author>
</AuthorList>
<Language>eng</Language>
<PublicationTypeList>
<PublicationType UI="D016428">Journal Article</PublicationType>
</PublicationTypeList>
<ArticleDate DateType="Electronic">
<Year>2023</Year>
<Month>06</Month>
<Day>15</Day>
</ArticleDate>
</Article>
<MedlineJournalInfo>
<Country>United States</Country>
<MedlineTA>Adv Physiol Educ</MedlineTA>
<NlmUniqueID>100913944</NlmUniqueID>
<ISSNLinking>1043-4046</ISSNLinking>
</MedlineJournalInfo>
<CitationSubset>IM</CitationSubset>
<MeshHeadingList>
<MeshHeading>
<DescriptorName UI="D010827" MajorTopicYN="Y">Physiology</DescriptorName>
<QualifierName UI="Q000193" MajorTopicYN="N">education</QualifierName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D012189" MajorTopicYN="N">Retrospective Studies</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D007858" MajorTopicYN="N">Learning</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D058873" MajorTopicYN="N">Pandemics</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D000086382" MajorTopicYN="N">COVID-19</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D012044" MajorTopicYN="N">Regression Analysis</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D013334" MajorTopicYN="N">Students</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D006801" MajorTopicYN="N">Humans</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D008297" MajorTopicYN="N">Male</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D005260" MajorTopicYN="N">Female</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D044465" MajorTopicYN="N">White People</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D044383" MajorTopicYN="N">Black People</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D020375" MajorTopicYN="N">Education, Distance</DescriptorName>
</MeshHeading>
<MeshHeading>
<DescriptorName UI="D003479" MajorTopicYN="N">Curriculum</DescriptorName>
</MeshHeading>
</MeshHeadingList>
<KeywordList Owner="NOTNLM">
<Keyword MajorTopicYN="N">flipped teaching</Keyword>
<Keyword MajorTopicYN="N">hybrid teaching</Keyword>
<Keyword MajorTopicYN="N">inequity</Keyword>
<Keyword MajorTopicYN="N">learning outcomes</Keyword>
<Keyword MajorTopicYN="N">responsive teaching</Keyword>
</KeywordList>
</MedlineCitation>
<PubmedData>
<History>
<PubMedPubDate PubStatus="medline">
<Year>2023</Year>
<Month>7</Month>
<Day>21</Day>
<Hour>6</Hour>
<Minute>44</Minute>
</PubMedPubDate>
<PubMedPubDate PubStatus="pubmed">
<Year>2023</Year>
<Month>6</Month>
<Day>15</Day>
<Hour>19</Hour>
<Minute>14</Minute>
</PubMedPubDate>
<PubMedPubDate PubStatus="entrez">
<Year>2023</Year>
<Month>6</Month>
<Day>15</Day>
<Hour>12</Hour>
<Minute>53</Minute>
</PubMedPubDate>
</History>
<PublicationStatus>ppublish</PublicationStatus>
<ArticleIdList>
<ArticleId IdType="pubmed">37318999</ArticleId>
<ArticleId IdType="doi">10.1152/advan.00160.2022</ArticleId>
</ArticleIdList>
</PubmedData>
</PubmedArticle>

View File

@ -3,12 +3,15 @@ package eu.dnetlib.dhp.collection.crossref
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.aggregation.AbstractVocabularyTest
import eu.dnetlib.dhp.collection.crossref.Crossref2Oaf.TransformationType
import eu.dnetlib.dhp.schema.oaf.Publication

import org.apache.commons.io.IOUtils
import org.junit.jupiter.api.{Assertions, BeforeEach, Test}
import org.junit.jupiter.api.extension.ExtendWith
import org.mockito.junit.jupiter.MockitoExtension
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.JavaConverters.asScalaBufferConverter

@ExtendWith(Array(classOf[MockitoExtension]))
class CrossrefMappingTest extends AbstractVocabularyTest {
@ -25,8 +28,32 @@ class CrossrefMappingTest extends AbstractVocabularyTest {
    val input =
      IOUtils.toString(getClass.getResourceAsStream("/eu/dnetlib/dhp/collection/crossref/issn_pub.json"), "utf-8")

    Crossref2Oaf
      .convert(input, vocabularies, TransformationType.All)
      .foreach(record => {
        Assertions.assertNotNull(record)
      })
  }
@Test
def mappingAffiliation(): Unit = {
val input =
IOUtils.toString(
getClass.getResourceAsStream("/eu/dnetlib/dhp/collection/crossref/affiliationTest.json"),
"utf-8"
)
val data = Crossref2Oaf.convert(input, vocabularies, TransformationType.OnlyResult)
data.foreach(record => {
Assertions.assertNotNull(record)
Assertions.assertTrue(record.isInstanceOf[Publication])
val publication = record.asInstanceOf[Publication]
publication.getAuthor.asScala.foreach(author => {
Assertions.assertNotNull(author.getRawAffiliationString)
Assertions.assertTrue(author.getRawAffiliationString.size() > 0)
})
})
println(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(data.head))
}
}

View File

@ -5,7 +5,10 @@ import eu.dnetlib.dhp.aggregation.AbstractVocabularyTest
import eu.dnetlib.dhp.schema.oaf.utils.PidType
import eu.dnetlib.dhp.schema.oaf.{Oaf, Publication, Relation, Result}
import eu.dnetlib.dhp.sx.bio.BioDBToOAF.ScholixResolved
import eu.dnetlib.dhp.sx.bio.ebi.SparkCreatePubmedDump
import eu.dnetlib.dhp.sx.bio.pubmed._
import org.apache.commons.io.IOUtils
import org.apache.spark.sql.SparkSession
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JField, JObject, JString}
import org.json4s.jackson.JsonMethods.parse
@ -13,14 +16,16 @@ import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.extension.ExtendWith
import org.junit.jupiter.api.{BeforeEach, Test}
import org.mockito.junit.jupiter.MockitoExtension
import org.slf4j.LoggerFactory

import java.io.{BufferedReader, InputStream, InputStreamReader}
import java.util.regex.Pattern
import java.util.zip.GZIPInputStream
import javax.xml.stream.XMLInputFactory
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.io.Source

@ExtendWith(Array(classOf[MockitoExtension]))
class BioScholixTest extends AbstractVocabularyTest {
@ -48,6 +53,76 @@ class BioScholixTest extends AbstractVocabularyTest {
    }
  }
@Test
def testPid(): Unit = {
val pids = List(
"0000000163025705",
"000000018494732X",
"0000000308873343",
"0000000335964515",
"0000000333457333",
"0000000335964515",
"0000000302921949",
"http://orcid.org/0000-0001-8567-3543",
"http://orcid.org/0000-0001-7868-8528",
"0000-0001-9189-1440",
"0000-0003-3727-9247",
"0000-0001-7246-1058",
"000000033962389X",
"0000000330371470",
"0000000171236123",
"0000000272569752",
"0000000293231371",
"http://orcid.org/0000-0003-3345-7333",
"0000000340145688",
"http://orcid.org/0000-0003-4894-1689"
)
pids.foreach(pid => {
val pidCleaned = new PMIdentifier(pid, "ORCID").getPid
// assert pid is in the format of ORCID
println(pidCleaned)
assertTrue(pidCleaned.matches("[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{3}[0-9X]"))
})
}
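
The testPid case above only pins down the expected output format; PMIdentifier's cleaning logic is not part of this hunk. Below is a standalone Java sketch of a normalisation that would satisfy the asserted ORCID pattern for the sample inputs (an assumption, not the project's implementation):

import java.util.Arrays;
import java.util.List;

public class OrcidNormalizationSketch {

	// Assumption: keep the 16 significant characters and regroup them as xxxx-xxxx-xxxx-xxx[0-9X]
	public static String normalize(String raw) {
		String digits = raw
			.replaceAll("(?i)^https?://orcid\\.org/", "") // drop URL prefixes
			.replaceAll("[^0-9Xx]", "") // drop dashes, spaces and stray characters
			.toUpperCase();
		if (digits.length() != 16)
			return raw; // leave unexpected values untouched
		return String
			.join(
				"-",
				digits.substring(0, 4), digits.substring(4, 8),
				digits.substring(8, 12), digits.substring(12, 16));
	}

	public static void main(String[] args) {
		List<String> samples = Arrays
			.asList("0000000163025705", "http://orcid.org/0000-0001-8567-3543", "000000033962389X");
		samples.forEach(s -> System.out.println(normalize(s)));
	}
}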
def extractAffiliation(s: String): List[String] = {
val regex: String = "<Affiliation>(.*)<\\/Affiliation>"
val pattern = Pattern.compile(regex, Pattern.MULTILINE)
val matcher = pattern.matcher(s)
val l: mutable.ListBuffer[String] = mutable.ListBuffer()
while (matcher.find()) {
l += matcher.group(1)
}
l.toList
}
case class AuthorPID(pidType: String, pid: String) {}
def extractAuthorIdentifier(s: String): List[AuthorPID] = {
val regex: String = "<Identifier Source=\"(.*)\">(.*)<\\/Identifier>"
val pattern = Pattern.compile(regex, Pattern.MULTILINE)
val matcher = pattern.matcher(s)
val l: mutable.ListBuffer[AuthorPID] = mutable.ListBuffer()
while (matcher.find()) {
l += AuthorPID(pidType = matcher.group(1), pid = matcher.group(2))
}
l.toList
}
@Test
def testParsingPubmed2(): Unit = {
val mapper = new ObjectMapper()
val xml = IOUtils.toString(getClass.getResourceAsStream("/eu/dnetlib/dhp/sx/graph/bio/single_pubmed.xml"))
val parser = new PMParser2()
val article = parser.parse(xml)
// println(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(article))
println(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(PubMedToOaf.convert(article, vocabularies)))
}
  @Test
  def testEBIData() = {
val inputFactory = XMLInputFactory.newInstance val inputFactory = XMLInputFactory.newInstance
@ -124,6 +199,14 @@ class BioScholixTest extends AbstractVocabularyTest {
    }
  }
def testPubmedSplitting(): Unit = {
val spark: SparkSession = SparkSession.builder().appName("test").master("local").getOrCreate()
new SparkCreatePubmedDump("", Array.empty, LoggerFactory.getLogger(getClass))
.createPubmedDump(spark, "/home/sandro/Downloads/pubmed", "/home/sandro/Downloads/pubmed_mapped", vocabularies)
}
  @Test
  def testPubmedOriginalID(): Unit = {
    val article: PMArticle = new PMArticle

View File

@ -135,7 +135,7 @@ public class DedupRecordFactory {
				return Collections.emptyIterator();
			}

			OafEntity mergedEntity = MergeUtils.mergeGroup(cliques.iterator());
			// dedup records do not have date of transformation attribute
			mergedEntity.setDateoftransformation(null);
			mergedEntity

View File

@ -69,6 +69,7 @@ public class SparkPropagateRelation extends AbstractSparkAction {
		Dataset<Relation> mergeRels = spark
			.read()
			.schema(REL_BEAN_ENC.schema())
			.load(DedupUtility.createMergeRelPath(workingPath, "*", "*"))
			.as(REL_BEAN_ENC);

View File

@ -46,8 +46,8 @@ class DatasetMergerTest implements Serializable {
	}

	@Test
	void datasetMergerTest() {
		Dataset pub_merged = MergeUtils.mergeGroup(datasets.stream().map(Tuple2::_2).iterator());

		// verify id
		assertEquals(dedupId, pub_merged.getId());

View File

@ -21,17 +21,19 @@ class DecisionTreeTest {
	void testJPath() throws IOException {

		DedupConfig conf = DedupConfig
			.load(
				IOUtils
					.toString(
						getClass().getResourceAsStream("/eu/dnetlib/dhp/oa/dedup/jpath/dedup_conf_organization.json")));

		final String org = IOUtils
			.toString(getClass().getResourceAsStream("/eu/dnetlib/dhp/oa/dedup/jpath/organization.json"));

		Row row = SparkModel.apply(conf).rowFromJson(org);

		System.out.println("row = " + row);
		Assertions.assertNotNull(row);
		Assertions.assertTrue(StringUtils.isNotBlank(row.getAs("identifier")));
	}

	@Test
@ -44,7 +46,8 @@ class DecisionTreeTest {
					.getResourceAsStream(
						"/eu/dnetlib/dhp/dedup/conf/org.curr.conf.json")));

		final String org = IOUtils
			.toString(getClass().getResourceAsStream("/eu/dnetlib/dhp/oa/dedup/jpath/organization_example1.json"));

		Row row = SparkModel.apply(conf).rowFromJson(org);

		// to check that the same parsing returns the same row

View File

@ -190,7 +190,7 @@ public class SparkDedupTest implements Serializable {
System.out.println("orp_simrel = " + orp_simrel); System.out.println("orp_simrel = " + orp_simrel);
if (CHECK_CARDINALITIES) { if (CHECK_CARDINALITIES) {
assertEquals(742, orgs_simrel); assertEquals(720, orgs_simrel);
assertEquals(566, pubs_simrel); assertEquals(566, pubs_simrel);
assertEquals(113, sw_simrel); assertEquals(113, sw_simrel);
assertEquals(148, ds_simrel); assertEquals(148, ds_simrel);
@ -251,7 +251,7 @@ public class SparkDedupTest implements Serializable {
		// entities simrels supposed to be equal to the number of previous step (no rels in whitelist)
		if (CHECK_CARDINALITIES) {
			assertEquals(720, orgs_simrel);
			assertEquals(566, pubs_simrel);
			assertEquals(148, ds_simrel);
			assertEquals(280, orp_simrel);
@ -440,25 +440,27 @@ public class SparkDedupTest implements Serializable {
			.count();

		final List<Relation> merges = pubs
			.filter("source == '50|doi_dedup___::d5021b53204e4fdeab6ff5d5bc468032'")// and relClass =
			// '"+ModelConstants.MERGES+"'")
			.collectAsList();

		assertEquals(4, merges.size());
		Set<String> dups = Sets
			.newHashSet(
				"50|doi_________::3b1d0d8e8f930826665df9d6b82fbb73",
				"50|doi_________::d5021b53204e4fdeab6ff5d5bc468032",
				"50|arXiv_______::c93aeb433eb90ed7a86e29be00791b7c",
				"50|arXiv_dedup_::c93aeb433eb90ed7a86e29be00791b7c");
		merges.forEach(r -> {
			assertEquals(ModelConstants.RESULT_RESULT, r.getRelType());
			assertEquals(ModelConstants.DEDUP, r.getSubRelType());
			assertEquals(ModelConstants.MERGES, r.getRelClass());
			assertTrue(dups.contains(r.getTarget()));
		});

		final List<Relation> mergedIn = pubs
			.filter("target == '50|doi_dedup___::d5021b53204e4fdeab6ff5d5bc468032'")
			.collectAsList();
		assertEquals(4, mergedIn.size());
		mergedIn.forEach(r -> {
			assertEquals(ModelConstants.RESULT_RESULT, r.getRelType());
			assertEquals(ModelConstants.DEDUP, r.getSubRelType());
@ -473,8 +475,8 @@ public class SparkDedupTest implements Serializable {
System.out.println("orp_mergerel = " + orp_mergerel); System.out.println("orp_mergerel = " + orp_mergerel);
if (CHECK_CARDINALITIES) { if (CHECK_CARDINALITIES) {
assertEquals(1268, orgs_mergerel); assertEquals(1280, orgs_mergerel);
assertEquals(1156, pubs.count()); assertEquals(1158, pubs.count());
assertEquals(292, sw_mergerel); assertEquals(292, sw_mergerel);
assertEquals(476, ds_mergerel); assertEquals(476, ds_mergerel);
assertEquals(742, orp_mergerel); assertEquals(742, orp_mergerel);
@ -561,7 +563,7 @@ public class SparkDedupTest implements Serializable {
System.out.println("orp_mergerel = " + orp_mergerel); System.out.println("orp_mergerel = " + orp_mergerel);
if (CHECK_CARDINALITIES) { if (CHECK_CARDINALITIES) {
assertEquals(1278, orgs_mergerel); assertEquals(1280, orgs_mergerel);
assertEquals(1156, pubs.count()); assertEquals(1156, pubs.count());
assertEquals(292, sw_mergerel); assertEquals(292, sw_mergerel);
assertEquals(476, ds_mergerel); assertEquals(476, ds_mergerel);
@ -618,7 +620,7 @@ public class SparkDedupTest implements Serializable {
System.out.println("orp_deduprecord = " + orp_deduprecord); System.out.println("orp_deduprecord = " + orp_deduprecord);
if (CHECK_CARDINALITIES) { if (CHECK_CARDINALITIES) {
assertEquals(78, orgs_deduprecord); assertEquals(87, orgs_deduprecord);
assertEquals(96, pubs.count()); assertEquals(96, pubs.count());
assertEquals(47, sw_deduprecord); assertEquals(47, sw_deduprecord);
assertEquals(97, ds_deduprecord); assertEquals(97, ds_deduprecord);
@ -761,7 +763,7 @@ public class SparkDedupTest implements Serializable {
		if (CHECK_CARDINALITIES) {
			assertEquals(930, publications);
			assertEquals(840, organizations);
			assertEquals(100, projects);
			assertEquals(100, datasource);
			assertEquals(196, softwares);

View File

@ -146,7 +146,7 @@ public class SparkOpenorgsDedupTest implements Serializable {
			.load(DedupUtility.createSimRelPath(testOutputBasePath, testActionSetId, "organization"))
			.count();

		assertEquals(91, orgs_simrel);
	}

	@Test
@ -175,7 +175,7 @@ public class SparkOpenorgsDedupTest implements Serializable {
			.load(DedupUtility.createSimRelPath(testOutputBasePath, testActionSetId, "organization"))
			.count();

		assertEquals(127, orgs_simrel);
	}

	@Test

View File

@ -324,7 +324,7 @@ public class SparkPublicationRootsTest implements Serializable {
	private void verifyRoot_case_3(Dataset<Publication> roots, Dataset<Publication> pubs) {
		Publication root = roots
			.filter("id = '50|dedup_wf_002::7143f4ff5708f3657db0b7e68ea74d55'")
			.first();
		assertNotNull(root);

View File

@ -143,7 +143,9 @@ public class SparkPublicationRootsTest2 implements Serializable {
"--graphBasePath", graphInputPath, "--graphBasePath", graphInputPath,
"--actionSetId", testActionSetId, "--actionSetId", testActionSetId,
"--isLookUpUrl", "lookupurl", "--isLookUpUrl", "lookupurl",
"--workingPath", workingPath "--workingPath", workingPath,
"--hiveMetastoreUris", "",
}), spark) }), spark)
.run(isLookUpService); .run(isLookUpService);
@ -153,7 +155,7 @@ public class SparkPublicationRootsTest2 implements Serializable {
			.as(Encoders.bean(Relation.class));

		assertEquals(
			4, merges
				.filter("relclass == 'isMergedIn'")
				.map((MapFunction<Relation, String>) Relation::getTarget, Encoders.STRING())
				.distinct()
@ -178,7 +180,7 @@ public class SparkPublicationRootsTest2 implements Serializable {
			.textFile(workingPath + "/" + testActionSetId + "/publication_deduprecord")
			.map(asEntity(Publication.class), Encoders.bean(Publication.class));

		assertEquals(4, roots.count());

		final Dataset<Publication> pubs = spark
			.read()
@ -195,7 +197,6 @@ public class SparkPublicationRootsTest2 implements Serializable {
			.collectAsList()
			.get(0);

		assertEquals(crossref_duplicate.getJournal().getName(), root.getJournal().getName());
		assertEquals(crossref_duplicate.getJournal().getIssnPrinted(), root.getJournal().getIssnPrinted());
		assertEquals(crossref_duplicate.getPublisher().getValue(), root.getPublisher().getValue());

View File

@ -168,7 +168,7 @@ public class SparkStatsTest implements Serializable {
			.load(testOutputBasePath + "/" + testActionSetId + "/otherresearchproduct_blockstats")
			.count();

		assertEquals(406, orgs_blocks);
		assertEquals(221, pubs_blocks);
		assertEquals(134, sw_blocks);
		assertEquals(196, ds_blocks);

View File

@ -19,17 +19,19 @@ class JsonPathTest {
	void testJPath() throws IOException {

		DedupConfig conf = DedupConfig
			.load(
				IOUtils
					.toString(
						getClass().getResourceAsStream("/eu/dnetlib/dhp/oa/dedup/jpath/dedup_conf_organization.json")));

		final String org = IOUtils
			.toString(getClass().getResourceAsStream("/eu/dnetlib/dhp/oa/dedup/jpath/organization.json"));

		Row row = SparkModel.apply(conf).rowFromJson(org);

		System.out.println("row = " + row);
		Assertions.assertNotNull(row);
		Assertions.assertTrue(StringUtils.isNotBlank(row.getAs("identifier")));
	}

	@Test

View File

@ -24,22 +24,19 @@
"start": { "start": {
"fields": [ "fields": [
{ {
"field": "gridid", "field": "pid",
"comparator": "exactMatch", "comparator": "jsonListMatch",
"weight": 1, "weight": 1,
"countIfUndefined": "false", "countIfUndefined": "false",
"params": {} "params": {
}, "jpath_classid": "$.qualifier.classid",
{ "jpath_value": "$.value",
"field": "rorid", "mode": "type"
"comparator": "exactMatch", }
"weight": 1,
"countIfUndefined": "false",
"params": {}
} }
], ],
"threshold": 1, "threshold": 1,
"aggregation": "OR", "aggregation": "MAX",
"positive": "MATCH", "positive": "MATCH",
"negative": "NO_MATCH", "negative": "NO_MATCH",
"undefined": "necessaryConditions", "undefined": "necessaryConditions",
@ -149,11 +146,10 @@
"model" : [ "model" : [
{ "name" : "country", "type" : "String", "path" : "$.country.classid", "infer" : "country", "inferenceFrom" : "$.legalname.value"}, { "name" : "country", "type" : "String", "path" : "$.country.classid", "infer" : "country", "inferenceFrom" : "$.legalname.value"},
{ "name" : "legalshortname", "type" : "String", "path" : "$.legalshortname.value", "infer" : "city_keyword"}, { "name" : "legalshortname", "type" : "String", "path" : "$.legalshortname.value", "infer" : "city_keyword"},
{ "name" : "original_legalname", "type" : "String", "path" : "$.legalname.value" }, { "name" : "original_legalname", "type" : "String", "path" : "$.legalname.value", "clean": "title"},
{ "name" : "legalname", "type" : "String", "path" : "$.legalname.value", "infer" : "city_keyword"}, { "name" : "legalname", "type" : "String", "path" : "$.legalname.value", "infer" : "city_keyword"},
{ "name" : "websiteurl", "type" : "URL", "path" : "$.websiteurl.value" }, { "name" : "websiteurl", "type" : "URL", "path" : "$.websiteurl.value" },
{ "name" : "gridid", "type" : "String", "path" : "$.pid[?(@.qualifier.classid =='grid')].value"}, { "name": "pid", "type": "JSON", "path": "$.pid[*]", "overrideMatch": "true"},
{ "name" : "rorid", "type" : "String", "path" : "$.pid[?(@.qualifier.classid =='ROR')].value"},
{ "name" : "originalId", "type" : "String", "path" : "$.id" } { "name" : "originalId", "type" : "String", "path" : "$.id" }
], ],
"blacklists" : {}, "blacklists" : {},

View File

@ -566,7 +566,26 @@ case object Crossref2Oaf {
queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY) queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES) queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)
case _ => logger.debug("no match for " + funder.DOI.get) case _ => logger.debug("no match for " + funder.DOI.get)
//Add for Danish funders
//Independent Research Fund Denmark (IRFD)
case "10.13039/501100004836" =>
generateSimpleRelationFromAward(funder, "irfd________", a => a)
val targetId = getProjectId("irfd________", "1e5e62235d094afd01cd56e65112fc63")
queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)
//Carlsberg Foundation (CF)
case "10.13039/501100002808" =>
generateSimpleRelationFromAward(funder, "cf__________", a => a)
val targetId = getProjectId("cf__________", "1e5e62235d094afd01cd56e65112fc63")
queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)
//Novo Nordisk Foundation (NNF)
case "10.13039/501100009708" =>
generateSimpleRelationFromAward(funder, "nnf___________", a => a)
val targetId = getProjectId("nnf_________", "1e5e62235d094afd01cd56e65112fc63")
queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)
case _ => logger.debug("no match for " + funder.DOI.get)
} }
} else { } else {
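
The namespace prefixes introduced for these funders (irfd________, cf__________, nnf_________) appear to follow the usual fixed-width, 12-character underscore-padded convention, which is why the project id lookup and the relation generation should use the same string. A small sketch of that padding, assuming Apache commons-lang3 is on the classpath:

import org.apache.commons.lang3.StringUtils;

public class FunderPrefixSketch {

    public static void main(String[] args) {
        // pad a funder short name to the fixed 12-character namespace prefix width
        System.out.println(StringUtils.rightPad("irfd", 12, '_')); // irfd________
        System.out.println(StringUtils.rightPad("cf", 12, '_'));   // cf__________
        System.out.println(StringUtils.rightPad("nnf", 12, '_'));  // nnf_________
    }
}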

View File

@ -1,6 +1,8 @@
package eu.dnetlib.dhp; package eu.dnetlib.dhp;
import static eu.dnetlib.dhp.common.enrichment.Constants.PROPAGATION_DATA_INFO_TYPE;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Optional; import java.util.Optional;
@ -21,8 +23,6 @@ import eu.dnetlib.dhp.schema.oaf.DataInfo;
import eu.dnetlib.dhp.schema.oaf.Qualifier; import eu.dnetlib.dhp.schema.oaf.Qualifier;
import eu.dnetlib.dhp.schema.oaf.Relation; import eu.dnetlib.dhp.schema.oaf.Relation;
import static eu.dnetlib.dhp.common.enrichment.Constants.PROPAGATION_DATA_INFO_TYPE;
public class PropagationConstant { public class PropagationConstant {
private PropagationConstant() { private PropagationConstant() {

View File

@ -171,7 +171,7 @@ public class Utils implements Serializable {
public static List<String> getCommunityIdList(String baseURL) throws IOException { public static List<String> getCommunityIdList(String baseURL) throws IOException {
return getValidCommunities(baseURL) return getValidCommunities(baseURL)
.stream() .stream()
.map(community -> community.getId()) .map(CommunityModel::getId)
.collect(Collectors.toList()); .collect(Collectors.toList());
} }

View File

@ -130,6 +130,7 @@ public class ResultTagger implements Serializable {
// log.info("Remove constraints for " + communityId); // log.info("Remove constraints for " + communityId);
if (conf.getRemoveConstraintsMap().keySet().contains(communityId) && if (conf.getRemoveConstraintsMap().keySet().contains(communityId) &&
conf.getRemoveConstraintsMap().get(communityId).getCriteria() != null && conf.getRemoveConstraintsMap().get(communityId).getCriteria() != null &&
!conf.getRemoveConstraintsMap().get(communityId).getCriteria().isEmpty() &&
conf conf
.getRemoveConstraintsMap() .getRemoveConstraintsMap()
.get(communityId) .get(communityId)
@ -161,29 +162,30 @@ public class ResultTagger implements Serializable {
// Tagging for datasource // Tagging for datasource
final Set<String> datasources = new HashSet<>(); final Set<String> datasources = new HashSet<>();
final Set<String> collfrom = new HashSet<>(); final Set<String> cfhb = new HashSet<>();
final Set<String> hostdby = new HashSet<>(); final Set<String> hostdby = new HashSet<>();
if (Objects.nonNull(result.getInstance())) { if (Objects.nonNull(result.getInstance())) {
for (Instance i : result.getInstance()) { for (Instance i : result.getInstance()) {
if (Objects.nonNull(i.getCollectedfrom()) && Objects.nonNull(i.getCollectedfrom().getKey())) { if (Objects.nonNull(i.getCollectedfrom()) && Objects.nonNull(i.getCollectedfrom().getKey())) {
collfrom.add(i.getCollectedfrom().getKey()); cfhb.add(i.getCollectedfrom().getKey());
} }
if (Objects.nonNull(i.getHostedby()) && Objects.nonNull(i.getHostedby().getKey())) { if (Objects.nonNull(i.getHostedby()) && Objects.nonNull(i.getHostedby().getKey())) {
cfhb.add(i.getHostedby().getKey());
hostdby.add(i.getHostedby().getKey()); hostdby.add(i.getHostedby().getKey());
} }
} }
collfrom cfhb
.forEach( .forEach(
dsId -> datasources dsId -> datasources
.addAll( .addAll(
conf.getCommunityForDatasource(dsId, param))); conf.getCommunityForDatasource(dsId, param)));
hostdby.forEach(dsId -> { hostdby.forEach(dsId -> {
datasources // datasources
.addAll( // .addAll(
conf.getCommunityForDatasource(dsId, param)); // conf.getCommunityForDatasource(dsId, param));
if (conf.isEoscDatasource(dsId)) { if (conf.isEoscDatasource(dsId)) {
datasources.add("eosc"); datasources.add("eosc");
} }
@ -226,6 +228,7 @@ public class ResultTagger implements Serializable {
.forEach(communityId -> { .forEach(communityId -> {
if (!removeCommunities.contains(communityId) && if (!removeCommunities.contains(communityId) &&
conf.getSelectionConstraintsMap().get(communityId).getCriteria() != null && conf.getSelectionConstraintsMap().get(communityId).getCriteria() != null &&
!conf.getSelectionConstraintsMap().get(communityId).getCriteria().isEmpty() &&
conf conf
.getSelectionConstraintsMap() .getSelectionConstraintsMap()
.get(communityId) .get(communityId)
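
In other words, after the cfhb change above a result can be tagged for a community through either the collectedfrom or the hostedby datasource of any of its instances (both keys end up in the cfhb set), while the EOSC tag is still driven by hostedby alone. A compact sketch of that lookup, using made-up datasource ids and an invented datasource-to-community map in place of the real community configuration:

import java.util.*;

public class DatasourceTaggingSketch {

    public static void main(String[] args) {
        // hypothetical datasource -> communities mapping (in the job this comes from the community configuration)
        Map<String, Set<String>> communityForDatasource = new HashMap<>();
        communityForDatasource.put("10|ds::collectedfrom1", new HashSet<>(Arrays.asList("beopen")));
        communityForDatasource.put("10|ds::hostedby1", new HashSet<>(Arrays.asList("dh-ch")));

        // keys gathered from the result instances: collectedfrom and hostedby both contribute
        Set<String> cfhb = new HashSet<>(Arrays.asList("10|ds::collectedfrom1", "10|ds::hostedby1"));

        Set<String> datasources = new HashSet<>();
        cfhb.forEach(dsId -> datasources.addAll(
            communityForDatasource.getOrDefault(dsId, Collections.emptySet())));

        System.out.println(datasources); // e.g. [dh-ch, beopen] (set iteration order is not guaranteed)
    }
}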

View File

@ -33,6 +33,8 @@ public class SelectionConstraints implements Serializable {
// Constraints in or // Constraints in or
public boolean verifyCriteria(final Map<String, List<String>> param) { public boolean verifyCriteria(final Map<String, List<String>> param) {
if (criteria.isEmpty())
return true;
for (Constraints selc : criteria) { for (Constraints selc : criteria) {
if (selc.verifyCriteria(param)) { if (selc.verifyCriteria(param)) {
return true; return true;
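
The added guard makes an empty criteria list mean "no constraint", so verification succeeds immediately; otherwise the criteria are still combined in OR, as before. A tiny sketch of that behaviour, with a plain Predicate standing in for the real Constraints objects:

import java.util.*;
import java.util.function.Predicate;

public class SelectionConstraintsSketch {

    // simplified stand-in for SelectionConstraints.verifyCriteria: empty list -> true, otherwise OR over the criteria
    static boolean verify(List<Predicate<Map<String, List<String>>>> criteria, Map<String, List<String>> param) {
        if (criteria.isEmpty())
            return true;
        return criteria.stream().anyMatch(c -> c.test(param));
    }

    public static void main(String[] args) {
        Map<String, List<String>> param = new HashMap<>();
        Predicate<Map<String, List<String>>> never = p -> false;
        Predicate<Map<String, List<String>>> always = p -> true;

        System.out.println(verify(Collections.emptyList(), param));      // true: no constraints configured
        System.out.println(verify(Arrays.asList(never, always), param)); // true: at least one criterion matches
        System.out.println(verify(Arrays.asList(never), param));         // false
    }
}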

View File

@ -1,10 +1,11 @@
package eu.dnetlib.dhp.orcidtoresultfromsemrel;
import eu.dnetlib.dhp.utils.OrcidAuthor; package eu.dnetlib.dhp.orcidtoresultfromsemrel;
import java.io.Serializable; import java.io.Serializable;
import java.util.List; import java.util.List;
import eu.dnetlib.dhp.utils.OrcidAuthor;
public class OrcidAuthors implements Serializable { public class OrcidAuthors implements Serializable {
List<OrcidAuthor> orcidAuthorList; List<OrcidAuthor> orcidAuthorList;

View File

@ -1,3 +1,4 @@
package eu.dnetlib.dhp.orcidtoresultfromsemrel; package eu.dnetlib.dhp.orcidtoresultfromsemrel;
import java.util.List; import java.util.List;

View File

@ -3,6 +3,7 @@ package eu.dnetlib.dhp.projecttoresult;
import static eu.dnetlib.dhp.PropagationConstant.*; import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import static eu.dnetlib.dhp.common.enrichment.Constants.PROPAGATION_DATA_INFO_TYPE;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;

View File

@ -3,6 +3,7 @@ package eu.dnetlib.dhp.resulttocommunityfromorganization;
import static eu.dnetlib.dhp.PropagationConstant.*; import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import static eu.dnetlib.dhp.common.enrichment.Constants.PROPAGATION_DATA_INFO_TYPE;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;

View File

@ -5,6 +5,7 @@ import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.PropagationConstant.PROPAGATION_RESULT_COMMUNITY_ORGANIZATION_CLASS_NAME; import static eu.dnetlib.dhp.PropagationConstant.PROPAGATION_RESULT_COMMUNITY_ORGANIZATION_CLASS_NAME;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession; import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import static eu.dnetlib.dhp.common.enrichment.Constants.PROPAGATION_DATA_INFO_TYPE;
import java.io.Serializable; import java.io.Serializable;
import java.util.ArrayList; import java.util.ArrayList;

View File

@ -1,11 +1,14 @@
package eu.dnetlib.dhp.resulttocommunityfromsemrel; package eu.dnetlib.dhp.resulttocommunityfromsemrel;
import static java.lang.String.join;
import static eu.dnetlib.dhp.PropagationConstant.*; import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession; import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections;
import java.util.List; import java.util.List;
import org.apache.commons.io.IOUtils; import org.apache.commons.io.IOUtils;
@ -19,6 +22,7 @@ import com.google.gson.Gson;
import eu.dnetlib.dhp.api.Utils; import eu.dnetlib.dhp.api.Utils;
import eu.dnetlib.dhp.application.ArgumentApplicationParser; import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.resulttocommunityfromorganization.ResultCommunityList; import eu.dnetlib.dhp.resulttocommunityfromorganization.ResultCommunityList;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.Relation; import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.schema.oaf.Result; import eu.dnetlib.dhp.schema.oaf.Result;
import eu.dnetlib.dhp.utils.ISLookupClientFactory; import eu.dnetlib.dhp.utils.ISLookupClientFactory;
@ -45,7 +49,7 @@ public class PrepareResultCommunitySetStep1 {
/** /**
* a dataset for example could be linked to more than one publication. For each publication linked to that dataset * a dataset for example could be linked to more than one publication. For each publication linked to that dataset
* the previous query will produce a row: targetId set of community context the target could possibly inherit with * the previous query will produce a row: targetId, set of community context the target could possibly inherit. With
* the following query there will be a single row for each result linked to more than one result of the result type * the following query there will be a single row for each result linked to more than one result of the result type
* currently being used * currently being used
*/ */
@ -56,6 +60,27 @@ public class PrepareResultCommunitySetStep1 {
+ "where length(co) > 0 " + "where length(co) > 0 "
+ "group by resultId"; + "group by resultId";
private static final String RESULT_CONTEXT_QUERY_TEMPLATE_IS_RELATED_TO = "select target as resultId, community_context "
+
"from resultWithContext rwc " +
"join relatedToRelations r " +
"join patents p " +
"on rwc.id = r.source and r.target = p.id";
private static final String RESULT_WITH_CONTEXT = "select id, collect_set(co.id) community_context \n" +
" from result " +
" lateral view explode (context) c as co " +
" where lower(co.id) IN %s" +
" group by id";
private static final String RESULT_PATENT = "select id " +
" from result " +
" where array_contains(instance.instancetype.classname, 'Patent')";
private static final String IS_RELATED_TO_RELATIONS = "select source, target " +
" from relation " +
" where lower(relClass) = 'isrelatedto' and datainfo.deletedbyinference = false";
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
String jsonConfiguration = IOUtils String jsonConfiguration = IOUtils
.toString( .toString(
@ -82,14 +107,25 @@ public class PrepareResultCommunitySetStep1 {
SparkConf conf = new SparkConf(); SparkConf conf = new SparkConf();
conf.set("hive.metastore.uris", parser.get("hive_metastore_uris")); conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
final List<String> allowedsemrel = Arrays.asList(parser.get("allowedsemrels").split(";")); final String allowedsemrel = "(" + join(
log.info("allowedSemRel: {}", new Gson().toJson(allowedsemrel)); ",",
Arrays
.asList(parser.get("allowedsemrels").split(";"))
.stream()
.map(value -> "'" + value.toLowerCase() + "'")
.toArray(String[]::new))
+ ")";
log.info("allowedSemRel: {}", allowedsemrel);
final String baseURL = parser.get("baseURL"); final String baseURL = parser.get("baseURL");
log.info("baseURL: {}", baseURL); log.info("baseURL: {}", baseURL);
final List<String> communityIdList = getCommunityList(baseURL); final String communityIdList = "(" + join(
log.info("communityIdList: {}", new Gson().toJson(communityIdList)); ",", getCommunityList(baseURL)
.stream()
.map(value -> "'" + value.toLowerCase() + "'")
.toArray(String[]::new))
+ ")";
final String resultType = resultClassName.substring(resultClassName.lastIndexOf(".") + 1).toLowerCase(); final String resultType = resultClassName.substring(resultClassName.lastIndexOf(".") + 1).toLowerCase();
log.info("resultType: {}", resultType); log.info("resultType: {}", resultType);
@ -118,10 +154,10 @@ public class PrepareResultCommunitySetStep1 {
SparkSession spark, SparkSession spark,
String inputPath, String inputPath,
String outputPath, String outputPath,
List<String> allowedsemrel, String allowedsemrel,
Class<R> resultClazz, Class<R> resultClazz,
String resultType, String resultType,
List<String> communityIdList) { String communityIdList) {
final String inputResultPath = inputPath + "/" + resultType; final String inputResultPath = inputPath + "/" + resultType;
log.info("Reading Graph table from: {}", inputResultPath); log.info("Reading Graph table from: {}", inputResultPath);
@ -132,7 +168,8 @@ public class PrepareResultCommunitySetStep1 {
Dataset<Relation> relation = readPath(spark, inputRelationPath, Relation.class); Dataset<Relation> relation = readPath(spark, inputRelationPath, Relation.class);
relation.createOrReplaceTempView("relation"); relation.createOrReplaceTempView("relation");
Dataset<R> result = readPath(spark, inputResultPath, resultClazz); Dataset<R> result = readPath(spark, inputResultPath, resultClazz)
.where("datainfo.deletedbyinference != true AND datainfo.invisible != true");
result.createOrReplaceTempView("result"); result.createOrReplaceTempView("result");
final String outputResultPath = outputPath + "/" + resultType; final String outputResultPath = outputPath + "/" + resultType;
@ -141,10 +178,20 @@ public class PrepareResultCommunitySetStep1 {
String resultContextQuery = String String resultContextQuery = String
.format( .format(
RESULT_CONTEXT_QUERY_TEMPLATE, RESULT_CONTEXT_QUERY_TEMPLATE,
getConstraintList(" lower(co.id) = '", communityIdList), "AND lower(co.id) IN " + communityIdList,
getConstraintList(" lower(relClass) = '", allowedsemrel)); "AND lower(relClass) IN " + allowedsemrel);
Dataset<Row> result_context = spark.sql(resultContextQuery); Dataset<Row> result_context = spark.sql(resultContextQuery);
Dataset<Row> rwc = spark.sql(String.format(RESULT_WITH_CONTEXT, communityIdList));
Dataset<Row> patents = spark.sql(RESULT_PATENT);
Dataset<Row> relatedToRelations = spark.sql(IS_RELATED_TO_RELATIONS);
rwc.createOrReplaceTempView("resultWithContext");
patents.createOrReplaceTempView("patents");
relatedToRelations.createOrReplaceTempView("relatedToRelations");
result_context = result_context.unionAll(spark.sql(RESULT_CONTEXT_QUERY_TEMPLATE_IS_RELATED_TO));
result_context.createOrReplaceTempView("result_context"); result_context.createOrReplaceTempView("result_context");
spark spark
@ -152,8 +199,9 @@ public class PrepareResultCommunitySetStep1 {
.as(Encoders.bean(ResultCommunityList.class)) .as(Encoders.bean(ResultCommunityList.class))
.write() .write()
.option("compression", "gzip") .option("compression", "gzip")
.mode(SaveMode.Overwrite) .mode(SaveMode.Append)
.json(outputResultPath); .json(outputResultPath);
} }
public static List<String> getCommunityList(final String baseURL) throws IOException { public static List<String> getCommunityList(final String baseURL) throws IOException {
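
As a worked example of the strings built above, a parameter value such as issupplementto;issupplementedby becomes the SQL fragment ('issupplementto','issupplementedby'), which is then appended to lower(relClass) IN (and analogously for the community id list). A minimal, self-contained sketch of that construction:

import static java.lang.String.join;

import java.util.Arrays;

public class InClauseSketch {

    public static void main(String[] args) {
        String allowedsemrels = "issupplementto;issupplementedby";

        String inClause = "(" + join(
            ",",
            Arrays
                .stream(allowedsemrels.split(";"))
                .map(value -> "'" + value.toLowerCase() + "'")
                .toArray(String[]::new))
            + ")";

        System.out.println(inClause); // ('issupplementto','issupplementedby')
        System.out.println("select source, target from relation where lower(relClass) IN " + inClause);
    }
}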

View File

@ -4,6 +4,7 @@ package eu.dnetlib.dhp.resulttocommunityfromsemrel;
import static eu.dnetlib.dhp.PropagationConstant.*; import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import java.util.ArrayList;
import java.util.HashSet; import java.util.HashSet;
import java.util.Set; import java.util.Set;
@ -76,22 +77,13 @@ public class PrepareResultCommunitySetStep2 {
if (b == null) { if (b == null) {
return a; return a;
} }
Set<String> community_set = new HashSet<>(); Set<String> community_set = new HashSet<>(a.getCommunityList());
a.getCommunityList().stream().forEach(aa -> community_set.add(aa)); community_set.addAll(b.getCommunityList());
b a.setCommunityList(new ArrayList<>(community_set));
.getCommunityList()
.stream()
.forEach(
aa -> {
if (!community_set.contains(aa)) {
a.getCommunityList().add(aa);
community_set.add(aa);
}
});
return a; return a;
}) })
.map(Tuple2::_2) .map(Tuple2::_2)
.map(r -> OBJECT_MAPPER.writeValueAsString(r)) .map(OBJECT_MAPPER::writeValueAsString)
.saveAsTextFile(outputPath, GzipCodec.class); .saveAsTextFile(outputPath, GzipCodec.class);
} }

View File

@ -3,6 +3,7 @@ package eu.dnetlib.dhp.resulttocommunityfromsemrel;
import static eu.dnetlib.dhp.PropagationConstant.*; import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession; import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;
import static eu.dnetlib.dhp.common.enrichment.Constants.PROPAGATION_DATA_INFO_TYPE;
import java.util.*; import java.util.*;
import java.util.stream.Collectors; import java.util.stream.Collectors;

View File

@ -7,6 +7,7 @@ import java.nio.file.Path;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import com.fasterxml.jackson.databind.DeserializationFeature;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.spark.SparkConf; import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaRDD;
@ -33,7 +34,7 @@ public class CountryPropagationJobTest {
private static final Logger log = LoggerFactory.getLogger(CountryPropagationJobTest.class); private static final Logger log = LoggerFactory.getLogger(CountryPropagationJobTest.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static SparkSession spark; private static SparkSession spark;
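
The mapper configuration added here (and in the other tests below) makes Jackson skip JSON properties that have no counterpart in the target bean instead of failing, which keeps older test fixtures usable when the schema grows. A minimal sketch of the difference, using a hypothetical bean and a made-up record:

import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

public class LenientMapperSketch {

    // hypothetical bean with fewer fields than the JSON carries
    public static class MiniResult {
        public String id;
    }

    public static void main(String[] args) throws Exception {
        // made-up record with one extra property not present on the bean
        String json = "{\"id\":\"50|doi_________::abc\",\"newField\":\"ignored\"}";

        ObjectMapper lenient = new ObjectMapper()
            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

        // succeeds: "newField" is silently dropped
        System.out.println(lenient.readValue(json, MiniResult.class).id);

        // a default ObjectMapper would instead throw UnrecognizedPropertyException on "newField"
    }
}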

View File

@ -5,6 +5,7 @@ import java.io.IOException;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import com.fasterxml.jackson.databind.DeserializationFeature;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.spark.SparkConf; import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaRDD;
@ -19,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
public class DatasourceCountryPreparationTest { public class DatasourceCountryPreparationTest {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static SparkSession spark; private static SparkSession spark;

View File

@ -25,6 +25,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.KeyValueSet; import eu.dnetlib.dhp.KeyValueSet;
import eu.dnetlib.dhp.PropagationConstant; import eu.dnetlib.dhp.PropagationConstant;
import eu.dnetlib.dhp.common.enrichment.Constants;
import eu.dnetlib.dhp.schema.common.ModelConstants; import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.Relation; import eu.dnetlib.dhp.schema.oaf.Relation;
@ -145,7 +146,7 @@ public class SparkJobTest {
.foreach( .foreach(
r -> Assertions r -> Assertions
.assertEquals( .assertEquals(
PropagationConstant.PROPAGATION_DATA_INFO_TYPE, r.getDataInfo().getInferenceprovenance())); Constants.PROPAGATION_DATA_INFO_TYPE, r.getDataInfo().getInferenceprovenance()));
result result
.foreach( .foreach(
r -> Assertions r -> Assertions
@ -428,7 +429,7 @@ public class SparkJobTest {
.foreach( .foreach(
r -> Assertions r -> Assertions
.assertEquals( .assertEquals(
PropagationConstant.PROPAGATION_DATA_INFO_TYPE, r.getDataInfo().getInferenceprovenance())); Constants.PROPAGATION_DATA_INFO_TYPE, r.getDataInfo().getInferenceprovenance()));
project project
.foreach( .foreach(
r -> Assertions r -> Assertions

View File

@ -71,23 +71,24 @@ public class OrcidPropagationJobTest {
.getResource( .getResource(
"/eu/dnetlib/dhp/orcidtoresultfromsemrel/preparedInfo/mergedOrcidAssoc") "/eu/dnetlib/dhp/orcidtoresultfromsemrel/preparedInfo/mergedOrcidAssoc")
.getPath(); .getPath();
SparkOrcidToResultFromSemRelJob SparkPropagateOrcidAuthor
.main( .main(
new String[] { new String[] {
"-isTest", Boolean.TRUE.toString(), "-graphPath",
"-isSparkSessionManaged", Boolean.FALSE.toString(), getClass()
"-sourcePath", sourcePath, .getResource(
"-hive_metastore_uris", "", "/eu/dnetlib/dhp/orcidtoresultfromsemrel/sample/noupdate")
"-saveGraph", "true", .getPath(),
"-resultTableName", Dataset.class.getCanonicalName(), "-targetPath",
"-outputPath", workingDir.toString() + "/dataset", workingDir.toString() + "/graph",
"-possibleUpdatesPath", possibleUpdatesPath "-orcidPath", "",
"-workingDir", workingDir.toString()
}); });
final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext()); final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
JavaRDD<Dataset> tmp = sc JavaRDD<Dataset> tmp = sc
.textFile(workingDir.toString() + "/dataset") .textFile(workingDir.toString() + "/graph/dataset")
.map(item -> OBJECT_MAPPER.readValue(item, Dataset.class)); .map(item -> OBJECT_MAPPER.readValue(item, Dataset.class));
// tmp.map(s -> new Gson().toJson(s)).foreach(s -> System.out.println(s)); // tmp.map(s -> new Gson().toJson(s)).foreach(s -> System.out.println(s));
@ -110,36 +111,24 @@ public class OrcidPropagationJobTest {
@Test @Test
void oneUpdateTest() throws Exception { void oneUpdateTest() throws Exception {
SparkOrcidToResultFromSemRelJob SparkPropagateOrcidAuthor
.main( .main(
new String[] { new String[] {
"-isTest", "-graphPath",
Boolean.TRUE.toString(),
"-isSparkSessionManaged",
Boolean.FALSE.toString(),
"-sourcePath",
getClass()
.getResource("/eu/dnetlib/dhp/orcidtoresultfromsemrel/sample/oneupdate")
.getPath(),
"-hive_metastore_uris",
"",
"-saveGraph",
"true",
"-resultTableName",
"eu.dnetlib.dhp.schema.oaf.Dataset",
"-outputPath",
workingDir.toString() + "/dataset",
"-possibleUpdatesPath",
getClass() getClass()
.getResource( .getResource(
"/eu/dnetlib/dhp/orcidtoresultfromsemrel/preparedInfo/mergedOrcidAssoc") "/eu/dnetlib/dhp/orcidtoresultfromsemrel/sample/oneupdate")
.getPath() .getPath(),
"-targetPath",
workingDir.toString() + "/graph",
"-orcidPath", "",
"-workingDir", workingDir.toString()
}); });
final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext()); final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
JavaRDD<Dataset> tmp = sc JavaRDD<Dataset> tmp = sc
.textFile(workingDir.toString() + "/dataset") .textFile(workingDir.toString() + "/graph/dataset")
.map(item -> OBJECT_MAPPER.readValue(item, Dataset.class)); .map(item -> OBJECT_MAPPER.readValue(item, Dataset.class));
// tmp.map(s -> new Gson().toJson(s)).foreach(s -> System.out.println(s)); // tmp.map(s -> new Gson().toJson(s)).foreach(s -> System.out.println(s));
@ -177,31 +166,18 @@ public class OrcidPropagationJobTest {
@Test @Test
void twoUpdatesTest() throws Exception { void twoUpdatesTest() throws Exception {
SparkOrcidToResultFromSemRelJob SparkPropagateOrcidAuthor
.main( .main(
new String[] { new String[] {
"-isTest", "-graphPath",
Boolean.TRUE.toString(),
"-isSparkSessionManaged",
Boolean.FALSE.toString(),
"-sourcePath",
getClass() getClass()
.getResource( .getResource(
"/eu/dnetlib/dhp/orcidtoresultfromsemrel/sample/twoupdates") "/eu/dnetlib/dhp/orcidtoresultfromsemrel/sample/twoupdates")
.getPath(), .getPath(),
"-hive_metastore_uris", "-targetPath",
"",
"-saveGraph",
"true",
"-resultTableName",
"eu.dnetlib.dhp.schema.oaf.Dataset",
"-outputPath",
workingDir.toString() + "/dataset", workingDir.toString() + "/dataset",
"-possibleUpdatesPath", "-orcidPath", "",
getClass() "-workingDir", workingDir.toString()
.getResource(
"/eu/dnetlib/dhp/orcidtoresultfromsemrel/preparedInfo/mergedOrcidAssoc")
.getPath()
}); });
final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext()); final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());

View File

@ -7,6 +7,7 @@ import java.io.IOException;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import com.fasterxml.jackson.databind.DeserializationFeature;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.spark.SparkConf; import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaRDD;
@ -30,7 +31,7 @@ public class ResultToCommunityJobTest {
private static final Logger log = LoggerFactory.getLogger(ResultToCommunityJobTest.class); private static final Logger log = LoggerFactory.getLogger(ResultToCommunityJobTest.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static SparkSession spark; private static SparkSession spark;

View File

@ -6,8 +6,11 @@ import static org.apache.spark.sql.functions.desc;
import java.io.IOException; import java.io.IOException;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.stream.Collectors;
import com.fasterxml.jackson.databind.DeserializationFeature;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.spark.SparkConf; import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaRDD;
@ -24,13 +27,15 @@ import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.resulttocommunityfromorganization.ResultCommunityList;
import eu.dnetlib.dhp.schema.oaf.Dataset; import eu.dnetlib.dhp.schema.oaf.Dataset;
import scala.collection.Seq;
public class ResultToCommunityJobTest { public class ResultToCommunityJobTest {
private static final Logger log = LoggerFactory.getLogger(ResultToCommunityJobTest.class); private static final Logger log = LoggerFactory.getLogger(ResultToCommunityJobTest.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static SparkSession spark; private static SparkSession spark;
@ -271,4 +276,59 @@ public class ResultToCommunityJobTest {
.get(0) .get(0)
.getString(0)); .getString(0));
} }
@Test
public void prepareStep1Test() throws Exception {
/*
* final String allowedsemrel = join(",", Arrays.stream(parser.get("allowedsemrels").split(";")) .map(value ->
* "'" + value.toLowerCase() + "'") .toArray(String[]::new)); log.info("allowedSemRel: {}", new
* Gson().toJson(allowedsemrel)); final String baseURL = parser.get("baseURL"); log.info("baseURL: {}",
* baseURL);
*/
PrepareResultCommunitySetStep1
.main(
new String[] {
"-isSparkSessionManaged", Boolean.FALSE.toString(),
"-sourcePath", getClass()
.getResource("/eu/dnetlib/dhp/resulttocommunityfromsemrel/graph")
.getPath(),
"-hive_metastore_uris", "",
"-resultTableName", "eu.dnetlib.dhp.schema.oaf.Publication",
"-outputPath", workingDir.toString() + "/preparedInfo",
"-allowedsemrels", "issupplementto;issupplementedby",
"-baseURL", "https://dev-openaire.d4science.org/openaire/community/"
});
org.apache.spark.sql.Dataset<ResultCommunityList> resultCommunityList = spark
.read()
.schema(Encoders.bean(ResultCommunityList.class).schema())
.json(workingDir.toString() + "/preparedInfo/publication")
.as(Encoders.bean(ResultCommunityList.class));
Assertions.assertEquals(2, resultCommunityList.count());
Assertions
.assertEquals(
1,
resultCommunityList.filter("resultId = '50|dedup_wf_001::06e51d2bf295531b2d2e7a1b55500783'").count());
Assertions
.assertEquals(
1,
resultCommunityList.filter("resultId = '50|pending_org_::82f63b2d21ae88596b9d8991780e9888'").count());
ArrayList<String> communities = resultCommunityList
.filter("resultId = '50|dedup_wf_001::06e51d2bf295531b2d2e7a1b55500783'")
.first()
.getCommunityList();
Assertions.assertEquals(2, communities.size());
Assertions.assertTrue(communities.stream().anyMatch(cid -> "beopen".equals(cid)));
Assertions.assertTrue(communities.stream().anyMatch(cid -> "dh-ch".equals(cid)));
communities = resultCommunityList
.filter("resultId = '50|pending_org_::82f63b2d21ae88596b9d8991780e9888'")
.first()
.getCommunityList();
Assertions.assertEquals(1, communities.size());
Assertions.assertEquals("dh-ch", communities.get(0));
}
} }

View File

@ -0,0 +1 @@
{"subRelType": "supplement", "relClass": "isSupplementedBy", "dataInfo": {"provenanceaction": {"classid": "iis", "classname": "Inferred by OpenAIRE", "schemeid": "dnet:provenanceActions", "schemename": "dnet:provenanceActions"}, "deletedbyinference": false, "inferred": true, "inferenceprovenance": "iis::document_affiliations", "invisible": false, "trust": "0.7731"}, "target": "50|dedup_wf_001::95b033c0c3961f6a1cdcd41a99a9632e", "lastupdatetimestamp": 1694431186898, "relType": "resultOrganization", "source": "50|dedup_wf_001::36bcfaa1494c849547a346da688ade24", "collectedfrom": [], "validated": false, "properties": []}

View File

@ -0,0 +1,24 @@
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"issupplementedby","relType":"resultOrganization","source":"50|355e65625b88::e7d48a470b13bda61f7ebe3513e20cb6","subRelType":"affiliation","target":"50|pending_org_::82f63b2d21ae88596b9d8991780e9888","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"issupplementedby","relType":"resultOrganization","source":"50|355e65625b88::e7d48a470b13bda61f7ebe3513e20cb6","subRelType":"affiliation","target":"50|dedup_wf_001::06e51d2bf295531b2d2e7a1b55500783","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"IsProvidedBy","relType":"resultOrganization","source":"10|opendoar____::f0dd4a99fba6075a9494772b58f95280","subRelType":"affiliation","target":"20|openorgs____::322ff2a6524820640bc5d1311871585e","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"IsProvidedBy","relType":"resultOrganization","source":"10|eurocrisdris::9ae43d14471c4b33661fedda6f06b539","subRelType":"affiliation","target":"20|openorgs____::58e60f1715d219aa6757ba0b0f2ccbce","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"IsProvidedBy","relType":"resultOrganization","target":"20|openorgs____::64badd35233ba2cd4946368ef2f4cf57","subRelType":"affiliation","source":"10|issn___print::a7a2010e75d849442790955162ef4e42","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"IsProvidedBy","relType":"resultOrganization","source":"10|issn___print::a7a2010e75d849442790955162ef4e43","subRelType":"affiliation","target":"20|openorgs____::64badd35233ba2cd4946368ef2f4cf57","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"IsProvidedBy","relType":"resultOrganization","source":"10|issn___print::a7a2010e75d849442790955162ef4e44","subRelType":"affiliation","target":"20|openorgs____::548cbb0c5a93722f3a9aa62aa17a1ba1","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"IsProvidedBy","relType":"resultOrganization","source":"10|issn___print::a7a2010e75d849442790955162ef4e45","subRelType":"affiliation","target":"20|pending_org_::c522a7c935f9fd9578122e60eeec282c","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"isrelatedto","relType":"resultOrganization","source":"50|openorgs____::64badd35233ba2cd4946368ef2f4cf57","subRelType":"affiliation","target":"50|dedup_wf_001::06e51d2bf295531b2d2e7a1b55500783","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"hasAuthorInstitution","relType":"resultOrganization","source":"50|dedup_wf_001::06e51d2bf295531b2d2e7a1b55500783","subRelType":"affiliation","target":"20|openorgs____::64badd35233ba2cd4946368ef2f4cf57","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"isrelatedto","relType":"resultOrganization","source":"50|355e65625b88::74009c567c81b4aa55c813db658734df","subRelType":"affiliation","target":"50|dedup_wf_001::08d6f2001319c86d0e69b0f83ad75df2","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"hasAuthorInstitution","relType":"resultOrganization","source":"50|dedup_wf_001::08d6f2001319c86d0e69b0f83ad75df2","subRelType":"affiliation","target":"20|openorgs____::91a81877815afb4ebf25c1a3f3b03c5d","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"isAuthorInstitutionOf","relType":"resultOrganization","source":"20|openorgs____::548cbb0c5a93722f3a9aa62aa17a1ba1","subRelType":"affiliation","target":"50|dedup_wf_001::0a1cdf269375d32ce341fdeb0e92dfa8","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"hasAuthorInstitution","relType":"resultOrganization","source":"50|dedup_wf_001::0a1cdf269375d32ce341fdeb0e92dfa8","subRelType":"affiliation","target":"20|openorgs____::548cbb0c5a93722f3a9aa62aa17a1ba1","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"isAuthorInstitutionOf","relType":"resultOrganization","source":"20|pending_org_::a50fdd7f7e77b74ea2b16823151c391a","subRelType":"affiliation","target":"50|dedup_wf_001::0ab92bed024ee6883c7a1244722e5eec","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"hasAuthorInstitution","relType":"resultOrganization","source":"50|dedup_wf_001::0ab92bed024ee6883c7a1244722e5eec","subRelType":"affiliation","target":"20|pending_org_::a50fdd7f7e77b74ea2b16823151c391a","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"isAuthorInstitutionOf","relType":"resultOrganization","source":"20|openorgs____::64badd35233ba2cd4946368ef2f4cf57","subRelType":"affiliation","target":"50|dedup_wf_001::0ca26c736ad4d15b3d5ee90a4d7853e1","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"hasAuthorInstitution","relType":"resultOrganization","source":"50|dedup_wf_001::0ca26c736ad4d15b3d5ee90a4d7853e1","subRelType":"affiliation","target":"20|openorgs____::64badd35233ba2cd4946368ef2f4cf57","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"isAuthorInstitutionOf","relType":"resultOrganization","source":"20|pending_org_::a50fdd7f7e77b74ea2b16823151c391a","subRelType":"affiliation","target":"50|dedup_wf_001::0ef8dfab3927cb4d69df0d3113f05a42","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"hasAuthorInstitution","relType":"resultOrganization","source":"50|dedup_wf_001::0ef8dfab3927cb4d69df0d3113f05a42","subRelType":"affiliation","target":"20|pending_org_::a50fdd7f7e77b74ea2b16823151c391a","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"isAuthorInstitutionOf","relType":"resultOrganization","source":"20|openorgs____::548cbb0c5a93722f3a9aa62aa17a1ba1","subRelType":"affiliation","target":"50|dedup_wf_001::0f488ad00253126c14a21abe6b2d406c","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"hasAuthorInstitution","relType":"resultOrganization","source":"50|dedup_wf_001::0f488ad00253126c14a21abe6b2d406c","subRelType":"affiliation","target":"20|openorgs____::548cbb0c5a93722f3a9aa62aa17a1ba1","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"isAuthorInstitutionOf","relType":"resultOrganization","source":"20|pending_org_::c522a7c935f9fd9578122e60eeec282c","subRelType":"affiliation","target":"50|dedup_wf_001::12206bf78aabd7d52132477182d19147","validated":false}
{"dataInfo":{"deletedbyinference":false,"inferenceprovenance":"propagation","inferred":true,"invisible":false,"provenanceaction":{"classid":"result:organization:instrepo","classname":"Propagation of affiliation to result collected from datasources of type institutional repository","schemeid":"dnet:provenanceActions","schemename":"dnet:provenanceActions"},"trust":"0.85"},"properties":[],"relClass":"hasAuthorInstitution","relType":"resultOrganization","source":"50|dedup_wf_001::12206bf78aabd7d52132477182d19147","subRelType":"affiliation","target":"20|pending_org_::c522a7c935f9fd9578122e60eeec282c","validated":false}

View File

@ -153,34 +153,40 @@ public abstract class AbstractMdRecordToOafMapper {
final DataInfo entityInfo = prepareDataInfo(doc, this.invisible); final DataInfo entityInfo = prepareDataInfo(doc, this.invisible);
final long lastUpdateTimestamp = new Date().getTime(); final long lastUpdateTimestamp = new Date().getTime();
final List<Instance> instances = prepareInstances(doc, entityInfo, collectedFrom, hostedBy); final Instance instance = prepareInstances(doc, entityInfo, collectedFrom, hostedBy);
final String type = getResultType(doc, instances); if (!Optional
.ofNullable(instance.getInstancetype())
.map(Qualifier::getClassid)
.filter(StringUtils::isNotBlank)
.isPresent()) {
return Lists.newArrayList();
}
return createOafs(doc, type, instances, collectedFrom, entityInfo, lastUpdateTimestamp); final String type = getResultType(instance);
return createOafs(doc, type, instance, collectedFrom, entityInfo, lastUpdateTimestamp);
} catch (final DocumentException e) { } catch (final DocumentException e) {
log.error("Error with record:\n" + xml); log.error("Error with record:\n" + xml);
return Lists.newArrayList(); return Lists.newArrayList();
} }
} }
protected String getResultType(final Document doc, final List<Instance> instances) { protected String getResultType(final Instance instance) {
final String type = doc.valueOf("//dr:CobjCategory/@type"); if (this.vocs.vocabularyExists(ModelConstants.DNET_RESULT_TYPOLOGIES)) {
if (StringUtils.isBlank(type) && this.vocs.vocabularyExists(ModelConstants.DNET_RESULT_TYPOLOGIES)) {
final String instanceType = instances
.stream()
.map(i -> i.getInstancetype().getClassid())
.findFirst()
.filter(s -> !UNKNOWN.equalsIgnoreCase(s))
.orElse("0000"); // Unknown
return Optional return Optional
.ofNullable(this.vocs.getSynonymAsQualifier(ModelConstants.DNET_RESULT_TYPOLOGIES, instanceType)) .ofNullable(instance.getInstancetype())
.map(Qualifier::getClassid) .map(Qualifier::getClassid)
.map(
instanceType -> Optional
.ofNullable(
this.vocs.getSynonymAsQualifier(ModelConstants.DNET_RESULT_TYPOLOGIES, instanceType))
.map(Qualifier::getClassid)
.orElse("0000"))
.orElse("0000"); .orElse("0000");
} else {
throw new IllegalStateException("Missing vocabulary: " + ModelConstants.DNET_RESULT_TYPOLOGIES);
} }
return type;
} }
private KeyValue getProvenanceDatasource(final Document doc, final String xpathId, final String xpathName) { private KeyValue getProvenanceDatasource(final Document doc, final String xpathId, final String xpathName) {
@ -197,12 +203,12 @@ public abstract class AbstractMdRecordToOafMapper {
protected List<Oaf> createOafs( protected List<Oaf> createOafs(
final Document doc, final Document doc,
final String type, final String type,
final List<Instance> instances, final Instance instance,
final KeyValue collectedFrom, final KeyValue collectedFrom,
final DataInfo info, final DataInfo info,
final long lastUpdateTimestamp) { final long lastUpdateTimestamp) {
final OafEntity entity = createEntity(doc, type, instances, collectedFrom, info, lastUpdateTimestamp); final OafEntity entity = createEntity(doc, type, instance, collectedFrom, info, lastUpdateTimestamp);
final Set<String> originalId = Sets.newHashSet(entity.getOriginalId()); final Set<String> originalId = Sets.newHashSet(entity.getOriginalId());
originalId.add(entity.getId()); originalId.add(entity.getId());
@ -235,19 +241,19 @@ public abstract class AbstractMdRecordToOafMapper {
private OafEntity createEntity(final Document doc, private OafEntity createEntity(final Document doc,
final String type, final String type,
final List<Instance> instances, final Instance instance,
final KeyValue collectedFrom, final KeyValue collectedFrom,
final DataInfo info, final DataInfo info,
final long lastUpdateTimestamp) { final long lastUpdateTimestamp) {
switch (type.toLowerCase()) { switch (type.toLowerCase()) {
case "publication": case "publication":
final Publication p = new Publication(); final Publication p = new Publication();
populateResultFields(p, doc, instances, collectedFrom, info, lastUpdateTimestamp); populateResultFields(p, doc, instance, collectedFrom, info, lastUpdateTimestamp);
p.setJournal(prepareJournal(doc, info)); p.setJournal(prepareJournal(doc, info));
return p; return p;
case "dataset": case "dataset":
final Dataset d = new Dataset(); final Dataset d = new Dataset();
populateResultFields(d, doc, instances, collectedFrom, info, lastUpdateTimestamp); populateResultFields(d, doc, instance, collectedFrom, info, lastUpdateTimestamp);
d.setStoragedate(prepareDatasetStorageDate(doc, info)); d.setStoragedate(prepareDatasetStorageDate(doc, info));
d.setDevice(prepareDatasetDevice(doc, info)); d.setDevice(prepareDatasetDevice(doc, info));
d.setSize(prepareDatasetSize(doc, info)); d.setSize(prepareDatasetSize(doc, info));
@ -258,7 +264,7 @@ public abstract class AbstractMdRecordToOafMapper {
return d; return d;
case "software": case "software":
final Software s = new Software(); final Software s = new Software();
populateResultFields(s, doc, instances, collectedFrom, info, lastUpdateTimestamp); populateResultFields(s, doc, instance, collectedFrom, info, lastUpdateTimestamp);
s.setDocumentationUrl(prepareSoftwareDocumentationUrls(doc, info)); s.setDocumentationUrl(prepareSoftwareDocumentationUrls(doc, info));
s.setLicense(prepareSoftwareLicenses(doc, info)); s.setLicense(prepareSoftwareLicenses(doc, info));
s.setCodeRepositoryUrl(prepareSoftwareCodeRepositoryUrl(doc, info)); s.setCodeRepositoryUrl(prepareSoftwareCodeRepositoryUrl(doc, info));
@ -268,7 +274,7 @@ public abstract class AbstractMdRecordToOafMapper {
case "otherresearchproducts": case "otherresearchproducts":
default: default:
final OtherResearchProduct o = new OtherResearchProduct(); final OtherResearchProduct o = new OtherResearchProduct();
populateResultFields(o, doc, instances, collectedFrom, info, lastUpdateTimestamp); populateResultFields(o, doc, instance, collectedFrom, info, lastUpdateTimestamp);
o.setContactperson(prepareOtherResearchProductContactPersons(doc, info)); o.setContactperson(prepareOtherResearchProductContactPersons(doc, info));
o.setContactgroup(prepareOtherResearchProductContactGroups(doc, info)); o.setContactgroup(prepareOtherResearchProductContactGroups(doc, info));
o.setTool(prepareOtherResearchProductTools(doc, info)); o.setTool(prepareOtherResearchProductTools(doc, info));
@ -415,7 +421,7 @@ public abstract class AbstractMdRecordToOafMapper {
private void populateResultFields( private void populateResultFields(
final Result r, final Result r,
final Document doc, final Document doc,
final List<Instance> instances, final Instance instance,
final KeyValue collectedFrom, final KeyValue collectedFrom,
final DataInfo info, final DataInfo info,
final long lastUpdateTimestamp) { final long lastUpdateTimestamp) {
@ -449,8 +455,8 @@ public abstract class AbstractMdRecordToOafMapper {
r.setExternalReference(new ArrayList<>()); // NOT PRESENT IN MDSTORES r.setExternalReference(new ArrayList<>()); // NOT PRESENT IN MDSTORES
r.setProcessingchargeamount(field(doc.valueOf("//oaf:processingchargeamount"), info)); r.setProcessingchargeamount(field(doc.valueOf("//oaf:processingchargeamount"), info));
r.setProcessingchargecurrency(field(doc.valueOf("//oaf:processingchargeamount/@currency"), info)); r.setProcessingchargecurrency(field(doc.valueOf("//oaf:processingchargeamount/@currency"), info));
r.setInstance(instances); r.setInstance(Arrays.asList(instance));
r.setBestaccessright(OafMapperUtils.createBestAccessRights(instances)); r.setBestaccessright(OafMapperUtils.createBestAccessRights(Arrays.asList(instance)));
r.setEoscifguidelines(prepareEOSCIfGuidelines(doc, info)); r.setEoscifguidelines(prepareEOSCIfGuidelines(doc, info));
} }
@ -509,7 +515,7 @@ public abstract class AbstractMdRecordToOafMapper {
protected abstract Qualifier prepareResourceType(Document doc, DataInfo info); protected abstract Qualifier prepareResourceType(Document doc, DataInfo info);
protected abstract List<Instance> prepareInstances( protected abstract Instance prepareInstances(
Document doc, Document doc,
DataInfo info, DataInfo info,
KeyValue collectedfrom, KeyValue collectedfrom,
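
To make the reworked getResultType above concrete: the result type is now derived solely from the single instance's instancetype classid, resolved through the dnet:result_typologies vocabulary, and falls back to "0000" when either the classid or its vocabulary synonym is missing. A small sketch of that double fallback, with a hypothetical lookup function standing in for the vocabulary call:

import java.util.Optional;
import java.util.function.Function;

public class ResultTypeSketch {

    // hypothetical stand-in for vocs.getSynonymAsQualifier(DNET_RESULT_TYPOLOGIES, classid)
    static String resultType(String instanceTypeClassid, Function<String, Optional<String>> vocabularyLookup) {
        return Optional
            .ofNullable(instanceTypeClassid)
            .map(classid -> vocabularyLookup.apply(classid).orElse("0000"))
            .orElse("0000");
    }

    public static void main(String[] args) {
        Function<String, Optional<String>> lookup =
            classid -> "0001".equals(classid) ? Optional.of("publication") : Optional.empty();

        System.out.println(resultType("0001", lookup)); // publication
        System.out.println(resultType("9999", lookup)); // 0000: classid has no synonym in the vocabulary
        System.out.println(resultType(null, lookup));   // 0000: instance type missing
    }
}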

View File

@ -133,7 +133,7 @@ public class GenerateEntitiesApplication extends AbstractMigrationApplication {
inputRdd inputRdd
.keyBy(oaf -> ModelSupport.idFn().apply(oaf)) .keyBy(oaf -> ModelSupport.idFn().apply(oaf))
.groupByKey() .groupByKey()
.map(t -> MergeUtils.mergeGroup(t._1, t._2.iterator())), .map(t -> MergeUtils.mergeGroup(t._2.iterator())),
// .mapToPair(oaf -> new Tuple2<>(ModelSupport.idFn().apply(oaf), oaf)) // .mapToPair(oaf -> new Tuple2<>(ModelSupport.idFn().apply(oaf), oaf))
// .reduceByKey(MergeUtils::merge) // .reduceByKey(MergeUtils::merge)
// .map(Tuple2::_2), // .map(Tuple2::_2),

View File

@ -135,7 +135,7 @@ public class OafToOafMapper extends AbstractMdRecordToOafMapper {
} }
@Override @Override
protected List<Instance> prepareInstances( protected Instance prepareInstances(
final Document doc, final Document doc,
final DataInfo info, final DataInfo info,
final KeyValue collectedfrom, final KeyValue collectedfrom,
@ -197,7 +197,7 @@ public class OafToOafMapper extends AbstractMdRecordToOafMapper {
instance.getUrl().addAll(validUrl); instance.getUrl().addAll(validUrl);
} }
return Lists.newArrayList(instance); return instance;
} }
/** /**

View File

@ -126,7 +126,7 @@ public class OdfToOafMapper extends AbstractMdRecordToOafMapper {
} }
@Override @Override
protected List<Instance> prepareInstances( protected Instance prepareInstances(
final Document doc, final Document doc,
final DataInfo info, final DataInfo info,
final KeyValue collectedfrom, final KeyValue collectedfrom,
@ -210,7 +210,7 @@ public class OdfToOafMapper extends AbstractMdRecordToOafMapper {
instance.setUrl(new ArrayList<>()); instance.setUrl(new ArrayList<>());
instance.getUrl().addAll(validUrl); instance.getUrl().addAll(validUrl);
} }
return Arrays.asList(instance); return instance;
} }
protected String trimAndDecodeUrl(String url) { protected String trimAndDecodeUrl(String url) {
@ -319,7 +319,7 @@ public class OdfToOafMapper extends AbstractMdRecordToOafMapper {
@Override @Override
protected List<Field<String>> prepareDescriptions(final Document doc, final DataInfo info) { protected List<Field<String>> prepareDescriptions(final Document doc, final DataInfo info) {
return prepareListFields(doc, "//*[local-name()='description' and ./@descriptionType='Abstract']", info); return prepareListFields(doc, "//datacite:description[./@descriptionType='Abstract'] | //dc:description", info);
} }
@Override @Override

View File

@ -51,6 +51,7 @@
<arg>--orcidPath</arg><arg>${orcidPath}</arg> <arg>--orcidPath</arg><arg>${orcidPath}</arg>
<arg>--targetPath</arg><arg>${targetPath}</arg> <arg>--targetPath</arg><arg>${targetPath}</arg>
<arg>--graphPath</arg><arg>${graphPath}</arg> <arg>--graphPath</arg><arg>${graphPath}</arg>
<arg>--workingDir</arg><arg>${workingDir}</arg>
<arg>--master</arg><arg>yarn</arg> <arg>--master</arg><arg>yarn</arg>
</spark> </spark>
<ok to="reset_outputpath"/> <ok to="reset_outputpath"/>

View File

@@ -162,6 +162,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=15000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/publication</arg>
@@ -197,6 +198,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=8000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/dataset</arg>
@@ -232,6 +234,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=5000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/otherresearchproduct</arg>
@@ -267,6 +270,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=2000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/software</arg>
@@ -302,6 +306,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=1000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/datasource</arg>
@@ -337,6 +342,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=1000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/organization</arg>
@@ -372,6 +378,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=2000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/project</arg>
@@ -407,6 +414,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=2000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/person</arg>
@@ -442,6 +450,7 @@
             --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
             --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
             --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            --conf spark.sql.autoBroadcastJoinThreshold=-1
             --conf spark.sql.shuffle.partitions=20000
         </spark-opts>
         <arg>--inputPath</arg><arg>${graphInputPath}/relation</arg>
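Note: spark.sql.autoBroadcastJoinThreshold=-1 turns off size-based automatic broadcast hash joins, so the large graph tables processed by these actions fall back to sort-merge joins instead of risking broadcast timeouts or memory pressure on the driver and executors. The same setting applied programmatically, as a sketch with hypothetical app and master settings:

import org.apache.spark.sql.SparkSession;

// Sketch only: equivalent of passing --conf spark.sql.autoBroadcastJoinThreshold=-1
// on the oozie spark actions above.
final class DisableAutoBroadcastSketch {

    public static void main(String[] args) {
        SparkSession spark = SparkSession
            .builder()
            .appName("graph-table-job-sketch")                      // hypothetical name
            .master("local[*]")                                     // local master for the sketch only
            .config("spark.sql.autoBroadcastJoinThreshold", "-1")
            .getOrCreate();

        spark.stop();
    }
}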


@@ -68,6 +68,7 @@
         <path start="merge_datasource"/>
         <path start="merge_organization"/>
         <path start="merge_project"/>
+        <path start="merge_person"/>
         <path start="merge_relation"/>
     </fork>
@@ -260,6 +261,33 @@
         <error to="Kill"/>
     </action>
+    <action name="merge_person">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Merge person</name>
+            <class>eu.dnetlib.dhp.oa.graph.merge.MergeGraphTableSparkJob</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-cores=${sparkExecutorCores}
+                --executor-memory=${sparkExecutorMemory}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.sql.shuffle.partitions=7680
+            </spark-opts>
+            <arg>--betaInputPath</arg><arg>${betaInputGraphPath}/person</arg>
+            <arg>--prodInputPath</arg><arg>${prodInputGraphPath}/person</arg>
+            <arg>--outputPath</arg><arg>${graphOutputPath}/person</arg>
+            <arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Person</arg>
+            <arg>--priority</arg><arg>${priority}</arg>
+        </spark>
+        <ok to="wait_merge"/>
+        <error to="Kill"/>
+    </action>
     <action name="merge_relation">
         <spark xmlns="uri:oozie:spark-action:0.2">
             <master>yarn</master>


@@ -649,6 +649,7 @@
         <path start="merge_claims_datasource"/>
         <path start="merge_claims_organization"/>
         <path start="merge_claims_project"/>
+        <path start="merge_claims_person"/>
         <path start="merge_claims_relation"/>
     </fork>
@@ -860,6 +861,32 @@
         <error to="Kill"/>
     </action>
+    <action name="merge_claims_person">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>MergeClaims_person</name>
+            <class>eu.dnetlib.dhp.oa.graph.raw.MergeClaimsApplication</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory ${sparkExecutorMemory}
+                --executor-cores ${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+                --conf spark.sql.shuffle.partitions=200
+            </spark-opts>
+            <arg>--rawGraphPath</arg><arg>${workingDir}/graph_raw</arg>
+            <arg>--claimsGraphPath</arg><arg>${workingDir}/graph_claims</arg>
+            <arg>--outputRawGaphPath</arg><arg>${graphOutputPath}</arg>
+            <arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Person</arg>
+        </spark>
+        <ok to="wait_merge"/>
+        <error to="Kill"/>
+    </action>
     <join name="wait_merge" to="decisionPatchRelations"/>
     <decision name="decisionPatchRelations">

Some files were not shown because too many files have changed in this diff.