Compare commits
187 Commits
main...affiliatio
Author | SHA1 | Date |
---|---|---|
Serafeim Chatzopoulos | 50401a872f | |
Serafeim Chatzopoulos | 37c04cbad7 | |
Claudio Atzori | 8e7ef79ce0 | |
Claudio Atzori | fecbf93e0e | |
Claudio Atzori | 64740475d0 | |
Claudio Atzori | 8f551afa52 | |
Miriam Baglioni | 1af6571474 | |
Claudio Atzori | a81c555fe6 | |
Claudio Atzori | 359b8ebda8 | |
Miriam Baglioni | c7f6669f1a | |
Miriam Baglioni | 7cff281d3e | |
Claudio Atzori | d4bf449e8c | |
Miriam Baglioni | fc60661ac5 | |
Claudio Atzori | d771a883f9 | |
Claudio Atzori | 01958a3e07 | |
Claudio Atzori | ceb210993c | |
Miriam Baglioni | 6f1801d7d1 | |
Miriam Baglioni | 19806c2ae3 | |
Miriam Baglioni | 62649dc5c4 | |
Miriam Baglioni | 9573bf576d | |
Michele Artini | d27e9ea50f | |
Michele De Bonis | 4f4c73d65b | |
Miriam Baglioni | 79985ad197 | |
Claudio Atzori | c25b048e12 | |
Claudio Atzori | 06e3985b77 | |
Claudio Atzori | 83327239de | |
Claudio Atzori | db9c54c944 | |
Claudio Atzori | e39e8bbd47 | |
Claudio Atzori | e94ae771ff | |
Claudio Atzori | 6c98d69215 | |
Claudio Atzori | 78b5e4bb6f | |
Claudio Atzori | 40c5d87645 | |
Claudio Atzori | a65241fcaf | |
Claudio Atzori | 6665976604 | |
Claudio Atzori | c99f92efaa | |
Claudio Atzori | f17e1243ba | |
Claudio Atzori | 6a19337dab | |
Miriam Baglioni | d96215cb9b | |
Miriam Baglioni | 9246bdec1c | |
Miriam Baglioni | 9d27910144 | |
Claudio Atzori | beb93cdfe9 | |
Claudio Atzori | 5aa7847ea6 | |
Claudio Atzori | 38f8ed27fd | |
Claudio Atzori | 1fb44198fb | |
Claudio Atzori | 6f6e85ddf4 | |
Claudio Atzori | 7fa3d51200 | |
Michele Artini | f99fb21040 | |
Claudio Atzori | e17edb2581 | |
Claudio Atzori | 61d1fa9b9f | |
Claudio Atzori | f9ed2ae33c | |
Michele Artini | bbe52584f7 | |
Michele Artini | 5cdba9172b | |
Michele De Bonis | 2a36ccb997 | |
Miriam Baglioni | c465835061 | |
Miriam Baglioni | 814e650e12 | |
Miriam Baglioni | ddd20e7f8e | |
Claudio Atzori | 1180d78b71 | |
Claudio Atzori | bb12d0b4df | |
Claudio Atzori | 7d3292551b | |
Claudio Atzori | c7634c55c7 | |
Miriam Baglioni | a2b708bb71 | |
Miriam Baglioni | 9cbe966b4a | |
Miriam Baglioni | 236b64d830 | |
Miriam Baglioni | 67ff783e65 | |
Michele De Bonis | a10e8d9f05 | |
Claudio Atzori | 14539f9c8b | |
Claudio Atzori | 1bc8c5d173 | |
Claudio Atzori | 1ccf01cdb8 | |
Claudio Atzori | b79cb155ba | |
Claudio Atzori | 33a02c5b9e | |
Claudio Atzori | 1182bca9eb | |
Claudio Atzori | 1c30eacac2 | |
Claudio Atzori | 6055212f77 | |
Claudio Atzori | 0031cf849e | |
Serafeim Chatzopoulos | 9f6e16a03c | |
Lampros Smyrnaios | 66cd28f70a | |
Lampros Smyrnaios | c6b1ab2a18 | |
Miriam Baglioni | d35edac212 | |
Miriam Baglioni | 6421f8fece | |
Miriam Baglioni | ac270f795b | |
Lampros Smyrnaios | 236aed8954 | |
Claudio Atzori | dd541f8cf5 | |
Lampros Smyrnaios | ff335578ea | |
Lampros Smyrnaios | 285416c74e | |
Lampros Smyrnaios | 3095047e5e | |
Antonis Lempesis | 0456f1b788 | |
Antonis Lempesis | 38636942c7 | |
Lampros Smyrnaios | d942a1101b | |
Giambattista Bloisi | 9bf2bda1c6 | |
Giambattista Bloisi | d90cb099b8 | |
Giambattista Bloisi | 4f2a61e10f | |
Claudio Atzori | 11fe3a4fe0 | |
Claudio Atzori | a8d68c9d29 | |
Miriam Baglioni | 8fe934810f | |
Miriam Baglioni | 9da006e98c | |
Giambattista Bloisi | 85c1eae7e0 | |
Claudio Atzori | b0eba210c0 | |
Claudio Atzori | 3776327a8c | |
Claudio Atzori | 0139f23d66 | |
Michele Artini | c726572418 | |
Claudio Atzori | ec79405cc9 | |
Miriam Baglioni | 1477406ecc | |
Claudio Atzori | 92c3abd5a4 | |
Claudio Atzori | ce2364743a | |
Claudio Atzori | f70dc76b61 | |
Claudio Atzori | 73bd1938a5 | |
Claudio Atzori | da5c1e73a4 | |
Claudio Atzori | a02f3f0d2b | |
Alessia Bardi | eadfd8d71d | |
Alessia Bardi | 05ee783c07 | |
Alessia Bardi | fe9fb59c90 | |
Claudio Atzori | c272c4ad68 | |
Alessia Bardi | c5f4da16a4 | |
Alessia | 1b165a14a0 | |
Michele Artini | e996787be2 | |
Claudio Atzori | 62716141c5 | |
Miriam Baglioni | 5d85b70e1f | |
Lampros Smyrnaios | e3f28338c1 | |
Giambattista Bloisi | 73316d8c83 | |
Miriam Baglioni | 75d5ddb999 | |
Miriam Baglioni | 87c9c61b41 | |
Miriam Baglioni | b55fed09f8 | |
Claudio Atzori | 107d958b89 | |
Claudio Atzori | 3a7a6ecc32 | |
Claudio Atzori | 1af4224d3d | |
Claudio Atzori | 0d5bdb2db0 | |
Claudio Atzori | 66548e6a83 | |
Antonis Lempesis | 15b54a345a | |
Lampros Smyrnaios | b48ed6e617 | |
Lampros Smyrnaios | 68322843e2 | |
Lampros Smyrnaios | c7b32bbacc | |
Giambattista Bloisi | 1b2357e10a | |
Sandro La Bruzzo | f1fe363b19 | |
Sandro La Bruzzo | 66c1ffc866 | |
Claudio Atzori | 1ea67eba82 | |
Claudio Atzori | f9fb2fef6e | |
Claudio Atzori | 834461ba26 | |
Sandro La Bruzzo | e8a61d5dd5 | |
Sandro La Bruzzo | ca9414b737 | |
Sandro La Bruzzo | 032bcc8279 | |
Sandro La Bruzzo | 103e2652b3 | |
Sandro La Bruzzo | a87f9ea643 | |
Sandro La Bruzzo | 6efab4d88e | |
Claudio Atzori | 92f018d196 | |
Claudio Atzori | 0611c81a2f | |
Claudio Atzori | 1efe7f7e39 | |
Claudio Atzori | 53e7bb4336 | |
Claudio Atzori | f7d56e2ef2 | |
Claudio Atzori | c1237ab39e | |
Claudio Atzori | dc3a5858f7 | |
Claudio Atzori | 55f39f7850 | |
Claudio Atzori | 39a2afe8b5 | |
Claudio Atzori | 908ed9da7a | |
Antonis Lempesis | 0cada3cc8f | |
Antonis Lempesis | 90a4fb3547 | |
Claudio Atzori | 18aa323ee9 | |
Claudio Atzori | b4e3389432 | |
Giambattista Bloisi | 711048ceed | |
Sandro La Bruzzo | db358ad0d2 | |
Sandro La Bruzzo | 26bf8e763a | |
Sandro La Bruzzo | a860c57bbc | |
Sandro La Bruzzo | 0646d0d064 | |
Michele Artini | f4068de298 | |
Michele Artini | 2615136efc | |
Sandro La Bruzzo | 133ead1e3e | |
Sandro La Bruzzo | 052c6aac9d | |
Sandro La Bruzzo | 9cd3bc0f10 | |
Sandro La Bruzzo | 0d628cd62b | |
Lampros Smyrnaios | 49af2e5740 | |
Antonis Lempesis | d2649a1429 | |
Sandro La Bruzzo | 073f320c6a | |
Sandro La Bruzzo | b84ad0c06e | |
Antonis Lempesis | b52a5a753b | |
Sandro La Bruzzo | 8dd9cf84e2 | |
Sandro La Bruzzo | 342cb6189b | |
Antonis Lempesis | c3fe9662b2 | |
Antonis Lempesis | 0c71c58df6 | |
Antonis Lempesis | 43d05dbebb | |
Antonis Lempesis | e728a0897c | |
Antonis Lempesis | 308ae580a9 | |
Antonis Lempesis | 27d22bd8f9 | |
Antonis Lempesis | 1f5aba12fa | |
Giambattista Bloisi | 613ec5ffce | |
Sandro La Bruzzo | 52495f2cd2 | |
Sandro La Bruzzo | 8c3e9a09d3 | |
Giambattista Bloisi | 2fa78f6071 | |
Giambattista Bloisi | 326c9dc08c |

@@ -27,3 +27,4 @@ spark-warehouse
 /**/.factorypath
 /**/.scalafmt.conf
 /.java-version
+/dhp-shade-package/dependency-reduced-pom.xml

@@ -80,7 +80,15 @@ class WritePredefinedProjectPropertiesTest {
 		mojo.outputFile = testFolder;

 		// execute
-		Assertions.assertThrows(MojoExecutionException.class, () -> mojo.execute());
+		try {
+			mojo.execute();
+			Assertions.assertTrue(false); // not reached
+		} catch (Exception e) {
+			Assertions
+				.assertTrue(
+					MojoExecutionException.class.isAssignableFrom(e.getClass()) ||
+						IllegalArgumentException.class.isAssignableFrom(e.getClass()));
+		}
 	}

 	@Test

@@ -70,10 +70,7 @@
 			<groupId>com.ibm.icu</groupId>
 			<artifactId>icu4j</artifactId>
 		</dependency>
-		<dependency>
-			<groupId>org.apache.hadoop</groupId>
-			<artifactId>hadoop-common</artifactId>
-		</dependency>
+
 		<dependency>
 			<groupId>com.github.sisyphsu</groupId>
 			<artifactId>dateparser</artifactId>
@@ -163,7 +160,7 @@
 
 		<dependency>
 			<groupId>eu.dnetlib.dhp</groupId>
-			<artifactId>${dhp-schemas.artifact}</artifactId>
+			<artifactId>dhp-schemas</artifactId>
 		</dependency>
 
 		<dependency>
@@ -172,4 +169,23 @@
 		</dependency>
 	</dependencies>
 
+	<!-- dependencies required on JDK9+ because J2EE has been removed -->
+	<profiles>
+		<profile>
+			<id>spark-34</id>
+			<dependencies>
+				<dependency>
+					<groupId>javax.xml.bind</groupId>
+					<artifactId>jaxb-api</artifactId>
+					<version>2.2.11</version>
+				</dependency>
+				<dependency>
+					<groupId>com.sun.xml.ws</groupId>
+					<artifactId>jaxws-ri</artifactId>
+					<version>2.3.3</version>
+					<type>pom</type>
+				</dependency>
+			</dependencies>
+		</profile>
+	</profiles>
 </project>

@@ -38,7 +38,7 @@ public class PacePerson {
 					PacePerson.class
 						.getResourceAsStream(
 							"/eu/dnetlib/dhp/common/name_particles.txt")));
-		} catch (IOException e) {
+		} catch (Exception e) {
 			throw new RuntimeException(e);
 		}
 	}

@@ -217,8 +217,6 @@ public class ZenodoAPIClient implements Serializable {
 	 * part of the url for the DOI Zenodo suggests to use to cite all versions: DOI: 10.xxx/zenodo.656930
 	 * concept_rec_id = 656930
 	 * @return response code
-	 * @throws IOException
-	 * @throws MissingConceptDoiException
 	 */
 	public int newVersion(String concept_rec_id) throws IOException, MissingConceptDoiException {
 		setDepositionId(concept_rec_id, 1);

@@ -12,9 +12,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.math.NumberUtils;
-import org.apache.commons.lang3.time.DateUtils;
 import org.apache.http.HttpHeaders;
-import org.joda.time.Instant;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

@@ -0,0 +1,106 @@
+
+package eu.dnetlib.dhp.schema.oaf.utils;
+
+import java.util.*;
+
+import eu.dnetlib.dhp.schema.common.ModelConstants;
+import eu.dnetlib.dhp.schema.oaf.Oaf;
+import eu.dnetlib.dhp.schema.oaf.OafEntity;
+import eu.dnetlib.dhp.schema.oaf.Result;
+
+public class MergeEntitiesComparator implements Comparator<Oaf> {
+	static final List<String> PID_AUTHORITIES = Arrays
+		.asList(
+			ModelConstants.ARXIV_ID,
+			ModelConstants.PUBMED_CENTRAL_ID,
+			ModelConstants.EUROPE_PUBMED_CENTRAL_ID,
+			ModelConstants.DATACITE_ID,
+			ModelConstants.CROSSREF_ID);
+
+	static final List<String> RESULT_TYPES = Arrays
+		.asList(
+			ModelConstants.ORP_RESULTTYPE_CLASSID,
+			ModelConstants.SOFTWARE_RESULTTYPE_CLASSID,
+			ModelConstants.DATASET_RESULTTYPE_CLASSID,
+			ModelConstants.PUBLICATION_RESULTTYPE_CLASSID);
+
+	public static final Comparator<Oaf> INSTANCE = new MergeEntitiesComparator();
+
+	@Override
+	public int compare(Oaf left, Oaf right) {
+		if (left == null && right == null)
+			return 0;
+		if (left == null)
+			return -1;
+		if (right == null)
+			return 1;
+
+		int res = 0;
+
+		// pid authority
+		int cfp1 = Optional
+			.ofNullable(left.getCollectedfrom())
+			.map(
+				cf -> cf
+					.stream()
+					.map(kv -> PID_AUTHORITIES.indexOf(kv.getKey()))
+					.max(Integer::compare)
+					.orElse(-1))
+			.orElse(-1);
+		int cfp2 = Optional
+			.ofNullable(right.getCollectedfrom())
+			.map(
+				cf -> cf
+					.stream()
+					.map(kv -> PID_AUTHORITIES.indexOf(kv.getKey()))
+					.max(Integer::compare)
+					.orElse(-1))
+			.orElse(-1);
+
+		if (cfp1 >= 0 && cfp1 > cfp2) {
+			return 1;
+		} else if (cfp2 >= 0 && cfp2 > cfp1) {
+			return -1;
+		}
+
+		// trust
+		if (left.getDataInfo() != null && right.getDataInfo() != null) {
+			res = left.getDataInfo().getTrust().compareTo(right.getDataInfo().getTrust());
+		}
+
+		// result type
+		if (res == 0) {
+			if (left instanceof Result && right instanceof Result) {
+				Result r1 = (Result) left;
+				Result r2 = (Result) right;
+
+				if (r1.getResulttype() == null || r1.getResulttype().getClassid() == null) {
+					if (r2.getResulttype() != null && r2.getResulttype().getClassid() != null) {
+						return -1;
+					}
+				} else if (r2.getResulttype() == null || r2.getResulttype().getClassid() == null) {
+					return 1;
+				}
+
+				int rt1 = RESULT_TYPES.indexOf(r1.getResulttype().getClassid());
+				int rt2 = RESULT_TYPES.indexOf(r2.getResulttype().getClassid());
+
+				if (rt1 >= 0 && rt1 > rt2) {
+					return 1;
+				} else if (rt2 >= 0 && rt2 > rt1) {
+					return -1;
+				}
+			}
+		}
+
+		// id
+		if (res == 0) {
+			if (left instanceof OafEntity && right instanceof OafEntity) {
+				res = ((OafEntity) left).getId().compareTo(((OafEntity) right).getId());
+			}
+		}
+
+		return res;
+	}
+
+}

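The new comparator centralizes the ranking previously split between the `mergeGroup` lambda and `ResultTypeComparator`: pid authority first, then trust, then result type, then id. A minimal usage sketch, not part of the changeset, assuming the usual no-arg constructors and setters of the dhp-schemas classes (`KeyValue`, `Qualifier`, `Result`):

import java.util.Arrays;
import java.util.List;

import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.KeyValue;
import eu.dnetlib.dhp.schema.oaf.Qualifier;
import eu.dnetlib.dhp.schema.oaf.Result;
import eu.dnetlib.dhp.schema.oaf.utils.MergeEntitiesComparator;

public class MergeEntitiesComparatorSketch {

	public static void main(String[] args) {
		// hypothetical duplicate records of the same entity, from different sources
		Result fromCrossref = result(ModelConstants.CROSSREF_ID, ModelConstants.PUBLICATION_RESULTTYPE_CLASSID);
		Result fromElsewhere = result("10|fake________::source", ModelConstants.DATASET_RESULTTYPE_CLASSID);

		List<Result> group = Arrays.asList(fromElsewhere, fromCrossref);
		// most authoritative record first, as the reworked mergeGroup below does
		group.sort(MergeEntitiesComparator.INSTANCE.reversed());

		System.out.println(group.get(0) == fromCrossref); // true: Crossref outranks an unknown source
	}

	private static Result result(String collectedFromKey, String resulttypeClassid) {
		KeyValue cf = new KeyValue();
		cf.setKey(collectedFromKey);
		Qualifier type = new Qualifier();
		type.setClassid(resulttypeClassid);
		Result r = new Result();
		r.setCollectedfrom(Arrays.asList(cf));
		r.setResulttype(type);
		return r;
	}
}

Sorting with `reversed()` puts the preferred record first, which is exactly how `mergeGroup` seeds the fold over the remaining duplicates.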
@@ -40,27 +40,12 @@ public class MergeUtils {
 
 	public static <T extends Oaf> T mergeGroup(String s, Iterator<T> oafEntityIterator,
 		boolean checkDelegateAuthority) {
-		TreeSet<T> sortedEntities = new TreeSet<>((o1, o2) -> {
-			int res = 0;
-
-			if (o1.getDataInfo() != null && o2.getDataInfo() != null) {
-				res = o1.getDataInfo().getTrust().compareTo(o2.getDataInfo().getTrust());
-			}
+		ArrayList<T> sortedEntities = new ArrayList<>();
+		oafEntityIterator.forEachRemaining(sortedEntities::add);
+		sortedEntities.sort(MergeEntitiesComparator.INSTANCE.reversed());
 
-			if (res == 0) {
-				if (o1 instanceof Result && o2 instanceof Result) {
-					return ResultTypeComparator.INSTANCE.compare((Result) o1, (Result) o2);
-				}
-			}
-
-			return res;
-		});
-
-		while (oafEntityIterator.hasNext()) {
-			sortedEntities.add(oafEntityIterator.next());
-		}
-
-		Iterator<T> it = sortedEntities.descendingIterator();
+		Iterator<T> it = sortedEntities.iterator();
 		T merged = it.next();
 
 		while (it.hasNext()) {

@@ -143,7 +128,7 @@ public class MergeUtils {
 	 * https://graph.openaire.eu/docs/data-model/pids-and-identifiers#delegated-authorities and in that case it prefers
 	 * such version.
 	 * <p>
-	 * Otherwise, it considers a resulttype priority order implemented in {@link ResultTypeComparator}
+	 * Otherwise, it considers a resulttype priority order implemented in {@link MergeEntitiesComparator}
 	 * and proceeds with the canonical property merging.
 	 *
 	 * @param left

@@ -161,8 +146,9 @@ public class MergeUtils {
 		if (!leftFromDelegatedAuthority && rightFromDelegatedAuthority) {
 			return right;
 		}
 
-		if (new ResultTypeComparator().compare(left, right) < 0) {
+		// TODO: raise trust to have preferred fields from one or the other??
+		if (MergeEntitiesComparator.INSTANCE.compare(left, right) > 0) {
 			return mergeResultFields(left, right);
 		} else {
 			return mergeResultFields(right, left);

@@ -225,9 +211,9 @@ public class MergeUtils {
 
 	private static <T, K> List<T> mergeLists(final List<T> left, final List<T> right, int trust,
 		Function<T, K> keyExtractor, BinaryOperator<T> merger) {
-		if (left == null) {
-			return right;
-		} else if (right == null) {
+		if (left == null || left.isEmpty()) {
+			return right != null ? right : new ArrayList<>();
+		} else if (right == null || right.isEmpty()) {
 			return left;
 		}
 

@@ -342,7 +328,7 @@ public class MergeUtils {
 		final T merged = mergeOafFields(original, enrich, trust);
 
 		merged.setOriginalId(unionDistinctListOfString(merged.getOriginalId(), enrich.getOriginalId()));
-		merged.setPid(unionDistinctLists(merged.getPid(), enrich.getPid(), trust));
+		merged.setPid(mergeLists(merged.getPid(), enrich.getPid(), trust, MergeUtils::spKeyExtractor, (p1, p2) -> p1));
 		merged.setDateofcollection(LocalDateTime.now().toString());
 		merged
 			.setDateoftransformation(

@@ -405,7 +391,7 @@ public class MergeUtils {
 	}
 
 	// should be an instance attribute, get the first non-null value
-		merge.setLanguage(coalesce(merge.getLanguage(), enrich.getLanguage()));
+		merge.setLanguage(coalesceQualifier(merge.getLanguage(), enrich.getLanguage()));
 
 		// distinct countries, do not manage datainfo
 		merge.setCountry(mergeQualifiers(merge.getCountry(), enrich.getCountry(), trust));

@@ -478,6 +464,10 @@ public class MergeUtils {
 		merge.setIsInDiamondJournal(booleanOR(merge.getIsInDiamondJournal(), enrich.getIsInDiamondJournal()));
 		merge.setPubliclyFunded(booleanOR(merge.getPubliclyFunded(), enrich.getPubliclyFunded()));
 
+		if (StringUtils.isBlank(merge.getTransformativeAgreement())) {
+			merge.setTransformativeAgreement(enrich.getTransformativeAgreement());
+		}
+
 		return merge;
 	}
 

@@ -575,6 +565,13 @@ public class MergeUtils {
 		return m != null ? m : e;
 	}
 
+	private static Qualifier coalesceQualifier(Qualifier m, Qualifier e) {
+		if (m == null || m.getClassid() == null || StringUtils.isBlank(m.getClassid())) {
+			return e;
+		}
+		return m;
+	}
+
 	private static List<Author> mergeAuthors(List<Author> author, List<Author> author1, int trust) {
 		List<List<Author>> authors = new ArrayList<>();
 		if (author != null) {

@@ -587,6 +584,10 @@ public class MergeUtils {
 	}
 
 	private static String instanceKeyExtractor(Instance i) {
+		// three levels of concatenating:
+		// 1. ::
+		// 2. @@
+		// 3. ||
 		return String
 			.join(
 				"::",

@@ -594,10 +595,10 @@ public class MergeUtils {
 			kvKeyExtractor(i.getCollectedfrom()),
 			qualifierKeyExtractor(i.getAccessright()),
 			qualifierKeyExtractor(i.getInstancetype()),
-			Optional.ofNullable(i.getUrl()).map(u -> String.join("::", u)).orElse(null),
+			Optional.ofNullable(i.getUrl()).map(u -> String.join("@@", u)).orElse(null),
 			Optional
 				.ofNullable(i.getPid())
-				.map(pp -> pp.stream().map(MergeUtils::spKeyExtractor).collect(Collectors.joining("::")))
+				.map(pp -> pp.stream().map(MergeUtils::spKeyExtractor).collect(Collectors.joining("@@")))
 				.orElse(null));
 	}
 

@@ -658,6 +659,13 @@ public class MergeUtils {
 			return d1;
 		}
 
+		if (StringUtils.contains(d1.getValue(), "null")) {
+			return d2;
+		}
+		if (StringUtils.contains(d2.getValue(), "null")) {
+			return d1;
+		}
+
 		return Stream
 			.of(d1, d2)
 			.min(

@@ -706,7 +714,7 @@ public class MergeUtils {
 	private static String spKeyExtractor(StructuredProperty sp) {
 		return Optional
 			.ofNullable(sp)
-			.map(s -> Joiner.on("::").join(s, qualifierKeyExtractor(s.getQualifier())))
+			.map(s -> Joiner.on("||").join(qualifierKeyExtractor(s.getQualifier()), s.getValue()))
 			.orElse(null);
 	}
 

@@ -1,87 +0,0 @@
-
-package eu.dnetlib.dhp.schema.oaf.utils;
-
-import static eu.dnetlib.dhp.schema.common.ModelConstants.CROSSREF_ID;
-
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.Optional;
-import java.util.stream.Collectors;
-
-import eu.dnetlib.dhp.schema.common.ModelConstants;
-import eu.dnetlib.dhp.schema.oaf.KeyValue;
-import eu.dnetlib.dhp.schema.oaf.Result;
-
-public class ResultTypeComparator implements Comparator<Result> {
-
-	public static final ResultTypeComparator INSTANCE = new ResultTypeComparator();
-
-	@Override
-	public int compare(Result left, Result right) {
-
-		if (left == null && right == null)
-			return 0;
-		if (left == null)
-			return 1;
-		if (right == null)
-			return -1;
-
-		HashSet<String> lCf = getCollectedFromIds(left);
-		HashSet<String> rCf = getCollectedFromIds(right);
-
-		if (lCf.contains(CROSSREF_ID) && !rCf.contains(CROSSREF_ID)) {
-			return -1;
-		}
-		if (!lCf.contains(CROSSREF_ID) && rCf.contains(CROSSREF_ID)) {
-			return 1;
-		}
-
-		if (left.getResulttype() == null || left.getResulttype().getClassid() == null) {
-			if (right.getResulttype() == null || right.getResulttype().getClassid() == null) {
-				return 0;
-			}
-			return 1;
-		} else if (right.getResulttype() == null || right.getResulttype().getClassid() == null) {
-			return -1;
-		}
-
-		String lClass = left.getResulttype().getClassid();
-		String rClass = right.getResulttype().getClassid();
-
-		if (!lClass.equals(rClass)) {
-			if (lClass.equals(ModelConstants.PUBLICATION_RESULTTYPE_CLASSID))
-				return -1;
-			if (rClass.equals(ModelConstants.PUBLICATION_RESULTTYPE_CLASSID))
-				return 1;
-
-			if (lClass.equals(ModelConstants.DATASET_RESULTTYPE_CLASSID))
-				return -1;
-			if (rClass.equals(ModelConstants.DATASET_RESULTTYPE_CLASSID))
-				return 1;
-
-			if (lClass.equals(ModelConstants.SOFTWARE_RESULTTYPE_CLASSID))
-				return -1;
-			if (rClass.equals(ModelConstants.SOFTWARE_RESULTTYPE_CLASSID))
-				return 1;
-
-			if (lClass.equals(ModelConstants.ORP_RESULTTYPE_CLASSID))
-				return -1;
-			if (rClass.equals(ModelConstants.ORP_RESULTTYPE_CLASSID))
-				return 1;
-		}
-
-		// Else (but unlikely), lexicographical ordering will do.
-		return lClass.compareTo(rClass);
-	}
-
-	protected HashSet<String> getCollectedFromIds(Result left) {
-		return Optional
-			.ofNullable(left.getCollectedfrom())
-			.map(
-				cf -> cf
-					.stream()
-					.map(KeyValue::getKey)
-					.collect(Collectors.toCollection(HashSet::new)))
-			.orElse(new HashSet<>());
-	}
-}

@@ -154,5 +154,13 @@
 	"unknown":{
 		"original":"Unknown",
 		"inverse":"Unknown"
 	},
+	"isamongtopnsimilardocuments": {
+		"original": "IsAmongTopNSimilarDocuments",
+		"inverse": "HasAmongTopNSimilarDocuments"
+	},
+	"hasamongtopnsimilardocuments": {
+		"original": "HasAmongTopNSimilarDocuments",
+		"inverse": "IsAmongTopNSimilarDocuments"
+	}
 }

@@ -65,12 +65,13 @@ abstract class AbstractScalaApplication(
     val conf: SparkConf = new SparkConf()
     val master = parser.get("master")
     log.info(s"Creating Spark session: Master: $master")
-    SparkSession
+    val b = SparkSession
       .builder()
       .config(conf)
       .appName(getClass.getSimpleName)
-      .master(master)
-      .getOrCreate()
+    if (master != null)
+      b.master(master)
+    b.getOrCreate()
   }
 
   def reportTotalSize(targetPath: String, outputBasePath: String): Unit = {

@@ -65,7 +65,11 @@ object ScholixUtils extends Serializable {
   }
 
   def generateScholixResourceFromResult(r: Result): ScholixResource = {
-    generateScholixResourceFromSummary(ScholixUtils.resultToSummary(r))
+    val sum = ScholixUtils.resultToSummary(r)
+    if (sum != null)
+      generateScholixResourceFromSummary(ScholixUtils.resultToSummary(r))
+    else
+      null
   }
 
   val statsAggregator: Aggregator[(String, String, Long), RelatedEntities, RelatedEntities] =

@@ -153,6 +157,14 @@ object ScholixUtils extends Serializable {
 
   }
 
+  def invRel(rel: String): String = {
+    val semanticRelation = relations.getOrElse(rel.toLowerCase, null)
+    if (semanticRelation != null)
+      semanticRelation.inverse
+    else
+      null
+  }
+
   def extractCollectedFrom(summary: ScholixResource): List[ScholixEntityId] = {
     if (summary.getCollectedFrom != null && !summary.getCollectedFrom.isEmpty) {
       val l: List[ScholixEntityId] = summary.getCollectedFrom.asScala.map { d =>

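As a worked example of the new `invRel` helper together with the relation pair added to `relations.json` above, here is a hedged Java rendering of the Scala lookup (the real map is deserialized from the resource file; this two-entry extract is hard-coded for illustration):

import java.util.HashMap;
import java.util.Map;

public class InverseRelationSketch {

	// extract of relations.json: relation name (lower-cased) -> inverse relation
	private static final Map<String, String> INVERSE = new HashMap<>();
	static {
		INVERSE.put("isamongtopnsimilardocuments", "HasAmongTopNSimilarDocuments");
		INVERSE.put("hasamongtopnsimilardocuments", "IsAmongTopNSimilarDocuments");
	}

	// mirrors ScholixUtils.invRel: lower-case lookup, null when the relation is unknown
	static String invRel(String rel) {
		return INVERSE.get(rel.toLowerCase());
	}

	public static void main(String[] args) {
		System.out.println(invRel("IsAmongTopNSimilarDocuments")); // HasAmongTopNSimilarDocuments
		System.out.println(invRel("nonexistent")); // null
	}
}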
@@ -377,10 +389,7 @@ object ScholixUtils extends Serializable {
     if (persistentIdentifiers.isEmpty)
       return null
     s.setLocalIdentifier(persistentIdentifiers.asJava)
-    if (r.isInstanceOf[Publication])
-      s.setTypology(Typology.publication)
-    else
-      s.setTypology(Typology.dataset)
+//    s.setTypology(r.getResulttype.getClassid)
 
     s.setSubType(r.getInstance().get(0).getInstancetype.getClassname)
 

@@ -24,7 +24,7 @@
 				<executions>
 					<execution>
 						<id>scala-compile-first</id>
-						<phase>initialize</phase>
+						<phase>process-resources</phase>
 						<goals>
 							<goal>add-source</goal>
 							<goal>compile</goal>

@@ -59,14 +59,6 @@
 			<groupId>edu.cmu</groupId>
 			<artifactId>secondstring</artifactId>
 		</dependency>
-		<dependency>
-			<groupId>com.google.guava</groupId>
-			<artifactId>guava</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>com.google.code.gson</groupId>
-			<artifactId>gson</artifactId>
-		</dependency>
 		<dependency>
 			<groupId>org.apache.commons</groupId>
 			<artifactId>commons-lang3</artifactId>

@@ -91,10 +83,6 @@
 			<groupId>com.fasterxml.jackson.core</groupId>
 			<artifactId>jackson-databind</artifactId>
 		</dependency>
-		<dependency>
-			<groupId>org.apache.commons</groupId>
-			<artifactId>commons-math3</artifactId>
-		</dependency>
 		<dependency>
 			<groupId>com.jayway.jsonpath</groupId>
 			<artifactId>json-path</artifactId>

@@ -113,4 +101,90 @@
 		</dependency>
 	</dependencies>
 
+	<profiles>
+		<profile>
+			<id>spark-24</id>
+			<activation>
+				<activeByDefault>true</activeByDefault>
+			</activation>
+
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.codehaus.mojo</groupId>
+						<artifactId>build-helper-maven-plugin</artifactId>
+						<version>3.4.0</version>
+						<executions>
+							<execution>
+								<phase>generate-sources</phase>
+								<goals>
+									<goal>add-source</goal>
+								</goals>
+								<configuration>
+									<sources>
+										<source>src/main/spark-2</source>
+									</sources>
+								</configuration>
+							</execution>
+						</executions>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+
+		<profile>
+			<id>spark-34</id>
+
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.codehaus.mojo</groupId>
+						<artifactId>build-helper-maven-plugin</artifactId>
+						<version>3.4.0</version>
+						<executions>
+							<execution>
+								<phase>generate-sources</phase>
+								<goals>
+									<goal>add-source</goal>
+								</goals>
+								<configuration>
+									<sources>
+										<source>src/main/spark-2</source>
+									</sources>
+								</configuration>
+							</execution>
+						</executions>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+
+		<profile>
+			<id>spark-35</id>
+
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.codehaus.mojo</groupId>
+						<artifactId>build-helper-maven-plugin</artifactId>
+						<version>3.4.0</version>
+						<executions>
+							<execution>
+								<phase>generate-sources</phase>
+								<goals>
+									<goal>add-source</goal>
+								</goals>
+								<configuration>
+									<sources>
+										<source>src/main/spark-35</source>
+									</sources>
+								</configuration>
+							</execution>
+						</executions>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+	</profiles>
+
 </project>

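The three profiles above wire extra source roots into the build through `build-helper-maven-plugin`: `spark-24` (active by default) and `spark-34` both add `src/main/spark-2`, while `spark-35` adds `src/main/spark-35`. Activating one profile at build time (for example with Maven's `-Pspark-35` switch) therefore selects which copy of version-specific helpers such as `SparkCompatUtils`, introduced further down in this diff, gets compiled.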
@@ -2,31 +2,41 @@
 package eu.dnetlib.pace.clustering;
 
 import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
 import org.apache.commons.lang3.StringUtils;
 
 import eu.dnetlib.pace.config.Config;
 
-@ClusteringClass("keywordsclustering")
-public class KeywordsClustering extends AbstractClusteringFunction {
+@ClusteringClass("legalnameclustering")
+public class LegalnameClustering extends AbstractClusteringFunction {
 
-	public KeywordsClustering(Map<String, Object> params) {
+	private static final Pattern CITY_CODE_PATTERN = Pattern.compile("city::\\d+");
+	private static final Pattern KEYWORD_CODE_PATTERN = Pattern.compile("key::\\d+");
+
+	public LegalnameClustering(Map<String, Object> params) {
 		super(params);
 	}
 
+	public Set<String> getRegexList(String input, Pattern codeRegex) {
+		Matcher matcher = codeRegex.matcher(input);
+		Set<String> cities = new HashSet<>();
+		while (matcher.find()) {
+			cities.add(matcher.group());
+		}
+		return cities;
+	}
+
 	@Override
 	protected Collection<String> doApply(final Config conf, String s) {
 
-		// takes city codes and keywords codes without duplicates
-		Set<String> keywords = getKeywords(s, conf.translationMap(), paramOrDefault("windowSize", 4));
-		Set<String> cities = getCities(s, paramOrDefault("windowSize", 4));
-
 		// list of combination to return as result
 		final Collection<String> combinations = new LinkedHashSet<String>();
 
-		for (String keyword : keywordsToCodes(keywords, conf.translationMap())) {
-			for (String city : citiesToCodes(cities)) {
+		for (String keyword : getRegexList(s, KEYWORD_CODE_PATTERN)) {
+			for (String city : getRegexList(s, CITY_CODE_PATTERN)) {
 				combinations.add(keyword + "-" + city);
 				if (combinations.size() >= paramOrDefault("max", 2)) {
 					return combinations;
@@ -42,9 +52,6 @@ public class KeywordsClustering extends AbstractClusteringFunction {
 		return fields
 			.stream()
 			.filter(f -> !f.isEmpty())
-			.map(KeywordsClustering::cleanup)
-			.map(KeywordsClustering::normalize)
-			.map(s -> filterAllStopWords(s))
 			.map(s -> doApply(conf, s))
 			.map(c -> filterBlacklisted(c, ngramBlacklist))
 			.flatMap(c -> c.stream())

@@ -1,12 +1,6 @@
 
 package eu.dnetlib.pace.common;
 
-import com.google.common.base.Joiner;
-import com.google.common.collect.Sets;
-import com.ibm.icu.text.Transliterator;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-
 import java.io.IOException;
 import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
@@ -15,6 +9,13 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Sets;
+import com.ibm.icu.text.Transliterator;
+
 /**
  * Set of common functions for the framework
  *

@@ -26,6 +27,14 @@ public class AbstractPaceFunctions extends PaceCommonUtils {
 	private static Map<String, String> cityMap = AbstractPaceFunctions
 		.loadMapFromClasspath("/eu/dnetlib/pace/config/city_map.csv");
 
+	// keywords map to be used when translating the keyword names into codes
+	private static Map<String, String> keywordMap = AbstractPaceFunctions
+		.loadMapFromClasspath("/eu/dnetlib/pace/config/translation_map.csv");
+
+	// country map to be used when inferring the country from the city name
+	private static Map<String, String> countryMap = AbstractPaceFunctions
+		.loadCountryMapFromClasspath("/eu/dnetlib/pace/config/country_map.csv");
+
 	// list of stopwords in different languages
 	protected static Set<String> stopwords_gr = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_gr.txt");
 	protected static Set<String> stopwords_en = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_en.txt");

@@ -73,6 +82,64 @@ public class AbstractPaceFunctions extends PaceCommonUtils {
 		return s12;
 	}
 
+	public static String countryInference(final String original, String inferFrom) {
+		if (!original.equalsIgnoreCase("unknown"))
+			return original;
+
+		inferFrom = cleanup(inferFrom);
+		inferFrom = normalize(inferFrom);
+		inferFrom = filterAllStopWords(inferFrom);
+		Set<String> cities = getCities(inferFrom, 4);
+		return citiesToCountry(cities).stream().findFirst().orElse("UNKNOWN");
+	}
+
+	public static String cityInference(String original) {
+		original = cleanup(original);
+		original = normalize(original);
+		original = filterAllStopWords(original);
+
+		Set<String> cities = getCities(original, 4);
+
+		for (String city : cities) {
+			original = original.replaceAll(city, cityMap.get(city));
+		}
+
+		return original;
+	}
+
+	public static String keywordInference(String original) {
+		original = cleanup(original);
+		original = normalize(original);
+		original = filterAllStopWords(original);
+
+		Set<String> keywords = getKeywords(original, keywordMap, 4);
+
+		for (String keyword : keywords) {
+			original = original.replaceAll(keyword, keywordMap.get(keyword));
+		}
+
+		return original;
+	}
+
+	public static String cityKeywordInference(String original) {
+		original = cleanup(original);
+		original = normalize(original);
+		original = filterAllStopWords(original);
+
+		Set<String> keywords = getKeywords(original, keywordMap, 4);
+		Set<String> cities = getCities(original, 4);
+
+		for (String keyword : keywords) {
+			original = original.replaceAll(keyword, keywordMap.get(keyword));
+		}
+
+		for (String city : cities) {
+			original = original.replaceAll(city, cityMap.get(city));
+		}
+
+		return original;
+	}
+
 	protected static String fixXML(final String a) {
 
 		return a

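A small usage sketch of the new inference helpers. The expected outputs in the comments are taken from the `PaceFunctionTest` assertions added later in this diff; running it assumes the pace config resources (`city_map.csv`, `translation_map.csv`, `country_map.csv`) are on the classpath:

import static eu.dnetlib.pace.common.AbstractPaceFunctions.*;

public class InferenceSketch {

	public static void main(String[] args) {
		// city mentions are rewritten to city::<code> identifiers
		System.out.println(cityInference("Università di Bologna")); // universita city::3181928

		// keyword mentions are rewritten to key::<N> codes from the translation map
		System.out.println(keywordInference("Polytechnic University of Turin")); // key::41 turin

		// an UNKNOWN country is inferred from the cities found in the second argument
		System.out.println(countryInference("UNKNOWN", "Università di Bologna")); // IT
	}
}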
@@ -207,6 +274,30 @@ public class AbstractPaceFunctions extends PaceCommonUtils {
 		return m;
 	}
 
+	public static Map<String, String> loadCountryMapFromClasspath(final String classpath) {
+
+		Transliterator transliterator = Transliterator.getInstance("Any-Eng");
+
+		final Map<String, String> m = new HashMap<>();
+		try {
+			for (final String s : IOUtils
+				.readLines(AbstractPaceFunctions.class.getResourceAsStream(classpath), StandardCharsets.UTF_8)) {
+				// string is like this: country_code;city1;city2;city3
+				String[] line = s.split(";");
+				String value = line[0];
+				for (int i = 1; i < line.length; i++) {
+					String city = fixAliases(transliterator.transliterate(line[i].toLowerCase()));
+					String code = cityMap.get(city);
+					m.put(code, value);
+				}
+			}
+		} catch (final Throwable e) {
+			return new HashMap<>();
+		}
+		return m;
+
+	}
+
 	public static String removeKeywords(String s, Set<String> keywords) {
 
 		s = " " + s + " ";

@@ -236,6 +327,10 @@ public class AbstractPaceFunctions extends PaceCommonUtils {
 		return toCodes(keywords, cityMap);
 	}
 
+	public static Set<String> citiesToCountry(Set<String> cities) {
+		return toCodes(toCodes(cities, cityMap), countryMap);
+	}
+
 	protected static String firstLC(final String s) {
 		return StringUtils.substring(s, 0, 1).toLowerCase();
 	}

@@ -47,9 +47,21 @@ public class FieldDef implements Serializable {
 
 	private String clean;
 
+	private String infer;
+
+	private String inferenceFrom;
+
 	public FieldDef() {
 	}
 
+	public String getInferenceFrom() {
+		return inferenceFrom;
+	}
+
+	public void setInferenceFrom(final String inferenceFrom) {
+		this.inferenceFrom = inferenceFrom;
+	}
+
 	public String getName() {
 		return name;
 	}
@@ -126,6 +138,14 @@ public class FieldDef implements Serializable {
 		this.clean = clean;
 	}
 
+	public String getInfer() {
+		return infer;
+	}
+
+	public void setInfer(String infer) {
+		this.infer = infer;
+	}
+
 	@Override
 	public String toString() {
 		try {

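The two new properties are presumably populated from the dedup configuration JSON that `FieldDef` is deserialized from; a hand-built sketch (the values are invented for illustration) of the pair that drives the inference step in `SparkModel` below:

import eu.dnetlib.pace.model.FieldDef;

public class FieldDefInferSketch {

	public static void main(String[] args) {
		// hypothetical field definition; in practice FieldDef is deserialized
		// from the dedup configuration JSON rather than built by hand
		FieldDef legalname = new FieldDef();
		legalname.setInfer("city_keyword");        // run cityKeywordInference on the value
		legalname.setInferenceFrom("$.legalname"); // hypothetical JSON path to infer from
	}
}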
@@ -3,7 +3,7 @@ package eu.dnetlib.pace.model
 import com.jayway.jsonpath.{Configuration, JsonPath}
 import eu.dnetlib.pace.common.AbstractPaceFunctions
 import eu.dnetlib.pace.config.{DedupConfig, Type}
-import eu.dnetlib.pace.util.MapDocumentUtil
+import eu.dnetlib.pace.util.{MapDocumentUtil, SparkCompatUtils}
 import org.apache.commons.lang3.StringUtils
 import org.apache.spark.sql.catalyst.encoders.RowEncoder
 import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
@@ -52,7 +52,7 @@ case class SparkModel(conf: DedupConfig) {
   val orderingFieldPosition: Int = schema.fieldIndex(orderingFieldName)
 
   val parseJsonDataset: (Dataset[String] => Dataset[Row]) = df => {
-    df.map(r => rowFromJson(r))(RowEncoder(schema))
+    df.map(r => rowFromJson(r))(SparkCompatUtils.encoderFor(schema))
   }
 
   def rowFromJson(json: String): Row = {
@@ -123,9 +123,19 @@ case class SparkModel(conf: DedupConfig) {
           case _ => res(index)
         }
       }
+
+      if (StringUtils.isNotBlank(fdef.getInfer)) {
+        val inferFrom : String = if (StringUtils.isNotBlank(fdef.getInferenceFrom)) fdef.getInferenceFrom else fdef.getPath
+        res(index) = res(index) match {
+          case x: Seq[String] => x.map(inference(_, MapDocumentUtil.getJPathString(inferFrom, documentContext), fdef.getInfer))
+          case _ => inference(res(index).toString, MapDocumentUtil.getJPathString(inferFrom, documentContext), fdef.getInfer)
+        }
+      }
+
     }
 
     res
 
   }
 
   new GenericRowWithSchema(values, schema)
@@ -146,5 +156,17 @@ case class SparkModel(conf: DedupConfig) {
     res
   }
 
+  def inference(value: String, inferfrom: String, infertype: String): String = {
+    val res = infertype match {
+      case "country" => AbstractPaceFunctions.countryInference(value, inferfrom)
+      case "city" => AbstractPaceFunctions.cityInference(value)
+      case "keyword" => AbstractPaceFunctions.keywordInference(value)
+      case "city_keyword" => AbstractPaceFunctions.cityKeywordInference(value)
+      case _ => value
+    }
+
+    res
+  }
+
 }

@@ -1,48 +0,0 @@
-
-package eu.dnetlib.pace.tree;
-
-import java.util.Map;
-import java.util.Set;
-
-import eu.dnetlib.pace.config.Config;
-import eu.dnetlib.pace.tree.support.AbstractStringComparator;
-import eu.dnetlib.pace.tree.support.ComparatorClass;
-
-@ComparatorClass("cityMatch")
-public class CityMatch extends AbstractStringComparator {
-
-	private Map<String, String> params;
-
-	public CityMatch(Map<String, String> params) {
-		super(params);
-		this.params = params;
-	}
-
-	@Override
-	public double distance(final String a, final String b, final Config conf) {
-
-		String ca = cleanup(a);
-		String cb = cleanup(b);
-
-		ca = normalize(ca);
-		cb = normalize(cb);
-
-		ca = filterAllStopWords(ca);
-		cb = filterAllStopWords(cb);
-
-		Set<String> cities1 = getCities(ca, Integer.parseInt(params.getOrDefault("windowSize", "4")));
-		Set<String> cities2 = getCities(cb, Integer.parseInt(params.getOrDefault("windowSize", "4")));
-
-		Set<String> codes1 = citiesToCodes(cities1);
-		Set<String> codes2 = citiesToCodes(cities2);
-
-		// if no cities are detected, the comparator gives 1.0
-		if (codes1.isEmpty() && codes2.isEmpty())
-			return 1.0;
-		else {
-			if (codes1.isEmpty() ^ codes2.isEmpty())
-				return -1; // undefined if one of the two has no cities
-			return commonElementsPercentage(codes1, codes2);
-		}
-	}
-}

@@ -0,0 +1,51 @@
+
+package eu.dnetlib.pace.tree;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import eu.dnetlib.pace.config.Config;
+import eu.dnetlib.pace.tree.support.AbstractStringComparator;
+import eu.dnetlib.pace.tree.support.ComparatorClass;
+
+@ComparatorClass("codeMatch")
+public class CodeMatch extends AbstractStringComparator {
+
+	private Map<String, String> params;
+
+	private Pattern CODE_REGEX;
+
+	public CodeMatch(Map<String, String> params) {
+		super(params);
+		this.params = params;
+		this.CODE_REGEX = Pattern.compile(params.getOrDefault("codeRegex", "[a-zA-Z]::\\d+"));
+	}
+
+	public Set<String> getRegexList(String input) {
+		Matcher matcher = this.CODE_REGEX.matcher(input);
+		Set<String> cities = new HashSet<>();
+		while (matcher.find()) {
+			cities.add(matcher.group());
+		}
+		return cities;
+	}
+
+	@Override
+	public double distance(final String a, final String b, final Config conf) {
+
+		Set<String> codes1 = getRegexList(a);
+		Set<String> codes2 = getRegexList(b);
+
+		// if no codes are detected, the comparator gives 1.0
+		if (codes1.isEmpty() && codes2.isEmpty())
+			return 1.0;
+		else {
+			if (codes1.isEmpty() ^ codes2.isEmpty())
+				return -1; // undefined if one of the two has no codes
+			return commonElementsPercentage(codes1, codes2);
+		}
+	}
+}

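A usage sketch of the new comparator; the expected scores in the comments are the ones asserted in the `ComparatorTest` changes at the end of this diff, and `conf` may be any loaded dedup `Config` since `distance` only inspects the two strings:

import java.util.HashMap;
import java.util.Map;

import eu.dnetlib.pace.config.Config;
import eu.dnetlib.pace.tree.CodeMatch;

public class CodeMatchSketch {

	static void demo(Config conf) {
		Map<String, String> params = new HashMap<>();
		params.put("codeRegex", "key::\\d+"); // same parameter the test setup adds

		CodeMatch codeMatch = new CodeMatch(params);

		System.out.println(codeMatch.distance("testing1", "testing2", conf));               // 1.0  (no codes on either side)
		System.out.println(codeMatch.distance("testing1 key::1", "testing", conf));         // -1.0 (undefined: one side has no codes)
		System.out.println(codeMatch.distance("testing1 key::1", "testing2 key::1", conf)); // 1.0  (same code set)
		System.out.println(codeMatch.distance("testing1 key::1", "testing2 key::2", conf)); // 0.0  (disjoint code sets)
	}
}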
@@ -0,0 +1,54 @@
+
+package eu.dnetlib.pace.tree;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.wcohen.ss.AbstractStringDistance;
+
+import eu.dnetlib.pace.config.Config;
+import eu.dnetlib.pace.tree.support.AbstractStringComparator;
+import eu.dnetlib.pace.tree.support.ComparatorClass;
+
+@ComparatorClass("countryMatch")
+public class CountryMatch extends AbstractStringComparator {
+
+	private Map<String, String> params;
+
+	public CountryMatch(Map<String, String> params) {
+		super(params, new com.wcohen.ss.JaroWinkler());
+		this.params = params;
+	}
+
+	public CountryMatch(final double weight) {
+		super(weight, new com.wcohen.ss.JaroWinkler());
+	}
+
+	protected CountryMatch(final double weight, final AbstractStringDistance ssalgo) {
+		super(weight, ssalgo);
+	}
+
+	@Override
+	public double distance(final String a, final String b, final Config conf) {
+
+		if (a.isEmpty() || b.isEmpty()) {
+			return -1.0; // return -1 if a field is missing
+		}
+		if (a.equalsIgnoreCase("unknown") || b.equalsIgnoreCase("unknown")) {
+			return -1.0; // return -1 if a country is UNKNOWN
+		}
+
+		return a.equals(b) ? 1.0 : 0;
+	}
+
+	@Override
+	public double getWeight() {
+		return super.weight;
+	}
+
+	@Override
+	protected double normalize(final double d) {
+		return d;
+	}
+
+}

@@ -0,0 +1,59 @@
+
+package eu.dnetlib.pace.tree;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.wcohen.ss.AbstractStringDistance;
+
+import eu.dnetlib.pace.config.Config;
+import eu.dnetlib.pace.tree.support.AbstractStringComparator;
+import eu.dnetlib.pace.tree.support.ComparatorClass;
+
+@ComparatorClass("jaroWinklerLegalname")
+public class JaroWinklerLegalname extends AbstractStringComparator {
+
+	private Map<String, String> params;
+
+	private final String CITY_CODE_REGEX = "city::\\d+";
+	private final String KEYWORD_CODE_REGEX = "key::\\d+";
+
+	public JaroWinklerLegalname(Map<String, String> params) {
+		super(params, new com.wcohen.ss.JaroWinkler());
+		this.params = params;
+	}
+
+	public JaroWinklerLegalname(double weight) {
+		super(weight, new com.wcohen.ss.JaroWinkler());
+	}
+
+	protected JaroWinklerLegalname(double weight, AbstractStringDistance ssalgo) {
+		super(weight, ssalgo);
+	}
+
+	@Override
+	public double distance(String a, String b, final Config conf) {
+
+		String ca = a.replaceAll(CITY_CODE_REGEX, "").replaceAll(KEYWORD_CODE_REGEX, " ");
+		String cb = b.replaceAll(CITY_CODE_REGEX, "").replaceAll(KEYWORD_CODE_REGEX, " ");
+
+		ca = ca.replaceAll("[ ]{2,}", " ");
+		cb = cb.replaceAll("[ ]{2,}", " ");
+
+		if (ca.isEmpty() && cb.isEmpty())
+			return 1.0;
+		else
+			return normalize(ssalgo.score(ca, cb));
+	}
+
+	@Override
+	public double getWeight() {
+		return super.weight;
+	}
+
+	@Override
+	protected double normalize(double d) {
+		return d;
+	}
+
+}

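A hedged sketch of the new comparator on hypothetical, already-inferred legal names: the `city::`/`key::` codes are stripped before the JaroWinkler score is computed, so identical residual strings score 1.0 (the `conf` argument is unused by this comparator, hence the null):

import java.util.HashMap;

import eu.dnetlib.pace.tree.JaroWinklerLegalname;

public class JaroWinklerLegalnameSketch {

	public static void main(String[] args) {
		JaroWinklerLegalname jw = new JaroWinklerLegalname(new HashMap<>());
		// hypothetical inputs produced by the inference step (codes invented for illustration)
		double score = jw.distance("politecnico key::41 city::3165524", "politecnico key::41 city::3165524", null);
		System.out.println(score); // 1.0: identical residual strings after code removal
	}
}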
@@ -1,74 +0,0 @@
-
-package eu.dnetlib.pace.tree;
-
-import java.util.Map;
-import java.util.Set;
-
-import com.wcohen.ss.AbstractStringDistance;
-
-import eu.dnetlib.pace.config.Config;
-import eu.dnetlib.pace.tree.support.AbstractStringComparator;
-import eu.dnetlib.pace.tree.support.ComparatorClass;
-
-@ComparatorClass("jaroWinklerNormalizedName")
-public class JaroWinklerNormalizedName extends AbstractStringComparator {
-
-	private Map<String, String> params;
-
-	public JaroWinklerNormalizedName(Map<String, String> params) {
-		super(params, new com.wcohen.ss.JaroWinkler());
-		this.params = params;
-	}
-
-	public JaroWinklerNormalizedName(double weight) {
-		super(weight, new com.wcohen.ss.JaroWinkler());
-	}
-
-	protected JaroWinklerNormalizedName(double weight, AbstractStringDistance ssalgo) {
-		super(weight, ssalgo);
-	}
-
-	@Override
-	public double distance(String a, String b, final Config conf) {
-		String ca = cleanup(a);
-		String cb = cleanup(b);
-
-		ca = normalize(ca);
-		cb = normalize(cb);
-
-		ca = filterAllStopWords(ca);
-		cb = filterAllStopWords(cb);
-
-		Set<String> keywords1 = getKeywords(
-			ca, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));
-		Set<String> keywords2 = getKeywords(
-			cb, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));
-
-		Set<String> cities1 = getCities(ca, Integer.parseInt(params.getOrDefault("windowSize", "4")));
-		Set<String> cities2 = getCities(cb, Integer.parseInt(params.getOrDefault("windowSize", "4")));
-
-		ca = removeKeywords(ca, keywords1);
-		ca = removeKeywords(ca, cities1);
-		cb = removeKeywords(cb, keywords2);
-		cb = removeKeywords(cb, cities2);
-
-		ca = ca.replaceAll("[ ]{2,}", " ");
-		cb = cb.replaceAll("[ ]{2,}", " ");
-
-		if (ca.isEmpty() && cb.isEmpty())
-			return 1.0;
-		else
-			return normalize(ssalgo.score(ca, cb));
-	}
-
-	@Override
-	public double getWeight() {
-		return super.weight;
-	}
-
-	@Override
-	protected double normalize(double d) {
-		return d;
-	}
-
-}

@@ -1,50 +0,0 @@
-
-package eu.dnetlib.pace.tree;
-
-import java.util.Map;
-import java.util.Set;
-
-import eu.dnetlib.pace.config.Config;
-import eu.dnetlib.pace.tree.support.AbstractStringComparator;
-import eu.dnetlib.pace.tree.support.ComparatorClass;
-
-@ComparatorClass("keywordMatch")
-public class KeywordMatch extends AbstractStringComparator {
-
-	Map<String, String> params;
-
-	public KeywordMatch(Map<String, String> params) {
-		super(params);
-		this.params = params;
-	}
-
-	@Override
-	public double distance(final String a, final String b, final Config conf) {
-
-		String ca = cleanup(a);
-		String cb = cleanup(b);
-
-		ca = normalize(ca);
-		cb = normalize(cb);
-
-		ca = filterAllStopWords(ca);
-		cb = filterAllStopWords(cb);
-
-		Set<String> keywords1 = getKeywords(
-			ca, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));
-		Set<String> keywords2 = getKeywords(
-			cb, conf.translationMap(), Integer.parseInt(params.getOrDefault("windowSize", "4")));
-
-		Set<String> codes1 = toCodes(keywords1, conf.translationMap());
-		Set<String> codes2 = toCodes(keywords2, conf.translationMap());
-
-		// if no cities are detected, the comparator gives 1.0
-		if (codes1.isEmpty() && codes2.isEmpty())
-			return 1.0;
-		else {
-			if (codes1.isEmpty() ^ codes2.isEmpty())
-				return -1.0; // undefined if one of the two has no keywords
-			return commonElementsPercentage(codes1, codes2);
-		}
-	}
-}

@@ -48,7 +48,7 @@ public class TreeNodeDef implements Serializable {
 	// function for the evaluation of the node
 	public TreeNodeStats evaluate(Row doc1, Row doc2, Config conf) {
 
-		TreeNodeStats stats = new TreeNodeStats();
+		TreeNodeStats stats = new TreeNodeStats(ignoreUndefined);
 
 		// for each field in the node, it computes the
 		for (FieldConf fieldConf : fields) {

|
@ -9,8 +9,11 @@ public class TreeNodeStats implements Serializable {
|
|||
|
||||
private Map<String, FieldStats> results; // this is an accumulator for the results of the node
|
||||
|
||||
public TreeNodeStats() {
|
||||
private final boolean ignoreUndefined;
|
||||
|
||||
public TreeNodeStats(boolean ignoreUndefined) {
|
||||
this.results = new HashMap<>();
|
||||
this.ignoreUndefined = ignoreUndefined;
|
||||
}
|
||||
|
||||
public Map<String, FieldStats> getResults() {
|
||||
|
@ -22,7 +25,10 @@ public class TreeNodeStats implements Serializable {
|
|||
}
|
||||
|
||||
public int fieldsCount() {
|
||||
return this.results.size();
|
||||
if (ignoreUndefined)
|
||||
return this.results.size();
|
||||
else
|
||||
return this.results.size() - undefinedCount(); // do not count undefined
|
||||
}
|
||||
|
||||
public int undefinedCount() {
|
||||
|
@@ -78,11 +84,22 @@ public class TreeNodeStats implements Serializable {
 		double min = 100.0; // random high value
 		for (FieldStats fs : this.results.values()) {
 			if (fs.getResult() < min) {
-				if (fs.getResult() >= 0.0 || (fs.getResult() == -1 && fs.isCountIfUndefined()))
+				if (fs.getResult() == -1) {
+					if (fs.isCountIfUndefined()) {
+						min = 0.0;
+					} else {
+						min = -1;
+					}
+				} else {
 					min = fs.getResult();
+				}
 			}
 		}
-		return min;
+		if (ignoreUndefined) {
+			return min == -1.0 ? 0.0 : min;
+		} else {
+			return min;
+		}
 	}
 
 	// if at least one is true, return 1.0

@@ -91,7 +108,11 @@ public class TreeNodeStats implements Serializable {
 			if (fieldStats.getResult() >= fieldStats.getThreshold())
 				return 1.0;
 		}
-		return 0.0;
+		if (!ignoreUndefined && undefinedCount() > 0) {
+			return -1.0;
+		} else {
+			return 0.0;
+		}
 	}
 
 	// if at least one is false, return 0.0

@@ -100,7 +121,7 @@ public class TreeNodeStats implements Serializable {
 
 		if (fieldStats.getResult() == -1) {
 			if (fieldStats.isCountIfUndefined())
-				return 0.0;
+				return ignoreUndefined ? 0.0 : -1.0;
 		} else {
 			if (fieldStats.getResult() < fieldStats.getThreshold())
 				return 0.0;

@@ -44,12 +44,10 @@ public class TreeProcessor {
 			TreeNodeStats stats = currentNode.evaluate(doc1, doc2, config);
 			treeStats.addNodeStats(nextNodeName, stats);
 
-			// if ignoreUndefined=false the miss is considered as undefined
-			if (!currentNode.isIgnoreUndefined() && stats.undefinedCount() > 0) {
+			double finalScore = stats.getFinalScore(currentNode.getAggregation());
+			if (finalScore == -1.0)
 				nextNodeName = currentNode.getUndefined();
-			}
-			// if ignoreUndefined=true the miss is ignored and the score computed anyway
-			else if (stats.getFinalScore(currentNode.getAggregation()) >= currentNode.getThreshold()) {
+			else if (finalScore >= currentNode.getThreshold()) {
 				nextNodeName = currentNode.getPositive();
 			} else {
 				nextNodeName = currentNode.getNegative();

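Taken together, the TreeNodeStats and TreeProcessor hunks above change how an undefined field result (-1) propagates: with ignoreUndefined=true it degrades to 0.0 inside the aggregation, otherwise the -1 survives getFinalScore and the tree walk is routed to the node's "undefined" branch instead of positive/negative. A compact, self-contained restatement of the new MIN aggregation rule (not from the diff, the library types are replaced by plain arrays):

public class UndefinedAwareMin {

	/** Mirrors the new TreeNodeStats.getMin(): -1 marks an undefined field result. */
	static double minScore(double[] results, boolean[] countIfUndefined, boolean ignoreUndefined) {
		double min = 100.0; // random high value, as in the original
		for (int i = 0; i < results.length; i++) {
			if (results[i] < min) {
				// an undefined result counts as 0.0 when countIfUndefined is set,
				// otherwise it is kept as -1 and may become the aggregate
				min = results[i] == -1 ? (countIfUndefined[i] ? 0.0 : -1) : results[i];
			}
		}
		// ignoreUndefined=true degrades an all-undefined node to 0.0;
		// otherwise -1 survives and TreeProcessor takes the "undefined" branch
		return ignoreUndefined && min == -1.0 ? 0.0 : min;
	}
}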
File diff suppressed because one or more lines are too long
@@ -0,0 +1,12 @@
+package eu.dnetlib.pace.util
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
+import org.apache.spark.sql.types.StructType
+
+object SparkCompatUtils {
+
+  def encoderFor(schema: StructType): ExpressionEncoder[Row] = {
+    RowEncoder(schema)
+  }
+}

@@ -0,0 +1,12 @@
+package eu.dnetlib.pace.util
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
+import org.apache.spark.sql.types.StructType
+
+object SparkCompatUtils {
+
+  def encoderFor(schema: StructType): ExpressionEncoder[Row] = {
+    ExpressionEncoder(schema)
+  }
+}

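The two files above define the same `eu.dnetlib.pace.util.SparkCompatUtils` object twice, once per source root: the first copy (apparently `src/main/spark-2`, built under the spark-24 and spark-34 profiles) delegates to `RowEncoder(schema)`, while the second (`src/main/spark-35`) uses `ExpressionEncoder(schema)`, reflecting the row-encoder API change in newer Spark releases. Only one copy ends up on the compile path, depending on the active Maven profile, and callers such as `SparkModel.parseJsonDataset` go through `SparkCompatUtils.encoderFor` instead of calling `RowEncoder` directly.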
@@ -8,6 +8,7 @@ import org.junit.jupiter.api.Test;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.mongodb.connection.Cluster;
 
 import eu.dnetlib.pace.AbstractPaceTest;
 import eu.dnetlib.pace.common.AbstractPaceFunctions;

@@ -177,41 +178,16 @@ public class ClusteringFunctionTest extends AbstractPaceTest {
 	}
 
 	@Test
-	public void testKeywordsClustering() {
+	public void legalnameClustering() {
 
-		final ClusteringFunction cf = new KeywordsClustering(params);
-		final String s = "Polytechnic University of Turin";
+		final ClusteringFunction cf = new LegalnameClustering(params);
+		String s = "key::1 key::2 city::1";
 		System.out.println(s);
 		System.out.println(cf.apply(conf, Lists.newArrayList(s)));
 
-		final String s1 = "POLITECNICO DI TORINO";
-		System.out.println(s1);
-		System.out.println(cf.apply(conf, Lists.newArrayList(s1)));
-
-		final String s2 = "Universita farmaceutica culturale di milano bergamo";
-		System.out.println("s2 = " + s2);
-		System.out.println(cf.apply(conf, Lists.newArrayList(s2)));
-
-		final String s3 = "universita universita milano milano";
-		System.out.println("s3 = " + s3);
-		System.out.println(cf.apply(conf, Lists.newArrayList(s3)));
-
-		final String s4 = "Politechniki Warszawskiej (Warsaw University of Technology)";
-		System.out.println("s4 = " + s4);
-		System.out.println(cf.apply(conf, Lists.newArrayList(s4)));
-
-		final String s5 = "İstanbul Ticarət Universiteti";
-		System.out.println("s5 = " + s5);
-		System.out.println(cf.apply(conf, Lists.newArrayList(s5)));
-
-		final String s6 = "National and Kapodistrian University of Athens";
-		System.out.println("s6 = " + s6);
-		System.out.println(cf.apply(conf, Lists.newArrayList(s6)));
-
-		final String s7 = "Εθνικό και Καποδιστριακό Πανεπιστήμιο Αθηνών";
-		System.out.println("s7 = " + s7);
-		System.out.println(cf.apply(conf, Lists.newArrayList(s7)));
-
+		s = "key::1 key::2 city::1 city::2";
+		System.out.println(s);
+		System.out.println(cf.apply(conf, Lists.newArrayList(s)));
 	}
 
 	@Test

|
@ -54,4 +54,47 @@ public class PaceFunctionTest extends AbstractPaceFunctions {
|
|||
System.out.println("Fixed aliases : " + fixAliases(TEST_STRING));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void countryInferenceTest() {
|
||||
assertEquals("IT", countryInference("UNKNOWN", "Università di Bologna"));
|
||||
assertEquals("UK", countryInference("UK", "Università di Bologna"));
|
||||
assertEquals("IT", countryInference("UNKNOWN", "Universiteé de Naples"));
|
||||
assertEquals("UNKNOWN", countryInference("UNKNOWN", "Università del Lavoro"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void cityInferenceTest() {
|
||||
assertEquals("universita city::3181928", cityInference("Università di Bologna"));
|
||||
assertEquals("university city::3170647", cityInference("University of Pisa"));
|
||||
assertEquals("universita", cityInference("Università del lavoro"));
|
||||
assertEquals("universita city::3173331 city::3169522", cityInference("Università di Modena e Reggio Emilia"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void keywordInferenceTest() {
|
||||
assertEquals("key::41 turin", keywordInference("Polytechnic University of Turin"));
|
||||
assertEquals("key::41 torino", keywordInference("POLITECNICO DI TORINO"));
|
||||
assertEquals(
|
||||
"key::1 key::60 key::81 milano bergamo",
|
||||
keywordInference("Universita farmaceutica culturale di milano bergamo"));
|
||||
assertEquals("key::1 key::1 milano milano", keywordInference("universita universita milano milano"));
|
||||
assertEquals(
|
||||
"key::10 kapodistriako panepistemio athenon",
|
||||
keywordInference("Εθνικό και Καποδιστριακό Πανεπιστήμιο Αθηνών"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void cityKeywordInferenceTest() {
|
||||
assertEquals("key::41 city::3165524", cityKeywordInference("Polytechnic University of Turin"));
|
||||
assertEquals("key::41 city::3165524", cityKeywordInference("POLITECNICO DI TORINO"));
|
||||
assertEquals(
|
||||
"key::1 key::60 key::81 city::3173435 city::3182164",
|
||||
cityKeywordInference("Universita farmaceutica culturale di milano bergamo"));
|
||||
assertEquals(
|
||||
"key::1 key::1 city::3173435 city::3173435", cityKeywordInference("universita universita milano milano"));
|
||||
assertEquals(
|
||||
"key::10 kapodistriako panepistemio city::264371",
|
||||
cityKeywordInference("Εθνικό και Καποδιστριακό Πανεπιστήμιο Αθηνών"));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -35,6 +35,7 @@ public class ComparatorTest extends AbstractPaceTest {
		params.put("name_th", "0.95");
		params.put("jpath_value", "$.value");
		params.put("jpath_classid", "$.qualifier.classid");
		params.put("codeRegex", "key::\\d+");
	}

	@Test
@@ -44,52 +45,23 @@ public class ComparatorTest extends AbstractPaceTest {
	}

	@Test
	public void cityMatchTest() {
		final CityMatch cityMatch = new CityMatch(params);
	public void codeMatchTest() {
		CodeMatch codeMatch = new CodeMatch(params);

		// both names with no cities
		assertEquals(1.0, cityMatch.distance("Università", "Centro di ricerca", conf));
		// both names with no codes
		assertEquals(1.0, codeMatch.distance("testing1", "testing2", conf));

		// one of the two names with no cities
		assertEquals(-1.0, cityMatch.distance("Università di Bologna", "Centro di ricerca", conf));
		// one of the two names with no codes
		assertEquals(-1.0, codeMatch.distance("testing1 key::1", "testing", conf));

		// both names with cities (same)
		assertEquals(1.0, cityMatch.distance("Universita di Bologna", "Biblioteca di Bologna", conf));
		// both names with codes (same)
		assertEquals(1.0, codeMatch.distance("testing1 key::1", "testing2 key::1", conf));

		// both names with cities (different)
		assertEquals(0.0, cityMatch.distance("Universita di Bologna", "Universita di Torino", conf));
		assertEquals(0.0, cityMatch.distance("Franklin College", "Concordia College", conf));
		// both names with codes (different)
		assertEquals(0.0, codeMatch.distance("testing1 key::1", "testing2 key::2", conf));

		// particular cases
		assertEquals(1.0, cityMatch.distance("Free University of Bozen-Bolzano", "Università di Bolzano", conf));
		assertEquals(
			1.0,
			cityMatch
				.distance(
					"Politechniki Warszawskiej (Warsaw University of Technology)", "Warsaw University of Technology",
					conf));

		// failing because 'Allen' is a transliterated Greek stopword
		// assertEquals(-1.0, cityMatch.distance("Allen (United States)", "United States Military Academy", conf));
		assertEquals(-1.0, cityMatch.distance("Washington (United States)", "United States Military Academy", conf));
	}

	@Test
	public void keywordMatchTest() {
		params.put("threshold", "0.5");

		final KeywordMatch keywordMatch = new KeywordMatch(params);

		assertEquals(
			0.5, keywordMatch.distance("Biblioteca dell'Universita di Bologna", "Università di Bologna", conf));
		assertEquals(1.0, keywordMatch.distance("Universita degli studi di Pisa", "Universita di Pisa", conf));
		assertEquals(1.0, keywordMatch.distance("Polytechnic University of Turin", "POLITECNICO DI TORINO", conf));
		assertEquals(1.0, keywordMatch.distance("Istanbul Commerce University", "İstanbul Ticarət Universiteti", conf));
		assertEquals(1.0, keywordMatch.distance("Franklin College", "Concordia College", conf));
		assertEquals(2.0 / 3.0, keywordMatch.distance("University of Georgia", "Georgia State University", conf));
		assertEquals(0.5, keywordMatch.distance("University College London", "University of London", conf));
		assertEquals(0.5, keywordMatch.distance("Washington State University", "University of Washington", conf));
		assertEquals(-1.0, keywordMatch.distance("Allen (United States)", "United States Military Academy", conf));
		// both names with codes (1 same, 1 different)
		assertEquals(0.5, codeMatch.distance("key::1 key::2 testing1", "key::1 testing", conf));

	}

@@ -155,15 +127,15 @@ public class ComparatorTest extends AbstractPaceTest {
	}

	@Test
	public void jaroWinklerNormalizedNameTest() {
	public void jaroWinklerLegalnameTest() {

		final JaroWinklerNormalizedName jaroWinklerNormalizedName = new JaroWinklerNormalizedName(params);
		final JaroWinklerLegalname jaroWinklerLegalname = new JaroWinklerLegalname(params);

		double result = jaroWinklerNormalizedName
			.distance("AT&T (United States)", "United States Military Academy", conf);
		double result = jaroWinklerLegalname
			.distance("AT&T (United States)", "United States key::2 key::1", conf);
		System.out.println("result = " + result);

		result = jaroWinklerNormalizedName.distance("NOAA - Servicio Meteorol\\u00f3gico Nacional", "NOAA - NWS", conf);
		result = jaroWinklerLegalname.distance("NOAA - Servicio Meteorol\\u00f3gico Nacional", "NOAA - NWS", conf);
		System.out.println("result = " + result);

	}
@@ -336,4 +308,23 @@ public class ComparatorTest extends AbstractPaceTest {
		System.out.println("compare = " + compare);
	}

	@Test
	public void countryMatch() {

		CountryMatch countryMatch = new CountryMatch(params);

		double result = countryMatch.distance("UNKNOWN", "UNKNOWN", conf);
		assertEquals(-1.0, result);

		result = countryMatch.distance("CL", "UNKNOWN", conf);
		assertEquals(-1.0, result);

		result = countryMatch.distance("CL", "IT", conf);
		assertEquals(0.0, result);

		result = countryMatch.distance("CL", "CL", conf);
		assertEquals(1.0, result);

	}

}

@@ -11,6 +11,7 @@ import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

import eu.dnetlib.pace.model.Person;
import jdk.nashorn.internal.ir.annotations.Ignore;

public class UtilTest {

@@ -0,0 +1,113 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
	<parent>
		<artifactId>dhp</artifactId>
		<groupId>eu.dnetlib.dhp</groupId>
		<version>1.2.5-SNAPSHOT</version>
	</parent>
	<modelVersion>4.0.0</modelVersion>
	<artifactId>dhp-shade-package</artifactId>
	<description>This module creates a jar of all module dependencies</description>
	<build>
		<plugins>
			<plugin>
				<artifactId>maven-shade-plugin</artifactId>
				<executions>
					<execution>
						<phase>package</phase>
						<goals>
							<goal>shade</goal>
						</goals>
						<configuration>
							<transformers>
								<transformer>
									<mainClass>eu.dnetlib.dhp.oa.dedup.SparkCreateSimRels</mainClass>
								</transformer>
								<transformer />
								<transformer>
									<resource>META-INF/cxf/bus-extensions.txt</resource>
								</transformer>
							</transformers>
							<filters>
								<filter>
									<artifact>*:*</artifact>
									<excludes>
										<exclude>META-INF/maven/**</exclude>
										<exclude>META-INF/*.SF</exclude>
										<exclude>META-INF/*.DSA</exclude>
										<exclude>META-INF/*.RSA</exclude>
									</excludes>
								</filter>
							</filters>
							<relocations>
								<relocation>
									<pattern>com</pattern>
									<shadedPattern>repackaged.com.google.common</shadedPattern>
									<includes>
										<include>com.google.common.**</include>
									</includes>
								</relocation>
							</relocations>
						</configuration>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>
	<dependencies>
		<dependency>
			<groupId>org.projectlombok</groupId>
			<artifactId>lombok</artifactId>
			<version>1.18.28</version>
			<scope>provided</scope>
		</dependency>
		<dependency>
			<groupId>org.junit.jupiter</groupId>
			<artifactId>junit-jupiter</artifactId>
			<version>5.6.1</version>
			<scope>test</scope>
			<exclusions>
				<exclusion>
					<artifactId>junit-jupiter-api</artifactId>
					<groupId>org.junit.jupiter</groupId>
				</exclusion>
				<exclusion>
					<artifactId>junit-jupiter-params</artifactId>
					<groupId>org.junit.jupiter</groupId>
				</exclusion>
				<exclusion>
					<artifactId>junit-jupiter-engine</artifactId>
					<groupId>org.junit.jupiter</groupId>
				</exclusion>
			</exclusions>
		</dependency>
		<dependency>
			<groupId>org.mockito</groupId>
			<artifactId>mockito-core</artifactId>
			<version>3.3.3</version>
			<scope>test</scope>
			<exclusions>
				<exclusion>
					<artifactId>byte-buddy</artifactId>
					<groupId>net.bytebuddy</groupId>
				</exclusion>
				<exclusion>
					<artifactId>byte-buddy-agent</artifactId>
					<groupId>net.bytebuddy</groupId>
				</exclusion>
			</exclusions>
		</dependency>
		<dependency>
			<groupId>org.mockito</groupId>
			<artifactId>mockito-junit-jupiter</artifactId>
			<version>3.3.3</version>
			<scope>test</scope>
		</dependency>
	</dependencies>
	<distributionManagement>
		<site>
			<id>DHPSite</id>
			<url>${dhp.site.stage.path}/dhp-common</url>
		</site>
	</distributionManagement>
</project>

@@ -0,0 +1,169 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
		 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		 xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<parent>
		<groupId>eu.dnetlib.dhp</groupId>
		<artifactId>dhp</artifactId>
		<version>1.2.5-SNAPSHOT</version>
		<relativePath>../pom.xml</relativePath>

	</parent>

	<artifactId>dhp-shade-package</artifactId>
	<packaging>jar</packaging>

	<distributionManagement>
		<site>
			<id>DHPSite</id>
			<url>${dhp.site.stage.path}/dhp-common</url>
		</site>
	</distributionManagement>

	<description>This module creates a jar of all module dependencies</description>


	<dependencies>

		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-actionmanager</artifactId>
			<version>${project.version}</version>
		</dependency>
		<!-- <dependency>-->
		<!-- <groupId>eu.dnetlib.dhp</groupId>-->
		<!-- <artifactId>dhp-aggregation</artifactId>-->
		<!-- <version>${project.version}</version>-->
		<!-- </dependency>-->
		<!-- <dependency>-->
		<!-- <groupId>eu.dnetlib.dhp</groupId>-->
		<!-- <artifactId>dhp-blacklist</artifactId>-->
		<!-- <version>${project.version}</version>-->
		<!-- </dependency>-->
		<!-- <dependency>-->
		<!-- <groupId>eu.dnetlib.dhp</groupId>-->
		<!-- <artifactId>dhp-broker-events</artifactId>-->
		<!-- <version>${project.version}</version>-->
		<!-- </dependency>-->
		<!-- <dependency>-->
		<!-- <groupId>eu.dnetlib.dhp</groupId>-->
		<!-- <artifactId>dhp-dedup-openaire</artifactId>-->
		<!-- <version>${project.version}</version>-->
		<!-- </dependency>-->
		<!-- <dependency>-->
		<!-- <groupId>eu.dnetlib.dhp</groupId>-->
		<!-- <artifactId>dhp-enrichment</artifactId>-->
		<!-- <version>${project.version}</version>-->
		<!-- </dependency>-->
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-graph-mapper</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-graph-provision</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-impact-indicators</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-stats-actionsets</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-stats-hist-snaps</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-stats-monitor-irish</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-stats-promote</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-stats-update</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-swh</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-usage-raw-data-update</artifactId>
			<version>${project.version}</version>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib.dhp</groupId>
			<artifactId>dhp-usage-stats-build</artifactId>
			<version>${project.version}</version>
		</dependency>
	</dependencies>


	<build>
		<plugins>
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-shade-plugin</artifactId>
				<executions>
					<execution>
						<phase>package</phase>
						<goals>
							<goal>shade</goal>
						</goals>
						<configuration>
							<transformers>
								<transformer
									implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
									<mainClass>eu.dnetlib.dhp.oa.dedup.SparkCreateSimRels</mainClass>
								</transformer>
								<!-- This is needed if you have dependencies that use Service Loader. Most Google Cloud client libraries do. -->
								<transformer
									implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
								<transformer
									implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
									<resource>META-INF/cxf/bus-extensions.txt</resource>
								</transformer>
							</transformers>
							<filters>
								<filter>
									<artifact>*:*</artifact>
									<excludes>
										<exclude>META-INF/maven/**</exclude>
										<exclude>META-INF/*.SF</exclude>
										<exclude>META-INF/*.DSA</exclude>
										<exclude>META-INF/*.RSA</exclude>
									</excludes>
								</filter>
							</filters>
							<relocations>
								<relocation>
									<pattern>com</pattern>
									<shadedPattern>repackaged.com.google.common</shadedPattern>
									<includes>
										<include>com.google.common.**</include>
									</includes>
								</relocation>
							</relocations>
						</configuration>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>

</project>

@@ -51,48 +51,5 @@
			<artifactId>hadoop-distcp</artifactId>
		</dependency>

		<dependency>
			<groupId>eu.dnetlib</groupId>
			<artifactId>dnet-actionmanager-api</artifactId>
		</dependency>
		<dependency>
			<groupId>eu.dnetlib</groupId>
			<artifactId>dnet-actionmanager-common</artifactId>
			<exclusions>
				<exclusion>
					<groupId>eu.dnetlib</groupId>
					<artifactId>dnet-openaireplus-mapping-utils</artifactId>
				</exclusion>
				<exclusion>
					<groupId>saxonica</groupId>
					<artifactId>saxon</artifactId>
				</exclusion>
				<exclusion>
					<groupId>saxonica</groupId>
					<artifactId>saxon-dom</artifactId>
				</exclusion>
				<exclusion>
					<groupId>jgrapht</groupId>
					<artifactId>jgrapht</artifactId>
				</exclusion>
				<exclusion>
					<groupId>net.sf.ehcache</groupId>
					<artifactId>ehcache</artifactId>
				</exclusion>
				<exclusion>
					<groupId>org.springframework</groupId>
					<artifactId>spring-test</artifactId>
				</exclusion>
				<exclusion>
					<groupId>org.apache.*</groupId>
					<artifactId>*</artifactId>
				</exclusion>
				<exclusion>
					<groupId>apache</groupId>
					<artifactId>*</artifactId>
				</exclusion>
			</exclusions>
		</dependency>

	</dependencies>
</project>

@@ -4,7 +4,6 @@ package eu.dnetlib.dhp.actionmanager;
import java.io.Serializable;
import java.io.StringReader;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
@@ -22,7 +21,6 @@ import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;

import eu.dnetlib.actionmanager.rmi.ActionManagerException;
import eu.dnetlib.dhp.utils.ISLookupClientFactory;
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpException;
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpService;
@@ -65,7 +63,7 @@ public class ISClient implements Serializable {
					.map(t -> buildDirectory(basePath, t))
					.collect(Collectors.toList()))
				.orElseThrow(() -> new IllegalStateException("empty set list"));
		} catch (ActionManagerException | ISLookUpException e) {
		} catch (ISLookUpException e) {
			throw new IllegalStateException("unable to query ActionSets info from the IS");
		}
	}
@@ -89,31 +87,18 @@ public class ISClient implements Serializable {
		return Joiner.on("/").join(basePath, t.getMiddle(), t.getRight());
	}

	private String getBasePathHDFS(ISLookUpService isLookup) throws ActionManagerException {
	private String getBasePathHDFS(ISLookUpService isLookup) throws ISLookUpException {
		return queryServiceProperty(isLookup, "basePath");
	}

	private String queryServiceProperty(ISLookUpService isLookup, final String propertyName)
		throws ActionManagerException {
		throws ISLookUpException {
		final String q = "for $x in /RESOURCE_PROFILE[.//RESOURCE_TYPE/@value='ActionManagerServiceResourceType'] return $x//SERVICE_PROPERTIES/PROPERTY[./@ key='"
			+ propertyName
			+ "']/@value/string()";
		log.debug("querying for service property: {}", q);
		try {
			final List<String> value = isLookup.quickSearchProfile(q);
			return Iterables.getOnlyElement(value);
		} catch (ISLookUpException e) {
			String msg = "Error accessing service profile, using query: " + q;
			log.error(msg, e);
			throw new ActionManagerException(msg, e);
		} catch (NoSuchElementException e) {
			String msg = "missing service property: " + propertyName;
			log.error(msg, e);
			throw new ActionManagerException(msg, e);
		} catch (IllegalArgumentException e) {
			String msg = "found more than one service property: " + propertyName;
			log.error(msg, e);
			throw new ActionManagerException(msg, e);
		}

		final List<String> value = isLookup.quickSearchProfile(q);
		return Iterables.getOnlyElement(value);
	}
}

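A hedged aside on this refactoring: only ISLookUpException remains checked, while an empty or duplicated service property now surfaces from Guava's Iterables.getOnlyElement as an unchecked exception. A minimal caller-side sketch (the call site and messages are illustrative assumptions, written as if inside ISClient):

// Hypothetical call site inside ISClient: getOnlyElement throws
// NoSuchElementException when the query returns nothing and
// IllegalArgumentException when it returns several values.
try {
	final String basePath = queryServiceProperty(isLookup, "basePath");
	log.info("ActionManager basePath: {}", basePath);
} catch (ISLookUpException | NoSuchElementException | IllegalArgumentException e) {
	throw new IllegalStateException("unable to resolve the ActionManager basePath", e);
}
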
@@ -103,6 +103,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -156,6 +157,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -95,6 +95,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -125,6 +125,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -95,6 +95,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -103,6 +103,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -155,11 +156,12 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=2560
--conf spark.sql.shuffle.partitions=8000
</spark-opts>
<arg>--inputGraphTablePath</arg><arg>${workingDir}/otherresearchproduct</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>

@@ -95,6 +95,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -103,11 +103,12 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7000
--conf spark.sql.shuffle.partitions=15000
</spark-opts>
<arg>--inputGraphTablePath</arg><arg>${inputGraphRootPath}/publication</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>

@@ -156,11 +157,12 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=7000
--conf spark.sql.shuffle.partitions=15000
</spark-opts>
<arg>--inputGraphTablePath</arg><arg>${workingDir}/publication</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>

@@ -95,11 +95,12 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=10000
--conf spark.sql.shuffle.partitions=15000
</spark-opts>
<arg>--inputGraphTablePath</arg><arg>${inputGraphRootPath}/relation</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Relation</arg>

@@ -103,6 +103,7 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}

@@ -155,11 +156,12 @@
--executor-memory=${sparkExecutorMemory}
--executor-cores=${sparkExecutorCores}
--driver-memory=${sparkDriverMemory}
--conf spark.executor.memoryOverhead=${sparkExecutorMemory}
--conf spark.extraListeners=${spark2ExtraListeners}
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
--conf spark.sql.shuffle.partitions=2560
--conf spark.sql.shuffle.partitions=4000
</spark-opts>
<arg>--inputGraphTablePath</arg><arg>${workingDir}/software</arg>
<arg>--graphTableClassName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>

@@ -6,7 +6,18 @@
		<artifactId>dhp-workflows</artifactId>
		<version>1.2.5-SNAPSHOT</version>
	</parent>

	<artifactId>dhp-aggregation</artifactId>

	<properties>
		<affro.release.version>1.0.0</affro.release.version>
	</properties>

	<scm>
		<url>https://code-repo.d4science.org/mkallipo/affRo</url>
		<connection>scm:git:https://code-repo.d4science.org/mkallipo/affRo.git</connection>
	</scm>

	<build>
		<plugins>
			<plugin>
@@ -43,6 +54,28 @@
					<scalaVersion>${scala.version}</scalaVersion>
				</configuration>
			</plugin>

			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-scm-plugin</artifactId>
				<version>1.8.1</version>
				<configuration>
					<connectionType>connection</connectionType>
					<scmVersionType>tag</scmVersionType><!-- 'branch' can also be provided here -->
					<scmVersion>${affro.release.version}</scmVersion><!-- in case of scmVersionType == 'branch', this field points to the branch name -->
					<checkoutDirectory>${project.build.directory}/${oozie.package.file.name}/${oozieAppDir}/affRo</checkoutDirectory>
				</configuration>
				<executions>
					<execution>
						<id>checkout-affro</id>
						<phase>prepare-package</phase>
						<goals>
							<goal>checkout</goal>
						</goals>
					</execution>
				</executions>
			</plugin>

		</plugins>

	</build>

@@ -42,6 +42,9 @@ public class Constants {
	public static final String NULL = "NULL";
	public static final String NA = "N/A";

	public static final String WEB_CRAWL_ID = "10|openaire____::fb98a192f6a055ba495ef414c330834b";
	public static final String WEB_CRAWL_NAME = "Web Crawl";

	public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	private Constants() {

@@ -9,6 +9,7 @@ import java.util.List;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
@@ -40,9 +41,9 @@ public class PrepareAffiliationRelations implements Serializable {
	private static final Logger log = LoggerFactory.getLogger(PrepareAffiliationRelations.class);
	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
	private static final String ID_PREFIX = "50|doi_________::";
	public static final String BIP_AFFILIATIONS_CLASSID = "result:organization:bipinference";
	public static final String BIP_AFFILIATIONS_CLASSNAME = "Affiliation relation inferred by BIP!";
	public static final String BIP_INFERENCE_PROVENANCE = "bip:affiliation:crossref";
	public static final String BIP_AFFILIATIONS_CLASSID = "result:organization:openaireinference";
	public static final String BIP_AFFILIATIONS_CLASSNAME = "Affiliation relation inferred by OpenAIRE";
	public static final String BIP_INFERENCE_PROVENANCE = "openaire:affiliation";

	public static <I extends Result> void main(String[] args) throws Exception {

@@ -70,6 +71,9 @@ public class PrepareAffiliationRelations implements Serializable {
		final String dataciteInputPath = parser.get("dataciteInputPath");
		log.info("dataciteInputPath: {}", dataciteInputPath);

		final String webcrawlInputPath = parser.get("webCrawlInputPath");
		log.info("webcrawlInputPath: {}", webcrawlInputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

@@ -101,12 +105,18 @@ public class PrepareAffiliationRelations implements Serializable {
				JavaPairRDD<Text, Text> dataciteRelations = prepareAffiliationRelations(
					spark, dataciteInputPath, collectedFromDatacite);

				List<KeyValue> collectedFromWebCrawl = OafMapperUtils
					.listKeyValues(Constants.WEB_CRAWL_ID, Constants.WEB_CRAWL_NAME);
				JavaPairRDD<Text, Text> webCrawlRelations = prepareAffiliationRelations(
					spark, webcrawlInputPath, collectedFromWebCrawl);

				crossrefRelations
					.union(pubmedRelations)
					.union(openAPCRelations)
					.union(dataciteRelations)
					.union(webCrawlRelations)
					.saveAsHadoopFile(
						outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
						outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);

			});
	}
@@ -119,7 +129,8 @@ public class PrepareAffiliationRelations implements Serializable {
		Dataset<Row> df = spark
			.read()
			.schema("`DOI` STRING, `Matchings` ARRAY<STRUCT<`RORid`:STRING,`Confidence`:DOUBLE>>")
			.json(inputPath);
			.json(inputPath)
			.where("DOI is not null");

		// unroll nested arrays
		df = df

@@ -10,6 +10,7 @@ import java.util.stream.Collectors;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
@@ -83,7 +84,7 @@ public class SparkAtomicActionScoreJob implements Serializable {
				resultsRDD
					.union(projectsRDD)
					.saveAsHadoopFile(
						outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
						outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);
			});
	}

@@ -115,19 +115,7 @@ public class PrepareFOSSparkJob implements Serializable {
			.forEach(
				l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID, true)));
		r.setSubject(sbjs);
		r
			.setDataInfo(
				OafMapperUtils
					.dataInfo(
						false, null, true,
						false,
						OafMapperUtils
							.qualifier(
								ModelConstants.PROVENANCE_ENRICH,
								null,
								ModelConstants.DNET_PROVENANCE_ACTIONS,
								ModelConstants.DNET_PROVENANCE_ACTIONS),
						null));

		return r;
	}

@@ -6,26 +6,23 @@ import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.MapGroupsFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.*;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.actionmanager.createunresolvedentities.model.SDGDataModel;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.Result;
import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
import eu.dnetlib.dhp.schema.oaf.Subject;
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
import eu.dnetlib.dhp.utils.DHPUtils;

public class PrepareSDGSparkJob implements Serializable {

@@ -52,54 +49,91 @@ public class PrepareSDGSparkJob implements Serializable {
		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		final Boolean distributeDOI = Optional
			.ofNullable(parser.get("distributeDoi"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);

		log.info("distribute doi {}", distributeDOI);

		SparkConf conf = new SparkConf();
		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				doPrepare(
					spark,
					sourcePath,
				if (distributeDOI)
					doPrepare(
						spark,
						sourcePath,

					outputPath);
				else
					doPrepareoaid(spark, sourcePath, outputPath);

						outputPath);
			});
	}

	private static void doPrepare(SparkSession spark, String sourcePath, String outputPath) {
		Dataset<SDGDataModel> sdgDataset = readPath(spark, sourcePath, SDGDataModel.class);
		Dataset<Row> sdgDataset = spark
			.read()
			.format("csv")
			.option("sep", DEFAULT_DELIMITER)
			.option("inferSchema", "true")
			.option("header", "true")
			.option("quotes", "\"")
			.load(sourcePath);

		sdgDataset
			.groupByKey((MapFunction<SDGDataModel, String>) r -> r.getDoi().toLowerCase(), Encoders.STRING())
			.mapGroups((MapGroupsFunction<String, SDGDataModel, Result>) (k, it) -> {
				Result r = new Result();
				r.setId(DHPUtils.generateUnresolvedIdentifier(k, DOI));
				SDGDataModel first = it.next();
				List<Subject> sbjs = new ArrayList<>();
				sbjs.add(getSubject(first.getSbj(), SDG_CLASS_ID, SDG_CLASS_NAME, UPDATE_SUBJECT_SDG_CLASS_ID));
				it
					.forEachRemaining(
						s -> sbjs
							.add(getSubject(s.getSbj(), SDG_CLASS_ID, SDG_CLASS_NAME, UPDATE_SUBJECT_SDG_CLASS_ID)));
				r.setSubject(sbjs);
				r
					.setDataInfo(
						OafMapperUtils
							.dataInfo(
								false, null, true,
								false,
								OafMapperUtils
									.qualifier(
										ModelConstants.PROVENANCE_ENRICH,
										null,
										ModelConstants.DNET_PROVENANCE_ACTIONS,
										ModelConstants.DNET_PROVENANCE_ACTIONS),
								null));
				return r;
			}, Encoders.bean(Result.class))
			.groupByKey((MapFunction<Row, String>) v -> ((String) v.getAs("doi")).toLowerCase(), Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Row, Result>) (k,
					it) -> getResult(
						DHPUtils
							.generateUnresolvedIdentifier(
								ModelSupport.entityIdPrefix.get(Result.class.getSimpleName().toLowerCase()) + "|" + k,
								DOI),
						it),
				Encoders.bean(Result.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "/sdg");
	}

	private static void doPrepareoaid(SparkSession spark, String sourcePath, String outputPath) {
		Dataset<Row> sdgDataset = spark
			.read()
			.format("csv")
			.option("sep", DEFAULT_DELIMITER)
			.option("inferSchema", "true")
			.option("header", "true")
			.option("quotes", "\"")
			.load(sourcePath);

		sdgDataset
			.groupByKey((MapFunction<Row, String>) r -> "50|" + ((String) r.getAs("oaid")), Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Row, Result>) PrepareSDGSparkJob::getResult, Encoders.bean(Result.class))
			.write()
			.mode(SaveMode.Overwrite)
			.option("compression", "gzip")
			.json(outputPath + "/sdg");
	}

	private static @NotNull Result getResult(String id, Iterator<Row> it) {
		Result r = new Result();
		r.setId(id);
		Row first = it.next();
		List<Subject> sbjs = new ArrayList<>();
		sbjs.add(getSubject(first.getAs("sdg"), SDG_CLASS_ID, SDG_CLASS_NAME, UPDATE_SUBJECT_SDG_CLASS_ID));
		it
			.forEachRemaining(
				s -> sbjs
					.add(getSubject(s.getAs("sdg"), SDG_CLASS_ID, SDG_CLASS_NAME, UPDATE_SUBJECT_SDG_CLASS_ID)));
		r.setSubject(sbjs);

		return r;
	}

}

@@ -13,9 +13,6 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
@@ -24,13 +21,9 @@ import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.actionmanager.opencitations.model.COCI;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.schema.oaf.Result;
import eu.dnetlib.dhp.schema.oaf.utils.*;
import scala.Tuple2;

public class CreateActionSetSparkJob implements Serializable {

@@ -0,0 +1,80 @@

package eu.dnetlib.dhp.actionmanager.personentity;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.Person;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
import eu.dnetlib.dhp.utils.DHPUtils;

public class CoAuthorshipIterator implements Iterator<Relation> {
	private int firstIndex;
	private int secondIndex;
	private boolean firstRelation;
	private List<String> authors;
	private static final String PERSON_PREFIX = ModelSupport.getIdPrefix(Person.class) + "|orcid_______::";
	private static final String OPENAIRE_PREFIX = "openaire____";
	private static final String SEPARATOR = "::";
	private static final String ORCID_KEY = "10|" + OPENAIRE_PREFIX + SEPARATOR
		+ DHPUtils.md5(ModelConstants.ORCID.toLowerCase());
	public static final String ORCID_AUTHORS_CLASSID = "sysimport:crosswalk:orcid";
	public static final String ORCID_AUTHORS_CLASSNAME = "Imported from ORCID";

	@Override
	public boolean hasNext() {
		return firstIndex < authors.size() - 1;
	}

	@Override
	public Relation next() {
		Relation rel = null;
		if (firstRelation) {
			rel = getRelation(authors.get(firstIndex), authors.get(secondIndex));
			firstRelation = Boolean.FALSE;
		} else {
			rel = getRelation(authors.get(secondIndex), authors.get(firstIndex));
			firstRelation = Boolean.TRUE;
			secondIndex += 1;
			if (secondIndex >= authors.size()) {
				firstIndex += 1;
				secondIndex = firstIndex + 1;
			}
		}

		return rel;
	}

	public CoAuthorshipIterator(List<String> authors) {
		this.authors = authors;
		this.firstIndex = 0;
		this.secondIndex = 1;
		this.firstRelation = Boolean.TRUE;

	}

	private Relation getRelation(String orcid1, String orcid2) {
		String source = PERSON_PREFIX + IdentifierFactory.md5(orcid1);
		String target = PERSON_PREFIX + IdentifierFactory.md5(orcid2);
		return OafMapperUtils
			.getRelation(
				source, target, ModelConstants.PERSON_PERSON_RELTYPE,
				ModelConstants.PERSON_PERSON_SUBRELTYPE,
				ModelConstants.PERSON_PERSON_HASCOAUTHORED,
				Arrays.asList(OafMapperUtils.keyValue(ORCID_KEY, ModelConstants.ORCID_DS)),
				OafMapperUtils
					.dataInfo(
						false, null, false, false,
						OafMapperUtils
							.qualifier(
								ORCID_AUTHORS_CLASSID, ORCID_AUTHORS_CLASSNAME,
								ModelConstants.DNET_PROVENANCE_ACTIONS, ModelConstants.DNET_PROVENANCE_ACTIONS),
						"0.91"),
				null);
	}
}

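A hedged illustration of the iterator above (not part of the diff; the demo class and ORCID values are placeholders): for n co-authors it yields both directions of every unordered pair, i.e. n * (n - 1) hasCoAuthored relations.

// Sketch only: enumerate the relations produced for three placeholder ORCIDs.
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import eu.dnetlib.dhp.actionmanager.personentity.CoAuthorshipIterator;
import eu.dnetlib.dhp.schema.oaf.Relation;

public class CoAuthorshipIteratorDemo {
	public static void main(String[] args) {
		List<String> orcids = Arrays.asList("0000-0002-0000-0001", "0000-0002-0000-0002", "0000-0002-0000-0003");
		Iterator<Relation> it = new CoAuthorshipIterator(orcids);
		int count = 0;
		while (it.hasNext()) {
			Relation r = it.next();
			// Each unordered pair appears twice, once per direction.
			System.out.println(r.getSource() + " hasCoAuthored " + r.getTarget());
			count++;
		}
		System.out.println(count); // prints 6 for three authors: 3 * 2 ordered pairs
	}
}
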
@@ -0,0 +1,20 @@

package eu.dnetlib.dhp.actionmanager.personentity;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import eu.dnetlib.dhp.schema.oaf.Relation;

public class Coauthors implements Serializable {
	private List<String> coauthors;

	public List<String> getCoauthors() {
		return coauthors;
	}

	public void setCoauthors(List<String> coauthors) {
		this.coauthors = coauthors;
	}
}

@@ -0,0 +1,40 @@

package eu.dnetlib.dhp.actionmanager.personentity;

import java.io.Serializable;

import eu.dnetlib.dhp.schema.oaf.Person;
import eu.dnetlib.dhp.schema.oaf.Relation;
import scala.Tuple2;

public class Couples implements Serializable {
	Person p;
	Relation r;

	public Couples() {

	}

	public Person getP() {
		return p;
	}

	public void setP(Person p) {
		this.p = p;
	}

	public Relation getR() {
		return r;
	}

	public void setR(Relation r) {
		this.r = r;
	}

	public static <Tuples> Couples newInstance(Tuple2<Person, Relation> couple) {
		Couples c = new Couples();
		c.p = couple._1();
		c.r = couple._2();
		return c;
	}
}

@@ -0,0 +1,437 @@

package eu.dnetlib.dhp.actionmanager.personentity;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import static org.apache.spark.sql.functions.*;

import java.io.IOException;
import java.io.Serializable;
import java.util.*;
import java.util.stream.Collectors;

import org.apache.commons.cli.ParseException;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.*;
import org.apache.spark.sql.*;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.spark_project.jetty.util.StringUtil;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.collection.orcid.model.Author;
import eu.dnetlib.dhp.collection.orcid.model.Employment;
import eu.dnetlib.dhp.collection.orcid.model.Work;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.KeyValue;
import eu.dnetlib.dhp.schema.oaf.Person;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
import eu.dnetlib.dhp.schema.oaf.utils.PidCleaner;
import eu.dnetlib.dhp.schema.oaf.utils.PidType;
import eu.dnetlib.dhp.utils.DHPUtils;
import scala.Tuple2;

public class ExtractPerson implements Serializable {
	private static final Logger log = LoggerFactory.getLogger(ExtractPerson.class);

	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
	private static final String OPENAIRE_PREFIX = "openaire____";
	private static final String SEPARATOR = "::";
	private static final String orcidKey = "10|" + OPENAIRE_PREFIX + SEPARATOR
		+ DHPUtils.md5(ModelConstants.ORCID.toLowerCase());

	private static final String DOI_PREFIX = "50|doi_________::";

	private static final String PMID_PREFIX = "50|pmid________::";
	private static final String ARXIV_PREFIX = "50|arXiv_______::";

	private static final String PMCID_PREFIX = "50|pmcid_______::";
	private static final String ROR_PREFIX = "20|ror_________::";
	private static final String PERSON_PREFIX = ModelSupport.getIdPrefix(Person.class) + "|orcid_______";
	public static final String ORCID_AUTHORS_CLASSID = "sysimport:crosswalk:orcid";
	public static final String ORCID_AUTHORS_CLASSNAME = "Imported from ORCID";

	public static void main(final String[] args) throws IOException, ParseException {

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
			IOUtils
				.toString(
					Objects
						.requireNonNull(
							ExtractPerson.class
								.getResourceAsStream(
									"/eu/dnetlib/dhp/actionmanager/personentity/as_parameters.json"))));

		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);

		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String inputPath = parser.get("inputPath");
		log.info("inputPath {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath {}", outputPath);

		final String workingDir = parser.get("workingDir");
		log.info("workingDir {}", workingDir);

		SparkConf conf = new SparkConf();
		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				HdfsSupport.remove(outputPath, spark.sparkContext().hadoopConfiguration());
				createActionSet(spark, inputPath, outputPath, workingDir);
			});

	}

	private static void createActionSet(SparkSession spark, String inputPath, String outputPath, String workingDir) {

		Dataset<Author> authors = spark
			.read()
			.parquet(inputPath + "Authors")
			.as(Encoders.bean(Author.class));

		Dataset<Work> works = spark
			.read()
			.parquet(inputPath + "Works")
			.as(Encoders.bean(Work.class))
			.filter(
				(FilterFunction<Work>) w -> Optional.ofNullable(w.getPids()).isPresent() &&
					w
						.getPids()
						.stream()
						.anyMatch(
							p -> p.getSchema().equalsIgnoreCase("doi") ||
								p.getSchema().equalsIgnoreCase("pmc") ||
								p.getSchema().equalsIgnoreCase("pmid") ||
								p.getSchema().equalsIgnoreCase("arxiv")));

		Dataset<Employment> employmentDataset = spark
			.read()
			.parquet(inputPath + "Employments")
			.as(Encoders.bean(Employment.class));

		Dataset<Author> peopleToMap = authors
			.joinWith(works, authors.col("orcid").equalTo(works.col("orcid")))
			.map((MapFunction<Tuple2<Author, Work>, Author>) t2 -> t2._1(), Encoders.bean(Author.class))
			.groupByKey((MapFunction<Author, String>) a -> a.getOrcid(), Encoders.STRING())
			.mapGroups((MapGroupsFunction<String, Author, Author>) (k, it) -> it.next(), Encoders.bean(Author.class));

		Dataset<Employment> employment = employmentDataset
			.joinWith(peopleToMap, employmentDataset.col("orcid").equalTo(peopleToMap.col("orcid")))
			.map((MapFunction<Tuple2<Employment, Author>, Employment>) t2 -> t2._1(), Encoders.bean(Employment.class));

		Dataset<Person> people;
		peopleToMap.map((MapFunction<Author, Person>) op -> {
			Person person = new Person();
			person.setId(DHPUtils.generateIdentifier(op.getOrcid(), PERSON_PREFIX));
			person
				.setBiography(
					Optional
						.ofNullable(op.getBiography())

						.orElse(""));
			KeyValue kv = OafMapperUtils.keyValue(orcidKey, ModelConstants.ORCID_DS);
			kv.setDataInfo(null);
			person.setCollectedfrom(Arrays.asList(kv));
			person
				.setAlternativeNames(
					Optional
						.ofNullable(op.getOtherNames())

						.orElse(new ArrayList<>()));
			person
				.setFamilyName(
					Optional
						.ofNullable(op.getFamilyName())

						.orElse(""));
			person
				.setGivenName(
					Optional
						.ofNullable(op.getGivenName())

						.orElse(""));
			person
				.setPid(
					Optional
						.ofNullable(op.getOtherPids())
						.map(
							v -> v
								.stream()
								.map(
									p -> OafMapperUtils
										.structuredProperty(
											p.getValue(), p.getSchema(), p.getSchema(), ModelConstants.DNET_PID_TYPES,
											ModelConstants.DNET_PID_TYPES, null))
								.collect(Collectors.toList()))
						.orElse(new ArrayList<>()));
			person
				.getPid()
				.add(
					OafMapperUtils
						.structuredProperty(
							op.getOrcid(), ModelConstants.ORCID, ModelConstants.ORCID_CLASSNAME,
							ModelConstants.DNET_PID_TYPES, ModelConstants.DNET_PID_TYPES, null));
			person.setDateofcollection(op.getLastModifiedDate());
			person.setOriginalId(Arrays.asList(op.getOrcid()));
			return person;
		}, Encoders.bean(Person.class))
			.write()
			.option("compression", "gzip")
			.mode(SaveMode.Overwrite)
			.json(workingDir + "/people");

		works
			.flatMap(
				(FlatMapFunction<Work, Relation>) ExtractPerson::getAuthorshipRelationIterator,
				Encoders.bean(Relation.class))
			.write()
			.option("compression", "gzip")
			.mode(SaveMode.Overwrite)
			.json(workingDir + "/authorship");

		Dataset<Relation> coauthorship = works
			.flatMap((FlatMapFunction<Work, Tuple2<String, String>>) w -> {
				List<Tuple2<String, String>> lista = new ArrayList<>();
				w.getPids().stream().forEach(p -> {
					if (p.getSchema().equalsIgnoreCase("doi") || p.getSchema().equalsIgnoreCase("pmc")
						|| p.getSchema().equalsIgnoreCase("pmid") || p.getSchema().equalsIgnoreCase("arxiv"))
						lista.add(new Tuple2<>(p.getValue(), w.getOrcid()));
				});
				return lista.iterator();
			}, Encoders.tuple(Encoders.STRING(), Encoders.STRING()))
			.groupByKey((MapFunction<Tuple2<String, String>, String>) Tuple2::_1, Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Tuple2<String, String>, Coauthors>) (k, it) -> extractCoAuthors(it),
				Encoders.bean(Coauthors.class))
			.flatMap(
				(FlatMapFunction<Coauthors, Relation>) c -> new CoAuthorshipIterator(c.getCoauthors()),
				Encoders.bean(Relation.class))
			.groupByKey((MapFunction<Relation, String>) r -> r.getSource() + r.getTarget(), Encoders.STRING())
			.mapGroups(
				(MapGroupsFunction<String, Relation, Relation>) (k, it) -> it.next(), Encoders.bean(Relation.class));

		coauthorship
			.write()
			.option("compression", "gzip")
			.mode(SaveMode.Overwrite)
			.json(workingDir + "/coauthorship");

		employment
			.filter((FilterFunction<Employment>) e -> Optional.ofNullable(e.getAffiliationId()).isPresent())
			.filter((FilterFunction<Employment>) e -> e.getAffiliationId().getSchema().equalsIgnoreCase("ror"))
			.map(
				(MapFunction<Employment, Relation>) ExtractPerson::getAffiliationRelation,
				Encoders.bean(Relation.class))
			.write()
			.option("compression", "gzip")
			.mode(SaveMode.Overwrite)
			.json(workingDir + "/affiliation");

		people = spark
			.read()
			.textFile(workingDir + "/people")
			.map(
				(MapFunction<String, Person>) value -> OBJECT_MAPPER
					.readValue(value, Person.class),
				Encoders.bean(Person.class));

		people.show(false);
		people
			.toJavaRDD()
			.map(p -> new AtomicAction(p.getClass(), p))
			.union(
				getRelations(spark, workingDir + "/authorship").toJavaRDD().map(r -> new AtomicAction(r.getClass(), r)))
			.union(
				getRelations(spark, workingDir + "/coauthorship")
					.toJavaRDD()
					.map(r -> new AtomicAction(r.getClass(), r)))
			.union(
				getRelations(spark, workingDir + "/affiliation")
					.toJavaRDD()
					.map(r -> new AtomicAction(r.getClass(), r)))
			.mapToPair(
				aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
					new Text(OBJECT_MAPPER.writeValueAsString(aa))))
			.saveAsHadoopFile(
				outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);
	}

	private static Dataset<Relation> getRelations(SparkSession spark, String path) {
		return spark
			.read()
			.textFile(path)
			.map(
				(MapFunction<String, Relation>) value -> OBJECT_MAPPER
					.readValue(value, Relation.class),
				Encoders.bean(Relation.class));// spark.read().json(path).as(Encoders.bean(Relation.class));
	}

	private static Coauthors extractCoAuthors(Iterator<Tuple2<String, String>> it) {
		Coauthors coauth = new Coauthors();
		List<String> coauthors = new ArrayList<>();
		while (it.hasNext())
			coauthors.add(it.next()._2());
		coauth.setCoauthors(coauthors);

		return coauth;
	}

	private static Relation getAffiliationRelation(Employment row) {
		String source = PERSON_PREFIX + IdentifierFactory.md5(row.getOrcid());
		String target = ROR_PREFIX
			+ IdentifierFactory.md5(PidCleaner.normalizePidValue("ROR", row.getAffiliationId().getValue()));
		List<KeyValue> properties = new ArrayList<>();

		Relation relation = OafMapperUtils
			.getRelation(
				source, target, ModelConstants.ORG_PERSON_RELTYPE, ModelConstants.ORG_PERSON_SUBRELTYPE,
				ModelConstants.ORG_PERSON_PARTICIPATES,
				Arrays.asList(OafMapperUtils.keyValue(orcidKey, ModelConstants.ORCID_DS)),
				OafMapperUtils
					.dataInfo(
						false, null, false, false,
						OafMapperUtils
							.qualifier(
								ORCID_AUTHORS_CLASSID, ORCID_AUTHORS_CLASSNAME, ModelConstants.DNET_PROVENANCE_ACTIONS,
								ModelConstants.DNET_PROVENANCE_ACTIONS),
						"0.91"),
				null);

		if (Optional.ofNullable(row.getStartDate()).isPresent() && StringUtil.isNotBlank(row.getStartDate())) {
			KeyValue kv = new KeyValue();
			kv.setKey("startDate");
			kv.setValue(row.getStartDate());
			properties.add(kv);
		}
		if (Optional.ofNullable(row.getEndDate()).isPresent() && StringUtil.isNotBlank(row.getEndDate())) {
			KeyValue kv = new KeyValue();
			kv.setKey("endDate");
			kv.setValue(row.getEndDate());
			properties.add(kv);
		}

		if (properties.size() > 0)
			relation.setProperties(properties);
		return relation;

	}

	private static Collection<? extends Relation> getCoAuthorshipRelations(String orcid1, String orcid2) {
		String source = PERSON_PREFIX + "::" + IdentifierFactory.md5(orcid1);
		String target = PERSON_PREFIX + "::" + IdentifierFactory.md5(orcid2);

		return Arrays
			.asList(
				OafMapperUtils
					.getRelation(
						source, target, ModelConstants.PERSON_PERSON_RELTYPE,
						ModelConstants.PERSON_PERSON_SUBRELTYPE,
|
||||
ModelConstants.PERSON_PERSON_HASCOAUTHORED,
|
||||
Arrays.asList(OafMapperUtils.keyValue(orcidKey, ModelConstants.ORCID_DS)),
|
||||
OafMapperUtils
|
||||
.dataInfo(
|
||||
false, null, false, false,
|
||||
OafMapperUtils
|
||||
.qualifier(
|
||||
ORCID_AUTHORS_CLASSID, ORCID_AUTHORS_CLASSNAME,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS, ModelConstants.DNET_PROVENANCE_ACTIONS),
|
||||
"0.91"),
|
||||
null),
|
||||
OafMapperUtils
|
||||
.getRelation(
|
||||
target, source, ModelConstants.PERSON_PERSON_RELTYPE,
|
||||
ModelConstants.PERSON_PERSON_SUBRELTYPE,
|
||||
ModelConstants.PERSON_PERSON_HASCOAUTHORED,
|
||||
Arrays.asList(OafMapperUtils.keyValue(orcidKey, ModelConstants.ORCID_DS)),
|
||||
OafMapperUtils
|
||||
.dataInfo(
|
||||
false, null, false, false,
|
||||
OafMapperUtils
|
||||
.qualifier(
|
||||
ORCID_AUTHORS_CLASSID, ORCID_AUTHORS_CLASSNAME,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS, ModelConstants.DNET_PROVENANCE_ACTIONS),
|
||||
"0.91"),
|
||||
null));
|
||||
|
||||
}
|
||||
|
||||
private static @NotNull Iterator<Relation> getAuthorshipRelationIterator(Work w) {
|
||||
|
||||
if (Optional.ofNullable(w.getPids()).isPresent())
|
||||
return w
|
||||
.getPids()
|
||||
.stream()
|
||||
.map(pid -> getRelation(w.getOrcid(), pid))
|
||||
.filter(Objects::nonNull)
|
||||
.collect(Collectors.toList())
|
||||
.iterator();
|
||||
List<Relation> ret = new ArrayList<>();
|
||||
return ret.iterator();
|
||||
}
|
||||
|
||||
private static Relation getRelation(String orcid, eu.dnetlib.dhp.collection.orcid.model.Pid pid) {
|
||||
String target;
|
||||
String source = PERSON_PREFIX + "::" + IdentifierFactory.md5(orcid);
|
||||
switch (pid.getSchema()) {
|
||||
case "doi":
|
||||
target = DOI_PREFIX
|
||||
+ IdentifierFactory
|
||||
.md5(PidCleaner.normalizePidValue(PidType.doi.toString(), pid.getValue()));
|
||||
break;
|
||||
case "pmid":
|
||||
target = PMID_PREFIX
|
||||
+ IdentifierFactory
|
||||
.md5(PidCleaner.normalizePidValue(PidType.pmid.toString(), pid.getValue()));
|
||||
break;
|
||||
case "arxiv":
|
||||
target = ARXIV_PREFIX
|
||||
+ IdentifierFactory
|
||||
.md5(PidCleaner.normalizePidValue(PidType.arXiv.toString(), pid.getValue()));
|
||||
break;
|
||||
case "pmcid":
|
||||
target = PMCID_PREFIX
|
||||
+ IdentifierFactory
|
||||
.md5(PidCleaner.normalizePidValue(PidType.pmc.toString(), pid.getValue()));
|
||||
break;
|
||||
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
|
||||
return OafMapperUtils
|
||||
.getRelation(
|
||||
source, target, ModelConstants.RESULT_PERSON_RELTYPE,
|
||||
ModelConstants.RESULT_PERSON_SUBRELTYPE,
|
||||
ModelConstants.RESULT_PERSON_HASAUTHORED,
|
||||
Arrays.asList(OafMapperUtils.keyValue(orcidKey, ModelConstants.ORCID_DS)),
|
||||
OafMapperUtils
|
||||
.dataInfo(
|
||||
false, null, false, false,
|
||||
OafMapperUtils
|
||||
.qualifier(
|
||||
ORCID_AUTHORS_CLASSID, ORCID_AUTHORS_CLASSNAME, ModelConstants.DNET_PROVENANCE_ACTIONS,
|
||||
ModelConstants.DNET_PROVENANCE_ACTIONS),
|
||||
"0.91"),
|
||||
null);
|
||||
}
|
||||
}
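The Person entities and the three relation sets all end up in one BZip2-compressed SequenceFile of serialized AtomicActions. As a minimal sketch (not part of the job), the output can be inspected with the same Text/Text pairing that RemoveRelationFromActionSet uses further below; spark and outputPath are assumed to be in scope:

// Sketch: list the distinct payload classes stored in the action set.
// Mirrors the sequenceFile(...) read pattern used elsewhere in this branch.
JavaSparkContext
	.fromSparkContext(spark.sparkContext())
	.sequenceFile(outputPath, Text.class, Text.class)
	.map(t -> t._1().toString()) // the key carries the payload's canonical class name
	.distinct()
	.collect()
	.forEach(System.out::println);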
@@ -0,0 +1,25 @@

package eu.dnetlib.dhp.actionmanager.personentity;

import java.io.Serializable;
import java.util.ArrayList;

import eu.dnetlib.dhp.collection.orcid.model.Work;

public class WorkList implements Serializable {
	private ArrayList<Work> workArrayList;

	public ArrayList<Work> getWorkArrayList() {
		return workArrayList;
	}

	public void setWorkArrayList(ArrayList<Work> workArrayList) {
		this.workArrayList = workArrayList;
	}

	public WorkList() {
		workArrayList = new ArrayList<>();
	}
}
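WorkList looks like a plain serializable holder, presumably introduced so that a collection of Work records can travel through Spark's bean encoder; a minimal sketch of that assumed use (spark is assumed in scope):

// Sketch only: wrap Work records so they can pass through Encoders.bean(...).
WorkList wl = new WorkList();
wl.getWorkArrayList().add(new Work());
Dataset<WorkList> ds = spark.createDataset(Arrays.asList(wl), Encoders.bean(WorkList.class));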
@@ -0,0 +1,91 @@

package eu.dnetlib.dhp.actionmanager.sdgnodoi;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.io.Serializable;
import java.util.Objects;
import java.util.Optional;

import org.apache.commons.cli.ParseException;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.oaf.Result;
import scala.Tuple2;

public class CreateActionSetSparkJob implements Serializable {

	private static final Logger log = LoggerFactory.getLogger(CreateActionSetSparkJob.class);

	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	public static void main(final String[] args) throws IOException, ParseException {

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
			IOUtils
				.toString(
					Objects
						.requireNonNull(
							CreateActionSetSparkJob.class
								.getResourceAsStream(
									"/eu/dnetlib/dhp/actionmanager/fosnodoi/as_parameters.json"))));

		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);

		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String inputPath = parser.get("sourcePath");
		log.info("inputPath {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath {}", outputPath);

		SparkConf conf = new SparkConf();
		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				HdfsSupport.remove(outputPath, spark.sparkContext().hadoopConfiguration());
				createActionSet(spark, inputPath, outputPath);
			});

	}

	private static void createActionSet(SparkSession spark, String inputPath, String outputPath) {
		spark
			.read()
			.textFile(inputPath)
			.map(
				(MapFunction<String, Result>) value -> OBJECT_MAPPER.readValue(value, Result.class),
				Encoders.bean(Result.class))
			.toJavaRDD()
			.map(p -> new AtomicAction(p.getClass(), p))
			.mapToPair(
				aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
					new Text(OBJECT_MAPPER.writeValueAsString(aa))))
			.saveAsHadoopFile(
				outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
	}

}
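A sketch of launching this job; note that the sdgnodoi class loads its parameter spec from the fosnodoi package, so the long option names below assume that spec declares the keys used by parser.get(...) above, and the paths are placeholders:

// Illustrative local invocation; paths are placeholders.
CreateActionSetSparkJob
	.main(new String[] {
		"--isSparkSessionManaged", "false",
		"--sourcePath", "/tmp/sdgnodoi/results",
		"--outputPath", "/tmp/sdgnodoi/actionset"
	});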
@@ -5,13 +5,13 @@ import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.Serializable;
import java.util.*;
import java.util.stream.Collectors;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.StructType;

@@ -20,6 +20,7 @@ import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.actionmanager.Constants;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.common.ModelConstants;

@@ -43,8 +44,7 @@ public class CreateActionSetFromWebEntries implements Serializable {
	private static final String PMID_PREFIX = "50|pmid________::";

	private static final String PMCID_PREFIX = "50|pmc_________::";
	private static final String WEB_CRAWL_ID = "10|openaire____::fb98a192f6a055ba495ef414c330834b";
	private static final String WEB_CRAWL_NAME = "Web Crawl";

	public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	public static void main(String[] args) throws Exception {

@@ -70,6 +70,9 @@ public class CreateActionSetFromWebEntries implements Serializable {
		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		final String blackListInputPath = parser.get("blackListPath");
		log.info("blackListInputPath: {}", blackListInputPath);

		SparkConf conf = new SparkConf();

		runWithSparkSession(

@@ -77,35 +80,39 @@ public class CreateActionSetFromWebEntries implements Serializable {
			isSparkSessionManaged,
			spark -> {

				createActionSet(spark, inputPath, outputPath);
				createActionSet(spark, inputPath, outputPath, blackListInputPath);

			});
	}

	public static void createActionSet(SparkSession spark, String inputPath,
		String outputPath) {
		String outputPath, String blackListInputPath) {

		final Dataset<Row> dataset = readWebCrawl(spark, inputPath)
			.filter("publication_year <= 2020 or country_code=='IE'")
			.filter("country_code=='IE'")
			.drop("publication_year");

		dataset.flatMap((FlatMapFunction<Row, Relation>) row -> {
			List<Relation> ret = new ArrayList<>();
			final String ror = ROR_PREFIX
				+ IdentifierFactory.md5(PidCleaner.normalizePidValue("ROR", row.getAs("ror")));
			ret.addAll(createAffiliationRelationPairDOI(row.getAs("doi"), ror));
			ret.addAll(createAffiliationRelationPairPMID(row.getAs("pmid"), ror));
			ret.addAll(createAffiliationRelationPairPMCID(row.getAs("pmcid"), ror));
		final Dataset<Row> blackList = readBlackList(spark, blackListInputPath);

			return ret
				.iterator();
		}, Encoders.bean(Relation.class))
		dataset
			.join(blackList, dataset.col("id").equalTo(blackList.col("OpenAlexId")), "left")
			.filter((FilterFunction<Row>) r -> r.getAs("OpenAlexId") == null)
			.drop("OpenAlexId")
			.flatMap((FlatMapFunction<Row, Relation>) row -> {
				List<Relation> ret = new ArrayList<>();
				final String ror = ROR_PREFIX
					+ IdentifierFactory.md5(PidCleaner.normalizePidValue("ROR", row.getAs("ror")));
				ret.addAll(createAffiliationRelationPairDOI(row.getAs("doi"), ror));

				return ret
					.iterator();
			}, Encoders.bean(Relation.class))
			.toJavaRDD()
			.map(p -> new AtomicAction(p.getClass(), p))
			.mapToPair(
				aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
					new Text(OBJECT_MAPPER.writeValueAsString(aa))))
			.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
			.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);

	}

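The left join followed by the null filter and column drop above is the manual spelling of an anti join; Spark's built-in left_anti join type expresses the same blacklist exclusion in one step. A sketch with the same column names:

// Equivalent exclusion via Spark's native anti join (sketch).
Dataset<Row> notBlacklisted = dataset
	.join(blackList, dataset.col("id").equalTo(blackList.col("OpenAlexId")), "left_anti");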
@@ -129,13 +136,22 @@ public class CreateActionSetFromWebEntries implements Serializable {
				"institution", functions
					.explode(
						functions.col("institutions")))

			.selectExpr(
				"id", "doi", "ids.pmcid as pmcid", "ids.pmid as pmid", "institution.ror as ror",
				"id", "doi", "institution.ror as ror",
				"institution.country_code as country_code", "publication_year")
			.distinct();

	}

	private static Dataset<Row> readBlackList(SparkSession spark, String inputPath) {

		return spark
			.read()
			.json(inputPath)
			.select("OpenAlexId");
	}

	private static List<Relation> createAffiliationRelationPairPMCID(String pmcid, String ror) {
		if (pmcid == null)
			return new ArrayList<>();

@@ -195,7 +211,7 @@ public class CreateActionSetFromWebEntries implements Serializable {
				ModelConstants.IS_AUTHOR_INSTITUTION_OF,
				Arrays
					.asList(
						OafMapperUtils.keyValue(WEB_CRAWL_ID, WEB_CRAWL_NAME)),
						OafMapperUtils.keyValue(Constants.WEB_CRAWL_ID, Constants.WEB_CRAWL_NAME)),
				OafMapperUtils
					.dataInfo(
						false, null, false, false,

@@ -214,7 +230,7 @@ public class CreateActionSetFromWebEntries implements Serializable {
				ModelConstants.HAS_AUTHOR_INSTITUTION,
				Arrays
					.asList(
						OafMapperUtils.keyValue(WEB_CRAWL_ID, WEB_CRAWL_NAME)),
						OafMapperUtils.keyValue(Constants.WEB_CRAWL_ID, Constants.WEB_CRAWL_NAME)),
				OafMapperUtils
					.dataInfo(
						false, null, false, false,
@@ -0,0 +1,158 @@

package eu.dnetlib.dhp.actionmanager.webcrawl;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import static org.apache.spark.sql.functions.*;

import java.io.File;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Optional;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.FileFileFilter;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
import scala.Tuple2;

public class RemoveRelationFromActionSet
	implements Serializable {
	private static final Logger log = LoggerFactory.getLogger(RemoveRelationFromActionSet.class);

	private static final ObjectMapper MAPPER = new ObjectMapper();
	private static final StructType KV_SCHEMA = StructType$.MODULE$
		.apply(
			Arrays
				.asList(
					StructField$.MODULE$.apply("key", DataTypes.StringType, false, Metadata.empty()),
					StructField$.MODULE$.apply("value", DataTypes.StringType, false, Metadata.empty())));

	private static final StructType ATOMIC_ACTION_SCHEMA = StructType$.MODULE$
		.apply(
			Arrays
				.asList(
					StructField$.MODULE$.apply("clazz", DataTypes.StringType, false, Metadata.empty()),
					StructField$.MODULE$
						.apply(
							"payload", DataTypes.StringType, false, Metadata.empty())));

	public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	public static void main(String[] args) throws Exception {
		String jsonConfiguration = IOUtils
			.toString(
				CreateActionSetFromWebEntries.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/actionmanager/webcrawl/as_parameters.json"));

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);

		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		// the actionSet path
		final String inputPath = parser.get("sourcePath");
		log.info("inputPath: {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		final String blackListInputPath = parser.get("blackListPath");
		log.info("blackListInputPath: {}", blackListInputPath);

		SparkConf conf = new SparkConf();

		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {

				removeFromActionSet(spark, inputPath, outputPath, blackListInputPath);

			});
	}

	private static void removeFromActionSet(SparkSession spark, String inputPath, String outputPath,
		String blackListInputPath) {
		// read the blacklist
		Dataset<String> blackList = readBlackList(spark, blackListInputPath)
			.map(
				(MapFunction<Row, String>) r -> IdentifierFactory
					.idFromPid("50", "doi", ((String) r.getAs("doi")).substring(16), true),
				Encoders.STRING());

		// read the old actionset and get the relations in the payload
		JavaPairRDD<Text, Text> seq = JavaSparkContext
			.fromSparkContext(spark.sparkContext())
			.sequenceFile(inputPath, Text.class, Text.class);

		JavaRDD<Row> rdd = seq
			.map(x -> RowFactory.create(x._1().toString(), x._2().toString()));

		Dataset<Row> actionSet = spark
			.createDataFrame(rdd, KV_SCHEMA)
			.withColumn("atomic_action", from_json(col("value"), ATOMIC_ACTION_SCHEMA))
			.select(expr("atomic_action.*"));

		Dataset<Relation> relation = actionSet
			.map(
				(MapFunction<Row, Relation>) r -> MAPPER.readValue((String) r.getAs("payload"), Relation.class),
				Encoders.bean(Relation.class));

		// keep only the relations whose source does not match any pid in the blacklist
		Dataset<Relation> relNoSource = relation
			.joinWith(blackList, relation.col("source").equalTo(blackList.col("value")), "left")
			.filter((FilterFunction<Tuple2<Relation, String>>) t2 -> t2._2() == null)
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class));

		// keep only the relations whose target does not match any pid in the blacklist
		relNoSource
			.joinWith(blackList, relNoSource.col("target").equalTo(blackList.col("value")), "left")
			.filter((FilterFunction<Tuple2<Relation, String>>) t2 -> t2._2() == null)
			.map((MapFunction<Tuple2<Relation, String>, Relation>) t2 -> t2._1(), Encoders.bean(Relation.class))
			.toJavaRDD()
			.map(p -> new AtomicAction(p.getClass(), p))
			.mapToPair(
				aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
					new Text(OBJECT_MAPPER.writeValueAsString(aa))))
			.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, BZip2Codec.class);

	}

	private static Dataset<Row> readBlackList(SparkSession spark, String inputPath) {

		return spark
			.read()
			.json(inputPath)
			.select("doi");
	}

}
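The blacklist mapper above chops 16 characters off each doi value before building the OpenAIRE identifier; 16 is exactly the length of the "https://doi.org/" resolver prefix, so the blacklist presumably stores full DOI URLs. A sketch under that assumption (the sample DOI is made up):

// Assumption: blacklist rows carry resolver URLs, e.g. "https://doi.org/10.1234/abcd".
String url = "https://doi.org/10.1234/abcd";
String rawDoi = url.substring(16); // "10.1234/abcd"
String sourceId = IdentifierFactory.idFromPid("50", "doi", rawDoi, true);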
@@ -20,6 +20,9 @@ public class Author extends ORCIDItem {

	private String lastModifiedDate;

	public Author() {
	}

	public String getBiography() {
		return biography;
	}

@@ -11,4 +11,7 @@ public class ORCIDItem {
	public void setOrcid(String orcid) {
		this.orcid = orcid;
	}

	public ORCIDItem() {
	}
}

@@ -32,4 +32,6 @@ public class Work extends ORCIDItem {
		pids.add(pid);
	}

	public Work() {
	}
}

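The three hunks above only add no-arg constructors to the ORCID model beans, presumably so Jackson and Spark's bean encoder can instantiate them reflectively; a sketch of the deserialization that depends on them:

// Without a default constructor this Jackson call would fail at runtime.
Work work = new ObjectMapper().readValue("{\"orcid\":\"0000-0001-2345-6789\"}", Work.class);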
@@ -0,0 +1,76 @@

package eu.dnetlib.dhp.collection.plugin.researchfi;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.message.BasicNameValuePair;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.collection.plugin.CollectorPlugin;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.CollectorException;

public class ResearchFiCollectorPlugin implements CollectorPlugin {

	private static final Logger log = LoggerFactory.getLogger(ResearchFiCollectorPlugin.class);

	@Override
	public Stream<String> collect(final ApiDescriptor api, final AggregatorReport report)
		throws CollectorException {

		final String authUrl = api.getParams().get("auth_url");
		final String clientId = api.getParams().get("auth_client_id");
		final String clientSecret = api.getParams().get("auth_client_secret");

		final String authToken = authenticate(authUrl, clientId, clientSecret);

		final Iterator<String> iter = new ResearchFiIterator(api.getBaseUrl(), authToken);

		return StreamSupport.stream(Spliterators.spliteratorUnknownSize(iter, Spliterator.ORDERED), false);
	}

	private String authenticate(final String authUrl, final String clientId, final String clientSecret)
		throws CollectorException {
		try (final CloseableHttpClient client = HttpClients.createDefault()) {
			final HttpPost req = new HttpPost(authUrl);
			final List<NameValuePair> params = new ArrayList<>();
			params.add(new BasicNameValuePair("grant_type", "client_credentials"));
			params.add(new BasicNameValuePair("client_id", clientId));
			params.add(new BasicNameValuePair("client_secret", clientSecret));

			req.setEntity(new UrlEncodedFormEntity(params, "UTF-8"));

			try (final CloseableHttpResponse response = client.execute(req)) {
				final String content = IOUtils.toString(response.getEntity().getContent());
				final JSONObject obj = new JSONObject(content);
				final String token = obj.getString("access_token");
				if (StringUtils.isNotBlank(token)) {
					return token;
				}
			}
		} catch (final Throwable e) {
			log.warn("Error obtaining access token", e);
			throw new CollectorException("Error obtaining access token", e);
		}
		throw new CollectorException("Access token is missing");

	}

}
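A sketch of wiring the plugin to an API definition; the param keys are the ones read in collect(...) above, the endpoint and credentials are placeholders, and ApiDescriptor is assumed to expose a baseUrl setter and a mutable params map:

// Illustrative configuration only; all values are placeholders.
ApiDescriptor api = new ApiDescriptor();
api.setBaseUrl("https://example.org/research-fi/funding-decisions");
api.getParams().put("auth_url", "https://example.org/oauth/token");
api.getParams().put("auth_client_id", "CLIENT_ID");
api.getParams().put("auth_client_secret", "CLIENT_SECRET");
Stream<String> records = new ResearchFiCollectorPlugin().collect(api, new AggregatorReport());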
@@ -0,0 +1,117 @@

package eu.dnetlib.dhp.collection.plugin.researchfi;

import java.util.Iterator;
import java.util.Queue;
import java.util.concurrent.PriorityBlockingQueue;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Header;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.json.JSONArray;

import eu.dnetlib.dhp.collection.plugin.utils.JsonUtils;
import eu.dnetlib.dhp.common.collection.CollectorException;

public class ResearchFiIterator implements Iterator<String> {

	private static final Log log = LogFactory.getLog(ResearchFiIterator.class);

	private static final int PAGE_SIZE = 100;

	private final String baseUrl;
	private final String authToken;
	private int currPage;
	private int nPages;

	private final Queue<String> queue = new PriorityBlockingQueue<>();

	public ResearchFiIterator(final String baseUrl, final String authToken) {
		this.baseUrl = baseUrl;
		this.authToken = authToken;
		this.currPage = 0;
		this.nPages = 0;
	}

	private void verifyStarted() {
		if (this.currPage == 0) {
			try {
				nextCall();
			} catch (final CollectorException e) {
				throw new IllegalStateException(e);
			}
		}
	}

	@Override
	public boolean hasNext() {
		synchronized (this.queue) {
			verifyStarted();
			return !this.queue.isEmpty();
		}
	}

	@Override
	public String next() {
		synchronized (this.queue) {
			verifyStarted();
			final String res = this.queue.poll();
			while (this.queue.isEmpty() && (this.currPage < this.nPages)) {
				try {
					nextCall();
				} catch (final CollectorException e) {
					throw new IllegalStateException(e);
				}
			}
			return res;
		}
	}

	private void nextCall() throws CollectorException {

		this.currPage += 1;

		final String url;
		if (!this.baseUrl.contains("?")) {
			url = String.format("%s?PageNumber=%d&PageSize=%d", this.baseUrl, this.currPage, PAGE_SIZE);
		} else if (!this.baseUrl.contains("PageSize=")) {
			url = String.format("%s&PageNumber=%d&PageSize=%d", this.baseUrl, this.currPage, PAGE_SIZE);
		} else {
			url = String.format("%s&PageNumber=%d", this.baseUrl, this.currPage);
		}
		log.info("Calling url: " + url);

		try (final CloseableHttpClient client = HttpClients.createDefault()) {

			final HttpGet req = new HttpGet(url);
			req.addHeader("Authorization", "Bearer " + this.authToken);
			try (final CloseableHttpResponse response = client.execute(req)) {
				for (final Header header : response.getAllHeaders()) {
					log.debug("HEADER: " + header.getName() + " = " + header.getValue());
					if ("x-page-count".equals(header.getName())) {
						final int totalPages = NumberUtils.toInt(header.getValue());
						if (this.nPages != totalPages) {
							this.nPages = NumberUtils.toInt(header.getValue());
							log.info("Total pages: " + totalPages);
						}
					}
				}

				final String content = IOUtils.toString(response.getEntity().getContent());
				final JSONArray jsonArray = new JSONArray(content);

				jsonArray.forEach(obj -> this.queue.add(JsonUtils.convertToXML(obj.toString())));
			}
		} catch (final Throwable e) {
			log.warn("Error calling url: " + url, e);
			throw new CollectorException("Error calling url: " + url, e);
		}
	}

}
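nextCall() grafts the paging parameters onto whatever query string the base URL already carries; a sketch of the three URL shapes produced for the first page (hosts and params are illustrative):

// Sketch: the three URL forms built by nextCall() when currPage == 1.
String noQuery = String.format("%s?PageNumber=%d&PageSize=%d", "https://host/api", 1, 100);
String withQuery = String.format("%s&PageNumber=%d&PageSize=%d", "https://host/api?year=2020", 1, 100);
String fixedSize = String.format("%s&PageNumber=%d", "https://host/api?PageSize=50", 1);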
@@ -1,6 +1,7 @@

package eu.dnetlib.dhp.collection.plugin.rest;

import java.util.Map;
import java.util.Optional;
import java.util.Spliterator;
import java.util.Spliterators;

@@ -9,6 +10,8 @@ import java.util.stream.StreamSupport;

import org.apache.commons.lang3.StringUtils;

import com.google.gson.Gson;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.collection.plugin.CollectorPlugin;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;

@@ -47,6 +50,9 @@ public class RestCollectorPlugin implements CollectorPlugin {
		final String entityXpath = api.getParams().get("entityXpath");
		final String authMethod = api.getParams().get("authMethod");
		final String authToken = api.getParams().get("authToken");
		final String requestHeaderMap = api.getParams().get("requestHeaderMap");
		Gson gson = new Gson();
		Map requestHeaders = gson.fromJson(requestHeaderMap, Map.class);
		final String resultSizeValue = Optional
			.ofNullable(api.getParams().get("resultSizeValue"))
			.filter(StringUtils::isNotBlank)

@@ -64,9 +70,6 @@ public class RestCollectorPlugin implements CollectorPlugin {
		if (StringUtils.isBlank(resultFormatValue)) {
			throw new CollectorException("Param 'resultFormatValue' is null or empty");
		}
		if (StringUtils.isBlank(queryParams)) {
			throw new CollectorException("Param 'queryParams' is null or empty");
		}
		if (StringUtils.isBlank(entityXpath)) {
			throw new CollectorException("Param 'entityXpath' is null or empty");
		}

@@ -92,7 +95,8 @@ public class RestCollectorPlugin implements CollectorPlugin {
			entityXpath,
			authMethod,
			authToken,
			resultOutputFormat);
			resultOutputFormat,
			requestHeaders);

		return StreamSupport
			.stream(
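The new requestHeaderMap parameter arrives as a JSON string and is decoded with Gson into a plain Map before being handed to the iterator; a sketch of the expected shape (the header values are examples):

// Example payload for the "requestHeaderMap" API param (sketch).
String requestHeaderMap = "{\"Accept\": \"application/json\", \"User-Agent\": \"dnet-collector\"}";
Map<String, String> requestHeaders = new Gson().fromJson(requestHeaderMap, Map.class);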
@@ -9,8 +9,11 @@ import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;

@@ -22,20 +25,20 @@ import javax.xml.xpath.*;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpHeaders;
import org.apache.http.entity.ContentType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;

import com.google.common.collect.Maps;

import eu.dnetlib.dhp.collection.plugin.utils.JsonUtils;
import eu.dnetlib.dhp.common.collection.CollectorException;
import eu.dnetlib.dhp.common.collection.HttpClientParams;

/**
 * log.info(...) equal to log.trace(...) in the application-logs
 * <p>
 * known bug: at resumptionType 'discover' if the (resultTotal % resultSizeValue) == 0 the collecting fails -> change the resultSizeValue
 *

@@ -44,24 +47,29 @@ import eu.dnetlib.dhp.common.collection.HttpClientParams;
 *
 */
public class RestIterator implements Iterator<String> {

	private static final Logger log = LoggerFactory.getLogger(RestIterator.class);
	public static final String UTF_8 = "UTF-8";
	private static final int MAX_ATTEMPTS = 5;

	private final HttpClientParams clientParams;

	private final String BASIC = "basic";
	private final String AUTHBASIC = "basic";

	private static final String XML_HEADER = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
	private static final String EMPTY_XML = XML_HEADER + "<" + JsonUtils.XML_WRAP_TAG + "></" + JsonUtils.XML_WRAP_TAG
		+ ">";

	private final String baseUrl;
	private final String resumptionType;
	private final String resumptionParam;
	private final String resultFormatValue;
	private String queryParams;
	private String queryParams = "";
	private final int resultSizeValue;
	private int resumptionInt = 0; // integer resumption token (first record to harvest)
	private int resultTotal = -1;
	private String resumptionStr = Integer.toString(resumptionInt); // string resumption token (first record to harvest
	// or token scanned from results)
	private String resumptionStr = Integer.toString(this.resumptionInt); // string resumption token (first record to
	// harvest
	// or token scanned from results)
	private InputStream resultStream;
	private Transformer transformer;
	private XPath xpath;

@@ -73,7 +81,7 @@ public class RestIterator implements Iterator<String> {
	private final String querySize;
	private final String authMethod;
	private final String authToken;
	private final Queue<String> recordQueue = new PriorityBlockingQueue<String>();
	private final Queue<String> recordQueue = new PriorityBlockingQueue<>();
	private int discoverResultSize = 0;
	private int pagination = 1;
	/*

@@ -83,8 +91,13 @@ public class RestIterator implements Iterator<String> {
	 */
	private final String resultOutputFormat;

	/** RestIterator class
	 * compatible to version 1.3.33
	/*
	 * Can be used to set additional request headers, like for content negotiation
	 */
	private Map<String, String> requestHeaders;

	/**
	 * RestIterator class compatible to version 1.3.33
	 */
	public RestIterator(
		final HttpClientParams clientParams,

@@ -101,47 +114,56 @@ public class RestIterator implements Iterator<String> {
		final String entityXpath,
		final String authMethod,
		final String authToken,
		final String resultOutputFormat) {
		final String resultOutputFormat,
		final Map<String, String> requestHeaders) {

		this.clientParams = clientParams;
		this.baseUrl = baseUrl;
		this.resumptionType = resumptionType;
		this.resumptionParam = resumptionParam;
		this.resultFormatValue = resultFormatValue;
		this.resultSizeValue = Integer.valueOf(resultSizeValueStr);
		this.resultSizeValue = Integer.parseInt(resultSizeValueStr);
		this.queryParams = queryParams;
		this.authMethod = authMethod;
		this.authToken = authToken;
		this.resultOutputFormat = resultOutputFormat;
		this.requestHeaders = requestHeaders != null ? requestHeaders : Maps.newHashMap();

		queryFormat = StringUtils.isNotBlank(resultFormatParam) ? "&" + resultFormatParam + "=" + resultFormatValue
		this.queryFormat = StringUtils.isNotBlank(resultFormatParam) ? "&" + resultFormatParam + "=" + resultFormatValue
			: "";
		this.querySize = StringUtils.isNotBlank(resultSizeParam) ? "&" + resultSizeParam + "=" + resultSizeValueStr
			: "";
		querySize = StringUtils.isNotBlank(resultSizeParam) ? "&" + resultSizeParam + "=" + resultSizeValueStr : "";

		try {
			initXmlTransformation(resultTotalXpath, resumptionXpath, entityXpath);
		} catch (Exception e) {
		} catch (final Exception e) {
			throw new IllegalStateException("xml transformation init failed: " + e.getMessage());
		}

		initQueue();
	}

	private void initXmlTransformation(String resultTotalXpath, String resumptionXpath, String entityXpath)
	private void initXmlTransformation(final String resultTotalXpath, final String resumptionXpath,
		final String entityXpath)
		throws TransformerConfigurationException, XPathExpressionException {
		final TransformerFactory factory = TransformerFactory.newInstance();
		transformer = factory.newTransformer();
		transformer.setOutputProperty(OutputKeys.INDENT, "yes");
		transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "3");
		xpath = XPathFactory.newInstance().newXPath();
		xprResultTotalPath = xpath.compile(resultTotalXpath);
		xprResumptionPath = xpath.compile(StringUtils.isBlank(resumptionXpath) ? "/" : resumptionXpath);
		xprEntity = xpath.compile(entityXpath);
		this.transformer = factory.newTransformer();
		this.transformer.setOutputProperty(OutputKeys.INDENT, "yes");
		this.transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "3");
		this.xpath = XPathFactory.newInstance().newXPath();
		this.xprResultTotalPath = this.xpath.compile(resultTotalXpath);
		this.xprResumptionPath = this.xpath.compile(StringUtils.isBlank(resumptionXpath) ? "/" : resumptionXpath);
		this.xprEntity = this.xpath.compile(entityXpath);
	}

	private void initQueue() {
		query = baseUrl + "?" + queryParams + querySize + queryFormat;
		log.info("REST calls starting with {}", query);
		if (queryParams.equals("") && querySize.equals("") && queryFormat.equals("")) {
			query = baseUrl;
		} else {
			query = baseUrl + "?" + queryParams + querySize + queryFormat;
		}

		log.info("REST calls starting with {}", this.query);
	}

	private void disconnect() {

@@ -154,11 +176,22 @@ public class RestIterator implements Iterator<String> {
	 */
	@Override
	public boolean hasNext() {
		if (recordQueue.isEmpty() && query.isEmpty()) {
		synchronized (this.recordQueue) {
			while (this.recordQueue.isEmpty() && !this.query.isEmpty()) {
				try {
					this.query = downloadPage(this.query, 0);
				} catch (final CollectorException e) {
					log.debug("CollectorPlugin.next()-Exception: {}", e);
					throw new RuntimeException(e);
				}
			}

			if (!this.recordQueue.isEmpty()) {
				return true;
			}

			disconnect();
			return false;
		} else {
			return true;
		}
	}

@@ -168,214 +201,248 @@ public class RestIterator implements Iterator<String> {
	 */
	@Override
	public String next() {
		synchronized (recordQueue) {
			while (recordQueue.isEmpty() && !query.isEmpty()) {
				try {
					query = downloadPage(query);
				} catch (CollectorException e) {
					log.debug("CollectorPlugin.next()-Exception: {}", e);
					throw new RuntimeException(e);
				}
			}
			return recordQueue.poll();
		synchronized (this.recordQueue) {
			return this.recordQueue.poll();
		}
	}

	/*
	 * download page and return nextQuery
	 * download page and return nextQuery (with number of attempt)
	 */
	private String downloadPage(String query) throws CollectorException {
		String resultJson;
		String resultXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
		String nextQuery = "";
		String emptyXml = resultXml + "<" + JsonUtils.XML_WRAP_TAG + "></" + JsonUtils.XML_WRAP_TAG + ">";
		Node resultNode = null;
		NodeList nodeList = null;
		String qUrlArgument = "";
		int urlOldResumptionSize = 0;
		InputStream theHttpInputStream;
	private String downloadPage(String query, final int attempt) throws CollectorException {

		// check if cursor=* is initial set otherwise add it to the queryParam URL
		if (resumptionType.equalsIgnoreCase("deep-cursor")) {
			log.debug("check resumptionType deep-cursor and check cursor=*?{}", query);
			if (!query.contains("&cursor=")) {
				query += "&cursor=*";
		if (attempt > MAX_ATTEMPTS) {
			throw new CollectorException("Max Number of attempts reached, query:" + query);
		}

		if (attempt > 0) {
			final int delay = (attempt * 5000);
			log.debug("Attempt {} with delay {}", attempt, delay);
			try {
				Thread.sleep(delay);
			} catch (final InterruptedException e) {
				throw new CollectorException(e);
			}
		}

		try {
			log.info("requesting URL [{}]", query);
			String resultJson;
			String resultXml = XML_HEADER;
			String nextQuery = "";
			Node resultNode = null;
			NodeList nodeList = null;
			String qUrlArgument = "";
			int urlOldResumptionSize = 0;
			InputStream theHttpInputStream;

			URL qUrl = new URL(query);
			log.debug("authMethod: {}", authMethod);
			if ("bearer".equalsIgnoreCase(this.authMethod)) {
				log.trace("authMethod before inputStream: {}", resultXml);
				HttpURLConnection conn = (HttpURLConnection) qUrl.openConnection();
				conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Bearer " + authToken);
				conn.setRequestProperty(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.getMimeType());
				conn.setRequestMethod("GET");
				theHttpInputStream = conn.getInputStream();
			} else if (BASIC.equalsIgnoreCase(this.authMethod)) {
				log.trace("authMethod before inputStream: {}", resultXml);
				HttpURLConnection conn = (HttpURLConnection) qUrl.openConnection();
				conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Basic " + authToken);
				conn.setRequestProperty(HttpHeaders.ACCEPT, ContentType.APPLICATION_XML.getMimeType());
				conn.setRequestMethod("GET");
				theHttpInputStream = conn.getInputStream();
			} else {
				theHttpInputStream = qUrl.openStream();
			}

			resultStream = theHttpInputStream;
			if ("json".equals(resultOutputFormat)) {
				resultJson = IOUtils.toString(resultStream, StandardCharsets.UTF_8);
				resultXml = JsonUtils.convertToXML(resultJson);
				resultStream = IOUtils.toInputStream(resultXml, UTF_8);
			}

			if (!(emptyXml).equalsIgnoreCase(resultXml)) {
				resultNode = (Node) xpath.evaluate("/", new InputSource(resultStream), XPathConstants.NODE);
				nodeList = (NodeList) xprEntity.evaluate(resultNode, XPathConstants.NODESET);
				log.debug("nodeList.length: {}", nodeList.getLength());
				for (int i = 0; i < nodeList.getLength(); i++) {
					StringWriter sw = new StringWriter();
					transformer.transform(new DOMSource(nodeList.item(i)), new StreamResult(sw));
					String toEnqueue = sw.toString();
					if (toEnqueue == null || StringUtils.isBlank(toEnqueue) || emptyXml.equalsIgnoreCase(toEnqueue)) {
						log.warn("The following record resulted in empty item for the feeding queue: {}", resultXml);
					} else {
						recordQueue.add(sw.toString());
					}
			// check if cursor=* is initial set otherwise add it to the queryParam URL
			if ("deep-cursor".equalsIgnoreCase(this.resumptionType)) {
				log.debug("check resumptionType deep-cursor and check cursor=*?{}", query);
				if (!query.contains("&cursor=")) {
					query += "&cursor=*";
				}
			} else {
				log.warn("resultXml is equal with emptyXml");
			}

			resumptionInt += resultSizeValue;
			// find pagination page start number in queryParam and remove before start the first query
			if ((resumptionType.toLowerCase().equals("pagination") || resumptionType.toLowerCase().equals("page"))
				&& (query.contains("paginationStart="))) {

			switch (resumptionType.toLowerCase()) {
				case "scan": // read of resumptionToken , evaluate next results, e.g. OAI, iterate over items
					resumptionStr = xprResumptionPath.evaluate(resultNode);
					break;
				final Matcher m = Pattern.compile("paginationStart=([0-9]+)").matcher(query);
				m.find(); // guaranteed to be true for this regex

				case "count": // begin at one step for all records, iterate over items
					resumptionStr = Integer.toString(resumptionInt);
					break;
				String[] pageVal = m.group(0).split("=");
				pagination = Integer.parseInt(pageVal[1]);

				case "discover": // size of result items unknown, iterate over items (for openDOAR - 201808)
					if (resultSizeValue < 2) {
						throw new CollectorException("Mode: discover, Param 'resultSizeValue' is less than 2");
				// remove page start number from query and queryParams
				queryParams = queryParams.replaceFirst("&?paginationStart=[0-9]+", "");
				query = query.replaceFirst("&?paginationStart=[0-9]+", "");

			}

			try {
				log.info("requesting URL [{}]", query);

				final URL qUrl = new URL(query);
				log.debug("authMethod: {}", this.authMethod);
				if ("bearer".equalsIgnoreCase(this.authMethod)) {
					log.trace("RestIterator.downloadPage():: authMethod before inputStream: " + resultXml);
					requestHeaders.put("Authorization", "Bearer " + authToken);
					// requestHeaders.put("Content-Type", "application/json");
				} else if (AUTHBASIC.equalsIgnoreCase(this.authMethod)) {
					log.trace("RestIterator.downloadPage():: authMethod before inputStream: " + resultXml);
					requestHeaders.put("Authorization", "Basic " + authToken);
					// requestHeaders.put("accept", "application/xml");
				}
				HttpURLConnection conn = (HttpURLConnection) qUrl.openConnection();
				conn.setRequestMethod("GET");
				this.setRequestHeader(conn);
				resultStream = conn.getInputStream();

				if ("json".equals(this.resultOutputFormat)) {
					resultJson = IOUtils.toString(this.resultStream, StandardCharsets.UTF_8);
					resultXml = JsonUtils.convertToXML(resultJson);
					this.resultStream = IOUtils.toInputStream(resultXml, UTF_8);
				}

				if (!isEmptyXml(resultXml)) {
					resultNode = (Node) this.xpath
						.evaluate("/", new InputSource(this.resultStream), XPathConstants.NODE);
					nodeList = (NodeList) this.xprEntity.evaluate(resultNode, XPathConstants.NODESET);
					log.debug("nodeList.length: {}", nodeList.getLength());
					for (int i = 0; i < nodeList.getLength(); i++) {
						final StringWriter sw = new StringWriter();
						this.transformer.transform(new DOMSource(nodeList.item(i)), new StreamResult(sw));
						final String toEnqueue = sw.toString();
						if ((toEnqueue == null) || StringUtils.isBlank(toEnqueue) || isEmptyXml(toEnqueue)) {
							log
								.warn(
									"The following record resulted in empty item for the feeding queue: {}", resultXml);
						} else {
							this.recordQueue.add(sw.toString());
						}
					}
					qUrlArgument = qUrl.getQuery();
					String[] arrayQUrlArgument = qUrlArgument.split("&");
					for (String arrayUrlArgStr : arrayQUrlArgument) {
						if (arrayUrlArgStr.startsWith(resumptionParam)) {
							String[] resumptionKeyValue = arrayUrlArgStr.split("=");
							if (isInteger(resumptionKeyValue[1])) {
								urlOldResumptionSize = Integer.parseInt(resumptionKeyValue[1]);
								log.debug("discover OldResumptionSize from Url (int): {}", urlOldResumptionSize);
							} else {
								log.debug("discover OldResumptionSize from Url (str): {}", resumptionKeyValue[1]);
				} else {
					log.warn("resultXml is equal with emptyXml");
				}

				this.resumptionInt += this.resultSizeValue;

				switch (this.resumptionType.toLowerCase()) {
					case "scan": // read of resumptionToken , evaluate next results, e.g. OAI, iterate over items
						this.resumptionStr = this.xprResumptionPath.evaluate(resultNode);
						break;

					case "count": // begin at one step for all records, iterate over items
						this.resumptionStr = Integer.toString(this.resumptionInt);
						break;

					case "discover": // size of result items unknown, iterate over items (for openDOAR - 201808)
						if (this.resultSizeValue < 2) {
							throw new CollectorException("Mode: discover, Param 'resultSizeValue' is less than 2");
						}
						qUrlArgument = qUrl.getQuery();

						final String[] arrayQUrlArgument = qUrlArgument.split("&");
						for (final String arrayUrlArgStr : arrayQUrlArgument) {
							if (arrayUrlArgStr.startsWith(this.resumptionParam)) {
								final String[] resumptionKeyValue = arrayUrlArgStr.split("=");
								if (isInteger(resumptionKeyValue[1])) {
									urlOldResumptionSize = Integer.parseInt(resumptionKeyValue[1]);
									log.debug("discover OldResumptionSize from Url (int): {}", urlOldResumptionSize);
								} else {
									log.debug("discover OldResumptionSize from Url (str): {}", resumptionKeyValue[1]);
								}
							}
						}

						if (((emptyXml).equalsIgnoreCase(resultXml))
							|| ((nodeList != null) && (nodeList.getLength() < resultSizeValue))) {
							// resumptionStr = "";
							if (nodeList != null) {
								discoverResultSize += nodeList.getLength();
						if (isEmptyXml(resultXml)
							|| ((nodeList != null) && (nodeList.getLength() < this.resultSizeValue))) {
							// resumptionStr = "";
							if (nodeList != null) {
								this.discoverResultSize += nodeList.getLength();
							}
							this.resultTotal = this.discoverResultSize;
						} else {
							this.resumptionStr = Integer.toString(this.resumptionInt);
							this.resultTotal = this.resumptionInt + 1;
							if (nodeList != null) {
								this.discoverResultSize += nodeList.getLength();
							}
						}
							resultTotal = discoverResultSize;
						} else {
							resumptionStr = Integer.toString(resumptionInt);
							resultTotal = resumptionInt + 1;
							if (nodeList != null) {
								discoverResultSize += nodeList.getLength();
						log.info("discoverResultSize: {}", this.discoverResultSize);
						break;

					case "pagination":
					case "page": // pagination, iterate over page numbers
						if (nodeList != null && nodeList.getLength() > 0) {
							this.discoverResultSize += nodeList.getLength();
						} else {
							this.resultTotal = this.discoverResultSize;
							this.pagination = this.discoverResultSize;
						}
						}
						log.info("discoverResultSize: {}", discoverResultSize);
						break;
						this.pagination += 1;
						this.resumptionInt = this.pagination;
						this.resumptionStr = Integer.toString(this.resumptionInt);
						break;

					case "pagination":
					case "page": // pagination, iterate over page numbers
						pagination += 1;
						if (nodeList != null) {
							discoverResultSize += nodeList.getLength();
						} else {
							resultTotal = discoverResultSize;
							pagination = discoverResultSize;
						}
						resumptionInt = pagination;
						resumptionStr = Integer.toString(resumptionInt);
						break;
					case "deep-cursor": // size of result items unknown, iterate over items (for supporting deep cursor
										// in
										// solr)
						// isn't relevant -- if (resultSizeValue < 2) {throw new CollectorServiceException("Mode:
						// deep-cursor, Param 'resultSizeValue' is less than 2");}

					case "deep-cursor": // size of result items unknown, iterate over items (for supporting deep cursor in
										// solr)
						// isn't relevant -- if (resultSizeValue < 2) {throw new CollectorServiceException("Mode:
						// deep-cursor, Param 'resultSizeValue' is less than 2");}
						this.resumptionStr = encodeValue(this.xprResumptionPath.evaluate(resultNode));
						this.queryParams = this.queryParams.replace("&cursor=*", "");

						resumptionStr = encodeValue(xprResumptionPath.evaluate(resultNode));
						queryParams = queryParams.replace("&cursor=*", "");
						// terminating if length of nodeList is 0
						if ((nodeList != null) && (nodeList.getLength() < this.discoverResultSize)) {
							this.resumptionInt += ((nodeList.getLength() + 1) - this.resultSizeValue);
						} else {
							this.resumptionInt += (nodeList.getLength() - this.resultSizeValue); // subtract the
																									// resultSizeValue
																									// because the iteration is over
																									// real length and the
																									// resultSizeValue is added before
																									// the switch()
						}

						// terminating if length of nodeList is 0
						if ((nodeList != null) && (nodeList.getLength() < discoverResultSize)) {
							resumptionInt += (nodeList.getLength() + 1 - resultSizeValue);
						} else {
							resumptionInt += (nodeList.getLength() - resultSizeValue); // subtract the resultSizeValue
																						// because the iteration is over
																						// real length and the
																						// resultSizeValue is added before
																						// the switch()
						}
						this.discoverResultSize = nodeList.getLength();

						discoverResultSize = nodeList.getLength();
						log
							.debug(
								"downloadPage().deep-cursor: resumptionStr=" + this.resumptionStr + " ; queryParams="
									+ this.queryParams + " resumptionLengthIncreased: " + this.resumptionInt);

						log
							.debug(
								"downloadPage().deep-cursor: resumptionStr=" + resumptionStr + " ; queryParams="
									+ queryParams + " resumptionLengthIncreased: " + resumptionInt);
						break;

						break;
					default: // otherwise: abort
						// resultTotal = resumptionInt;
						break;
				}

					default: // otherwise: abort
						// resultTotal = resumptionInt;
						break;
			} catch (final Exception e) {
				log.error(e.getMessage(), e);
				throw new IllegalStateException("collection failed: " + e.getMessage());
			}

		} catch (Exception e) {
			log.error(e.getMessage(), e);
			throw new IllegalStateException("collection failed: " + e.getMessage());
		}

		try {
			if (resultTotal == -1) {
				resultTotal = Integer.parseInt(xprResultTotalPath.evaluate(resultNode));
				if (resumptionType.equalsIgnoreCase("page") && !BASIC.equalsIgnoreCase(authMethod)) {
					resultTotal += 1;
				} // to correct the upper bound
				log.info("resultTotal was -1 is now: " + resultTotal);
			try {
				if (this.resultTotal == -1) {
					this.resultTotal = Integer.parseInt(this.xprResultTotalPath.evaluate(resultNode));
					if ("page".equalsIgnoreCase(this.resumptionType)
						&& !this.AUTHBASIC.equalsIgnoreCase(this.authMethod)) {
						this.resultTotal += 1;
					} // to correct the upper bound
					log.info("resultTotal was -1 is now: " + this.resultTotal);
				}
			} catch (final Exception e) {
				log.error(e.getMessage(), e);
				throw new IllegalStateException("downloadPage resultTotal couldn't parse: " + e.getMessage());
			}
		} catch (Exception e) {
			log.error(e.getMessage(), e);
			throw new IllegalStateException("downloadPage resultTotal couldn't parse: " + e.getMessage());
			log.debug("resultTotal: " + this.resultTotal);
			log.debug("resInt: " + this.resumptionInt);
			if (this.resumptionInt <= this.resultTotal) {
				nextQuery = this.baseUrl + "?" + this.queryParams + this.querySize + "&" + this.resumptionParam + "="
					+ this.resumptionStr
					+ this.queryFormat;
			} else {
				nextQuery = "";
				// if (resumptionType.toLowerCase().equals("deep-cursor")) { resumptionInt -= 1; } // correct the
				// resumptionInt and prevent a NullPointer Exception at mdStore
			}
			log.debug("nextQueryUrl: " + nextQuery);
			return nextQuery;
		} catch (final Throwable e) {
			log.warn(e.getMessage(), e);
			return downloadPage(query, attempt + 1);
		}
		log.debug("resultTotal: " + resultTotal);
		log.debug("resInt: " + resumptionInt);
		if (resumptionInt <= resultTotal) {
			nextQuery = baseUrl + "?" + queryParams + querySize + "&" + resumptionParam + "=" + resumptionStr
				+ queryFormat;
		} else {
			nextQuery = "";
			// if (resumptionType.toLowerCase().equals("deep-cursor")) { resumptionInt -= 1; } // correct the
			// resumptionInt and prevent a NullPointer Exception at mdStore
		}
		log.debug("nextQueryUrl: " + nextQuery);
		return nextQuery;

	}

	private boolean isInteger(String s) {
	private boolean isEmptyXml(String s) {
		return EMPTY_XML.equalsIgnoreCase(s);
	}

	private boolean isInteger(final String s) {
		boolean isValidInteger = false;
		try {
			Integer.parseInt(s);

@@ -383,7 +450,7 @@ public class RestIterator implements Iterator<String> {
			// s is a valid integer

			isValidInteger = true;
		} catch (NumberFormatException ex) {
		} catch (final NumberFormatException ex) {
			// s is not an integer
		}

@@ -391,20 +458,36 @@ public class RestIterator implements Iterator<String> {
	}

	// Method to encode a string value using `UTF-8` encoding scheme
	private String encodeValue(String value) {
	private String encodeValue(final String value) {
		try {
			return URLEncoder.encode(value, StandardCharsets.UTF_8.toString());
		} catch (UnsupportedEncodingException ex) {
		} catch (final UnsupportedEncodingException ex) {
			throw new RuntimeException(ex.getCause());
		}
	}

	/**
	 * setRequestHeader
	 *
	 * setRequestProperty: Sets the general request property. If a property with the key already exists, overwrite its value with the new value.
	 * @param conn
	 */
	private void setRequestHeader(HttpURLConnection conn) {
		if (requestHeaders != null) {
			for (String key : requestHeaders.keySet()) {
				conn.setRequestProperty(key, requestHeaders.get(key));
			}
			log.debug("Set Request Header with: " + requestHeaders);
		}

	}

	public String getResultFormatValue() {
		return resultFormatValue;
		return this.resultFormatValue;
	}

	public String getResultOutputFormat() {
		return resultOutputFormat;
		return this.resultOutputFormat;
	}

}

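For readers following the page/pagination branch above: the iterator bumps a page counter, compares it against the total reported by the service, and either builds the next query URL with a URL-encoded resumption token or returns an empty string to stop. Below is a minimal, self-contained Java sketch of that same computation; the class name, base URL, and parameter names are illustrative, not part of the patch.

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class PageResumptionSketch {

	private int pagination = 0;   // pages fetched so far
	private int resultTotal = -1; // total reported by the service, -1 until known

	public void setResultTotal(int total) {
		this.resultTotal = total;
	}

	/** Returns the next query URL, or "" once the page counter passes the reported total. */
	public String nextQuery(String baseUrl, String queryParams, String resumptionParam) throws Exception {
		pagination += 1;
		if (resultTotal >= 0 && pagination > resultTotal) {
			return ""; // empty string signals the caller to stop iterating
		}
		// encode the resumption token, mirroring encodeValue() above
		String token = URLEncoder.encode(Integer.toString(pagination), StandardCharsets.UTF_8.toString());
		return baseUrl + "?" + queryParams + "&" + resumptionParam + "=" + token;
	}

	public static void main(String[] args) throws Exception {
		PageResumptionSketch s = new PageResumptionSketch();
		s.setResultTotal(2);
		System.out.println(s.nextQuery("https://api.example.org/records", "q=dog", "page")); // page 1
		System.out.println(s.nextQuery("https://api.example.org/records", "q=dog", "page")); // page 2
		System.out.println(s.nextQuery("https://api.example.org/records", "q=dog", "page")); // "" -> stop
	}
}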
@@ -8,7 +8,10 @@ import java.io.StringWriter;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;

import javax.xml.stream.XMLEventFactory;
import javax.xml.stream.XMLEventReader;

@@ -19,6 +22,7 @@ import javax.xml.stream.XMLStreamException;
import javax.xml.stream.events.StartElement;
import javax.xml.stream.events.XMLEvent;

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

@@ -58,13 +62,23 @@ public class XMLIterator implements Iterator<String> {

	private String element;

	private List<String> elements;

	private InputStream inputStream;

	public XMLIterator(final String element, final InputStream inputStream) {
		super();
		this.element = element;
		if (element.contains(",")) {
			elements = Arrays
				.stream(element.split(","))
				.filter(StringUtils::isNoneBlank)
				.map(String::toLowerCase)
				.collect(Collectors.toList());
		}
		this.inputStream = inputStream;
		this.parser = getParser();

		try {
			this.current = findElement(parser);
		} catch (XMLStreamException e) {

@@ -113,7 +127,7 @@ public class XMLIterator implements Iterator<String> {
			final XMLEvent event = parser.nextEvent();

			// TODO: replace with depth tracking instead of close tag tracking.
			if (event.isEndElement() && event.asEndElement().getName().getLocalPart().equals(element)) {
			if (event.isEndElement() && isCheckTag(event.asEndElement().getName().getLocalPart())) {
				writer.add(event);
				break;
			}

@@ -142,18 +156,16 @@ public class XMLIterator implements Iterator<String> {
		XMLEvent peek = parser.peek();
		if (peek != null && peek.isStartElement()) {
			String name = peek.asStartElement().getName().getLocalPart();
			if (element.equals(name)) {
			if (isCheckTag(name))
				return peek;
			}
		}

		while (parser.hasNext()) {
			final XMLEvent event = parser.nextEvent();
			XMLEvent event = parser.nextEvent();
			if (event != null && event.isStartElement()) {
				String name = event.asStartElement().getName().getLocalPart();
				if (element.equals(name)) {
				if (isCheckTag(name))
					return event;
				}
			}
		}
		return null;

@@ -161,12 +173,31 @@ public class XMLIterator implements Iterator<String> {

	private XMLEventReader getParser() {
		try {
			return inputFactory.get().createXMLEventReader(sanitize(inputStream));
			XMLInputFactory xif = inputFactory.get();
			xif.setProperty(XMLInputFactory.SUPPORT_DTD, false);
			return xif.createXMLEventReader(sanitize(inputStream));
		} catch (XMLStreamException e) {
			throw new RuntimeException(e);
		}
	}

	private boolean isCheckTag(final String tagName) {
		if (elements != null) {
			final String found = elements
				.stream()
				.filter(e -> e.equalsIgnoreCase(tagName))
				.findFirst()
				.orElse(null);
			if (found != null)
				return true;
		} else {
			if (element.equalsIgnoreCase(tagName)) {
				return true;
			}
		}
		return false;
	}

	private Reader sanitize(final InputStream in) {
		final CharsetDecoder charsetDecoder = Charset.forName(UTF_8).newDecoder();
		charsetDecoder.onMalformedInput(CodingErrorAction.REPLACE);

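The XMLIterator changes combine two ideas: disable DTD support on the StAX factory so external entity declarations are never resolved, and decode the input leniently so a single malformed byte does not abort the harvest. A minimal, self-contained Java sketch of that combination follows; the class name is illustrative, the javax.xml.stream and java.nio.charset calls are the same ones used in the patch.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;

import javax.xml.stream.XMLEventReader;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;

public class SafeStaxReaderSketch {

	public static XMLEventReader open(InputStream in) throws XMLStreamException {
		// replace malformed byte sequences instead of failing the whole stream
		CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder();
		decoder.onMalformedInput(CodingErrorAction.REPLACE);
		decoder.onUnmappableCharacter(CodingErrorAction.REPLACE);
		Reader sanitized = new InputStreamReader(in, decoder);

		// disable DTD support so external entities are not resolved (XXE hardening)
		XMLInputFactory factory = XMLInputFactory.newInstance();
		factory.setProperty(XMLInputFactory.SUPPORT_DTD, false);
		return factory.createXMLEventReader(sanitized);
	}

	public static void main(String[] args) throws Exception {
		InputStream in = new ByteArrayInputStream("<records><record>1</record></records>".getBytes("UTF-8"));
		XMLEventReader reader = open(in);
		while (reader.hasNext()) {
			System.out.println(reader.nextEvent());
		}
	}
}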
@@ -0,0 +1,48 @@
# --- You can override the following properties (if needed) coming from your ~/.dhp/application.properties ---
# dhp.hadoop.frontend.temp.dir=/home/ilias.kanellos
# dhp.hadoop.frontend.user.name=ilias.kanellos
# dhp.hadoop.frontend.host.name=iis-cdh5-test-gw.ocean.icm.edu.pl
# dhp.hadoop.frontend.port.ssh=22
# oozieServiceLoc=http://iis-cdh5-test-m3:11000/oozie
# jobTracker=yarnRM
# nameNode=hdfs://nameservice1
# oozie.execution.log.file.location = target/extract-and-run-on-remote-host.log
# maven.executable=mvn

# Some memory and driver settings for more demanding tasks
sparkDriverMemory=10G
sparkExecutorMemory=10G
sparkExecutorCores=4
sparkShufflePartitions=7680

# The above is given differently in an example I found online
oozie.action.sharelib.for.spark=spark2
oozieActionShareLibForSpark2=spark2
spark2YarnHistoryServerAddress=http://iis-cdh5-test-gw.ocean.icm.edu.pl:18089
spark2EventLogDir=/user/spark/spark2ApplicationHistory
sparkSqlWarehouseDir=/user/hive/warehouse
#hiveMetastoreUris=thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083
# This MAY avoid the no library used error
oozie.use.system.libpath=true
# Some stuff copied from openaire's jobs
spark2ExtraListeners=com.cloudera.spark.lineage.NavigatorAppListener
spark2SqlQueryExecutionListeners=com.cloudera.spark.lineage.NavigatorQueryListener

# The following is needed as a property of a workflow
wfAppPath=${oozieTopWfApplicationPath}

sparkExecutorMemory=10G
sparkDriverMemory=20G
sparkExecutorCores=4
sparkShufflePartitions=7680

resultFolder=${nameNode}/${workingDir}/affro-results

#
#crossrefInputPath=/data/bip-affiliations/crossref-data.json
#pubmedInputPath=/data/bip-affiliations/pubmed-data.json
#openapcInputPath=/data/bip-affiliations/openapc-data.json
#dataciteInputPath=/data/bip-affiliations/datacite-data.json
#
#outputPath=/tmp/crossref-affiliations-output-v5

@@ -0,0 +1,30 @@
<configuration>
    <property>
        <name>jobTracker</name>
        <value>yarnRM</value>
    </property>
    <property>
        <name>nameNode</name>
        <value>hdfs://nameservice1</value>
    </property>
    <property>
        <name>oozie.use.system.libpath</name>
        <value>true</value>
    </property>
    <property>
        <name>hiveMetastoreUris</name>
        <value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
    </property>
    <property>
        <name>hiveJdbcUrl</name>
        <value>jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000</value>
    </property>
    <property>
        <name>hiveDbName</name>
        <value>openaire</value>
    </property>
    <property>
        <name>oozie.launcher.mapreduce.user.classpath.first</name>
        <value>true</value>
    </property>
</configuration>

@@ -0,0 +1,100 @@
<workflow-app name="AffroAffiliations" xmlns="uri:oozie:workflow:0.5">
    <parameters>
        <property>
            <name>sparkDriverMemory</name>
            <description>memory for driver process</description>
        </property>
        <property>
            <name>sparkExecutorMemory</name>
            <description>memory for individual executor</description>
        </property>
        <property>
            <name>sparkExecutorCores</name>
            <description>number of cores used by single executor</description>
        </property>
        <property>
            <name>oozieActionShareLibForSpark2</name>
            <description>oozie action sharelib for spark 2.*</description>
        </property>
        <property>
            <name>spark2ExtraListeners</name>
            <value>com.cloudera.spark.lineage.NavigatorAppListener</value>
            <description>spark 2.* extra listeners classname</description>
        </property>
        <property>
            <name>spark2SqlQueryExecutionListeners</name>
            <value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
            <description>spark 2.* sql query execution listeners classname</description>
        </property>
        <property>
            <name>spark2YarnHistoryServerAddress</name>
            <description>spark 2.* yarn history server address</description>
        </property>
        <property>
            <name>spark2EventLogDir</name>
            <description>spark 2.* event log dir location</description>
        </property>
    </parameters>

    <global>
        <job-tracker>${jobTracker}</job-tracker>
        <name-node>${nameNode}</name-node>
        <configuration>
            <property>
                <name>mapreduce.job.queuename</name>
                <value>${queueName}</value>
            </property>
            <property>
                <name>oozie.launcher.mapred.job.queue.name</name>
                <value>${oozieLauncherQueueName}</value>
            </property>
            <property>
                <name>oozie.action.sharelib.for.spark</name>
                <value>${oozieActionShareLibForSpark2}</value>
            </property>

        </configuration>
    </global>

    <start to="run-affro"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>

    <action name="run-affro">
        <spark xmlns="uri:oozie:spark-action:0.2">

            <master>yarn-cluster</master>
            <mode>cluster</mode>
            <name>Affiliations inference (Affro)</name>
            <jar>update_records.py</jar>

            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
                --executor-cores=${sparkExecutorCores}
                --driver-memory=${sparkDriverMemory}
                --conf spark.executor.memoryOverhead=${sparkExecutorMemory}
                --conf spark.sql.shuffle.partitions=${sparkShufflePartitions}
                --conf spark.extraListeners=${spark2ExtraListeners}
                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.yarn.appMasterEnv.PYSPARK_PYTHON=python3
                --conf spark.executorEnv.PYSPARK_PYTHON=python3
                --py-files ${wfAppPath}/affRo/affro_cluster.py,${wfAppPath}/affRo/affro_test_example.py,${wfAppPath}/affRo/create_input_cluster.py,${wfAppPath}/affRo/functions_cluster.py,${wfAppPath}/affRo/matching_cluster.py
                --files ${wfAppPath}/affRo/dictionaries/dix_acad.json,${wfAppPath}/affRo/dictionaries/dix_categ.json,${wfAppPath}/affRo/dictionaries/dix_city.json,${wfAppPath}/affRo/dictionaries/dix_country.json,${wfAppPath}/affRo/dictionaries/dix_mult.json,${wfAppPath}/affRo/txt_files/city_names.txt,${wfAppPath}/affRo/txt_files/remove_list.txt,${wfAppPath}/affRo/txt_files/stop_words.txt,${wfAppPath}/affRo/txt_files/university_terms.txt
            </spark-opts>

            <arg>${resultFolder}</arg>

            <file>${wfAppPath}/affRo/update_records.py#update_records.py</file>
        </spark>

        <ok to="End" />
        <error to="Kill" />

    </action>

    <end name="End"/>
</workflow-app>

@@ -28,7 +28,13 @@
    "paramLongName": "dataciteInputPath",
    "paramDescription": "the path to get the input data from Datacite",
    "paramRequired": true
  },
  },{
    "paramName": "wip",
    "paramLongName": "webCrawlInputPath",
    "paramDescription": "the path to get the input data from Web Crawl",
    "paramRequired": true
  }
  ,
  {
    "paramName": "o",
    "paramLongName": "outputPath",

@@ -17,6 +17,10 @@
        <name>dataciteInputPath</name>
        <description>the path where to find the inferred affiliation relations from Datacite</description>
    </property>
    <property>
        <name>webCrawlInputPath</name>
        <description>the path where to find the inferred affiliation relations from webCrawl</description>
    </property>
    <property>
        <name>outputPath</name>
        <description>the path where to store the actionset</description>

@@ -112,7 +116,7 @@
            <arg>--pubmedInputPath</arg><arg>${pubmedInputPath}</arg>
            <arg>--openapcInputPath</arg><arg>${openapcInputPath}</arg>
            <arg>--dataciteInputPath</arg><arg>${dataciteInputPath}</arg>

            <arg>--webCrawlInputPath</arg><arg>${webCrawlInputPath}</arg>
            <arg>--outputPath</arg><arg>${outputPath}</arg>
        </spark>
        <ok to="End"/>

@@ -0,0 +1,25 @@
[
  {
    "paramName": "ip",
    "paramLongName": "inputPath",
    "paramDescription": "the zipped opencitations file",
    "paramRequired": true
  },
  {
    "paramName": "op",
    "paramLongName": "outputPath",
    "paramDescription": "the working path",
    "paramRequired": true
  },
  {
    "paramName": "issm",
    "paramLongName": "isSparkSessionManaged",
    "paramDescription": "the hdfs name node",
    "paramRequired": false
  }, {
    "paramName": "wd",
    "paramLongName": "workingDir",
    "paramDescription": "the hdfs name node",
    "paramRequired": false
  }
]

@@ -0,0 +1,2 @@
inputPath=/data/orcid_2023/tables/
outputPath=/user/miriam.baglioni/peopleAS

@@ -0,0 +1,30 @@
<configuration>
    <property>
        <name>jobTracker</name>
        <value>yarnRM</value>
    </property>
    <property>
        <name>nameNode</name>
        <value>hdfs://nameservice1</value>
    </property>
    <property>
        <name>oozie.use.system.libpath</name>
        <value>true</value>
    </property>
    <property>
        <name>hiveMetastoreUris</name>
        <value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
    </property>
    <property>
        <name>hiveJdbcUrl</name>
        <value>jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000</value>
    </property>
    <property>
        <name>hiveDbName</name>
        <value>openaire</value>
    </property>
    <property>
        <name>oozie.launcher.mapreduce.user.classpath.first</name>
        <value>true</value>
    </property>
</configuration>

@@ -0,0 +1,111 @@
<workflow-app name="PersonEntity" xmlns="uri:oozie:workflow:0.5">
    <parameters>

        <property>
            <name>inputPath</name>
            <description>inputPath</description>
        </property>
        <property>
            <name>outputPath</name>
            <description>the path where to store the actionset</description>
        </property>
        <property>
            <name>sparkDriverMemory</name>
            <description>memory for driver process</description>
        </property>
        <property>
            <name>sparkExecutorMemory</name>
            <description>memory for individual executor</description>
        </property>
        <property>
            <name>sparkExecutorCores</name>
            <description>number of cores used by single executor</description>
        </property>
        <property>
            <name>oozieActionShareLibForSpark2</name>
            <description>oozie action sharelib for spark 2.*</description>
        </property>
        <property>
            <name>spark2ExtraListeners</name>
            <value>com.cloudera.spark.lineage.NavigatorAppListener</value>
            <description>spark 2.* extra listeners classname</description>
        </property>
        <property>
            <name>spark2SqlQueryExecutionListeners</name>
            <value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
            <description>spark 2.* sql query execution listeners classname</description>
        </property>
        <property>
            <name>spark2YarnHistoryServerAddress</name>
            <description>spark 2.* yarn history server address</description>
        </property>
        <property>
            <name>spark2EventLogDir</name>
            <description>spark 2.* event log dir location</description>
        </property>
    </parameters>

    <global>
        <job-tracker>${jobTracker}</job-tracker>
        <name-node>${nameNode}</name-node>
        <configuration>
            <property>
                <name>mapreduce.job.queuename</name>
                <value>${queueName}</value>
            </property>
            <property>
                <name>oozie.launcher.mapred.job.queue.name</name>
                <value>${oozieLauncherQueueName}</value>
            </property>
            <property>
                <name>oozie.action.sharelib.for.spark</name>
                <value>${oozieActionShareLibForSpark2}</value>
            </property>

        </configuration>
    </global>
    <start to="deleteoutputpath"/>
    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <action name="deleteoutputpath">
        <fs>
            <delete path="${outputPath}"/>
            <mkdir path="${outputPath}"/>
            <delete path="${workingDir}"/>
            <mkdir path="${workingDir}"/>
        </fs>
        <ok to="atomicactions"/>
        <error to="Kill"/>
    </action>


    <action name="atomicactions">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
            <name>Produces the ActionSet for Person entity and relevant relations</name>
            <class>eu.dnetlib.dhp.actionmanager.personentity.ExtractPerson</class>
            <jar>dhp-aggregation-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-cores=4
                --executor-memory=4G
                --driver-memory=${sparkDriverMemory}
                --conf spark.executor.memoryOverhead=5G
                --conf spark.extraListeners=${spark2ExtraListeners}
                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
                --conf spark.sql.shuffle.partitions=15000
            </spark-opts>
            <arg>--inputPath</arg><arg>${inputPath}</arg>
            <arg>--outputPath</arg><arg>${outputPath}</arg>
            <arg>--workingDir</arg><arg>${workingDir}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>
    </action>

    <end name="End"/>
</workflow-app>

@@ -0,0 +1,20 @@
[
  {
    "paramName": "sp",
    "paramLongName": "sourcePath",
    "paramDescription": "the zipped opencitations file",
    "paramRequired": true
  },
  {
    "paramName": "op",
    "paramLongName": "outputPath",
    "paramDescription": "the working path",
    "paramRequired": true
  },
  {
    "paramName": "issm",
    "paramLongName": "isSparkSessionManaged",
    "paramDescription": "the hdfs name node",
    "paramRequired": false
  }
]

@@ -0,0 +1,30 @@
<configuration>
    <property>
        <name>jobTracker</name>
        <value>yarnRM</value>
    </property>
    <property>
        <name>nameNode</name>
        <value>hdfs://nameservice1</value>
    </property>
    <property>
        <name>oozie.use.system.libpath</name>
        <value>true</value>
    </property>
    <property>
        <name>hiveMetastoreUris</name>
        <value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
    </property>
    <property>
        <name>hiveJdbcUrl</name>
        <value>jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000</value>
    </property>
    <property>
        <name>hiveDbName</name>
        <value>openaire</value>
    </property>
    <property>
        <name>oozie.launcher.mapreduce.user.classpath.first</name>
        <value>true</value>
    </property>
</configuration>

@@ -0,0 +1,125 @@

<workflow-app name="SDG no doi" xmlns="uri:oozie:workflow:0.5">
    <parameters>
        <property>
            <name>sdgPath</name>
            <description>the input path of the resources to be extended</description>
        </property>
        <property>
            <name>outputPath</name>
            <description>the path where to store the actionset</description>
        </property>
        <property>
            <name>sparkDriverMemory</name>
            <description>memory for driver process</description>
        </property>
        <property>
            <name>sparkExecutorMemory</name>
            <description>memory for individual executor</description>
        </property>
        <property>
            <name>sparkExecutorCores</name>
            <description>number of cores used by single executor</description>
        </property>
        <property>
            <name>oozieActionShareLibForSpark2</name>
            <description>oozie action sharelib for spark 2.*</description>
        </property>
        <property>
            <name>spark2ExtraListeners</name>
            <value>com.cloudera.spark.lineage.NavigatorAppListener</value>
            <description>spark 2.* extra listeners classname</description>
        </property>
        <property>
            <name>spark2SqlQueryExecutionListeners</name>
            <value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
            <description>spark 2.* sql query execution listeners classname</description>
        </property>
        <property>
            <name>spark2YarnHistoryServerAddress</name>
            <description>spark 2.* yarn history server address</description>
        </property>
        <property>
            <name>spark2EventLogDir</name>
            <description>spark 2.* event log dir location</description>
        </property>
    </parameters>

    <global>
        <job-tracker>${jobTracker}</job-tracker>
        <name-node>${nameNode}</name-node>
        <configuration>
            <property>
                <name>mapreduce.job.queuename</name>
                <value>${queueName}</value>
            </property>
            <property>
                <name>oozie.launcher.mapred.job.queue.name</name>
                <value>${oozieLauncherQueueName}</value>
            </property>
            <property>
                <name>oozie.action.sharelib.for.spark</name>
                <value>${oozieActionShareLibForSpark2}</value>
            </property>

        </configuration>
    </global>
    <start to="prepareSDG"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>

    <action name="prepareSDG">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
            <name>Produces the results from FOS</name>
            <class>eu.dnetlib.dhp.actionmanager.createunresolvedentities.PrepareSDGSparkJob</class>
            <jar>dhp-aggregation-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
                --executor-cores=${sparkExecutorCores}
                --driver-memory=${sparkDriverMemory}
                --conf spark.extraListeners=${spark2ExtraListeners}
                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
            <arg>--sourcePath</arg><arg>${sdgPath}</arg>
            <arg>--outputPath</arg><arg>${workingDir}/prepared</arg>
            <arg>--distributeDoi</arg><arg>false</arg>
        </spark>
        <ok to="produceActionSet"/>
        <error to="Kill"/>
    </action>



    <action name="produceActionSet">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
            <name>Save the action set grouping results with the same id</name>
            <class>eu.dnetlib.dhp.actionmanager.sdgnodoi.CreateActionSetSparkJob</class>
            <jar>dhp-aggregation-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
                --executor-cores=${sparkExecutorCores}
                --driver-memory=${sparkDriverMemory}
                --conf spark.extraListeners=${spark2ExtraListeners}
                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
            <arg>--sourcePath</arg><arg>${workingDir}/prepared/sdg</arg>
            <arg>--outputPath</arg><arg>${outputPath}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>
    </action>

    <end name="End"/>
</workflow-app>

@@ -16,5 +16,10 @@
    "paramLongName": "isSparkSessionManaged",
    "paramDescription": "the hdfs name node",
    "paramRequired": false
  }
  },{
    "paramName": "bl",
    "paramLongName": "blackListPath",
    "paramDescription": "the working path",
    "paramRequired": true
  }
]

@@ -1,2 +1,11 @@
sourcePath=/user/miriam.baglioni/openalex-snapshot/data/works/
outputPath=/tmp/miriam/webcrawlComplete/
#PROPERTIES TO CREATE THE ACTION SET
#sourcePath=/user/miriam.baglioni/openalex-snapshot/data/works/
#outputPath=/tmp/miriam/webcrawlComplete/
#blackListPath=/user/miriam.baglioni/openalex-blackList
#resumeFrom=create

#PROPERTIES TO REMOVE FROM THE ACTION SET
sourcePath=/var/lib/dnet/actionManager_PROD/webcrawl/rawset_28247629-468b-478e-9a42-bc540877125d_1718121542061/
outputPath=/tmp/miriam/webcrawlRemoved/
blackListPath=/user/miriam.baglioni/oalexBlackListNormalized
resumeFrom=remove

@@ -20,12 +20,19 @@
        </configuration>
    </global>

    <start to="create_actionset"/>
    <start to="resumeFrom"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>

    <decision name="resumeFrom">
        <switch>
            <case to="create_actionset">${wf:conf('resumeFrom') eq 'create'}</case>
            <default to="remove_from_actionset"/>
        </switch>
    </decision>

    <action name="create_actionset">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>

@@ -45,6 +52,32 @@
            </spark-opts>
            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
            <arg>--outputPath</arg><arg>${outputPath}</arg>
            <arg>--blackListPath</arg><arg>${blackListPath}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>
    </action>

    <action name="remove_from_actionset">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
            <name>Removes some relations found to be wrong from the AS</name>
            <class>eu.dnetlib.dhp.actionmanager.webcrawl.RemoveRelationFromActionSet</class>
            <jar>dhp-aggregation-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
                --executor-cores=${sparkExecutorCores}
                --driver-memory=${sparkDriverMemory}
                --conf spark.extraListeners=${spark2ExtraListeners}
                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
            </spark-opts>
            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
            <arg>--outputPath</arg><arg>${outputPath}</arg>
            <arg>--blackListPath</arg><arg>${blackListPath}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>

@@ -1,10 +1,5 @@
[
  {
    "id": "100007630",
    "uri": "http://dx.doi.org/10.13039/100007630",
    "name": "College of Engineering and Informatics, National University of Ireland, Galway",
    "synonym": []
  },

  {
    "id": "100007731",
    "uri": "http://dx.doi.org/10.13039/100007731",

@@ -58,7 +53,7 @@
    "uri": "http://dx.doi.org/10.13039/100010414",
    "name": "Health Research Board",
    "synonym": [
      "501100001590"
      "501100001590", "501100023273"
    ]
  },
  {

@@ -85,24 +80,6 @@
    "name": "Irish College of General Practitioners",
    "synonym": []
  },
  {
    "id": "100012734",
    "uri": "http://dx.doi.org/10.13039/100012734",
    "name": "Department for Culture, Heritage and the Gaeltacht, Ireland",
    "synonym": []
  },
  {
    "id": "100012754",
    "uri": "http://dx.doi.org/10.13039/100012754",
    "name": "Horizon Pharma",
    "synonym": []
  },
  {
    "id": "100012891",
    "uri": "http://dx.doi.org/10.13039/100012891",
    "name": "Medical Research Charities Group",
    "synonym": []
  },
  {
    "id": "100012919",
    "uri": "http://dx.doi.org/10.13039/100012919",

@@ -233,7 +210,7 @@
    "id": "100018064",
    "uri": "http://dx.doi.org/10.13039/100018064",
    "name": "Department of Tourism, Culture, Arts, Gaeltacht, Sport and Media",
    "synonym": []
    "synonym": ["100012734"]
  },
  {
    "id": "100018172",

@@ -281,13 +258,13 @@
    "id": "100019637",
    "uri": "http://dx.doi.org/10.13039/100019637",
    "name": "Horizon Therapeutics",
    "synonym": []
    "synonym": ["100012754"]
  },
  {
    "id": "100020174",
    "uri": "http://dx.doi.org/10.13039/100020174",
    "name": "Health Research Charities Ireland",
    "synonym": []
    "synonym": ["100012891"]
  },
  {
    "id": "100020202",

@@ -319,12 +296,7 @@
    "name": "Centre for Ageing Research and Development in Ireland",
    "synonym": []
  },
  {
    "id": "501100001583",
    "uri": "http://dx.doi.org/10.13039/501100001583",
    "name": "Cystinosis Foundation Ireland",
    "synonym": []
  },

  {
    "id": "501100001584",
    "uri": "http://dx.doi.org/10.13039/501100001584",

@@ -455,13 +427,13 @@
    "id": "501100001634",
    "uri": "http://dx.doi.org/10.13039/501100001634",
    "name": "University of Galway",
    "synonym": []
    "synonym": ["501100019905", "100007630", "501100020570", "501100023852"]
  },
  {
    "id": "501100001635",
    "uri": "http://dx.doi.org/10.13039/501100001635",
    "name": "University of Limerick",
    "synonym": []
    "synonym": ["501100014531"]
  },
  {
    "id": "501100001636",

@@ -491,7 +463,7 @@
    "id": "501100002736",
    "uri": "http://dx.doi.org/10.13039/501100002736",
    "name": "Covidien",
    "synonym": []
    "synonym": ["501100003956"]
  },
  {
    "id": "501100002755",

@@ -521,7 +493,7 @@
    "id": "501100003037",
    "uri": "http://dx.doi.org/10.13039/501100003037",
    "name": "Elan",
    "synonym": []
    "synonym": ["501100021694"]
  },
  {
    "id": "501100003496",

@@ -541,12 +513,6 @@
    "name": "Irish Institute of Clinical Neuroscience",
    "synonym": []
  },
  {
    "id": "501100003956",
    "uri": "http://dx.doi.org/10.13039/501100003956",
    "name": "Aspect Medical Systems",
    "synonym": []
  },
  {
    "id": "501100004162",
    "uri": "http://dx.doi.org/10.13039/501100004162",

@@ -595,17 +561,11 @@
    "name": "Technological University Dublin",
    "synonym": []
  },
  {
    "id": "501100009269",
    "uri": "http://dx.doi.org/10.13039/501100009269",
    "name": "Programme of Competitive Forestry Research for Development",
    "synonym": []
  },
  {
    "id": "501100009315",
    "uri": "http://dx.doi.org/10.13039/501100009315",
    "name": "Cystinosis Ireland",
    "synonym": []
    "synonym": ["501100001583"]
  },
  {
    "id": "501100010808",

@@ -625,12 +585,6 @@
    "name": "Alimentary Health",
    "synonym": []
  },
  {
    "id": "501100011103",
    "uri": "http://dx.doi.org/10.13039/501100011103",
    "name": "Rann\u00eds",
    "synonym": []
  },
  {
    "id": "501100012354",
    "uri": "http://dx.doi.org/10.13039/501100012354",

@@ -679,12 +633,7 @@
    "name": "Irish Centre for High-End Computing",
    "synonym": []
  },
  {
    "id": "501100019905",
    "uri": "http://dx.doi.org/10.13039/501100019905",
    "name": "Galway University Foundation",
    "synonym": []
  },

  {
    "id": "501100020036",
    "uri": "http://dx.doi.org/10.13039/501100020036",

@@ -733,12 +682,6 @@
    "name": "Insight SFI Research Centre for Data Analytics",
    "synonym": []
  },
  {
    "id": "501100021694",
    "uri": "http://dx.doi.org/10.13039/501100021694",
    "name": "Elan Pharma International",
    "synonym": []
  },
  {
    "id": "501100021838",
    "uri": "http://dx.doi.org/10.13039/501100021838",

@@ -769,12 +712,6 @@
    "name": "Institute of Technology, Tralee",
    "synonym": []
  },
  {
    "id": "501100023273",
    "uri": "http://dx.doi.org/10.13039/501100023273",
    "name": "HRB Clinical Research Facility Galway",
    "synonym": []
  },
  {
    "id": "501100023378",
    "uri": "http://dx.doi.org/10.13039/501100023378",

@@ -871,12 +808,7 @@
    "name": "Energy Policy Research Centre, Economic and Social Research Institute",
    "synonym": []
  },
  {
    "id": "501100014531",
    "uri": "http://dx.doi.org/10.13039/501100014531",
    "name": "Physical Education and Sport Sciences Department, University of Limerick",
    "synonym": []
  },

  {
    "id": "501100014745",
    "uri": "http://dx.doi.org/10.13039/501100014745",

@@ -889,22 +821,11 @@
    "name": "ADAPT - Centre for Digital Content Technology",
    "synonym": []
  },
  {
    "id": "501100020570",
    "uri": "http://dx.doi.org/10.13039/501100020570",
    "name": "College of Medicine, Nursing and Health Sciences, National University of Ireland, Galway",
    "synonym": []
  },

  {
    "id": "501100020871",
    "uri": "http://dx.doi.org/10.13039/501100020871",
    "name": "Bernal Institute, University of Limerick",
    "synonym": []
  },
  {
    "id": "501100023852",
    "uri": "http://dx.doi.org/10.13039/501100023852",
    "name": "Moore Institute for Research in the Humanities and Social Studies, University of Galway",
    "synonym": []
  }
]

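The whitelist edits above fold duplicated funder registrations into the synonym list of the canonical entry (e.g. the University of Galway record absorbs its former foundation and college ids). A minimal Java sketch of how such a synonym list can be turned into a lookup index follows; the class and method names are illustrative, the ids are taken from the entries above.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class FunderSynonymsSketch {

	// build an index that maps every id (canonical or synonym) to its canonical id
	public static Map<String, String> buildIndex(Map<String, List<String>> canonicalToSynonyms) {
		Map<String, String> index = new HashMap<>();
		canonicalToSynonyms.forEach((canonical, synonyms) -> {
			index.put(canonical, canonical);                 // a canonical id maps to itself
			synonyms.forEach(s -> index.put(s, canonical));  // each synonym maps to its canonical id
		});
		return index;
	}

	public static void main(String[] args) {
		Map<String, List<String>> whitelist = new HashMap<>();
		whitelist.put("501100001634", Arrays.asList("501100019905", "100007630", "501100020570", "501100023852"));
		Map<String, String> index = buildIndex(whitelist);
		System.out.println(index.get("100007630")); // -> 501100001634 (University of Galway)
	}
}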
@@ -48,12 +48,37 @@
        <description>Used to configure the heap size for the map JVM process. Should be 80% of mapreduce.map.memory.mb.</description>
    </property>

    <property>
        <name>JAVA_HOME</name>
        <value>/srv/java/openjdk-17</value>
        <description>Used to configure the Java home location for oozie.launcher.mapreduce.map.env</description>
    </property>

    <property>
        <name>JAVA_OPTS</name>
        <value>-Dcom.sun.security.enableAIAcaIssuers=true</value>
        <description>Used to configure the JAVA_OPTS parameter</description>
    </property>

</parameters>

<global>
    <job-tracker>${jobTracker}</job-tracker>
    <name-node>${nameNode}</name-node>
    <configuration>
        <property>
            <name>mapreduce.job.queuename</name>
            <value>${queueName}</value>
        </property>
        <property>
            <name>oozie.launcher.mapred.job.queue.name</name>
            <value>${oozieLauncherQueueName}</value>
        </property>
        <property>
            <name>oozie.launcher.mapreduce.map.env</name>
            <value>JAVA_HOME=${JAVA_HOME}</value>
        </property>
    </configuration>
</global>

<start to="collection_mode"/>

@@ -99,7 +124,7 @@
    <action name="CollectionWorker">
        <java>
            <main-class>eu.dnetlib.dhp.collection.CollectorWorkerApplication</main-class>
            <java-opts>${collection_java_xmx}</java-opts>
            <java-opts>${JAVA_OPTS} ${collection_java_xmx}</java-opts>
            <arg>--apidescriptor</arg><arg>${apiDescription}</arg>
            <arg>--namenode</arg><arg>${nameNode}</arg>
            <arg>--workflowId</arg><arg>${workflowId}</arg>

@@ -93,7 +93,7 @@ case object Crossref2Oaf {

    val cf = new KeyValue
    cf.setValue("UnpayWall")
    cf.setKey(s"10|openaire____:${DHPUtils.md5("UnpayWall".toLowerCase)}")
    cf.setKey(s"10|openaire____::${DHPUtils.md5("UnpayWall".toLowerCase)}")
    cf

  }

@@ -888,7 +888,11 @@ case object Crossref2Oaf {
        val targetId = getProjectId("cihr________", "1e5e62235d094afd01cd56e65112fc63")
        queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
        queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)

      // Added mapping for DFG
      case "10.13039/501100001659" =>
        val targetId = getProjectId("dfgf________", "1e5e62235d094afd01cd56e65112fc63")
        queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)
        queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES)
      case "10.13039/100020031" =>
        val targetId = getProjectId("tara________", "1e5e62235d094afd01cd56e65112fc63")
        queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY)

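The hunk above pairs every result-to-project IS_PRODUCED_BY relation with its inverse PRODUCES edge. A minimal Java sketch of that symmetric-relation pattern follows; the Relation class, relClass strings, and identifiers are illustrative stand-ins, not the project's actual schema classes.

import java.util.ArrayList;
import java.util.List;

public class BidirectionalRelationSketch {

	static class Relation {
		final String source, target, relClass;

		Relation(String source, String target, String relClass) {
			this.source = source;
			this.target = target;
			this.relClass = relClass;
		}

		public String toString() {
			return source + " -[" + relClass + "]-> " + target;
		}
	}

	// every result -> project link is emitted together with the inverse project -> result link
	static List<Relation> link(String resultId, String projectId) {
		List<Relation> queue = new ArrayList<>();
		queue.add(new Relation(resultId, projectId, "isProducedBy"));
		queue.add(new Relation(projectId, resultId, "produces"));
		return queue;
	}

	public static void main(String[] args) {
		link("50|doi_________::abc", "40|dfgf________::xyz").forEach(System.out::println);
	}
}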
@@ -1025,6 +1029,7 @@ case object Crossref2Oaf {
        tp._1 match {
          case "electronic" => journal.setIssnOnline(tp._2)
          case "print" => journal.setIssnPrinted(tp._2)
          case _ =>
        }
      })
  }

@@ -79,23 +79,6 @@ object MagUtility extends Serializable {
  private val MAGCollectedFrom = keyValue(ModelConstants.MAG_ID, ModelConstants.MAG_NAME)

  private val MAGDataInfo: DataInfo = {
    val di = new DataInfo
    di.setDeletedbyinference(false)
    di.setInferred(false)
    di.setInvisible(false)
    di.setTrust("0.9")
    di.setProvenanceaction(
      OafMapperUtils.qualifier(
        ModelConstants.SYSIMPORT_ACTIONSET,
        ModelConstants.SYSIMPORT_ACTIONSET,
        ModelConstants.DNET_PROVENANCE_ACTIONS,
        ModelConstants.DNET_PROVENANCE_ACTIONS
      )
    )
    di
  }

  private val MAGDataInfoInvisible: DataInfo = {
    val di = new DataInfo
    di.setDeletedbyinference(false)
    di.setInferred(false)

@@ -453,7 +436,6 @@ object MagUtility extends Serializable {

      case "repository" =>
        result = new Publication()
        result.setDataInfo(MAGDataInfoInvisible)
        qualifier(
          "0038",
          "Other literature type",

@@ -488,8 +470,7 @@ object MagUtility extends Serializable {
    }

    if (result != null) {
      if (result.getDataInfo == null)
        result.setDataInfo(MAGDataInfo)
      result.setDataInfo(MAGDataInfo)
      val i = new Instance
      i.setInstancetype(tp)
      i.setInstanceTypeMapping(

@@ -512,7 +493,7 @@ object MagUtility extends Serializable {
      return null

    result.setCollectedfrom(List(MAGCollectedFrom).asJava)
    val pidList = List(
    var pidList = List(
      structuredProperty(
        paper.paperId.get.toString,
        qualifier(

@@ -525,8 +506,6 @@ object MagUtility extends Serializable {
      )
    )

    result.setPid(pidList.asJava)

    result.setOriginalId(pidList.map(s => s.getValue).asJava)

    result.setId(s"50|mag_________::${DHPUtils.md5(paper.paperId.get.toString)}")

@@ -618,22 +597,23 @@ object MagUtility extends Serializable {
    }

    val instance = result.getInstance().get(0)
    instance.setPid(pidList.asJava)
    if (paper.doi.orNull != null)
      instance.setAlternateIdentifier(
        List(
          structuredProperty(
            paper.doi.get,
            qualifier(
              PidType.doi.toString,
              PidType.doi.toString,
              ModelConstants.DNET_PID_TYPES,
              ModelConstants.DNET_PID_TYPES
            ),
            null
          )
        ).asJava

    if (paper.doi.orNull != null) {
      pidList = pidList ::: List(
        structuredProperty(
          paper.doi.get,
          qualifier(
            PidType.doi.toString,
            PidType.doi.toString,
            ModelConstants.DNET_PID_TYPES,
            ModelConstants.DNET_PID_TYPES
          ),
          null
        )
      )
    }
    instance.setPid(pidList.asJava)
    result.setPid(pidList.asJava)
    instance.setUrl(paper.urls.get.asJava)
    instance.setHostedby(ModelConstants.UNKNOWN_REPOSITORY)
    instance.setCollectedfrom(MAGCollectedFrom)

@@ -38,6 +38,7 @@ class SparkMAGtoOAF(propertyPath: String, args: Array[String], log: Logger)
    spark.read
      .load(s"$magBasePath/mag_denormalized")
      .as[MAGPaper]
      .filter(col("doi").isNotNull)
      .map(s => MagUtility.convertMAGtoOAF(s))
      .filter(s => s != null)
      .write

@@ -2,12 +2,9 @@ package eu.dnetlib.dhp.sx.bio.ebi

import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.collection.CollectionUtils
import eu.dnetlib.dhp.common.Constants.{MDSTORE_DATA_PATH, MDSTORE_SIZE_PATH}
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup
import eu.dnetlib.dhp.schema.mdstore.MDStoreVersion
import eu.dnetlib.dhp.schema.oaf.{Oaf, Result}
import eu.dnetlib.dhp.schema.oaf.Oaf
import eu.dnetlib.dhp.sx.bio.pubmed._
import eu.dnetlib.dhp.utils.DHPUtils.{MAPPER, writeHdfsFile}
import eu.dnetlib.dhp.utils.ISLookupClientFactory
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration

@@ -17,13 +14,13 @@ import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.HttpClientBuilder
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql._
import org.apache.spark.sql.expressions.Aggregator
import org.slf4j.{Logger, LoggerFactory}

import java.io.InputStream
import scala.io.Source
import scala.xml.pull.XMLEventReader
import java.io.{ByteArrayInputStream, InputStream}
import java.nio.charset.Charset
import javax.xml.stream.XMLInputFactory

object SparkCreateBaselineDataFrame {

@@ -86,7 +83,7 @@ object SparkCreateBaselineDataFrame {
        if (response.getStatusLine.getStatusCode > 400) {
          tries -= 1
        } else
          return IOUtils.toString(response.getEntity.getContent)
          return IOUtils.toString(response.getEntity.getContent, Charset.defaultCharset())
      } catch {
        case e: Throwable =>
          println(s"Error on requesting ${r.getURI}")

@@ -158,7 +155,8 @@ object SparkCreateBaselineDataFrame {
      IOUtils.toString(
        SparkEBILinksToOaf.getClass.getResourceAsStream(
          "/eu/dnetlib/dhp/sx/bio/ebi/baseline_to_oaf_params.json"
        )
        ),
        Charset.defaultCharset()
      )
    )
    parser.parseArgument(args)

@@ -167,15 +165,11 @@ object SparkCreateBaselineDataFrame {
    val workingPath = parser.get("workingPath")
    log.info("workingPath: {}", workingPath)

    val mdstoreOutputVersion = parser.get("mdstoreOutputVersion")
    log.info("mdstoreOutputVersion: {}", mdstoreOutputVersion)

    val cleanedMdStoreVersion = MAPPER.readValue(mdstoreOutputVersion, classOf[MDStoreVersion])
    val outputBasePath = cleanedMdStoreVersion.getHdfsPath
    log.info("outputBasePath: {}", outputBasePath)
    val targetPath = parser.get("targetPath")
    log.info("targetPath: {}", targetPath)

    val hdfsServerUri = parser.get("hdfsServerUri")
    log.info("hdfsServerUri: {}", hdfsServerUri)
    log.info("hdfsServerUri: {}", targetPath)

    val skipUpdate = parser.get("skipUpdate")
    log.info("skipUpdate: {}", skipUpdate)

@@ -201,10 +195,11 @@ object SparkCreateBaselineDataFrame {
    if (!"true".equalsIgnoreCase(skipUpdate)) {
      downloadBaseLineUpdate(s"$workingPath/baseline", hdfsServerUri)
      val k: RDD[(String, String)] = sc.wholeTextFiles(s"$workingPath/baseline", 2000)
      val inputFactory = XMLInputFactory.newInstance
      val ds: Dataset[PMArticle] = spark.createDataset(
        k.filter(i => i._1.endsWith(".gz"))
          .flatMap(i => {
            val xml = new XMLEventReader(Source.fromBytes(i._2.getBytes()))
            val xml = inputFactory.createXMLEventReader(new ByteArrayInputStream(i._2.getBytes()))
            new PMParser(xml)
          })
      )

@@ -223,11 +218,8 @@ object SparkCreateBaselineDataFrame {
        .map(a => PubMedToOaf.convert(a, vocabularies))
        .as[Oaf]
        .filter(p => p != null),
      s"$outputBasePath/$MDSTORE_DATA_PATH"
      targetPath
    )

    val df = spark.read.text(s"$outputBasePath/$MDSTORE_DATA_PATH")
    val mdStoreSize = df.count
    writeHdfsFile(spark.sparkContext.hadoopConfiguration, s"$mdStoreSize", s"$outputBasePath/$MDSTORE_SIZE_PATH")
  }
}

@@ -1,7 +1,8 @@
package eu.dnetlib.dhp.sx.bio.pubmed

import scala.xml.MetaData
import scala.xml.pull.{EvElemEnd, EvElemStart, EvText, XMLEventReader}
import javax.xml.stream.XMLEventReader
import scala.xml.pull.{EvElemEnd, EvElemStart, EvText}

/** @param xml
 */

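The two hunks above swap the scala.xml.pull.XMLEventReader (removed from the scala-xml library in newer Scala versions) for the standard javax.xml.stream reader when parsing the PubMed baseline. A minimal Java sketch of the equivalent StAX event loop follows; the sample XML is illustrative, the javax.xml.stream calls are the standard library API.

import java.io.ByteArrayInputStream;

import javax.xml.stream.XMLEventReader;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.events.XMLEvent;

public class StaxPullSketch {

	public static void main(String[] args) throws Exception {
		String xml = "<PubmedArticleSet><PMID>12345</PMID></PubmedArticleSet>";
		XMLEventReader reader = XMLInputFactory
			.newInstance()
			.createXMLEventReader(new ByteArrayInputStream(xml.getBytes("UTF-8")));
		// pull events one at a time, as the pull-style parser did
		while (reader.hasNext()) {
			XMLEvent event = reader.nextEvent();
			if (event.isStartElement()) {
				System.out.println("start: " + event.asStartElement().getName().getLocalPart());
			} else if (event.isCharacters()) {
				System.out.println("text: " + event.asCharacters().getData().trim());
			}
		}
	}
}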
@@ -88,6 +88,7 @@ public class PrepareAffiliationRelationsTest {
				"-pubmedInputPath", crossrefAffiliationRelationPath,
				"-openapcInputPath", crossrefAffiliationRelationPath,
				"-dataciteInputPath", crossrefAffiliationRelationPath,
				"-webCrawlInputPath", crossrefAffiliationRelationPath,
				"-outputPath", outputPath
			});

@@ -104,7 +105,7 @@ public class PrepareAffiliationRelationsTest {
		// );
		// }
		// count the number of relations
		assertEquals(80, tmp.count());
		assertEquals(120, tmp.count());

		Dataset<Relation> dataset = spark.createDataset(tmp.rdd(), Encoders.bean(Relation.class));
		dataset.createOrReplaceTempView("result");

@@ -115,7 +116,7 @@ public class PrepareAffiliationRelationsTest {
		// verify that we have equal number of bi-directional relations
		Assertions
			.assertEquals(
				40, execVerification
				60, execVerification
					.filter(
						"relClass='" + ModelConstants.HAS_AUTHOR_INSTITUTION + "'")
					.collectAsList()

@@ -123,7 +124,7 @@ public class PrepareAffiliationRelationsTest {

		Assertions
			.assertEquals(
				40, execVerification
				60, execVerification
					.filter(
						"relClass='" + ModelConstants.IS_AUTHOR_INSTITUTION_OF + "'")
					.collectAsList()

@@ -15,10 +15,7 @@ import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -119,7 +119,9 @@ public class ReadCOCITest {
				workingDir.toString() + "/COCI",
				"-outputPath",
				workingDir.toString() + "/COCI_json/",
				"-inputFile", "input1;input2;input3;input4;input5"
				"-inputFile", "input1;input2;input3;input4;input5",
				"-format",
				"COCI"
			});

		final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());

@ -0,0 +1,213 @@
|
|||
|
||||
package eu.dnetlib.dhp.actionmanager.person;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.spark.SparkConf;
|
||||
import org.apache.spark.api.java.JavaRDD;
|
||||
import org.apache.spark.api.java.JavaSparkContext;
|
||||
import org.apache.spark.sql.SparkSession;
|
||||
import org.junit.jupiter.api.AfterAll;
|
||||
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.actionmanager.personentity.ExtractPerson;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.Person;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.utils.DHPUtils;

public class CreatePersonAS {

	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	private static SparkSession spark;

	private static Path workingDir;
	private static final Logger log = LoggerFactory.getLogger(CreatePersonAS.class);

	@BeforeAll
	public static void beforeAll() throws IOException {
		workingDir = Files.createTempDirectory(CreatePersonAS.class.getSimpleName());
		log.info("using work dir {}", workingDir);

		SparkConf conf = new SparkConf();
		conf.setAppName(CreatePersonAS.class.getSimpleName());

		conf.setMaster("local[*]");
		conf.set("spark.driver.host", "localhost");
		conf.set("hive.metastore.local", "true");
		conf.set("spark.ui.enabled", "false");
		conf.set("spark.sql.codegen.wholeStage", "false");
		conf.set("spark.sql.warehouse.dir", workingDir.toString());
		conf.set("hive.metastore.warehouse.dir", workingDir.resolve("warehouse").toString());

		spark = SparkSession
			.builder()
			.appName(CreatePersonAS.class.getSimpleName())
			.config(conf)
			.getOrCreate();
	}

	@AfterAll
	public static void afterAll() throws IOException {
		FileUtils.deleteDirectory(workingDir.toFile());
		spark.stop();
	}

	@Test
	void testAuthors() throws Exception {

		String inputPath = getClass()
			.getResource(
				"/eu/dnetlib/dhp/actionmanager/person/")
			.getPath();

		// spark
		// 	.read()
		// 	.parquet(inputPath + "Authors")
		// 	.as(Encoders.bean(Author.class))
		// 	.filter((FilterFunction<Author>) a -> Optional.ofNullable(a.getOtherNames()).isPresent() &&
		// 		Optional.ofNullable(a.getBiography()).isPresent())
		// 	.write()
		// 	.mode(SaveMode.Overwrite)
		// 	.parquet(workingDir.toString() + "AuthorsSubset");

		ExtractPerson
			.main(
				new String[] {
					"-isSparkSessionManaged",
					Boolean.FALSE.toString(),
					"-inputPath",
					inputPath,
					"-outputPath",
					workingDir.toString() + "/actionSet1",
					"-workingDir",
					workingDir.toString() + "/working"
				});

		final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());

		JavaRDD<Relation> relations = sc
			.sequenceFile(workingDir.toString() + "/actionSet1", Text.class, Text.class)
			.filter(v -> "eu.dnetlib.dhp.schema.oaf.Relation".equalsIgnoreCase(v._1().toString()))
			.map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
			.map(aa -> ((Relation) aa.getPayload()));

		JavaRDD<Person> people = sc
			.sequenceFile(workingDir.toString() + "/actionSet1", Text.class, Text.class)
			.filter(v -> "eu.dnetlib.dhp.schema.oaf.Person".equalsIgnoreCase(v._1().toString()))
			.map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
			.map(aa -> ((Person) aa.getPayload()));

		Assertions.assertEquals(7, people.count());
		Assertions
			.assertEquals(
				"Paulo",
				people
					.filter(
						p -> p.getPid().stream().anyMatch(id -> id.getValue().equalsIgnoreCase("0000-0002-3210-3034")))
					.first()
					.getGivenName());
		Assertions
			.assertEquals(
				"Tavares",
				people
					.filter(
						p -> p.getPid().stream().anyMatch(id -> id.getValue().equalsIgnoreCase("0000-0002-3210-3034")))
					.first()
					.getFamilyName());
		Assertions
			.assertEquals(
				4,
				people
					.filter(
						p -> p.getPid().stream().anyMatch(id -> id.getValue().equalsIgnoreCase("0000-0002-3210-3034")))
					.first()
					.getAlternativeNames()
					.size());
		Assertions
			.assertEquals(
				4,
				people
					.filter(
						p -> p.getPid().stream().anyMatch(id -> id.getValue().equalsIgnoreCase("0000-0002-3210-3034")))
					.first()
					.getPid()
					.size());
		Assertions
			.assertTrue(
				people
					.filter(
						p -> p.getPid().stream().anyMatch(id -> id.getValue().equalsIgnoreCase("0000-0002-3210-3034")))
					.first()
					.getPid()
					.stream()
					.anyMatch(
						p -> p.getQualifier().getSchemename().equalsIgnoreCase("Scopus Author ID")
							&& p.getValue().equalsIgnoreCase("15119405200")));

		Assertions
			.assertEquals(
				16,
				relations
					.filter(r -> r.getRelClass().equalsIgnoreCase(ModelConstants.RESULT_PERSON_HASAUTHORED))
					.count());
		Assertions
			.assertEquals(
				14,
				relations
					.filter(r -> r.getRelClass().equalsIgnoreCase(ModelConstants.PERSON_PERSON_HASCOAUTHORED))
					.count());
		Assertions
			.assertEquals(
				3,
				relations
					.filter(
						r -> r.getSource().equalsIgnoreCase("30|orcid_______::" + DHPUtils.md5("0000-0001-6291-9619"))
							&& r.getRelClass().equalsIgnoreCase(ModelConstants.RESULT_PERSON_HASAUTHORED))
					.count());
		Assertions
			.assertEquals(
				2,
				relations
					.filter(
						r -> r.getSource().equalsIgnoreCase("30|orcid_______::" + DHPUtils.md5("0000-0001-6291-9619"))
							&& r.getRelClass().equalsIgnoreCase(ModelConstants.RESULT_PERSON_HASAUTHORED)
							&& r.getTarget().startsWith("50|doi"))
					.count());
		Assertions
			.assertEquals(
				1,
				relations
					.filter(
						r -> r.getSource().equalsIgnoreCase("30|orcid_______::" + DHPUtils.md5("0000-0001-6291-9619"))
							&& r.getRelClass().equalsIgnoreCase(ModelConstants.RESULT_PERSON_HASAUTHORED)
							&& r.getTarget().startsWith("50|arXiv"))
					.count());

		Assertions
			.assertEquals(
				1,
				relations
					.filter(
						r -> r.getSource().equalsIgnoreCase("30|orcid_______::" + DHPUtils.md5("0000-0001-6291-9619"))
							&& r.getRelClass().equalsIgnoreCase(ModelConstants.PERSON_PERSON_HASCOAUTHORED))
					.count());
		Assertions.assertEquals(33, relations.count());

	}

}
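The relation assertions above rely on a person identifier convention: the orcid datasource prefix "30|orcid_______::" followed by the MD5 of the ORCID value. A minimal sketch of that construction, using only DHPUtils.md5 as the test itself does (the class name below is illustrative, not part of the branch):

import eu.dnetlib.dhp.utils.DHPUtils;

public class PersonIdSketch {

	public static void main(String[] args) {
		// same expression the assertions build inline
		String orcid = "0000-0001-6291-9619";
		String personId = "30|orcid_______::" + DHPUtils.md5(orcid);
		System.out.println(personId); // source id of the relations emitted for this author
	}
}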
@@ -2,6 +2,7 @@
 package eu.dnetlib.dhp.actionmanager.webcrawl;

 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;

 import java.io.IOException;
 import java.nio.file.Files;
@@ -75,7 +76,11 @@ public class CreateASTest {

 		String inputPath = getClass()
 			.getResource(
-				"/eu/dnetlib/dhp/actionmanager/webcrawl/")
+				"/eu/dnetlib/dhp/actionmanager/webcrawl/input/")
 			.getPath();
+		String blackListPath = getClass()
+			.getResource(
+				"/eu/dnetlib/dhp/actionmanager/webcrawl/blackList/")
+			.getPath();

 		CreateActionSetFromWebEntries
@@ -86,7 +91,8 @@ public class CreateASTest {
 					"-sourcePath",
 					inputPath,
 					"-outputPath",
-					workingDir.toString() + "/actionSet1"
+					workingDir.toString() + "/actionSet1",
+					"-blackListPath", blackListPath
 				});

 		final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
@@ -96,7 +102,10 @@ public class CreateASTest {
 			.map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
 			.map(aa -> ((Relation) aa.getPayload()));

-		Assertions.assertEquals(64, tmp.count());
+		tmp.foreach(r -> System.out.println(new ObjectMapper().writeValueAsString(r)));
+		tmp.foreach(r -> assertTrue(r.getSource().startsWith("20|ror") || r.getSource().startsWith("50|doi")));
+		tmp.foreach(r -> assertTrue(r.getTarget().startsWith("20|ror") || r.getTarget().startsWith("50|doi")));
+		Assertions.assertEquals(24, tmp.count());

 	}

@@ -107,7 +116,11 @@ public class CreateASTest {

 		String inputPath = getClass()
 			.getResource(
-				"/eu/dnetlib/dhp/actionmanager/webcrawl/")
+				"/eu/dnetlib/dhp/actionmanager/webcrawl/input/")
 			.getPath();
+		String blackListPath = getClass()
+			.getResource(
+				"/eu/dnetlib/dhp/actionmanager/webcrawl/blackList/")
+			.getPath();

 		CreateActionSetFromWebEntries
@@ -118,7 +131,8 @@ public class CreateASTest {
 					"-sourcePath",
 					inputPath,
 					"-outputPath",
-					workingDir.toString() + "/actionSet1"
+					workingDir.toString() + "/actionSet1",
+					"-blackListPath", blackListPath
 				});

 		final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
@@ -184,7 +198,7 @@ public class CreateASTest {

 		Assertions
 			.assertEquals(
-				5, tmp
+				1, tmp
 					.filter(
 						r -> r
 							.getSource()
@@ -197,7 +211,7 @@ public class CreateASTest {

 		Assertions
 			.assertEquals(
-				5, tmp
+				1, tmp
 					.filter(
 						r -> r
 							.getTarget()
@@ -210,7 +224,7 @@ public class CreateASTest {

 		Assertions
 			.assertEquals(
-				2, tmp
+				1, tmp
 					.filter(
 						r -> r
 							.getTarget()
@@ -218,13 +232,13 @@ public class CreateASTest {
 							"20|ror_________::" + IdentifierFactory
 								.md5(
 									PidCleaner
-										.normalizePidValue(PidType.doi.toString(), "https://ror.org/03265fv13")))
+										.normalizePidValue("ROR", "https://ror.org/03265fv13")))
 					&& r.getSource().startsWith("50|doi"))
 				.count());

 		Assertions
 			.assertEquals(
-				2, tmp
+				0, tmp
 					.filter(
 						r -> r
 							.getTarget()
@@ -238,7 +252,7 @@ public class CreateASTest {

 		Assertions
 			.assertEquals(
-				1, tmp
+				0, tmp
 					.filter(
 						r -> r
 							.getTarget()
@@ -258,6 +272,10 @@ public class CreateASTest {
 			.getResource(
 				"/eu/dnetlib/dhp/actionmanager/webcrawl")
 			.getPath();
+		String blackListPath = getClass()
+			.getResource(
+				"/eu/dnetlib/dhp/actionmanager/webcrawl/blackList/")
+			.getPath();

 		CreateActionSetFromWebEntries
 			.main(
@@ -267,7 +285,8 @@ public class CreateASTest {
 					"-sourcePath",
 					inputPath,
 					"-outputPath",
-					workingDir.toString() + "/actionSet1"
+					workingDir.toString() + "/actionSet1",
+					"-blackListPath", blackListPath
 				});

 		final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
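The recurring change in this file threads a new "-blackListPath" argument into CreateActionSetFromWebEntries and lowers the expected counts accordingly; the filtering logic itself is not part of the hunks shown. A hedged sketch of what such a filter could look like, assuming the blacklist loads as a dataset with an "id" column (class and column names are illustrative, not the project's actual code):

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;

import eu.dnetlib.dhp.schema.oaf.Relation;

public class BlackListFilterSketch {

	// keep only relations whose source id is absent from the blacklist (left-anti join);
	// a symmetric pass on the target column would complete the filter
	public static Dataset<Relation> apply(Dataset<Relation> relations, Dataset<Row> blackList) {
		return relations
			.join(blackList, relations.col("source").equalTo(blackList.col("id")), "left_anti")
			.as(Encoders.bean(Relation.class));
	}
}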
@@ -0,0 +1,108 @@

package eu.dnetlib.dhp.actionmanager.webcrawl;

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.io.Text;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
import eu.dnetlib.dhp.schema.oaf.utils.PidCleaner;
import eu.dnetlib.dhp.schema.oaf.utils.PidType;

/**
 * @author miriam.baglioni
 * @Date 22/04/24
 */
public class RemoveFromASTest {
	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	private static SparkSession spark;

	private static Path workingDir;
	private static final Logger log = LoggerFactory.getLogger(RemoveFromASTest.class);

	@BeforeAll
	public static void beforeAll() throws IOException {
		workingDir = Files.createTempDirectory(RemoveFromASTest.class.getSimpleName());
		log.info("using work dir {}", workingDir);

		SparkConf conf = new SparkConf();
		conf.setAppName(RemoveFromASTest.class.getSimpleName());

		conf.setMaster("local[*]");
		conf.set("spark.driver.host", "localhost");
		conf.set("hive.metastore.local", "true");
		conf.set("spark.ui.enabled", "false");
		conf.set("spark.sql.warehouse.dir", workingDir.toString());
		conf.set("hive.metastore.warehouse.dir", workingDir.resolve("warehouse").toString());

		spark = SparkSession
			.builder()
			.appName(RemoveFromASTest.class.getSimpleName())
			.config(conf)
			.getOrCreate();
	}

	@AfterAll
	public static void afterAll() throws IOException {
		FileUtils.deleteDirectory(workingDir.toFile());
		spark.stop();
	}

	@Test
	void testNumberofRelations() throws Exception {

		String inputPath = getClass()
			.getResource(
				"/eu/dnetlib/dhp/actionmanager/webcrawl/actionSet/")
			.getPath();
		String blackListPath = getClass()
			.getResource(
				"/eu/dnetlib/dhp/actionmanager/webcrawl/blackListRemove/")
			.getPath();

		RemoveRelationFromActionSet
			.main(
				new String[] {
					"-isSparkSessionManaged",
					Boolean.FALSE.toString(),
					"-sourcePath",
					inputPath,
					"-outputPath",
					workingDir.toString() + "/actionSet1",
					"-blackListPath", blackListPath
				});

		final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());

		JavaRDD<Relation> tmp = sc
			.sequenceFile(workingDir.toString() + "/actionSet1", Text.class, Text.class)
			.map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
			.map(aa -> ((Relation) aa.getPayload()));

		Assertions.assertEquals(22, tmp.count());

	}

}
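Both webcrawl tests decode the action set from Hadoop sequence files whose key is the payload class name and whose value is a JSON-serialized AtomicAction; that is why the read paths above go through OBJECT_MAPPER and getPayload(). A minimal sketch of the corresponding record layout, under that assumption (the class name below is illustrative):

import org.apache.hadoop.io.Text;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.oaf.Relation;

public class ActionSetRecordSketch {

	public static void main(String[] args) throws Exception {
		AtomicAction<Relation> aa = new AtomicAction<>(Relation.class, new Relation());
		// key: payload class name, the value some tests filter on via v._1()
		Text key = new Text(aa.getClazz().getCanonicalName());
		// value: the JSON the tests map back into an AtomicAction via v._2()
		Text value = new Text(new ObjectMapper().writeValueAsString(aa));
		System.out.println(key + " -> " + value);
	}
}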
@@ -0,0 +1,64 @@

package eu.dnetlib.dhp.collection.plugin.file;

import java.io.IOException;
import java.util.HashMap;
import java.util.Objects;
import java.util.stream.Stream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.junit.jupiter.api.*;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.CollectorException;

@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@ExtendWith(MockitoExtension.class)
public class FileGZipMultipleNodeTest {

	private static final Logger log = LoggerFactory.getLogger(FileGZipCollectorPluginTest.class);

	private final ApiDescriptor api = new ApiDescriptor();

	private FileGZipCollectorPlugin plugin;

	private static final String SPLIT_ON_ELEMENT = "incollection,article";

	@BeforeEach
	public void setUp() throws IOException {

		final String gzipFile = Objects
			.requireNonNull(
				this
					.getClass()
					.getResource("/eu/dnetlib/dhp/collection/plugin/file/dblp.gz"))
			.getFile();

		api.setBaseUrl(gzipFile);

		HashMap<String, String> params = new HashMap<>();
		params.put("splitOnElement", SPLIT_ON_ELEMENT);

		api.setParams(params);

		FileSystem fs = FileSystem.get(new Configuration());
		plugin = new FileGZipCollectorPlugin(fs);
	}

	@Test
	void test() throws CollectorException {

		final Stream<String> stream = plugin.collect(api, new AggregatorReport());

		stream.limit(10).forEach(s -> {
			Assertions.assertTrue(s.length() > 0);
			log.info(s);
		});
	}
}
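The comma-separated SPLIT_ON_ELEMENT value is presumably what the "MultipleNode" in the class name refers to: splitting the decompressed XML stream on more than one element name. How the plugin parses that parameter is not shown in this diff; a hypothetical reading (class name illustrative):

import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;

public class SplitElementsSketch {

	public static void main(String[] args) {
		// hypothetical: turn "incollection,article" into the set of element names to split on
		Set<String> elements = Arrays
			.stream("incollection,article".split(","))
			.map(String::trim)
			.collect(Collectors.toSet());
		System.out.println(elements); // incollection and article, in unspecified order
	}
}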
@@ -0,0 +1,58 @@

package eu.dnetlib.dhp.collection.plugin.researchfi;

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

import org.dom4j.DocumentException;
import org.dom4j.DocumentHelper;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

import eu.dnetlib.dhp.collection.ApiDescriptor;
import eu.dnetlib.dhp.common.aggregation.AggregatorReport;
import eu.dnetlib.dhp.common.collection.CollectorException;

public class ResearchFiCollectorPluginTest {

	private final ResearchFiCollectorPlugin plugin = new ResearchFiCollectorPlugin();

	@Test
	@Disabled
	void testCollect() throws CollectorException {
		final ApiDescriptor api = new ApiDescriptor();
		api.setBaseUrl("https://research.fi/api/rest/v1/funding-decisions?FunderName=AKA&FundingStartYearFrom=2022");
		api.setProtocol("research_fi");
		api
			.getParams()
			.put("auth_url", "https://researchfi-auth.2.rahtiapp.fi/realms/publicapi/protocol/openid-connect/token");
		api.getParams().put("auth_client_id", "");
		api.getParams().put("auth_client_secret", "");

		final AtomicLong count = new AtomicLong(0);
		final Set<String> ids = new HashSet<>();

		this.plugin.collect(api, new AggregatorReport()).forEach(s -> {

			if (count.getAndIncrement() == 0) {
				System.out.println("First: " + s);
			}

			try {
				final String id = DocumentHelper.parseText(s).valueOf("/recordWrap/funderProjectNumber");
				if (ids.contains(id)) {
					System.out.println("Id already present: " + id);
				}
				ids.add(id);
			} catch (final DocumentException e) {
				throw new RuntimeException(e);
			}
		});

		System.out.println("Total records: " + count);
		System.out.println("Total identifiers: " + ids.size());

	}

}
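The test is @Disabled and leaves auth_client_id/auth_client_secret empty, so it only runs with real credentials against the Keycloak token endpoint configured as auth_url. A hypothetical sketch of the client-credentials request that endpoint shape suggests (the plugin's actual HTTP handling is not shown in this diff; standard OpenID Connect semantics are assumed, and the class name is illustrative):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TokenRequestSketch {

	public static void main(String[] args) throws Exception {
		// assumed: a standard client-credentials grant against the auth_url used in the test
		String body = "grant_type=client_credentials&client_id=<id>&client_secret=<secret>";
		HttpRequest req = HttpRequest
			.newBuilder(URI.create("https://researchfi-auth.2.rahtiapp.fi/realms/publicapi/protocol/openid-connect/token"))
			.header("Content-Type", "application/x-www-form-urlencoded")
			.POST(HttpRequest.BodyPublishers.ofString(body))
			.build();
		HttpResponse<String> resp = HttpClient.newHttpClient().send(req, HttpResponse.BodyHandlers.ofString());
		System.out.println(resp.body()); // expected to carry an access_token field, if the assumption holds
	}
}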
@@ -3,6 +3,7 @@ package eu.dnetlib.dhp.collection.plugin.rest;

 import java.util.HashMap;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Stream;

 import org.junit.jupiter.api.Assertions;
@@ -35,11 +36,11 @@ public class OsfPreprintCollectorTest {
 	private final String resultTotalXpath = "/*/*[local-name()='links']/*[local-name()='meta']/*[local-name()='total']";

 	private final String resumptionParam = "page";
-	private final String resumptionType = "page";
-	private final String resumptionXpath = "/*/*[local-name()='links']/*[local-name()='next']";
+	private final String resumptionType = "scan";
+	private final String resumptionXpath = "substring-before(substring-after(/*/*[local-name()='links']/*[local-name()='next'], 'page='), '&')";

-	private final String resultSizeParam = "";
-	private final String resultSizeValue = "";
+	private final String resultSizeParam = "page[size]";
+	private final String resultSizeValue = "100";

 	private final String resultFormatParam = "format";
 	private final String resultFormatValue = "json";

@@ -69,11 +70,11 @@ public class OsfPreprintCollectorTest {

 	@Test
 	@Disabled
-	void test() throws CollectorException {
+	void test_limited() throws CollectorException {
 		final AtomicInteger i = new AtomicInteger(0);
 		final Stream<String> stream = this.rcp.collect(this.api, new AggregatorReport());

-		stream.limit(200).forEach(s -> {
+		stream.limit(2000).forEach(s -> {
 			Assertions.assertTrue(s.length() > 0);
 			i.incrementAndGet();
 			log.info(s);
@@ -82,4 +83,23 @@ public class OsfPreprintCollectorTest {
 		log.info("{}", i.intValue());
 		Assertions.assertTrue(i.intValue() > 0);
 	}
+
+	@Test
+	@Disabled
+	void test_all() throws CollectorException {
+		final AtomicLong i = new AtomicLong(0);
+		final Stream<String> stream = this.rcp.collect(this.api, new AggregatorReport());
+
+		stream.forEach(s -> {
+			Assertions.assertTrue(s.length() > 0);
+			if ((i.incrementAndGet() % 1000) == 0) {
+				log.info("COLLECTED: {}", i.get());
+			}
+
+		});
+
+		log.info("TOTAL: {}", i.get());
+		Assertions.assertTrue(i.get() > 0);
+	}
+
 }
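Switching resumptionType from "page" to "scan" pairs with the new XPath, which no longer returns the next link itself but extracts the page number embedded in it. A small dom4j illustration of what that expression evaluates to (the sample next URL is a guessed OSF response shape, not taken from this diff; the class name is illustrative):

import org.dom4j.Document;
import org.dom4j.DocumentHelper;

public class ResumptionXpathSketch {

	public static void main(String[] args) throws Exception {
		// hypothetical XML-ified OSF response carrying a links/next URL
		Document doc = DocumentHelper
			.parseText("<response><links><next>https://api.osf.io/v2/preprints/?page=3&amp;format=json</next></links></response>");
		String nextPage = doc
			.valueOf("substring-before(substring-after(/*/*[local-name()='links']/*[local-name()='next'], 'page='), '&')");
		System.out.println(nextPage); // "3"
	}
}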