Compare commits: oaf_countr ... master (272 commits)

Author | SHA1 | Date |
---|---|---|
Michele De Bonis | 70b6e58914 | |
Michele Artini | 4752d60421 | |
Claudio Atzori | 795e1b2629 | |
Claudio Atzori | 0c05abe50b | |
Claudio Atzori | 8fdd0244ad | |
Claudio Atzori | 18fdaaf548 | |
Claudio Atzori | 43e123c624 | |
Claudio Atzori | 62a07b7add | |
Claudio Atzori | 96bddcc921 | |
Miriam Baglioni | 0486cea4c4 | |
Claudio Atzori | 013935c593 | |
Lampros Smyrnaios | d7da4f814b | |
Lampros Smyrnaios | 14719dcd62 | |
Lampros Smyrnaios | 22745027c8 | |
Lampros Smyrnaios | abf0b69f29 | |
Claudio Atzori | 6132bd028e | |
Miriam Baglioni | 519db1ddef | |
Claudio Atzori | 5add51f38c | |
Lampros Smyrnaios | b7c8acc563 | |
Antonis Lempesis | df6e3bda04 | |
Antonis Lempesis | 573b081f1d | |
Antonis Lempesis | 0bf2a7a359 | |
Claudio Atzori | f01390702e | |
Antonis Lempesis | 9ff44eed96 | |
Claudio Atzori | 5592ccc37a | |
Antonis Lempesis | 1fee4124e0 | |
Claudio Atzori | d16c15da8d | |
Lampros Smyrnaios | 036ba03fcd | |
Claudio Atzori | 09a6d17059 | |
Claudio Atzori | d70793847d | |
Lampros Smyrnaios | bc8c97182d | |
Lampros Smyrnaios | 92cc27e7eb | |
Michele De Bonis | f6601ea7d1 | |
Michele De Bonis | cd4c3c934d | |
Antonis Lempesis | 4c40c96e30 | |
Antonis Lempesis | 459167ac2f | |
Antonis Lempesis | 07f634a46d | |
Antonis Lempesis | 9521625a07 | |
Antonis Lempesis | 67a5aa0a38 | |
dimitrispie | a3a570e9a0 | |
Michele Artini | 7a934affd8 | |
Michele Artini | a99942f7cf | |
Michele Artini | 7f7083f53e | |
Michele Artini | d9b23a76c5 | |
Michele Artini | 841ca92246 | |
Michele Artini | 3bcfc40293 | |
Antonis Lempesis | f74c7e8689 | |
Antonis Lempesis | 3c79720342 | |
Antonis Lempesis | 5ae4b4286c | |
Antonis Lempesis | 316d585c8a | |
Giambattista Bloisi | 3067ea390d | |
Miriam Baglioni | c94d94035c | |
Michele Artini | 4374d7449e | |
Claudio Atzori | 07d009007b | |
Claudio Atzori | 071d044971 | |
Claudio Atzori | b3ddbaed58 | |
Claudio Atzori | 1416f16b35 | |
Giambattista Bloisi | ba1a0e7b4f | |
Giambattista Bloisi | 079085286c | |
Giambattista Bloisi | 8dd666aedd | |
Claudio Atzori | f21133229a | |
Claudio Atzori | d86b909db2 | |
Claudio Atzori | 08162902ab | |
Antonis Lempesis | dd4c27f4f3 | |
Claudio Atzori | e8630a6d03 | |
Claudio Atzori | f28c63d5ef | |
Antonis Lempesis | a512ead447 | |
Claudio Atzori | 1a8b609ed2 | |
Antonis Lempesis | bb10a22290 | |
Miriam Baglioni | 4c8706efee | |
Claudio Atzori | 4d0c59669b | |
Sandro La Bruzzo | 3c8c88bdd3 | |
Antonis Lempesis | c548796463 | |
Antonis Lempesis | fd43b0e84a | |
Antonis Lempesis | e024718f73 | |
dimitrispie | b920307bdd | |
dimitrispie | 8b2cbb611e | |
Antonis Lempesis | 2e4cab026c | |
dimitrispie | 6b823100ae | |
dimitrispie | 75bfde043c | |
dimitrispie | ffdd03d2f4 | |
dimitrispie | 40b98d8182 | |
Claudio Atzori | 106968adaa | |
Claudio Atzori | a8a4db96f0 | |
Sandro La Bruzzo | 37e36baf76 | |
Sandro La Bruzzo | 9d39845d1f | |
Sandro La Bruzzo | 1fbd4325f5 | |
Sandro La Bruzzo | 1f1a6a5f5f | |
Claudio Atzori | c4ec35b6cd | |
Claudio Atzori | 1726f49790 | |
Claudio Atzori | 1763d377ad | |
Claudio Atzori | a0311e8a90 | |
Claudio Atzori | 8fb05888fd | |
Claudio Atzori | 2b626815ff | |
Miriam Baglioni | b177cd5a0a | |
Serafeim Chatzopoulos | 671ba8a5a7 | |
Claudio Atzori | 5f1ed61c1f | |
Claudio Atzori | 8c03c41d5d | |
Claudio Atzori | 97454e9594 | |
Serafeim Chatzopoulos | 7e34dde774 | |
Serafeim Chatzopoulos | 24c3f92d87 | |
Serafeim Chatzopoulos | 6ce9b600c1 | |
Serafeim Chatzopoulos | 94089878fd | |
Miriam Baglioni | 0097f4e64b | |
Miriam Baglioni | 5c5a195e97 | |
Miriam Baglioni | 70b78a40c7 | |
Miriam Baglioni | f206ff42d6 | |
Miriam Baglioni | 34358afe75 | |
Miriam Baglioni | 18bfff8af3 | |
Miriam Baglioni | 69dac91659 | |
Miriam Baglioni | a9ede1e989 | |
Claudio Atzori | 242d647146 | |
Claudio Atzori | af3ffad6c4 | |
Claudio Atzori | ba5475ed4c | |
Giambattista Bloisi | 2c235e82ad | |
Claudio Atzori | 4ac06c9e37 | |
Claudio Atzori | fa692b3629 | |
Claudio Atzori | ef02648399 | |
Claudio Atzori | d13bb534f0 | |
Giambattista Bloisi | 775c3f704a | |
Sandro La Bruzzo | 9c3ab11d5b | |
Sandro La Bruzzo | 423ef30676 | |
Giambattista Bloisi | 7152d47f84 | |
Claudio Atzori | 4853c19b5e | |
Giambattista Bloisi | 1f226d1dce | |
Alessia Bardi | 6186cdc2cc | |
Alessia Bardi | d94b9bebf7 | |
Alessia Bardi | 19abba8fa7 | |
Claudio Atzori | c2f179800c | |
Serafeim Chatzopoulos | 2aed5a74be | |
Claudio Atzori | 4dc4862011 | |
Claudio Atzori | dc80ab14d3 | |
Alessia Bardi | 77a2199837 | |
Claudio Atzori | 265180bfd2 | |
Claudio Atzori | da0e9828f7 | |
Miriam Baglioni | 599828ce35 | |
Claudio Atzori | 0bc74e2000 | |
Claudio Atzori | 7180911ded | |
Claudio Atzori | da1727f93f | |
Claudio Atzori | ccac6a7f75 | |
Claudio Atzori | d512df8612 | |
Claudio Atzori | 59764145bb | |
Miriam Baglioni | 9e8e39f78a | |
Claudio Atzori | 373a5f2c83 | |
Claudio Atzori | 8af129b0c7 | |
dimitrispie | 706092bc19 | |
dimitrispie | aedd279f78 | |
Miriam Baglioni | 8dcd028eed | |
Miriam Baglioni | 8621377917 | |
Miriam Baglioni | ef2dd7a980 | |
Claudio Atzori | f3a85e224b | |
Claudio Atzori | 4ef0f2ec26 | |
Claudio Atzori | 288ec0b7d6 | |
Claudio Atzori | 5f32edd9bf | |
Claudio Atzori | e10ce92fe5 | |
Claudio Atzori | b93e1541aa | |
Claudio Atzori | d029bf0b94 | |
Michele Artini | 009d7f312f | |
Miriam Baglioni | e4b27182d0 | |
Giambattista Bloisi | 758e662ab8 | |
Giambattista Bloisi | 485f9d18cb | |
Michele Artini | a92206dab5 | |
Miriam Baglioni | d9506035e4 | |
Alessia Bardi | 118e72d7db | |
Alessia Bardi | 5befd93d7d | |
Michele Artini | cae92cf811 | |
Miriam Baglioni | b64a5eb4a5 | |
Claudio Atzori | 654ffcba60 | |
Claudio Atzori | db625e548d | |
Alessia Bardi | 04141fe259 | |
Alessia Bardi | b88f009d9f | |
Alessia Bardi | 5ffe82ffd8 | |
Alessia Bardi | 1c173642f0 | |
Alessia Bardi | 382f46a8e4 | |
Miriam Baglioni | 9fc8ebe98b | |
Miriam Baglioni | 24c41806ac | |
Miriam Baglioni | 087b5a7973 | |
Claudio Atzori | 688e3b7936 | |
Claudio Atzori | 2e465915b4 | |
Claudio Atzori | 4a4ca634f0 | |
Miriam Baglioni | c6a7602b3e | |
Miriam Baglioni | 831055a1fc | |
Miriam Baglioni | cf3d0f4f83 | |
Claudio Atzori | 4f67225fbc | |
Claudio Atzori | e093f04874 | |
Miriam Baglioni | c5a9f39141 | |
Miriam Baglioni | ecc05fe0f3 | |
Claudio Atzori | 42442ccd39 | |
Miriam Baglioni | 9a9cc6a1dd | |
Michele Artini | 200098b683 | |
Michele Artini | 9c1df15071 | |
Miriam Baglioni | 32870339f5 | |
Miriam Baglioni | 7184cc0804 | |
Miriam Baglioni | 7473093c84 | |
Miriam Baglioni | 5f0906be60 | |
Claudio Atzori | 1b37516578 | |
Claudio Atzori | c1e2460293 | |
Claudio Atzori | 3800361033 | |
Michele Artini | 699736addc | |
Claudio Atzori | f86e19b282 | |
Michele Artini | d40e20f437 | |
Michele Artini | 4953ae5649 | |
Miriam Baglioni | c60d3a2b46 | |
Claudio Atzori | 7becdaf31d | |
Miriam Baglioni | b713132db7 | |
Miriam Baglioni | 11f2b470d3 | |
Sandro La Bruzzo | 91c70b15a5 | |
Claudio Atzori | f910b7379d | |
Claudio Atzori | 33bdad104e | |
Claudio Atzori | 5816ded93f | |
Claudio Atzori | 46972f8393 | |
Claudio Atzori | da85ca697d | |
Miriam Baglioni | 059e100ec7 | |
Miriam Baglioni | fc95a550c3 | |
Miriam Baglioni | 6901ac91b1 | |
Claudio Atzori | 08c4588d47 | |
Miriam Baglioni | 29d3da85f1 | |
Miriam Baglioni | 33a2b1b5dc | |
Miriam Baglioni | c6df8327b3 | |
Miriam Baglioni | 935aa367d8 | |
Miriam Baglioni | 43aedbdfe5 | |
Miriam Baglioni | b6da9b67ff | |
Claudio Atzori | a34c8b6f81 | |
Miriam Baglioni | 122e75aa17 | |
Miriam Baglioni | cee7a45b1d | |
Claudio Atzori | ed64618235 | |
Claudio Atzori | 8742934843 | |
Claudio Atzori | 13cc592f39 | |
Claudio Atzori | af15b1e48d | |
Claudio Atzori | eb45ba7af0 | |
Claudio Atzori | a929dc5fee | |
Miriam Baglioni | 5f9383b2d9 | |
Miriam Baglioni | b18bbca8af | |
dimitrispie | 55fa3b2a17 | |
Claudio Atzori | 80c5e0f637 | |
Claudio Atzori | c01d528ab2 | |
Claudio Atzori | e6d788d27a | |
Claudio Atzori | 930f118673 | |
Claudio Atzori | b2c3071e72 | |
Claudio Atzori | 10ec074f79 | |
Claudio Atzori | 7225fe9cbe | |
Miriam Baglioni | 869e129288 | |
Miriam Baglioni | 840465958b | |
Claudio Atzori | bdc8f993d0 | |
Miriam Baglioni | ec87149cb3 | |
Miriam Baglioni | b42e2c9df6 | |
Miriam Baglioni | 1329aa8479 | |
Miriam Baglioni | a0ee1a8640 | |
Claudio Atzori | 96062164f9 | |
Claudio Atzori | 35bb7c423f | |
Claudio Atzori | fd87571506 | |
Claudio Atzori | c527112e33 | |
Claudio Atzori | 65209359bc | |
Claudio Atzori | d72a64ded3 | |
Claudio Atzori | 3e8499ce47 | |
Claudio Atzori | 61aacb3271 | |
Claudio Atzori | dbb567251a | |
Claudio Atzori | c7e8ad853e | |
Claudio Atzori | 0849ebfd80 | |
Claudio Atzori | 281239249e | |
Claudio Atzori | 45fc5e12be | |
Claudio Atzori | 1c05aaaa2e | |
Claudio Atzori | 01d5ad6361 | |
Claudio Atzori | d872d1cdd9 | |
Claudio Atzori | ab0efecab4 | |
Claudio Atzori | 725c3c68d0 | |
Claudio Atzori | 300ae6221c | |
Claudio Atzori | 0ec2eaba35 | |
Claudio Atzori | a387807d43 | |
Claudio Atzori | 2abe2bc137 | |
Claudio Atzori | a07c876922 | |
Claudio Atzori | cbd48bc645 | |
Deleted file: eu/dnetlib/dhp/common/api/InputStreamRequestBody.java
@@ -1,53 +0,0 @@

package eu.dnetlib.dhp.common.api;

import java.io.IOException;
import java.io.InputStream;

import okhttp3.MediaType;
import okhttp3.RequestBody;
import okhttp3.internal.Util;
import okio.BufferedSink;
import okio.Okio;
import okio.Source;

public class InputStreamRequestBody extends RequestBody {

    private final InputStream inputStream;
    private final MediaType mediaType;
    private final long lenght;

    public static RequestBody create(final MediaType mediaType, final InputStream inputStream, final long len) {

        return new InputStreamRequestBody(inputStream, mediaType, len);
    }

    private InputStreamRequestBody(InputStream inputStream, MediaType mediaType, long len) {
        this.inputStream = inputStream;
        this.mediaType = mediaType;
        this.lenght = len;
    }

    @Override
    public MediaType contentType() {
        return mediaType;
    }

    @Override
    public long contentLength() {

        return lenght;

    }

    @Override
    public void writeTo(BufferedSink sink) throws IOException {
        Source source = null;
        try {
            source = Okio.source(inputStream);
            sink.writeAll(source);
        } finally {
            Util.closeQuietly(source);
        }
    }
}
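For context, a minimal usage sketch of the removed request body (not part of the diff; the local file and upload URL are placeholders): it streams a file into an OkHttp PUT call without buffering the whole payload in memory, which is the reason the class overrides `contentLength()` and `writeTo()`.

```java
// Hypothetical driver, assuming InputStreamRequestBody (above) is on the classpath.
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;

public class UploadSketch {
    public static void main(String[] args) throws Exception {
        Path archive = Paths.get("dump.zip"); // placeholder local file
        try (InputStream is = Files.newInputStream(archive)) {
            Request request = new Request.Builder()
                .url("https://example.org/upload") // placeholder URL
                .put(InputStreamRequestBody.create(
                    MediaType.parse("application/zip"), is, Files.size(archive)))
                .build();
            try (Response response = new OkHttpClient().newCall(request).execute()) {
                System.out.println(response.code());
            }
        }
    }
}
```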
Deleted file: eu/dnetlib/dhp/common/api/MissingConceptDoiException.java
@@ -1,8 +0,0 @@

package eu.dnetlib.dhp.common.api;

public class MissingConceptDoiException extends Throwable {
    public MissingConceptDoiException(String message) {
        super(message);
    }
}
Deleted file: eu/dnetlib/dhp/common/api/ZenodoAPIClient.java
@@ -1,365 +0,0 @@

package eu.dnetlib.dhp.common.api;

import java.io.*;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.TimeUnit;

import org.apache.http.HttpHeaders;
import org.apache.http.entity.ContentType;
import org.jetbrains.annotations.NotNull;

import com.google.gson.Gson;

import eu.dnetlib.dhp.common.api.zenodo.ZenodoModel;
import eu.dnetlib.dhp.common.api.zenodo.ZenodoModelList;
import okhttp3.*;

public class ZenodoAPIClient implements Serializable {

    String urlString;
    String bucket;

    String deposition_id;
    String access_token;

    public static final MediaType MEDIA_TYPE_JSON = MediaType.parse("application/json; charset=utf-8");

    private static final MediaType MEDIA_TYPE_ZIP = MediaType.parse("application/zip");

    public String getUrlString() {
        return urlString;
    }

    public void setUrlString(String urlString) {
        this.urlString = urlString;
    }

    public String getBucket() {
        return bucket;
    }

    public void setBucket(String bucket) {
        this.bucket = bucket;
    }

    public void setDeposition_id(String deposition_id) {
        this.deposition_id = deposition_id;
    }

    public ZenodoAPIClient(String urlString, String access_token) {

        this.urlString = urlString;
        this.access_token = access_token;
    }

    /**
     * Brand new deposition in Zenodo. It sets the deposition_id and the bucket where to store the files to upload
     *
     * @return response code
     * @throws IOException
     */
    public int newDeposition() throws IOException {
        String json = "{}";

        URL url = new URL(urlString);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.toString());
        conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Bearer " + access_token);
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        try (OutputStream os = conn.getOutputStream()) {
            byte[] input = json.getBytes("utf-8");
            os.write(input, 0, input.length);
        }

        String body = getBody(conn);

        int responseCode = conn.getResponseCode();
        conn.disconnect();

        if (!checkOKStatus(responseCode))
            throw new IOException("Unexpected code " + responseCode + body);

        ZenodoModel newSubmission = new Gson().fromJson(body, ZenodoModel.class);
        this.bucket = newSubmission.getLinks().getBucket();
        this.deposition_id = newSubmission.getId();

        return responseCode;
    }

    /**
     * Upload files in Zenodo.
     *
     * @param is the inputStream for the file to upload
     * @param file_name the name of the file as it will appear on Zenodo
     * @return the response code
     */
    public int uploadIS(InputStream is, String file_name) throws IOException {

        URL url = new URL(bucket + "/" + file_name);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(HttpHeaders.CONTENT_TYPE, "application/zip");
        conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Bearer " + access_token);
        conn.setDoOutput(true);
        conn.setRequestMethod("PUT");

        byte[] buf = new byte[8192];
        int length;
        try (OutputStream os = conn.getOutputStream()) {
            while ((length = is.read(buf)) != -1) {
                os.write(buf, 0, length);
            }

        }
        int responseCode = conn.getResponseCode();
        if (!checkOKStatus(responseCode)) {
            throw new IOException("Unexpected code " + responseCode + getBody(conn));
        }

        return responseCode;
    }

    @NotNull
    private String getBody(HttpURLConnection conn) throws IOException {
        String body = "{}";
        try (BufferedReader br = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "utf-8"))) {
            StringBuilder response = new StringBuilder();
            String responseLine = null;
            while ((responseLine = br.readLine()) != null) {
                response.append(responseLine.trim());
            }

            body = response.toString();

        }
        return body;
    }

    /**
     * Associates metadata information to the current deposition
     *
     * @param metadata the metadata
     * @return response code
     * @throws IOException
     */
    public int sendMretadata(String metadata) throws IOException {

        URL url = new URL(urlString + "/" + deposition_id);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.toString());
        conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Bearer " + access_token);
        conn.setDoOutput(true);
        conn.setRequestMethod("PUT");

        try (OutputStream os = conn.getOutputStream()) {
            byte[] input = metadata.getBytes("utf-8");
            os.write(input, 0, input.length);

        }

        final int responseCode = conn.getResponseCode();
        conn.disconnect();
        if (!checkOKStatus(responseCode))
            throw new IOException("Unexpected code " + responseCode + getBody(conn));

        return responseCode;

    }

    private boolean checkOKStatus(int responseCode) {

        if (HttpURLConnection.HTTP_OK != responseCode ||
            HttpURLConnection.HTTP_CREATED != responseCode)
            return true;
        return false;
    }

    /**
     * To publish the current deposition. It works for both new deposition or new version of an old deposition
     *
     * @return response code
     * @throws IOException
     */
    @Deprecated
    public int publish() throws IOException {

        String json = "{}";

        OkHttpClient httpClient = new OkHttpClient.Builder().connectTimeout(600, TimeUnit.SECONDS).build();

        RequestBody body = RequestBody.create(json, MEDIA_TYPE_JSON);

        Request request = new Request.Builder()
            .url(urlString + "/" + deposition_id + "/actions/publish")
            .addHeader("Authorization", "Bearer " + access_token)
            .post(body)
            .build();

        try (Response response = httpClient.newCall(request).execute()) {

            if (!response.isSuccessful())
                throw new IOException("Unexpected code " + response + response.body().string());

            return response.code();

        }
    }

    /**
     * To create a new version of an already published deposition. It sets the deposition_id and the bucket to be used
     * for the new version.
     *
     * @param concept_rec_id the concept record id of the deposition for which to create a new version. It is the last
     * part of the url for the DOI Zenodo suggests to use to cite all versions: DOI: 10.xxx/zenodo.656930
     * concept_rec_id = 656930
     * @return response code
     * @throws IOException
     * @throws MissingConceptDoiException
     */
    public int newVersion(String concept_rec_id) throws IOException, MissingConceptDoiException {
        setDepositionId(concept_rec_id, 1);
        String json = "{}";

        URL url = new URL(urlString + "/" + deposition_id + "/actions/newversion");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();

        conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Bearer " + access_token);
        conn.setDoOutput(true);
        conn.setRequestMethod("POST");

        try (OutputStream os = conn.getOutputStream()) {
            byte[] input = json.getBytes("utf-8");
            os.write(input, 0, input.length);

        }

        String body = getBody(conn);

        int responseCode = conn.getResponseCode();

        conn.disconnect();
        if (!checkOKStatus(responseCode))
            throw new IOException("Unexpected code " + responseCode + body);

        ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);
        String latest_draft = zenodoModel.getLinks().getLatest_draft();
        deposition_id = latest_draft.substring(latest_draft.lastIndexOf("/") + 1);
        bucket = getBucket(latest_draft);

        return responseCode;

    }

    /**
     * To finish uploading a version or new deposition not published
     * It sets the deposition_id and the bucket to be used
     *
     *
     * @param deposition_id the deposition id of the not yet published upload
     * concept_rec_id = 656930
     * @return response code
     * @throws IOException
     * @throws MissingConceptDoiException
     */
    public int uploadOpenDeposition(String deposition_id) throws IOException, MissingConceptDoiException {

        this.deposition_id = deposition_id;

        String json = "{}";

        URL url = new URL(urlString + "/" + deposition_id);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();

        conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Bearer " + access_token);
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        try (OutputStream os = conn.getOutputStream()) {
            byte[] input = json.getBytes("utf-8");
            os.write(input, 0, input.length);
        }

        String body = getBody(conn);

        int responseCode = conn.getResponseCode();
        conn.disconnect();

        if (!checkOKStatus(responseCode))
            throw new IOException("Unexpected code " + responseCode + body);

        ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);
        bucket = zenodoModel.getLinks().getBucket();

        return responseCode;

    }

    private void setDepositionId(String concept_rec_id, Integer page) throws IOException, MissingConceptDoiException {

        ZenodoModelList zenodoModelList = new Gson()
            .fromJson(getPrevDepositions(String.valueOf(page)), ZenodoModelList.class);

        for (ZenodoModel zm : zenodoModelList) {
            if (zm.getConceptrecid().equals(concept_rec_id)) {
                deposition_id = zm.getId();
                return;
            }
        }
        if (zenodoModelList.size() == 0)
            throw new MissingConceptDoiException(
                "The concept record id specified was missing in the list of depositions");
        setDepositionId(concept_rec_id, page + 1);

    }

    private String getPrevDepositions(String page) throws IOException {

        HttpUrl.Builder urlBuilder = HttpUrl.parse(urlString).newBuilder();
        urlBuilder.addQueryParameter("page", page);

        URL url = new URL(urlBuilder.build().toString());
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.toString());
        conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Bearer " + access_token);
        conn.setDoOutput(true);
        conn.setRequestMethod("GET");

        String body = getBody(conn);

        int responseCode = conn.getResponseCode();

        conn.disconnect();
        if (!checkOKStatus(responseCode))
            throw new IOException("Unexpected code " + responseCode + body);

        return body;

    }

    private String getBucket(String inputUurl) throws IOException {

        URL url = new URL(inputUurl);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.toString());
        conn.setRequestProperty(HttpHeaders.AUTHORIZATION, "Bearer " + access_token);
        conn.setDoOutput(true);
        conn.setRequestMethod("GET");

        String body = getBody(conn);

        int responseCode = conn.getResponseCode();

        conn.disconnect();
        if (!checkOKStatus(responseCode))
            throw new IOException("Unexpected code " + responseCode + body);

        ZenodoModel zenodoModel = new Gson().fromJson(body, ZenodoModel.class);

        return zenodoModel.getLinks().getBucket();

    }

}
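A hypothetical end-to-end driver for the removed client (not part of the diff): the endpoint URL and token are placeholders, and the method names, including the misspelled `sendMretadata`, are taken verbatim from the class above. The intended call order is create the deposition, stream the files into the returned bucket, attach metadata, then publish.

```java
// Sketch only; assumes ZenodoAPIClient (above) is on the classpath.
import java.io.FileInputStream;
import java.io.InputStream;

public class ZenodoUploadSketch {
    public static void main(String[] args) throws Exception {
        ZenodoAPIClient client = new ZenodoAPIClient(
            "https://sandbox.zenodo.org/api/deposit/depositions", // assumed endpoint
            System.getenv("ZENODO_TOKEN")); // assumed env variable

        client.newDeposition(); // sets deposition_id and the upload bucket

        try (InputStream is = new FileInputStream("dump.zip")) { // placeholder file
            client.uploadIS(is, "dump.zip");
        }

        // method name kept verbatim from the removed class
        client.sendMretadata("{\"metadata\": {\"title\": \"Graph dump\"}}");

        client.publish(); // deprecated in the removed code
    }
}
```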
Deleted file: eu/dnetlib/dhp/common/api/zenodo/Community.java
@@ -1,14 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

public class Community {
    private String identifier;

    public String getIdentifier() {
        return identifier;
    }

    public void setIdentifier(String identifier) {
        this.identifier = identifier;
    }
}
Deleted file: eu/dnetlib/dhp/common/api/zenodo/Creator.java
@@ -1,47 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

public class Creator {
    private String affiliation;
    private String name;
    private String orcid;

    public String getAffiliation() {
        return affiliation;
    }

    public void setAffiliation(String affiliation) {
        this.affiliation = affiliation;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getOrcid() {
        return orcid;
    }

    public void setOrcid(String orcid) {
        this.orcid = orcid;
    }

    public static Creator newInstance(String name, String affiliation, String orcid) {
        Creator c = new Creator();
        if (name != null) {
            c.name = name;
        }
        if (affiliation != null) {
            c.affiliation = affiliation;
        }
        if (orcid != null) {
            c.orcid = orcid;
        }

        return c;
    }
}
Deleted file: eu/dnetlib/dhp/common/api/zenodo/File.java
@@ -1,44 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

import java.io.Serializable;

public class File implements Serializable {
    private String checksum;
    private String filename;
    private long filesize;
    private String id;

    public String getChecksum() {
        return checksum;
    }

    public void setChecksum(String checksum) {
        this.checksum = checksum;
    }

    public String getFilename() {
        return filename;
    }

    public void setFilename(String filename) {
        this.filename = filename;
    }

    public long getFilesize() {
        return filesize;
    }

    public void setFilesize(long filesize) {
        this.filesize = filesize;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

}
Deleted file: eu/dnetlib/dhp/common/api/zenodo/Grant.java
@@ -1,23 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

import java.io.Serializable;

public class Grant implements Serializable {
    private String id;

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public static Grant newInstance(String id) {
        Grant g = new Grant();
        g.id = id;

        return g;
    }
}
Deleted file: eu/dnetlib/dhp/common/api/zenodo/Links.java
@@ -1,92 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

import java.io.Serializable;

public class Links implements Serializable {

    private String bucket;

    private String discard;

    private String edit;
    private String files;
    private String html;
    private String latest_draft;
    private String latest_draft_html;
    private String publish;

    private String self;

    public String getBucket() {
        return bucket;
    }

    public void setBucket(String bucket) {
        this.bucket = bucket;
    }

    public String getDiscard() {
        return discard;
    }

    public void setDiscard(String discard) {
        this.discard = discard;
    }

    public String getEdit() {
        return edit;
    }

    public void setEdit(String edit) {
        this.edit = edit;
    }

    public String getFiles() {
        return files;
    }

    public void setFiles(String files) {
        this.files = files;
    }

    public String getHtml() {
        return html;
    }

    public void setHtml(String html) {
        this.html = html;
    }

    public String getLatest_draft() {
        return latest_draft;
    }

    public void setLatest_draft(String latest_draft) {
        this.latest_draft = latest_draft;
    }

    public String getLatest_draft_html() {
        return latest_draft_html;
    }

    public void setLatest_draft_html(String latest_draft_html) {
        this.latest_draft_html = latest_draft_html;
    }

    public String getPublish() {
        return publish;
    }

    public void setPublish(String publish) {
        this.publish = publish;
    }

    public String getSelf() {
        return self;
    }

    public void setSelf(String self) {
        this.self = self;
    }
}
Deleted file: eu/dnetlib/dhp/common/api/zenodo/Metadata.java
@@ -1,153 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

import java.io.Serializable;
import java.util.List;

public class Metadata implements Serializable {

    private String access_right;
    private List<Community> communities;
    private List<Creator> creators;
    private String description;
    private String doi;
    private List<Grant> grants;
    private List<String> keywords;
    private String language;
    private String license;
    private PrereserveDoi prereserve_doi;
    private String publication_date;
    private List<String> references;
    private List<RelatedIdentifier> related_identifiers;
    private String title;
    private String upload_type;
    private String version;

    public String getUpload_type() {
        return upload_type;
    }

    public void setUpload_type(String upload_type) {
        this.upload_type = upload_type;
    }

    public String getVersion() {
        return version;
    }

    public void setVersion(String version) {
        this.version = version;
    }

    public String getAccess_right() {
        return access_right;
    }

    public void setAccess_right(String access_right) {
        this.access_right = access_right;
    }

    public List<Community> getCommunities() {
        return communities;
    }

    public void setCommunities(List<Community> communities) {
        this.communities = communities;
    }

    public List<Creator> getCreators() {
        return creators;
    }

    public void setCreators(List<Creator> creators) {
        this.creators = creators;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public String getDoi() {
        return doi;
    }

    public void setDoi(String doi) {
        this.doi = doi;
    }

    public List<Grant> getGrants() {
        return grants;
    }

    public void setGrants(List<Grant> grants) {
        this.grants = grants;
    }

    public List<String> getKeywords() {
        return keywords;
    }

    public void setKeywords(List<String> keywords) {
        this.keywords = keywords;
    }

    public String getLanguage() {
        return language;
    }

    public void setLanguage(String language) {
        this.language = language;
    }

    public String getLicense() {
        return license;
    }

    public void setLicense(String license) {
        this.license = license;
    }

    public PrereserveDoi getPrereserve_doi() {
        return prereserve_doi;
    }

    public void setPrereserve_doi(PrereserveDoi prereserve_doi) {
        this.prereserve_doi = prereserve_doi;
    }

    public String getPublication_date() {
        return publication_date;
    }

    public void setPublication_date(String publication_date) {
        this.publication_date = publication_date;
    }

    public List<String> getReferences() {
        return references;
    }

    public void setReferences(List<String> references) {
        this.references = references;
    }

    public List<RelatedIdentifier> getRelated_identifiers() {
        return related_identifiers;
    }

    public void setRelated_identifiers(List<RelatedIdentifier> related_identifiers) {
        this.related_identifiers = related_identifiers;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }
}
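The removed `zenodo.*` beans are plain Gson DTOs, so their field names map one-to-one onto the Zenodo deposit JSON the client sends. A small serialization sketch under that assumption (names and values are illustrative, not from the diff):

```java
// Sketch only; assumes the Metadata and Creator classes above are on the classpath.
import java.util.Arrays;

import com.google.gson.Gson;

public class MetadataSketch {
    public static void main(String[] args) {
        Metadata md = new Metadata();
        md.setTitle("OpenAIRE graph dump");      // illustrative values
        md.setUpload_type("dataset");
        md.setAccess_right("open");
        md.setCreators(Arrays.asList(Creator.newInstance("Doe, Jane", "Example Org", null)));
        md.setKeywords(Arrays.asList("open science", "scholarly communication"));

        // Gson skips null fields by default, so only the populated
        // properties appear in the resulting JSON payload, e.g.
        // {"access_right":"open","creators":[{"affiliation":"Example Org",
        //  "name":"Doe, Jane"}],"keywords":[...],"title":"...","upload_type":"dataset"}
        System.out.println(new Gson().toJson(md));
    }
}
```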
Deleted file: eu/dnetlib/dhp/common/api/zenodo/PrereserveDoi.java
@@ -1,25 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

import java.io.Serializable;

public class PrereserveDoi implements Serializable {
    private String doi;
    private String recid;

    public String getDoi() {
        return doi;
    }

    public void setDoi(String doi) {
        this.doi = doi;
    }

    public String getRecid() {
        return recid;
    }

    public void setRecid(String recid) {
        this.recid = recid;
    }
}
Deleted file: eu/dnetlib/dhp/common/api/zenodo/RelatedIdentifier.java
@@ -1,43 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

import java.io.Serializable;

public class RelatedIdentifier implements Serializable {
    private String identifier;
    private String relation;
    private String resource_type;
    private String scheme;

    public String getIdentifier() {
        return identifier;
    }

    public void setIdentifier(String identifier) {
        this.identifier = identifier;
    }

    public String getRelation() {
        return relation;
    }

    public void setRelation(String relation) {
        this.relation = relation;
    }

    public String getResource_type() {
        return resource_type;
    }

    public void setResource_type(String resource_type) {
        this.resource_type = resource_type;
    }

    public String getScheme() {
        return scheme;
    }

    public void setScheme(String scheme) {
        this.scheme = scheme;
    }
}
Deleted file: eu/dnetlib/dhp/common/api/zenodo/ZenodoModel.java
@@ -1,118 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

import java.io.Serializable;
import java.util.List;

public class ZenodoModel implements Serializable {

    private String conceptrecid;
    private String created;

    private List<File> files;
    private String id;
    private Links links;
    private Metadata metadata;
    private String modified;
    private String owner;
    private String record_id;
    private String state;
    private boolean submitted;
    private String title;

    public String getConceptrecid() {
        return conceptrecid;
    }

    public void setConceptrecid(String conceptrecid) {
        this.conceptrecid = conceptrecid;
    }

    public String getCreated() {
        return created;
    }

    public void setCreated(String created) {
        this.created = created;
    }

    public List<File> getFiles() {
        return files;
    }

    public void setFiles(List<File> files) {
        this.files = files;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public Links getLinks() {
        return links;
    }

    public void setLinks(Links links) {
        this.links = links;
    }

    public Metadata getMetadata() {
        return metadata;
    }

    public void setMetadata(Metadata metadata) {
        this.metadata = metadata;
    }

    public String getModified() {
        return modified;
    }

    public void setModified(String modified) {
        this.modified = modified;
    }

    public String getOwner() {
        return owner;
    }

    public void setOwner(String owner) {
        this.owner = owner;
    }

    public String getRecord_id() {
        return record_id;
    }

    public void setRecord_id(String record_id) {
        this.record_id = record_id;
    }

    public String getState() {
        return state;
    }

    public void setState(String state) {
        this.state = state;
    }

    public boolean isSubmitted() {
        return submitted;
    }

    public void setSubmitted(boolean submitted) {
        this.submitted = submitted;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }
}
Deleted file: eu/dnetlib/dhp/common/api/zenodo/ZenodoModelList.java
@@ -1,7 +0,0 @@

package eu.dnetlib.dhp.common.api.zenodo;

import java.util.ArrayList;

public class ZenodoModelList extends ArrayList<ZenodoModel> {
}
Modified file: AuthorMerger.java
@@ -145,6 +145,105 @@ public class AuthorMerger {
 		return null;
 	}
 
+	/**
+	 * This method tries to figure out when two author are the same in the contest
+	 * of ORCID enrichment
+	 *
+	 * @param left Author in the OAF entity
+	 * @param right Author ORCID
+	 * @return based on a heuristic on the names of the authors if they are the same.
+	 */
+	public static boolean checkORCIDSimilarity(final Author left, final Author right) {
+		final Person pl = parse(left);
+		final Person pr = parse(right);
+
+		// If one of them didn't have a surname we verify if they have the fullName not empty
+		// and verify if the normalized version is equal
+		if (!(pl.getSurname() != null && pl.getSurname().stream().anyMatch(StringUtils::isNotBlank) &&
+			pr.getSurname() != null && pr.getSurname().stream().anyMatch(StringUtils::isNotBlank))) {
+
+			if (pl.getFullname() != null && !pl.getFullname().isEmpty() && pr.getFullname() != null
+				&& !pr.getFullname().isEmpty()) {
+				return pl
+					.getFullname()
+					.stream()
+					.anyMatch(
+						fl -> pr.getFullname().stream().anyMatch(fr -> normalize(fl).equalsIgnoreCase(normalize(fr))));
+			} else {
+				return false;
+			}
+		}
+		// The Authors have one surname in common
+		if (pl.getSurname().stream().anyMatch(sl -> pr.getSurname().stream().anyMatch(sr -> sr.equalsIgnoreCase(sl)))) {
+
+			// If one of them has only a surname and is the same we can say that they are the same author
+			if ((pl.getName() == null || pl.getName().stream().allMatch(StringUtils::isBlank)) ||
+				(pr.getName() == null || pr.getName().stream().allMatch(StringUtils::isBlank)))
+				return true;
+			// The authors have the same initials of Name in common
+			if (pl
+				.getName()
+				.stream()
+				.anyMatch(
+					nl -> pr
+						.getName()
+						.stream()
+						.anyMatch(nr -> nr.equalsIgnoreCase(nl))))
+				return true;
+		}
+
+		// Sometimes we noticed that publication have author wrote in inverse order Surname, Name
+		// We verify if we have an exact match between name and surname
+		if (pl.getSurname().stream().anyMatch(sl -> pr.getName().stream().anyMatch(nr -> nr.equalsIgnoreCase(sl))) &&
+			pl.getName().stream().anyMatch(nl -> pr.getSurname().stream().anyMatch(sr -> sr.equalsIgnoreCase(nl))))
+			return true;
+		else
+			return false;
+	}
+	//
+
+	/**
+	 * Method to enrich ORCID information in one list of authors based on another list
+	 *
+	 * @param baseAuthor the Author List in the OAF Entity
+	 * @param orcidAuthor The list of ORCID Author intersected
+	 * @return The Author List of the OAF Entity enriched with the orcid Author
+	 */
+	public static List<Author> enrichOrcid(List<Author> baseAuthor, List<Author> orcidAuthor) {
+
+		if (baseAuthor == null || baseAuthor.isEmpty())
+			return orcidAuthor;
+
+		if (orcidAuthor == null || orcidAuthor.isEmpty())
+			return baseAuthor;
+
+		if (baseAuthor.size() == 1 && orcidAuthor.size() > 10)
+			return baseAuthor;
+
+		final List<Author> oAuthor = new ArrayList<>();
+		oAuthor.addAll(orcidAuthor);
+
+		baseAuthor.forEach(ba -> {
+			Optional<Author> aMatch = oAuthor.stream().filter(oa -> checkORCIDSimilarity(ba, oa)).findFirst();
+			if (aMatch.isPresent()) {
+				final Author sameAuthor = aMatch.get();
+				addPid(ba, sameAuthor.getPid());
+				oAuthor.remove(sameAuthor);
+			}
+		});
+		return baseAuthor;
+	}
+
+	private static void addPid(final Author a, final List<StructuredProperty> pids) {
+
+		if (a.getPid() == null) {
+			a.setPid(new ArrayList<>());
+		}
+
+		a.getPid().addAll(pids);
+
+	}
+
 	public static String pidToComparableString(StructuredProperty pid) {
 		final String classid = pid.getQualifier().getClassid() != null ? pid.getQualifier().getClassid().toLowerCase()
 			: "";
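A toy driver for the added `enrichOrcid` entry point (not part of the diff). Whether a given pair of names matches depends on how the existing `AuthorMerger.parse` splits them into name and surname, so the names below are purely illustrative; the ORCID-side author is given a pid list because a successful match copies `getPid()` into the base author.

```java
// Sketch only; assumes eu.dnetlib.dhp.schema.oaf.Author/StructuredProperty
// and the AuthorMerger methods above are on the classpath.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import eu.dnetlib.dhp.schema.oaf.Author;
import eu.dnetlib.dhp.schema.oaf.StructuredProperty;

public class EnrichSketch {
    static Author author(String fullname) {
        Author a = new Author();
        a.setFullname(fullname);
        return a;
    }

    static Author orcidAuthor(String fullname, String orcid) {
        Author a = author(fullname);
        StructuredProperty pid = new StructuredProperty();
        pid.setValue(orcid); // qualifier omitted for brevity
        a.setPid(new ArrayList<>(Arrays.asList(pid)));
        return a;
    }

    public static void main(String[] args) {
        List<Author> base = Arrays.asList(author("Atzori, Claudio"), author("Baglioni, Miriam"));
        List<Author> orcid = Arrays.asList(orcidAuthor("Claudio Atzori", "0000-0000-0000-0000"));

        // matched base authors receive the ORCID author's pids; others keep a null pid list
        AuthorMerger.enrichOrcid(base, orcid)
            .forEach(a -> System.out.println(a.getFullname() + " -> " + a.getPid()));
    }
}
```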
@ -1,6 +1,24 @@
|
||||||
|
|
||||||
package eu.dnetlib.dhp.oa.merge;
|
package eu.dnetlib.dhp.oa.merge;
|
||||||
|
|
||||||
|
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
|
||||||
|
import static org.apache.spark.sql.functions.col;
|
||||||
|
import static org.apache.spark.sql.functions.when;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Optional;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.ForkJoinPool;
|
||||||
|
import java.util.stream.Collectors;
|
||||||
|
|
||||||
|
import org.apache.commons.io.IOUtils;
|
||||||
|
import org.apache.spark.SparkConf;
|
||||||
|
import org.apache.spark.api.java.function.MapFunction;
|
||||||
|
import org.apache.spark.api.java.function.ReduceFunction;
|
||||||
|
import org.apache.spark.sql.*;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
||||||
import eu.dnetlib.dhp.common.HdfsSupport;
|
import eu.dnetlib.dhp.common.HdfsSupport;
|
||||||
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup;
|
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup;
|
||||||
|
@ -8,186 +26,169 @@ import eu.dnetlib.dhp.schema.common.EntityType;
|
||||||
import eu.dnetlib.dhp.schema.common.ModelSupport;
|
import eu.dnetlib.dhp.schema.common.ModelSupport;
|
||||||
import eu.dnetlib.dhp.schema.oaf.OafEntity;
|
import eu.dnetlib.dhp.schema.oaf.OafEntity;
|
||||||
import eu.dnetlib.dhp.schema.oaf.utils.GraphCleaningFunctions;
|
import eu.dnetlib.dhp.schema.oaf.utils.GraphCleaningFunctions;
|
||||||
import eu.dnetlib.dhp.schema.oaf.utils.MergeUtils;
|
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils;
|
||||||
import eu.dnetlib.dhp.utils.ISLookupClientFactory;
|
import eu.dnetlib.dhp.utils.ISLookupClientFactory;
|
||||||
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpException;
|
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpException;
|
||||||
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpService;
|
import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpService;
|
||||||
import org.apache.commons.io.IOUtils;
|
|
||||||
import org.apache.spark.SparkConf;
|
|
||||||
import org.apache.spark.api.java.function.MapFunction;
|
|
||||||
import org.apache.spark.api.java.function.ReduceFunction;
|
|
||||||
import org.apache.spark.sql.*;
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
import scala.Tuple2;
|
import scala.Tuple2;
|
||||||
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.Optional;
|
|
||||||
import java.util.concurrent.ExecutionException;
|
|
||||||
import java.util.concurrent.ForkJoinPool;
|
|
||||||
import java.util.stream.Collectors;
|
|
||||||
|
|
||||||
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
|
|
||||||
import static org.apache.spark.sql.functions.col;
|
|
||||||
import static org.apache.spark.sql.functions.when;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Groups the graph content by entity identifier to ensure ID uniqueness
|
* Groups the graph content by entity identifier to ensure ID uniqueness
|
||||||
*/
|
*/
|
||||||
public class GroupEntitiesSparkJob {
|
public class GroupEntitiesSparkJob {
|
||||||
private static final Logger log = LoggerFactory.getLogger(GroupEntitiesSparkJob.class);
|
private static final Logger log = LoggerFactory.getLogger(GroupEntitiesSparkJob.class);
|
||||||
|
|
||||||
private static final Encoder<OafEntity> OAFENTITY_KRYO_ENC = Encoders.kryo(OafEntity.class);
|
private static final Encoder<OafEntity> OAFENTITY_KRYO_ENC = Encoders.kryo(OafEntity.class);
|
||||||
|
|
||||||
private ArgumentApplicationParser parser;
|
private ArgumentApplicationParser parser;
|
||||||
|
|
||||||
public GroupEntitiesSparkJob(ArgumentApplicationParser parser) {
|
public GroupEntitiesSparkJob(ArgumentApplicationParser parser) {
|
||||||
this.parser = parser;
|
this.parser = parser;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static void main(String[] args) throws Exception {
|
public static void main(String[] args) throws Exception {
|
||||||
|
|
||||||
String jsonConfiguration = IOUtils
|
String jsonConfiguration = IOUtils
|
||||||
.toString(
|
.toString(
|
||||||
GroupEntitiesSparkJob.class
|
GroupEntitiesSparkJob.class
|
||||||
.getResourceAsStream(
|
.getResourceAsStream(
|
||||||
"/eu/dnetlib/dhp/oa/merge/group_graph_entities_parameters.json"));
|
"/eu/dnetlib/dhp/oa/merge/group_graph_entities_parameters.json"));
|
||||||
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
|
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
|
||||||
parser.parseArgument(args);
|
parser.parseArgument(args);
|
||||||
|
|
||||||
Boolean isSparkSessionManaged = Optional
|
Boolean isSparkSessionManaged = Optional
|
||||||
.ofNullable(parser.get("isSparkSessionManaged"))
|
.ofNullable(parser.get("isSparkSessionManaged"))
|
||||||
.map(Boolean::valueOf)
|
.map(Boolean::valueOf)
|
||||||
.orElse(Boolean.TRUE);
|
.orElse(Boolean.TRUE);
|
||||||
log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
|
log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
|
||||||
|
|
||||||
final String isLookupUrl = parser.get("isLookupUrl");
|
final String isLookupUrl = parser.get("isLookupUrl");
|
||||||
log.info("isLookupUrl: {}", isLookupUrl);
|
log.info("isLookupUrl: {}", isLookupUrl);
|
||||||
|
|
||||||
final ISLookUpService isLookupService = ISLookupClientFactory.getLookUpService(isLookupUrl);
|
final ISLookUpService isLookupService = ISLookupClientFactory.getLookUpService(isLookupUrl);
|
||||||
|
|
||||||
new GroupEntitiesSparkJob(parser).run(isSparkSessionManaged, isLookupService);
|
new GroupEntitiesSparkJob(parser).run(isSparkSessionManaged, isLookupService);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void run(Boolean isSparkSessionManaged, ISLookUpService isLookUpService)
|
public void run(Boolean isSparkSessionManaged, ISLookUpService isLookUpService)
|
||||||
throws ISLookUpException {
|
throws ISLookUpException {
|
||||||
|
|
||||||
String graphInputPath = parser.get("graphInputPath");
|
String graphInputPath = parser.get("graphInputPath");
|
||||||
log.info("graphInputPath: {}", graphInputPath);
|
log.info("graphInputPath: {}", graphInputPath);
|
||||||
|
|
||||||
String checkpointPath = parser.get("checkpointPath");
|
String checkpointPath = parser.get("checkpointPath");
|
||||||
log.info("checkpointPath: {}", checkpointPath);
|
log.info("checkpointPath: {}", checkpointPath);
|
||||||
|
|
||||||
String outputPath = parser.get("outputPath");
|
String outputPath = parser.get("outputPath");
|
||||||
log.info("outputPath: {}", outputPath);
|
log.info("outputPath: {}", outputPath);
|
||||||
|
|
||||||
boolean filterInvisible = Boolean.parseBoolean(parser.get("filterInvisible"));
|
boolean filterInvisible = Boolean.parseBoolean(parser.get("filterInvisible"));
|
||||||
log.info("filterInvisible: {}", filterInvisible);
|
log.info("filterInvisible: {}", filterInvisible);
|
||||||
|
|
||||||
SparkConf conf = new SparkConf();
|
SparkConf conf = new SparkConf();
|
||||||
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
|
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
|
||||||
conf.registerKryoClasses(ModelSupport.getOafModelClasses());
|
conf.registerKryoClasses(ModelSupport.getOafModelClasses());
|
||||||
|
|
||||||
final VocabularyGroup vocs = VocabularyGroup.loadVocsFromIS(isLookUpService);
|
final VocabularyGroup vocs = VocabularyGroup.loadVocsFromIS(isLookUpService);
|
||||||
|
|
||||||
runWithSparkSession(
|
runWithSparkSession(
|
||||||
conf,
|
conf,
|
||||||
isSparkSessionManaged,
|
isSparkSessionManaged,
|
||||||
spark -> {
|
spark -> {
|
||||||
 			HdfsSupport.remove(checkpointPath, spark.sparkContext().hadoopConfiguration());
 			groupEntities(spark, graphInputPath, checkpointPath, outputPath, filterInvisible, vocs);
 		});
 	}

 	private static void groupEntities(
 		SparkSession spark,
 		String inputPath,
 		String checkpointPath,
 		String outputPath,
 		boolean filterInvisible, VocabularyGroup vocs) {

 		Dataset<OafEntity> allEntities = spark.emptyDataset(OAFENTITY_KRYO_ENC);

 		for (Map.Entry<EntityType, Class> e : ModelSupport.entityTypes.entrySet()) {
 			String entity = e.getKey().name();
 			Class<? extends OafEntity> entityClass = e.getValue();
 			String entityInputPath = inputPath + "/" + entity;

 			if (!HdfsSupport.exists(entityInputPath, spark.sparkContext().hadoopConfiguration())) {
 				continue;
 			}

 			allEntities = allEntities
 				.union(
 					((Dataset<OafEntity>) spark
 						.read()
 						.schema(Encoders.bean(entityClass).schema())
 						.json(entityInputPath)
 						.filter("length(id) > 0")
 						.as(Encoders.bean(entityClass)))
 							.map((MapFunction<OafEntity, OafEntity>) r -> r, OAFENTITY_KRYO_ENC));
 		}

 		Dataset<?> groupedEntities = allEntities
 			.map(
 				(MapFunction<OafEntity, OafEntity>) entity -> GraphCleaningFunctions
 					.applyCoarVocabularies(entity, vocs),
 				OAFENTITY_KRYO_ENC)
 			.groupByKey((MapFunction<OafEntity, String>) OafEntity::getId, Encoders.STRING())
-			.reduceGroups((ReduceFunction<OafEntity>) MergeUtils::checkedMerge)
+			.reduceGroups((ReduceFunction<OafEntity>) OafMapperUtils::mergeEntities)
 			.map(
 				(MapFunction<Tuple2<String, OafEntity>, Tuple2<String, OafEntity>>) t -> new Tuple2<>(
 					t._2().getClass().getName(), t._2()),
 				Encoders.tuple(Encoders.STRING(), OAFENTITY_KRYO_ENC));

 		// pivot on "_1" (classname of the entity)
 		// created columns containing only entities of the same class
 		for (Map.Entry<EntityType, Class> e : ModelSupport.entityTypes.entrySet()) {
 			String entity = e.getKey().name();
 			Class<? extends OafEntity> entityClass = e.getValue();

 			groupedEntities = groupedEntities
 				.withColumn(
 					entity,
 					when(col("_1").equalTo(entityClass.getName()), col("_2")));
 		}

 		groupedEntities
 			.drop("_1", "_2")
 			.write()
 			.mode(SaveMode.Overwrite)
 			.option("compression", "gzip")
 			.save(checkpointPath);

 		ForkJoinPool parPool = new ForkJoinPool(ModelSupport.entityTypes.size());

 		ModelSupport.entityTypes
 			.entrySet()
 			.stream()
 			.map(e -> parPool.submit(() -> {
 				String entity = e.getKey().name();
 				Class<? extends OafEntity> entityClass = e.getValue();

 				spark
 					.read()
 					.load(checkpointPath)
 					.select(col(entity).as("value"))
 					.filter("value IS NOT NULL")
 					.as(OAFENTITY_KRYO_ENC)
 					.map((MapFunction<OafEntity, OafEntity>) r -> r, (Encoder<OafEntity>) Encoders.bean(entityClass))
 					.filter(filterInvisible ? "dataInfo.invisible != TRUE" : "TRUE")
 					.write()
 					.mode(SaveMode.Overwrite)
 					.option("compression", "gzip")
 					.json(outputPath + "/" + entity);
 			}))
 			.collect(Collectors.toList())
 			.forEach(t -> {
 				try {
 					t.get();
 				} catch (InterruptedException | ExecutionException e) {
 					throw new RuntimeException(e);
 				}
 			});
 	}
 }
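Editor's note on the pivot loop above: it materialises one column per entity type and leaves NULL wherever a row's class name does not match, so after dropping "_1" and "_2" each row is non-null in exactly one column. The following is a minimal, self-contained sketch of that same when(col("_1").equalTo(...), col("_2")) idiom on toy string data; it is not part of the diff, and the class names, sample payloads, and local[*] master are assumptions made only for illustration.

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.when;

import java.util.Arrays;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class PivotByClassNameSketch {
	public static void main(String[] args) {
		SparkSession spark = SparkSession.builder().master("local[*]").appName("pivot-sketch").getOrCreate();

		// toy rows standing in for Tuple2<classname, serialized entity>
		Dataset<Row> grouped = spark
			.createDataset(
				Arrays.asList(
					new scala.Tuple2<>("Publication", "pub-1"),
					new scala.Tuple2<>("Dataset", "ds-1")),
				Encoders.tuple(Encoders.STRING(), Encoders.STRING()))
			.toDF("_1", "_2");

		// one column per entity type; rows of a different class get NULL, as in groupEntities
		for (String entity : Arrays.asList("Publication", "Dataset")) {
			grouped = grouped.withColumn(entity, when(col("_1").equalTo(entity), col("_2")));
		}

		grouped.drop("_1", "_2").show(); // each row is non-null in exactly one column
		spark.stop();
	}
}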
@@ -506,8 +506,6 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 				.filter(Objects::nonNull)
 				.filter(sp -> StringUtils.isNotBlank(sp.getValue()))
 				.map(GraphCleaningFunctions::cleanValue)
-				.sorted((s1, s2) -> s2.getValue().length() - s1.getValue().length())
-				.limit(ModelHardLimits.MAX_ABSTRACTS)
 				.collect(Collectors.toList()));
 		}
 		if (Objects.isNull(r.getResourcetype()) || StringUtils.isBlank(r.getResourcetype().getClassid())) {
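The two deleted lines implemented a keep-the-longest-N policy over abstracts. For reference, a standalone sketch of that stream idiom, with a made-up MAX_ABSTRACTS value standing in for the ModelHardLimits constant:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class AbstractLimitSketch {
	public static void main(String[] args) {
		final int MAX_ABSTRACTS = 2; // assumed value, for the sketch only
		List<String> abstracts = Arrays.asList("short", "a much longer abstract", "medium length");
		List<String> kept = abstracts
			.stream()
			.sorted((s1, s2) -> s2.length() - s1.length()) // longest first, as in the deleted lines
			.limit(MAX_ABSTRACTS)
			.collect(Collectors.toList());
		System.out.println(kept); // [a much longer abstract, medium length]
	}
}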
@@ -1,79 +0,0 @@
-package eu.dnetlib.dhp.schema.oaf.utils;
-
-//
-// Source code recreated from a .class file by IntelliJ IDEA
-// (powered by FernFlower decompiler)
-//
-
-
-import eu.dnetlib.dhp.schema.common.EntityType;
-import eu.dnetlib.dhp.schema.oaf.KeyValue;
-import eu.dnetlib.dhp.schema.oaf.Oaf;
-import eu.dnetlib.dhp.schema.oaf.OafEntity;
-import eu.dnetlib.dhp.schema.oaf.Result;
-
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.Optional;
-import java.util.stream.Collectors;
-
-public class MergeComparator implements Comparator<Oaf> {
-	public MergeComparator() {
-	}
-
-	public int compare(Oaf left, Oaf right) {
-		// nulls at the end
-		if (left == null && right == null) {
-			return 0;
-		} else if (left == null) {
-			return -1;
-		} else if (right == null) {
-			return 1;
-		}
-
-		// invisible
-		if (left.getDataInfo() != null && left.getDataInfo().getInvisible() == true) {
-			if (right.getDataInfo() != null && right.getDataInfo().getInvisible() == false) {
-				return -1;
-			}
-		}
-
-		// collectedfrom
-		HashSet<String> lCf = getCollectedFromIds(left);
-		HashSet<String> rCf = getCollectedFromIds(right);
-		if (lCf.contains("10|openaire____::081b82f96300b6a6e3d282bad31cb6e2") && !rCf.contains("10|openaire____::081b82f96300b6a6e3d282bad31cb6e2")) {
-			return -1;
-		} else if (!lCf.contains("10|openaire____::081b82f96300b6a6e3d282bad31cb6e2") && rCf.contains("10|openaire____::081b82f96300b6a6e3d282bad31cb6e2")) {
-			return 1;
-		}
-
-		SubEntityType lClass = SubEntityType.fromClass(left.getClass());
-		SubEntityType rClass = SubEntityType.fromClass(right.getClass());
-		return lClass.ordinal() - rClass.ordinal();
-	}
-
-	protected HashSet<String> getCollectedFromIds(Oaf left) {
-		return (HashSet) Optional.ofNullable(left.getCollectedfrom()).map((cf) -> {
-			return (HashSet) cf.stream().map(KeyValue::getKey).collect(Collectors.toCollection(HashSet::new));
-		}).orElse(new HashSet());
-	}
-
-	enum SubEntityType {
-		publication, dataset, software, otherresearchproduct, datasource, organization, project;
-
-		/**
-		 * Resolves the EntityType, given the relative class name
-		 *
-		 * @param clazz the given class name
-		 * @param <T>   actual OafEntity subclass
-		 * @return the EntityType associated to the given class
-		 */
-		public static <T extends Oaf> SubEntityType fromClass(Class<T> clazz) {
-			return valueOf(clazz.getSimpleName().toLowerCase());
-		}
-	}
-}
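The comparator deleted above ranks records first by visibility and provenance, then by the ordinal of an entity-type enum, so lower ordinals (e.g. publication) win. A self-contained sketch of that final ordering rule, with simplified stand-in types rather than the dhp model:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class TypePrioritySketch {
	enum SubEntityType {
		publication, dataset, software, otherresearchproduct, datasource, organization, project
	}

	public static void main(String[] args) {
		List<SubEntityType> records = Arrays
			.asList(SubEntityType.software, SubEntityType.publication, SubEntityType.dataset);
		// same rule as lClass.ordinal() - rClass.ordinal() in the deleted class
		records.sort(Comparator.comparingInt(SubEntityType::ordinal));
		System.out.println(records); // [publication, dataset, software]
	}
}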
@@ -1,707 +0,0 @@
-
-package eu.dnetlib.dhp.schema.oaf.utils;
-
-import eu.dnetlib.dhp.schema.common.AccessRightComparator;
-import eu.dnetlib.dhp.schema.common.ModelConstants;
-import eu.dnetlib.dhp.schema.common.ModelSupport;
-import eu.dnetlib.dhp.schema.oaf.*;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-
-import java.text.ParseException;
-import java.util.*;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static com.google.common.base.Objects.firstNonNull;
-import static com.google.common.base.Preconditions.checkArgument;
-
-public class MergeUtils {
-
-	public static <T extends Oaf> T checkedMerge(final T left, final T right) {
-		return (T) merge(left, right, false);
-	}
-
-	public static Oaf merge(final Oaf left, final Oaf right) {
-		return merge(left, right, false);
-	}
-
-	public static Oaf merge(final Oaf left, final Oaf right, boolean checkDelegatedAuthority) {
-		if (sameClass(left, right, OafEntity.class)) {
-			return mergeEntities(left, right, checkDelegatedAuthority);
-		} else if (sameClass(left, right, Relation.class)) {
-			return mergeRelation((Relation) left, (Relation) right);
-		} else {
-			throw new RuntimeException(
-				String
-					.format(
-						"MERGE_FROM_AND_GET incompatible types: %s, %s",
-						left.getClass().getCanonicalName(), right.getClass().getCanonicalName()));
-		}
-	}
-
-	private static <T extends Oaf> boolean sameClass(Object left, Object right, Class<T> cls) {
-		return cls.isAssignableFrom(left.getClass()) && cls.isAssignableFrom(right.getClass());
-	}
-
-	private static Oaf mergeEntities(Oaf left, Oaf right, boolean checkDelegatedAuthority) {
-
-		if (sameClass(left, right, Result.class)) {
-			if (!left.getClass().equals(right.getClass()) || checkDelegatedAuthority) {
-				return mergeResultsOfDifferentTypes((Result) left, (Result) right);
-			}
-
-			if (sameClass(left, right, Publication.class)) {
-				return mergePublication((Publication) left, (Publication) right);
-			}
-			if (sameClass(left, right, Dataset.class)) {
-				return mergeDataset((Dataset) left, (Dataset) right);
-			}
-			if (sameClass(left, right, OtherResearchProduct.class)) {
-				return mergeORP((OtherResearchProduct) left, (OtherResearchProduct) right);
-			}
-			if (sameClass(left, right, Software.class)) {
-				return mergeSoftware((Software) left, (Software) right);
-			}
-
-			return mergeResult((Result) left, (Result) right);
-		} else if (sameClass(left, right, Datasource.class)) {
-			// TODO
-			final int trust = compareTrust(left, right);
-			return mergeOafEntityFields((Datasource) left, (Datasource) right, trust);
-		} else if (sameClass(left, right, Organization.class)) {
-			return mergeOrganization((Organization) left, (Organization) right);
-		} else if (sameClass(left, right, Project.class)) {
-			return mergeProject((Project) left, (Project) right);
-		} else {
-			throw new RuntimeException(
-				String
-					.format(
-						"MERGE_FROM_AND_GET incompatible types: %s, %s",
-						left.getClass().getCanonicalName(), right.getClass().getCanonicalName()));
-		}
-	}
-
-	/**
-	 * This method is used in the global result grouping phase. It checks if one of the two is from a delegated authority
-	 * https://graph.openaire.eu/docs/data-model/pids-and-identifiers#delegated-authorities and in that case it prefers
-	 * such version.
-	 * <p>
-	 * Otherwise, it considers a resulttype priority order implemented in {@link ResultTypeComparator}
-	 * and proceeds with the canonical property merging.
-	 *
-	 * @param left
-	 * @param right
-	 * @return
-	 */
-	private static <T extends Result> T mergeResultsOfDifferentTypes(T left, T right) {
-
-		final boolean leftFromDelegatedAuthority = isFromDelegatedAuthority(left);
-		final boolean rightFromDelegatedAuthority = isFromDelegatedAuthority(right);
-
-		if (leftFromDelegatedAuthority && !rightFromDelegatedAuthority) {
-			return left;
-		}
-		if (!leftFromDelegatedAuthority && rightFromDelegatedAuthority) {
-			return right;
-		}
-		// TODO: raise trust to have preferred fields from one or the other??
-		if (new ResultTypeComparator().compare(left, right) < 0) {
-			return mergeResult(left, right);
-		} else {
-			return mergeResult(right, left);
-		}
-	}
-
-	private static DataInfo chooseDataInfo(DataInfo left, DataInfo right, int trust) {
-		if (trust > 0) {
-			return left;
-		} else if (trust == 0) {
-			if (left == null || (left.getInvisible() != null && left.getInvisible().equals(Boolean.TRUE))) {
-				return right;
-			} else {
-				return left;
-			}
-		} else {
-			return right;
-		}
-	}
-
-	private static String chooseString(String left, String right, int trust) {
-		if (trust > 0) {
-			return left;
-		} else if (trust == 0) {
-			return StringUtils.isNotBlank(left) ? left : right;
-		} else {
-			return right;
-		}
-	}
-
-	private static <T> T chooseReference(T left, T right, int trust) {
-		if (trust > 0) {
-			return left;
-		} else if (trust == 0) {
-			return left != null ? left : right;
-		} else {
-			return right;
-		}
-	}
-
-	private static Long max(Long left, Long right) {
-		if (left == null)
-			return right;
-		if (right == null)
-			return left;
-
-		return Math.max(left, right);
-	}
-
-	// trust ??
-	private static Boolean booleanOR(Boolean a, Boolean b) {
-		if (a == null) {
-			return b;
-		} else if (b == null) {
-			return a;
-		}
-
-		return a || b;
-	}
-
-	private static <T> List<T> unionDistinctLists(final List<T> left, final List<T> right, int trust) {
-		if (left == null) {
-			return right;
-		} else if (right == null) {
-			return left;
-		}
-
-		List<T> h = trust >= 0 ? left : right;
-		List<T> l = trust >= 0 ? right : left;
-
-		return Stream.concat(h.stream(), l.stream())
-			.filter(Objects::nonNull)
-			.distinct()
-			.collect(Collectors.toList());
-	}
-
-	private static List<String> unionDistinctListOfString(final List<String> l, final List<String> r) {
-		if (l == null) {
-			return r;
-		} else if (r == null) {
-			return l;
-		}
-
-		return Stream.concat(l.stream(), r.stream())
-			.filter(StringUtils::isNotBlank)
-			.distinct()
-			.collect(Collectors.toList());
-	}
-
-	// TODO review
-	private static List<KeyValue> mergeKeyValue(List<KeyValue> left, List<KeyValue> right, int trust) {
-		if (trust < 0) {
-			List<KeyValue> s = left;
-			left = right;
-			right = s;
-		}
-
-		HashMap<String, KeyValue> values = new HashMap<>();
-		left.forEach(kv -> values.put(kv.getKey(), kv));
-		right.forEach(kv -> values.putIfAbsent(kv.getKey(), kv));
-
-		return new ArrayList<>(values.values());
-	}
-
-	private static List<StructuredProperty> unionTitle(List<StructuredProperty> left, List<StructuredProperty> right, int trust) {
-		if (left == null) {
-			return right;
-		} else if (right == null) {
-			return left;
-		}
-
-		List<StructuredProperty> h = trust >= 0 ? left : right;
-		List<StructuredProperty> l = trust >= 0 ? right : left;
-
-		return Stream.concat(h.stream(), l.stream())
-			.filter(Objects::isNull)
-			.distinct()
-			.collect(Collectors.toList());
-	}
-
-	/**
-	 * Internal utility that merges the common OafEntity fields
-	 *
-	 * @param merged
-	 * @param enrich
-	 * @param <T>
-	 * @return
-	 */
-	private static <T extends Oaf> T mergeOafFields(T merged, T enrich, int trust) {
-
-		// TODO: union of all values, but what does it mean with KeyValue pairs???
-		merged.setCollectedfrom(mergeKeyValue(merged.getCollectedfrom(), enrich.getCollectedfrom(), trust));
-		merged.setDataInfo(chooseDataInfo(merged.getDataInfo(), enrich.getDataInfo(), trust));
-		merged.setLastupdatetimestamp(max(merged.getLastupdatetimestamp(), enrich.getLastupdatetimestamp()));
-
-		return merged;
-	}
-
-	/**
-	 * Internal utility that merges the common OafEntity fields
-	 *
-	 * @param original
-	 * @param enrich
-	 * @param <T>
-	 * @return
-	 */
-	private static <T extends OafEntity> T mergeOafEntityFields(T original, T enrich, int trust) {
-		final T merged = mergeOafFields(original, enrich, trust);
-
-		merged.setOriginalId(unionDistinctListOfString(merged.getOriginalId(), enrich.getOriginalId()));
-		merged.setPid(unionDistinctLists(merged.getPid(), enrich.getPid(), trust));
-		// dateofcollection: set it to today when merging
-		merged.setDateofcollection(chooseString(merged.getDateofcollection(), enrich.getDateofcollection(), trust));
-		// setDateoftransformation: leave it empty in dedup, note for Claudio
-		merged.setDateoftransformation(chooseString(merged.getDateoftransformation(), enrich.getDateoftransformation(), trust));
-		// TODO: was missing in OafEntity.merge
-		merged.setExtraInfo(unionDistinctLists(merged.getExtraInfo(), enrich.getExtraInfo(), trust));
-		// oaiprovenance: to be set to null when the merge is generated
-		merged.setOaiprovenance(chooseReference(merged.getOaiprovenance(), enrich.getOaiprovenance(), trust));
-		merged.setMeasures(unionDistinctLists(merged.getMeasures(), enrich.getMeasures(), trust));
-
-		return merged;
-	}
-
-	public static <T extends Relation> T mergeRelation(T original, T enrich) {
-		int trust = compareTrust(original, enrich);
-		T merge = mergeOafFields(original, enrich, trust);
-
-		checkArgument(Objects.equals(merge.getSource(), enrich.getSource()), "source ids must be equal");
-		checkArgument(Objects.equals(merge.getTarget(), enrich.getTarget()), "target ids must be equal");
-		checkArgument(Objects.equals(merge.getRelType(), enrich.getRelType()), "relType(s) must be equal");
-		checkArgument(
-			Objects.equals(merge.getSubRelType(), enrich.getSubRelType()), "subRelType(s) must be equal");
-		checkArgument(Objects.equals(merge.getRelClass(), enrich.getRelClass()), "relClass(es) must be equal");
-
-		// merge.setProvenance(mergeLists(merge.getProvenance(), enrich.getProvenance()));
-
-		// TODO: trust ??
-		merge.setValidated(booleanOR(merge.getValidated(), enrich.getValidated()));
-		try {
-			merge.setValidationDate(ModelSupport.oldest(merge.getValidationDate(), enrich.getValidationDate()));
-		} catch (ParseException e) {
-			throw new IllegalArgumentException(String
-				.format(
-					"invalid validation date format in relation [s:%s, t:%s]: %s", merge.getSource(),
-					merge.getTarget(),
-					merge.getValidationDate()));
-		}
-
-		// TODO keyvalue merge
-		merge.setProperties(mergeKeyValue(merge.getProperties(), enrich.getProperties(), trust));
-
-		return merge;
-	}
-
-	public static <T extends Result> T mergeResult(T original, T enrich) {
-		final int trust = compareTrust(original, enrich);
-		T merge = mergeOafEntityFields(original, enrich, trust);
-
-		if (merge.getProcessingchargeamount() == null || StringUtils.isBlank(merge.getProcessingchargeamount().getValue())) {
-			merge.setProcessingchargeamount(enrich.getProcessingchargeamount());
-			merge.setProcessingchargecurrency(enrich.getProcessingchargecurrency());
-		}
-
-		// author = use the same logic as in dedup
-		merge.setAuthor(chooseReference(merge.getAuthor(), enrich.getAuthor(), trust));
-		// the first that arrives according to the priority ordering
-		merge.setResulttype(chooseReference(merge.getResulttype(), enrich.getResulttype(), trust));
-		// handled like resulttype because it is a subtype
-		merge.setMetaResourceType(chooseReference(merge.getMetaResourceType(), enrich.getMetaResourceType(), trust));
-		// to be moved into the instance; here we take the first that arrives
-		merge.setLanguage(chooseReference(merge.getLanguage(), enrich.getLanguage(), trust));
-		// country: leave it as is -> side note on the datainfo
-		merge.setCountry(unionDistinctLists(merge.getCountry(), enrich.getCountry(), trust));
-		// ok
-		merge.setSubject(unionDistinctLists(merge.getSubject(), enrich.getSubject(), trust));
-		// union by priority, so they are appended
-		merge.setTitle(unionTitle(merge.getTitle(), enrich.getTitle(), trust));
-		// ok
-		merge.setRelevantdate(unionDistinctLists(merge.getRelevantdate(), enrich.getRelevantdate(), trust));
-		// trust first, then longest list
-		merge.setDescription(longestLists(merge.getDescription(), enrich.getDescription()));
-		// higher trust first, then the older one
-		merge.setDateofacceptance(chooseReference(merge.getDateofacceptance(), enrich.getDateofacceptance(), trust));
-		// ok, but publisher should be made repeatable
-		merge.setPublisher(chooseReference(merge.getPublisher(), enrich.getPublisher(), trust));
-		// ok
-		merge.setEmbargoenddate(chooseReference(merge.getEmbargoenddate(), enrich.getEmbargoenddate(), trust));
-		// ok
-		merge.setSource(unionDistinctLists(merge.getSource(), enrich.getSource(), trust));
-		// ok
-		merge.setFulltext(unionDistinctLists(merge.getFulltext(), enrich.getFulltext(), trust));
-		// ok
-		merge.setFormat(unionDistinctLists(merge.getFormat(), enrich.getFormat(), trust));
-		// ok
-		merge.setContributor(unionDistinctLists(merge.getContributor(), enrich.getContributor(), trust));
-
-		// first take the higher trust, on that take the best value from the instances TODO
-		// higher trust, but for equal trust the more specific one (base of the vocabulary)
-		// see notes
-		merge.setResourcetype(firstNonNull(merge.getResourcetype(), enrich.getResourcetype()));
-
-		// ok
-		merge.setCoverage(unionDistinctLists(merge.getCoverage(), enrich.getCoverage(), trust));
-
-		// most open ok
-		if (enrich.getBestaccessright() != null
-			&& new AccessRightComparator<>()
-				.compare(enrich.getBestaccessright(), merge.getBestaccessright()) < 0) {
-			merge.setBestaccessright(enrich.getBestaccessright());
-		}
-
-		// TODO merge of datainfo given same id
-		merge.setContext(unionDistinctLists(merge.getContext(), enrich.getContext(), trust));
-
-		// ok
-		merge.setExternalReference(unionDistinctLists(merge.getExternalReference(), enrich.getExternalReference(), trust));
-
-		// instance enrichment or union
-		// review instance equals => add pid to comparison
-		if (!isAnEnrichment(merge) && !isAnEnrichment(enrich))
-			merge.setInstance(unionDistinctLists(merge.getInstance(), enrich.getInstance(), trust));
-		else {
-			final List<Instance> enrichmentInstances = isAnEnrichment(merge) ? merge.getInstance()
-				: enrich.getInstance();
-			final List<Instance> enrichedInstances = isAnEnrichment(merge) ? enrich.getInstance()
-				: merge.getInstance();
-			if (isAnEnrichment(merge))
-				merge.setDataInfo(enrich.getDataInfo());
-			merge.setInstance(enrichInstances(enrichedInstances, enrichmentInstances));
-		}
-
-		merge.setEoscifguidelines(unionDistinctLists(merge.getEoscifguidelines(), enrich.getEoscifguidelines(), trust));
-		merge.setIsGreen(booleanOR(merge.getIsGreen(), enrich.getIsGreen()));
-		// OK but should be list of values
-		merge.setOpenAccessColor(chooseReference(merge.getOpenAccessColor(), enrich.getOpenAccessColor(), trust));
-		merge.setIsInDiamondJournal(booleanOR(merge.getIsInDiamondJournal(), enrich.getIsInDiamondJournal()));
-		merge.setPubliclyFunded(booleanOR(merge.getPubliclyFunded(), enrich.getPubliclyFunded()));
-
-		return merge;
-	}
-
-	private static <T extends OtherResearchProduct> T mergeORP(T original, T enrich) {
-		int trust = compareTrust(original, enrich);
-		final T merge = mergeResult(original, enrich);
-
-		merge.setContactperson(unionDistinctLists(merge.getContactperson(), enrich.getContactperson(), trust));
-		merge.setContactgroup(unionDistinctLists(merge.getContactgroup(), enrich.getContactgroup(), trust));
-		merge.setTool(unionDistinctLists(merge.getTool(), enrich.getTool(), trust));
-
-		return merge;
-	}
-
-	private static <T extends Software> T mergeSoftware(T original, T enrich) {
-		int trust = compareTrust(original, enrich);
-		final T merge = mergeResult(original, enrich);
-
-		merge.setDocumentationUrl(unionDistinctLists(merge.getDocumentationUrl(), enrich.getDocumentationUrl(), trust));
-		merge.setLicense(unionDistinctLists(merge.getLicense(), enrich.getLicense(), trust));
-		merge.setCodeRepositoryUrl(chooseReference(merge.getCodeRepositoryUrl(), enrich.getCodeRepositoryUrl(), trust));
-		merge.setProgrammingLanguage(chooseReference(merge.getProgrammingLanguage(), enrich.getProgrammingLanguage(), trust));
-
-		return merge;
-	}
-
-	private static <T extends Dataset> T mergeDataset(T original, T enrich) {
-		int trust = compareTrust(original, enrich);
-		T merge = mergeResult(original, enrich);
-
-		merge.setStoragedate(chooseReference(merge.getStoragedate(), enrich.getStoragedate(), trust));
-		merge.setDevice(chooseReference(merge.getDevice(), enrich.getDevice(), trust));
-		merge.setSize(chooseReference(merge.getSize(), enrich.getSize(), trust));
-		merge.setVersion(chooseReference(merge.getVersion(), enrich.getVersion(), trust));
-		merge.setLastmetadataupdate(chooseReference(merge.getLastmetadataupdate(), enrich.getLastmetadataupdate(), trust));
-		merge.setMetadataversionnumber(chooseReference(merge.getMetadataversionnumber(), enrich.getMetadataversionnumber(), trust));
-		merge.setGeolocation(unionDistinctLists(merge.getGeolocation(), enrich.getGeolocation(), trust));
-
-		return merge;
-	}
-
-	public static <T extends Publication> T mergePublication(T original, T enrich) {
-		final int trust = compareTrust(original, enrich);
-		T merged = mergeResult(original, enrich);
-
-		merged.setJournal(chooseReference(merged.getJournal(), enrich.getJournal(), trust));
-
-		return merged;
-	}
-
-	private static <T extends Organization> T mergeOrganization(T left, T enrich) {
-		int trust = compareTrust(left, enrich);
-		T merged = mergeOafEntityFields(left, enrich, trust);
-
-		merged.setLegalshortname(chooseReference(merged.getLegalshortname(), enrich.getLegalshortname(), trust));
-		merged.setLegalname(chooseReference(merged.getLegalname(), enrich.getLegalname(), trust));
-		merged.setAlternativeNames(unionDistinctLists(enrich.getAlternativeNames(), merged.getAlternativeNames(), trust));
-		merged.setWebsiteurl(chooseReference(merged.getWebsiteurl(), enrich.getWebsiteurl(), trust));
-		merged.setLogourl(chooseReference(merged.getLogourl(), enrich.getLogourl(), trust));
-		merged.setEclegalbody(chooseReference(merged.getEclegalbody(), enrich.getEclegalbody(), trust));
-		merged.setEclegalperson(chooseReference(merged.getEclegalperson(), enrich.getEclegalperson(), trust));
-		merged.setEcnonprofit(chooseReference(merged.getEcnonprofit(), enrich.getEcnonprofit(), trust));
-		merged.setEcresearchorganization(chooseReference(merged.getEcresearchorganization(), enrich.getEcresearchorganization(), trust));
-		merged.setEchighereducation(chooseReference(merged.getEchighereducation(), enrich.getEchighereducation(), trust));
-		merged.setEcinternationalorganizationeurinterests(chooseReference(merged.getEcinternationalorganizationeurinterests(), enrich.getEcinternationalorganizationeurinterests(), trust));
-		merged.setEcinternationalorganization(chooseReference(merged.getEcinternationalorganization(), enrich.getEcinternationalorganization(), trust));
-		merged.setEcenterprise(chooseReference(merged.getEcenterprise(), enrich.getEcenterprise(), trust));
-		merged.setEcsmevalidated(chooseReference(merged.getEcsmevalidated(), enrich.getEcsmevalidated(), trust));
-		merged.setEcnutscode(chooseReference(merged.getEcnutscode(), enrich.getEcnutscode(), trust));
-		merged.setCountry(chooseReference(merged.getCountry(), enrich.getCountry(), trust));
-
-		return merged;
-	}
-
-	public static <T extends Project> T mergeProject(T original, T enrich) {
-		int trust = compareTrust(original, enrich);
-		T merged = mergeOafEntityFields(original, enrich, trust);
-
-		merged.setWebsiteurl(chooseReference(merged.getWebsiteurl(), enrich.getWebsiteurl(), trust));
-		merged.setCode(chooseReference(merged.getCode(), enrich.getCode(), trust));
-		merged.setAcronym(chooseReference(merged.getAcronym(), enrich.getAcronym(), trust));
-		merged.setTitle(chooseReference(merged.getTitle(), enrich.getTitle(), trust));
-		merged.setStartdate(chooseReference(merged.getStartdate(), enrich.getStartdate(), trust));
-		merged.setEnddate(chooseReference(merged.getEnddate(), enrich.getEnddate(), trust));
-		merged.setCallidentifier(chooseReference(merged.getCallidentifier(), enrich.getCallidentifier(), trust));
-		merged.setKeywords(chooseReference(merged.getKeywords(), enrich.getKeywords(), trust));
-		merged.setDuration(chooseReference(merged.getDuration(), enrich.getDuration(), trust));
-		merged.setEcsc39(chooseReference(merged.getEcsc39(), enrich.getEcsc39(), trust));
-		merged.setOamandatepublications(chooseReference(merged.getOamandatepublications(), enrich.getOamandatepublications(), trust));
-		merged.setEcarticle29_3(chooseReference(merged.getEcarticle29_3(), enrich.getEcarticle29_3(), trust));
-		merged.setSubjects(unionDistinctLists(merged.getSubjects(), enrich.getSubjects(), trust));
-		merged.setFundingtree(unionDistinctLists(merged.getFundingtree(), enrich.getFundingtree(), trust));
-		merged.setContracttype(chooseReference(merged.getContracttype(), enrich.getContracttype(), trust));
-		merged.setOptional1(chooseReference(merged.getOptional1(), enrich.getOptional1(), trust));
-		merged.setOptional2(chooseReference(merged.getOptional2(), enrich.getOptional2(), trust));
-		merged.setJsonextrainfo(chooseReference(merged.getJsonextrainfo(), enrich.getJsonextrainfo(), trust));
-		merged.setContactfullname(chooseReference(merged.getContactfullname(), enrich.getContactfullname(), trust));
-		merged.setContactfax(chooseReference(merged.getContactfax(), enrich.getContactfax(), trust));
-		merged.setContactphone(chooseReference(merged.getContactphone(), enrich.getContactphone(), trust));
-		merged.setContactemail(chooseReference(merged.getContactemail(), enrich.getContactemail(), trust));
-		merged.setSummary(chooseReference(merged.getSummary(), enrich.getSummary(), trust));
-		merged.setCurrency(chooseReference(merged.getCurrency(), enrich.getCurrency(), trust));
-
-		// missing in Project.merge
-		merged.setTotalcost(chooseReference(merged.getTotalcost(), enrich.getTotalcost(), trust));
-		merged.setFundedamount(chooseReference(merged.getFundedamount(), enrich.getFundedamount(), trust));
-
-		// trust ??
-		if (enrich.getH2020topiccode() != null && StringUtils.isEmpty(merged.getH2020topiccode())) {
-			merged.setH2020topiccode(enrich.getH2020topiccode());
-			merged.setH2020topicdescription(enrich.getH2020topicdescription());
-		}
-
-		merged.setH2020classification(unionDistinctLists(merged.getH2020classification(), enrich.getH2020classification(), trust));
-
-		return merged;
-	}
-
-	/**
-	 * Longest lists list.
-	 *
-	 * @param a the a
-	 * @param b the b
-	 * @return the list
-	 */
-	public static List<Field<String>> longestLists(List<Field<String>> a, List<Field<String>> b) {
-		if (a == null || b == null)
-			return a == null ? b : a;
-
-		return a.size() >= b.size() ? a : b;
-	}
-
-	/**
-	 * This method applies the enrichment of the instances
-	 *
-	 * @param toEnrichInstances   the instances that could be enriched
-	 * @param enrichmentInstances the enrichment instances
-	 * @return list of instances possibly enriched
-	 */
-	private static List<Instance> enrichInstances(final List<Instance> toEnrichInstances,
-		final List<Instance> enrichmentInstances) {
-		final List<Instance> enrichmentResult = new ArrayList<>();
-
-		if (toEnrichInstances == null) {
-			return enrichmentResult;
-		}
-		if (enrichmentInstances == null) {
-			return enrichmentResult;
-		}
-		Map<String, Instance> ri = toInstanceMap(enrichmentInstances);
-
-		toEnrichInstances.forEach(i -> {
-			final List<Instance> e = findEnrichmentsByPID(i.getPid(), ri);
-			if (e != null && e.size() > 0) {
-				e.forEach(enr -> applyEnrichment(i, enr));
-			} else {
-				final List<Instance> a = findEnrichmentsByPID(i.getAlternateIdentifier(), ri);
-				if (a != null && a.size() > 0) {
-					a.forEach(enr -> applyEnrichment(i, enr));
-				}
-			}
-			enrichmentResult.add(i);
-		});
-		return enrichmentResult;
-	}
-
-	/**
-	 * This method converts the list of instance enrichments
-	 * into a Map where the key is the normalized identifier
-	 * and the value is the instance itself
-	 *
-	 * @param ri the list of enrichment instances
-	 * @return the result map
-	 */
-	private static Map<String, Instance> toInstanceMap(final List<Instance> ri) {
-		return ri
-			.stream()
-			.filter(i -> i.getPid() != null || i.getAlternateIdentifier() != null)
-			.flatMap(i -> {
-				final List<Pair<String, Instance>> result = new ArrayList<>();
-				if (i.getPid() != null)
-					i
-						.getPid()
-						.stream()
-						.filter(MergeUtils::validPid)
-						.forEach(p -> result.add(new ImmutablePair<>(extractKeyFromPid(p), i)));
-				if (i.getAlternateIdentifier() != null)
-					i
-						.getAlternateIdentifier()
-						.stream()
-						.filter(MergeUtils::validPid)
-						.forEach(p -> result.add(new ImmutablePair<>(extractKeyFromPid(p), i)));
-				return result.stream();
-			})
-			.collect(
-				Collectors
-					.toMap(
-						Pair::getLeft,
-						Pair::getRight,
-						(a, b) -> a));
-	}
-
-	private static boolean isFromDelegatedAuthority(Result r) {
-		return Optional
-			.ofNullable(r.getInstance())
-			.map(
-				instance -> instance
-					.stream()
-					.filter(i -> Objects.nonNull(i.getCollectedfrom()))
-					.map(i -> i.getCollectedfrom().getKey())
-					.anyMatch(cfId -> IdentifierFactory.delegatedAuthorityDatasourceIds().contains(cfId)))
-			.orElse(false);
-	}
-
-	/**
-	 * Valid pid boolean.
-	 *
-	 * @param p the p
-	 * @return the boolean
-	 */
-	private static boolean validPid(final StructuredProperty p) {
-		return p.getValue() != null && p.getQualifier() != null && p.getQualifier().getClassid() != null;
-	}
-
-	/**
-	 * Normalize pid string.
-	 *
-	 * @param pid the pid
-	 * @return the string
-	 */
-	private static String extractKeyFromPid(final StructuredProperty pid) {
-		if (pid == null)
-			return null;
-		final StructuredProperty normalizedPid = CleaningFunctions.normalizePidValue(pid);
-
-		return String.format("%s::%s", normalizedPid.getQualifier().getClassid(), normalizedPid.getValue());
-	}
-
-	/**
-	 * This utility method finds the list of enrichment instances
-	 * that match one or more PIDs in the input list
-	 *
-	 * @param pids        the list of PIDs
-	 * @param enrichments the List of enrichment instances having the same pid
-	 * @return the list
-	 */
-	private static List<Instance> findEnrichmentsByPID(final List<StructuredProperty> pids,
-		final Map<String, Instance> enrichments) {
-		if (pids == null || enrichments == null)
-			return null;
-		return pids
-			.stream()
-			.map(MergeUtils::extractKeyFromPid)
-			.map(enrichments::get)
-			.filter(Objects::nonNull)
-			.collect(Collectors.toList());
-	}
-
-	/**
-	 * Is an enrichment boolean.
-	 *
-	 * @param e the e
-	 * @return the boolean
-	 */
-	private static boolean isAnEnrichment(OafEntity e) {
-		return e.getDataInfo() != null &&
-			e.getDataInfo().getProvenanceaction() != null
-			&& ModelConstants.PROVENANCE_ENRICH.equalsIgnoreCase(e.getDataInfo().getProvenanceaction().getClassid());
-	}
-
-	/**
-	 * This method applies the enrichment on a single instance.
-	 * The enrichment consists of replacing values on
-	 * single attributes only if they are missing in the current instance.
-	 * The only repeatable field enriched is measures.
-	 *
-	 * @param merge      the current instance
-	 * @param enrichment the enrichment instance
-	 */
-	private static void applyEnrichment(final Instance merge, final Instance enrichment) {
-		if (merge == null || enrichment == null)
-			return;
-
-		merge.setLicense(firstNonNull(merge.getLicense(), enrichment.getLicense()));
-		merge.setAccessright(firstNonNull(merge.getAccessright(), enrichment.getAccessright()));
-		merge.setInstancetype(firstNonNull(merge.getInstancetype(), enrichment.getInstancetype()));
-		merge.setInstanceTypeMapping(firstNonNull(merge.getInstanceTypeMapping(), enrichment.getInstanceTypeMapping()));
-		merge.setHostedby(firstNonNull(merge.getHostedby(), enrichment.getHostedby()));
-		merge.setUrl(unionDistinctLists(merge.getUrl(), enrichment.getUrl(), 0));
-		merge.setDistributionlocation(firstNonNull(merge.getDistributionlocation(), enrichment.getDistributionlocation()));
-		merge.setCollectedfrom(firstNonNull(merge.getCollectedfrom(), enrichment.getCollectedfrom()));
-		// pid and alternateId are used for matching
-		merge.setDateofacceptance(firstNonNull(merge.getDateofacceptance(), enrichment.getDateofacceptance()));
-		merge.setProcessingchargeamount(firstNonNull(merge.getProcessingchargeamount(), enrichment.getProcessingchargeamount()));
-		merge.setProcessingchargecurrency(firstNonNull(merge.getProcessingchargecurrency(), enrichment.getProcessingchargecurrency()));
-		merge.setRefereed(firstNonNull(merge.getRefereed(), enrichment.getRefereed()));
-		merge.setMeasures(unionDistinctLists(merge.getMeasures(), enrichment.getMeasures(), 0));
-		merge.setFulltext(firstNonNull(merge.getFulltext(), enrichment.getFulltext()));
-	}
-
-	private static int compareTrust(Oaf a, Oaf b) {
-		String left = Optional
-			.ofNullable(a.getDataInfo())
-			.map(DataInfo::getTrust)
-			.orElse("0.0");
-
-		String right = Optional
-			.ofNullable(b.getDataInfo())
-			.map(DataInfo::getTrust)
-			.orElse("0.0");
-
-		return left.compareTo(right);
-	}
-
-}
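Throughout the MergeUtils class removed above, field selection is driven by a trust comparison: a positive comparison keeps the left value, a negative one keeps the right, and a tie falls back to whichever value is present. A minimal, self-contained sketch of that rule, copied in spirit from chooseString and compareTrust; the sample values are made up:

import org.apache.commons.lang3.StringUtils;

public class TrustMergeSketch {
	static String chooseString(String left, String right, int trust) {
		if (trust > 0) {
			return left;
		} else if (trust == 0) {
			return StringUtils.isNotBlank(left) ? left : right;
		}
		return right;
	}

	public static void main(String[] args) {
		// trust values are strings compared lexicographically, as in compareTrust
		int trust = "0.9".compareTo("0.8");
		System.out.println(chooseString("publisher A", "publisher B", trust)); // publisher A
		System.out.println(chooseString("", "publisher B", 0));                // publisher B (tie, left blank)
	}
}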
@@ -14,6 +14,7 @@ import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
 
 import eu.dnetlib.dhp.schema.common.AccessRightComparator;
+import eu.dnetlib.dhp.schema.common.ModelSupport;
 import eu.dnetlib.dhp.schema.oaf.*;
 
 public class OafMapperUtils {
@@ -21,6 +22,65 @@ public class OafMapperUtils {
 	private OafMapperUtils() {
 	}
 
+	public static Oaf merge(final Oaf left, final Oaf right) {
+		if (ModelSupport.isSubClass(left, OafEntity.class)) {
+			return mergeEntities((OafEntity) left, (OafEntity) right);
+		} else if (ModelSupport.isSubClass(left, Relation.class)) {
+			((Relation) left).mergeFrom((Relation) right);
+		} else {
+			throw new IllegalArgumentException("invalid Oaf type:" + left.getClass().getCanonicalName());
+		}
+		return left;
+	}
+
+	public static OafEntity mergeEntities(OafEntity left, OafEntity right) {
+		if (ModelSupport.isSubClass(left, Result.class)) {
+			return mergeResults((Result) left, (Result) right);
+		} else if (ModelSupport.isSubClass(left, Datasource.class)) {
+			left.mergeFrom(right);
+		} else if (ModelSupport.isSubClass(left, Organization.class)) {
+			left.mergeFrom(right);
+		} else if (ModelSupport.isSubClass(left, Project.class)) {
+			left.mergeFrom(right);
+		} else {
+			throw new IllegalArgumentException("invalid OafEntity subtype:" + left.getClass().getCanonicalName());
+		}
+		return left;
+	}
+
+	public static Result mergeResults(Result left, Result right) {
+
+		final boolean leftFromDelegatedAuthority = isFromDelegatedAuthority(left);
+		final boolean rightFromDelegatedAuthority = isFromDelegatedAuthority(right);
+
+		if (leftFromDelegatedAuthority && !rightFromDelegatedAuthority) {
+			return left;
+		}
+		if (!leftFromDelegatedAuthority && rightFromDelegatedAuthority) {
+			return right;
+		}
+
+		if (new ResultTypeComparator().compare(left, right) < 0) {
+			left.mergeFrom(right);
+			return left;
+		} else {
+			right.mergeFrom(left);
+			return right;
+		}
+	}
+
+	private static boolean isFromDelegatedAuthority(Result r) {
+		return Optional
+			.ofNullable(r.getInstance())
+			.map(
+				instance -> instance
+					.stream()
+					.filter(i -> Objects.nonNull(i.getCollectedfrom()))
+					.map(i -> i.getCollectedfrom().getKey())
+					.anyMatch(cfId -> IdentifierFactory.delegatedAuthorityDatasourceIds().contains(cfId)))
+			.orElse(false);
+	}
+
 	public static KeyValue keyValue(final String k, final String v) {
 		final KeyValue kv = new KeyValue();
 		kv.setKey(k);
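The merge entry point added above picks its branch from the runtime type of the left operand. A self-contained sketch of that subclass-dispatch idiom, using simplified stand-in types rather than the dhp model classes:

public class DispatchSketch {
	static class Oaf {}
	static class OafEntity extends Oaf {}
	static class Relation extends Oaf {}
	static class Result extends OafEntity {}

	// same check as ModelSupport.isSubClass: branch on the runtime type
	static boolean isSubClass(Oaf o, Class<? extends Oaf> cls) {
		return cls.isAssignableFrom(o.getClass());
	}

	static String merge(Oaf left, Oaf right) {
		if (isSubClass(left, OafEntity.class)) {
			return isSubClass(left, Result.class) ? "mergeResults" : "mergeFrom";
		} else if (isSubClass(left, Relation.class)) {
			return "Relation.mergeFrom";
		}
		throw new IllegalArgumentException("invalid Oaf type: " + left.getClass().getCanonicalName());
	}

	public static void main(String[] args) {
		System.out.println(merge(new Result(), new Result()));     // mergeResults
		System.out.println(merge(new Relation(), new Relation())); // Relation.mergeFrom
	}
}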
@@ -1,109 +0,0 @@
-
-package eu.dnetlib.dhp.common.api;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.io.IOUtils;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.Disabled;
-import org.junit.jupiter.api.Test;
-
-@Disabled
-class ZenodoAPIClientTest {
-
-	private final String URL_STRING = "https://sandbox.zenodo.org/api/deposit/depositions";
-	private final String ACCESS_TOKEN = "";
-
-	private final String CONCEPT_REC_ID = "657113";
-
-	private final String depositionId = "674915";
-
-	@Test
-	void testUploadOldDeposition() throws IOException, MissingConceptDoiException {
-		ZenodoAPIClient client = new ZenodoAPIClient(URL_STRING,
-			ACCESS_TOKEN);
-		Assertions.assertEquals(200, client.uploadOpenDeposition(depositionId));
-
-		File file = new File(getClass()
-			.getResource("/eu/dnetlib/dhp/common/api/COVID-19.json.gz")
-			.getPath());
-
-		InputStream is = new FileInputStream(file);
-
-		Assertions.assertEquals(200, client.uploadIS(is, "COVID-19.json.gz"));
-
-		String metadata = IOUtils.toString(getClass().getResourceAsStream("/eu/dnetlib/dhp/common/api/metadata.json"));
-
-		Assertions.assertEquals(200, client.sendMretadata(metadata));
-
-		Assertions.assertEquals(202, client.publish());
-
-	}
-
-	@Test
-	void testNewDeposition() throws IOException {
-
-		ZenodoAPIClient client = new ZenodoAPIClient(URL_STRING,
-			ACCESS_TOKEN);
-		Assertions.assertEquals(201, client.newDeposition());
-
-		File file = new File(getClass()
-			.getResource("/eu/dnetlib/dhp/common/api/COVID-19.json.gz")
-			.getPath());
-
-		InputStream is = new FileInputStream(file);
-
-		Assertions.assertEquals(200, client.uploadIS(is, "COVID-19.json.gz"));
-
-		String metadata = IOUtils.toString(getClass().getResourceAsStream("/eu/dnetlib/dhp/common/api/metadata.json"));
-
-		Assertions.assertEquals(200, client.sendMretadata(metadata));
-
-		Assertions.assertEquals(202, client.publish());
-
-	}
-
-	@Test
-	void testNewVersionNewName() throws IOException, MissingConceptDoiException {
-
-		ZenodoAPIClient client = new ZenodoAPIClient(URL_STRING,
-			ACCESS_TOKEN);
-
-		Assertions.assertEquals(201, client.newVersion(CONCEPT_REC_ID));
-
-		File file = new File(getClass()
-			.getResource("/eu/dnetlib/dhp/common/api/newVersion")
-			.getPath());
-
-		InputStream is = new FileInputStream(file);
-
-		Assertions.assertEquals(200, client.uploadIS(is, "newVersion_deposition"));
-
-		Assertions.assertEquals(202, client.publish());
-
-	}
-
-	@Test
-	void testNewVersionOldName() throws IOException, MissingConceptDoiException {
-
-		ZenodoAPIClient client = new ZenodoAPIClient(URL_STRING,
-			ACCESS_TOKEN);
-
-		Assertions.assertEquals(201, client.newVersion(CONCEPT_REC_ID));
-
-		File file = new File(getClass()
-			.getResource("/eu/dnetlib/dhp/common/api/newVersion2")
-			.getPath());
-
-		InputStream is = new FileInputStream(file);
-
-		Assertions.assertEquals(200, client.uploadIS(is, "newVersion_deposition"));
-
-		Assertions.assertEquals(202, client.publish());
-
-	}
-
-}
@@ -1,111 +0,0 @@
-
-package eu.dnetlib.dhp.schema.oaf.utils;
-
-import static org.junit.jupiter.api.Assertions.*;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import eu.dnetlib.dhp.schema.common.ModelSupport;
-import org.apache.commons.io.IOUtils;
-import org.junit.jupiter.api.Test;
-
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import eu.dnetlib.dhp.schema.common.ModelConstants;
-import eu.dnetlib.dhp.schema.oaf.Dataset;
-import eu.dnetlib.dhp.schema.oaf.KeyValue;
-import eu.dnetlib.dhp.schema.oaf.Publication;
-import eu.dnetlib.dhp.schema.oaf.Result;
-
-public class MergeUtilsTest {
-
-	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper()
-		.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
-
-	@Test
-	void testMergePubs() throws IOException {
-		Publication p1 = read("publication_1.json", Publication.class);
-		Publication p2 = read("publication_2.json", Publication.class);
-		Dataset d1 = read("dataset_1.json", Dataset.class);
-		Dataset d2 = read("dataset_2.json", Dataset.class);
-
-		assertEquals(1, p1.getCollectedfrom().size());
-		assertEquals(ModelConstants.CROSSREF_ID, p1.getCollectedfrom().get(0).getKey());
-		assertEquals(1, d2.getCollectedfrom().size());
-		assertFalse(cfId(d2.getCollectedfrom()).contains(ModelConstants.CROSSREF_ID));
-
-		assertEquals(1, p2.getCollectedfrom().size());
-		assertFalse(cfId(p2.getCollectedfrom()).contains(ModelConstants.CROSSREF_ID));
-		assertEquals(1, d1.getCollectedfrom().size());
-		assertTrue(cfId(d1.getCollectedfrom()).contains(ModelConstants.CROSSREF_ID));
-
-		final Result p1d2 = MergeUtils.checkedMerge(p1, d2);
-		assertEquals(ModelConstants.PUBLICATION_RESULTTYPE_CLASSID, p1d2.getResulttype().getClassid());
-		assertTrue(p1d2 instanceof Publication);
-		assertEquals(p1.getId(), p1d2.getId());
-	}
-
-	@Test
-	void testMergePubs_1() throws IOException {
-		Publication p2 = read("publication_2.json", Publication.class);
-		Dataset d1 = read("dataset_1.json", Dataset.class);
-
-		final Result p2d1 = MergeUtils.checkedMerge(p2, d1);
-		assertEquals((ModelConstants.DATASET_RESULTTYPE_CLASSID), p2d1.getResulttype().getClassid());
-		assertTrue(p2d1 instanceof Dataset);
-		assertEquals(d1.getId(), p2d1.getId());
-		assertEquals(2, p2d1.getCollectedfrom().size());
-	}
-
-	@Test
-	void testMergePubs_2() throws IOException {
-		Publication p1 = read("publication_1.json", Publication.class);
-		Publication p2 = read("publication_2.json", Publication.class);
-
-		Result p1p2 = MergeUtils.checkedMerge(p1, p2);
-		assertTrue(p1p2 instanceof Publication);
-		assertEquals(p1.getId(), p1p2.getId());
-		assertEquals(2, p1p2.getCollectedfrom().size());
-	}
-
-	@Test
-	void testDelegatedAuthority_1() throws IOException {
-		Dataset d1 = read("dataset_2.json", Dataset.class);
-		Dataset d2 = read("dataset_delegated.json", Dataset.class);
-
-		assertEquals(1, d2.getCollectedfrom().size());
-		assertTrue(cfId(d2.getCollectedfrom()).contains(ModelConstants.ZENODO_OD_ID));
-
-		Result res = (Result) MergeUtils.merge(d1, d2, true);
-
-		assertEquals(d2, res);
-	}
-
-	@Test
-	void testDelegatedAuthority_2() throws IOException {
-		Dataset p1 = read("publication_1.json", Dataset.class);
-		Dataset d2 = read("dataset_delegated.json", Dataset.class);
-
-		assertEquals(1, d2.getCollectedfrom().size());
-		assertTrue(cfId(d2.getCollectedfrom()).contains(ModelConstants.ZENODO_OD_ID));
-
-		Result res = (Result) MergeUtils.merge(p1, d2, true);
-
-		assertEquals(d2, res);
-	}
-
-	protected HashSet<String> cfId(List<KeyValue> collectedfrom) {
-		return collectedfrom.stream().map(KeyValue::getKey).collect(Collectors.toCollection(HashSet::new));
-	}
-
-	protected <T extends Result> T read(String filename, Class<T> clazz) throws IOException {
-		final String json = IOUtils.toString(getClass().getResourceAsStream(filename));
-		return OBJECT_MAPPER.readValue(json, clazz);
-	}
-
-}
@@ -149,7 +149,7 @@ class OafMapperUtilsTest {
 	void testDate() {
 		final String date = GraphCleaningFunctions.cleanDate("23-FEB-1998");
 		assertNotNull(date);
-		assertEquals("1998-02-23", date);
+		System.out.println(date);
 	}
 
 	@Test
@@ -166,8 +166,8 @@ class OafMapperUtilsTest {
 
 		assertEquals(
 			ModelConstants.PUBLICATION_RESULTTYPE_CLASSID,
-			MergeUtils
-				.mergeResult(p1, d2)
+			OafMapperUtils
+				.mergeResults(p1, d2)
 				.getResulttype()
 				.getClassid());
 
@@ -178,8 +178,8 @@ class OafMapperUtilsTest {
 
 		assertEquals(
 			ModelConstants.DATASET_RESULTTYPE_CLASSID,
-			MergeUtils
-				.mergeResult(p2, d1)
+			OafMapperUtils
+				.mergeResults(p2, d1)
 				.getResulttype()
 				.getClassid());
 	}
@@ -192,7 +192,7 @@ class OafMapperUtilsTest {
 		assertEquals(1, d2.getCollectedfrom().size());
 		assertTrue(cfId(d2.getCollectedfrom()).contains(ModelConstants.ZENODO_OD_ID));
 
-		Result res = MergeUtils.mergeResult(d1, d2);
+		Result res = OafMapperUtils.mergeResults(d1, d2);
 
 		assertEquals(d2, res);
 
@@ -1,9 +1,10 @@
 
-package eu.dnetlib.dhp.enrich.orcid;
+package eu.dnetlib.oa.merge;
 
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.BufferedReader;
 import java.io.InputStreamReader;
-import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
 

@@ -13,9 +14,10 @@ import org.junit.platform.commons.util.StringUtils;
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
 
+import eu.dnetlib.dhp.oa.merge.AuthorMerger;
 import eu.dnetlib.dhp.schema.oaf.Author;
 
-public class ORCIDAuthorEnricherTest {
+public class AuthorMergerTest {
 
 	@Test
 	public void testEnrcichAuthor() throws Exception {

@@ -24,13 +26,12 @@ public class ORCIDAuthorEnricherTest {
 		BufferedReader pr = new BufferedReader(new InputStreamReader(
 			Objects
 				.requireNonNull(
-					ORCIDAuthorEnricherTest.class
-						.getResourceAsStream("/eu/dnetlib/dhp/enrich/orcid/authors_publication_sample.json"))));
+					AuthorMergerTest.class
+						.getResourceAsStream("/eu/dnetlib/dhp/oa/merge/authors_publication_sample.json"))));
 		BufferedReader or = new BufferedReader(new InputStreamReader(
 			Objects
 				.requireNonNull(
-					ORCIDAuthorEnricherTest.class
-						.getResourceAsStream("/eu/dnetlib/dhp/enrich/orcid/authors_orcid_sample.json"))));
+					AuthorMergerTest.class.getResourceAsStream("/eu/dnetlib/dhp/oa/merge/authors_orcid_sample.json"))));
 
 		TypeReference<List<Author>> aclass = new TypeReference<List<Author>>() {
 		};

@@ -66,8 +67,7 @@ public class ORCIDAuthorEnricherTest {
 		long start = System.currentTimeMillis();
 
 		// final List<Author> enrichedList = AuthorMerger.enrichOrcid(publicationAuthors, orcidAuthors);
-		final List<Author> enrichedList = Collections.emptyList(); // SparkEnrichGraphWithOrcidAuthors.enrichOrcid(publicationAuthors,
-		// orcidAuthors);
+		final List<Author> enrichedList = AuthorMerger.enrichOrcid(publicationAuthors, orcidAuthors);
 
 		long enrichedAuthorWithPid = enrichedList
 			.stream()

@@ -91,4 +91,24 @@ public class ORCIDAuthorEnricherTest {
 		}
 	}
 
+	@Test
+	public void checkSimilarityTest() {
+		final Author left = new Author();
+		left.setName("Anand");
+		left.setSurname("Rachna");
+		left.setFullname("Anand, Rachna");
+
+		System.out.println(AuthorMerger.normalizeFullName(left.getFullname()));
+
+		final Author right = new Author();
+		right.setName("Rachna");
+		right.setSurname("Anand");
+		right.setFullname("Rachna, Anand");
+		// System.out.println(AuthorMerger.normalize(right.getFullname()));
+		boolean same = AuthorMerger.checkORCIDSimilarity(left, right);
+
+		assertTrue(same);
+
+	}
 }
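The new checkSimilarityTest asserts that AuthorMerger.checkORCIDSimilarity considers "Anand, Rachna" and "Rachna, Anand" the same person, so the comparison has to be insensitive to swapped name/surname order. One common way to get that behaviour is to compare normalized token sets; the sketch below only illustrates that idea and is not the AuthorMerger code:

import java.text.Normalizer;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

class NameTokenSketch {

	// "Anand, Rachna" and "Rachna, Anand" both normalize to {anand, rachna}.
	static boolean sameTokens(String a, String b) {
		return tokens(a).equals(tokens(b));
	}

	private static Set<String> tokens(String fullname) {
		String normalized = Normalizer
			.normalize(fullname, Normalizer.Form.NFD)
			.replaceAll("[^\\p{ASCII}]", "") // strip accents
			.toLowerCase()
			.replaceAll("[^a-z]+", " ") // drop punctuation
			.trim();
		return new TreeSet<>(Arrays.asList(normalized.split("\\s+")));
	}
}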
@@ -23,18 +23,15 @@ public class InstanceTypeMatch extends AbstractListComparator {
 
 		// jolly types
 		translationMap.put("Conference object", "*");
-		translationMap.put("Research", "*");
 		translationMap.put("Other literature type", "*");
 		translationMap.put("Unknown", "*");
 		translationMap.put("UNKNOWN", "*");
 
 		// article types
 		translationMap.put("Article", "Article");
-		translationMap.put("Journal", "Article");
 		translationMap.put("Data Paper", "Article");
 		translationMap.put("Software Paper", "Article");
 		translationMap.put("Preprint", "Article");
-		translationMap.put("Part of book or chapter of book", "Article");
 
 		// thesis types
 		translationMap.put("Thesis", "Thesis");
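For context, InstanceTypeMatch compares instance types after passing them through this translationMap; values mapped to "*" behave as wildcards that match any other type, so the hunk above narrows the wildcard set and stops treating "Journal" and "Part of book or chapter of book" as articles. A hedged sketch of how such a map is typically consulted (illustrative only, not the comparator code):

import java.util.HashMap;
import java.util.Map;

class InstanceTypeMatchSketch {

	static final Map<String, String> TRANSLATION = new HashMap<>();
	static {
		TRANSLATION.put("Conference object", "*"); // jolly type
		TRANSLATION.put("Article", "Article");
		TRANSLATION.put("Thesis", "Thesis");
	}

	// Two raw types match when their normalized forms coincide or either is the jolly "*".
	static boolean match(String left, String right) {
		String l = TRANSLATION.getOrDefault(left, left);
		String r = TRANSLATION.getOrDefault(right, right);
		return "*".equals(l) || "*".equals(r) || l.equals(r);
	}
}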
@@ -8,9 +8,9 @@ import java.util.Map;
 
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Disabled;
 
 import eu.dnetlib.pace.model.Person;
-import jdk.nashorn.internal.ir.annotations.Ignore;
 
 public class UtilTest {
 

@@ -22,7 +22,7 @@ public class UtilTest {
 	}
 
 	@Test
-	@Ignore
+	@Disabled
 	public void paceResolverTest() {
 		PaceResolver paceResolver = new PaceResolver();
 		paceResolver.getComparator("keywordMatch", params);
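The fix above deserves a note: the class had been importing jdk.nashorn.internal.ir.annotations.Ignore, an annotation JUnit never looks at, so paceResolverTest was still being executed. In JUnit 5 the supported way to skip a test is org.junit.jupiter.api.Disabled:

import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

class SkipExample {

	@Test
	@Disabled // optionally @Disabled("reason")
	void skippedTest() {
		// not executed while the annotation is present
	}
}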
@@ -1,13 +1,14 @@
 
 package eu.dnetlib.dhp.actionmanager.promote;
 
-import eu.dnetlib.dhp.common.FunctionalInterfaceSupport.SerializableSupplier;
-import eu.dnetlib.dhp.schema.oaf.Oaf;
-import eu.dnetlib.dhp.schema.oaf.utils.MergeUtils;
+import static eu.dnetlib.dhp.schema.common.ModelSupport.isSubClass;
 
 import java.util.function.BiFunction;
 
-import static eu.dnetlib.dhp.schema.common.ModelSupport.isSubClass;
+import eu.dnetlib.dhp.common.FunctionalInterfaceSupport.SerializableSupplier;
+import eu.dnetlib.dhp.schema.oaf.Oaf;
+import eu.dnetlib.dhp.schema.oaf.OafEntity;
+import eu.dnetlib.dhp.schema.oaf.Relation;
 
 /** OAF model merging support. */
 public class MergeAndGet {

@@ -45,7 +46,20 @@ public class MergeAndGet {
 	}
 
 	private static <G extends Oaf, A extends Oaf> G mergeFromAndGet(G x, A y) {
-		return (G) MergeUtils.merge(x, y);
+		if (isSubClass(x, Relation.class) && isSubClass(y, Relation.class)) {
+			((Relation) x).mergeFrom((Relation) y);
+			return x;
+		} else if (isSubClass(x, OafEntity.class)
+			&& isSubClass(y, OafEntity.class)
+			&& isSubClass(x, y)) {
+			((OafEntity) x).mergeFrom((OafEntity) y);
+			return x;
+		}
+		throw new RuntimeException(
+			String
+				.format(
+					"MERGE_FROM_AND_GET incompatible types: %s, %s",
+					x.getClass().getCanonicalName(), y.getClass().getCanonicalName()));
 	}
 
 	@SuppressWarnings("unchecked")
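The rewritten mergeFromAndGet replaces the single MergeUtils.merge call with explicit runtime-type dispatch: a Relation merges only with a Relation, an entity only with a subclass-compatible entity, and any other pairing fails fast instead of merging silently. A small sketch of the calling pattern under those rules (types from eu.dnetlib.dhp.schema.oaf; the variables are placeholders):

// Compatible: both operands are Relations; the left one is mutated and returned.
Relation merged = mergeFromAndGet(existingRel, incomingRel);

// Incompatible: e.g. an entity paired with a Relation now surfaces immediately.
try {
	mergeFromAndGet(existingEntity, incomingRel);
} catch (RuntimeException e) {
	// "MERGE_FROM_AND_GET incompatible types: ..."
}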
@@ -64,9 +64,6 @@ public class PrepareAffiliationRelations implements Serializable {
 		final String pubmedInputPath = parser.get("pubmedInputPath");
 		log.info("pubmedInputPath: {}", pubmedInputPath);
 
-		final String openapcInputPath = parser.get("openapcInputPath");
-		log.info("openapcInputPath: {}", openapcInputPath);
-
 		final String outputPath = parser.get("outputPath");
 		log.info("outputPath: {}", outputPath);
 

@@ -88,14 +85,8 @@ public class PrepareAffiliationRelations implements Serializable {
 		JavaPairRDD<Text, Text> pubmedRelations = prepareAffiliationRelations(
 			spark, pubmedInputPath, collectedFromPubmed);
 
-		List<KeyValue> collectedFromOpenAPC = OafMapperUtils
-			.listKeyValues(ModelConstants.OPEN_APC_ID, "OpenAPC");
-		JavaPairRDD<Text, Text> openAPCRelations = prepareAffiliationRelations(
-			spark, openapcInputPath, collectedFromOpenAPC);
-
 		crossrefRelations
 			.union(pubmedRelations)
-			.union(openAPCRelations)
 			.saveAsHadoopFile(
 				outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
 
@@ -95,7 +95,7 @@ public class SparkAtomicActionScoreJob implements Serializable {
 
 		return projectScores.map((MapFunction<BipProjectModel, Project>) bipProjectScores -> {
 			Project project = new Project();
-			//project.setId(bipProjectScores.getProjectId());
+			project.setId(bipProjectScores.getProjectId());
 			project.setMeasures(bipProjectScores.toMeasures());
 			return project;
 		}, Encoders.bean(Project.class))
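A note on the one-line fix above: the mapper is serialized with Encoders.bean(Project.class), which writes out whatever bean properties are populated, so with setId commented out every emitted Project carried a null identifier and its measures could not be tied back to a project, which is presumably what motivated the change. The repaired shape in isolation (getProjectId is assumed to be generated for BipProjectModel, e.g. by Lombok, since the explicit getter is dropped in the next hunk):

Project project = new Project();
project.setId(bipProjectScores.getProjectId()); // previously commented out
project.setMeasures(bipProjectScores.toMeasures());
return project;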
@@ -34,11 +34,6 @@ public class BipProjectModel {
 
 	String totalCitationCount;
 
-	public String getProjectId() {
-		return projectId;
-	}
-
-
 	// each project bip measure has exactly one value, hence one key-value pair
 	private Measure createMeasure(String measureId, String measureValue) {
 
@@ -75,7 +75,6 @@ public class GetFOSSparkJob implements Serializable {
 		fosData.map((MapFunction<Row, FOSDataModel>) r -> {
 			FOSDataModel fosDataModel = new FOSDataModel();
 			fosDataModel.setDoi(r.getString(0).toLowerCase());
-			fosDataModel.setOaid(r.getString(1).toLowerCase());
 			fosDataModel.setLevel1(r.getString(2));
 			fosDataModel.setLevel2(r.getString(3));
 			fosDataModel.setLevel3(r.getString(4));
@@ -16,14 +16,12 @@ import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Encoders;
 import org.apache.spark.sql.SaveMode;
 import org.apache.spark.sql.SparkSession;
-import org.jetbrains.annotations.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import eu.dnetlib.dhp.actionmanager.createunresolvedentities.model.FOSDataModel;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
-import eu.dnetlib.dhp.schema.common.ModelSupport;
 import eu.dnetlib.dhp.schema.oaf.Result;
 import eu.dnetlib.dhp.schema.oaf.StructuredProperty;
 import eu.dnetlib.dhp.schema.oaf.Subject;

@@ -54,90 +52,62 @@ public class PrepareFOSSparkJob implements Serializable {
 		final String outputPath = parser.get("outputPath");
 		log.info("outputPath: {}", outputPath);
 
-		final Boolean distributeDOI = Optional
-			.ofNullable(parser.get("distributeDoi"))
-			.map(Boolean::valueOf)
-			.orElse(Boolean.TRUE);
-
 		SparkConf conf = new SparkConf();
 		runWithSparkSession(
 			conf,
 			isSparkSessionManaged,
 			spark -> {
-				if (distributeDOI)
-					distributeFOSdois(
-						spark,
-						sourcePath,
+				distributeFOSdois(
+					spark,
+					sourcePath,
 
 					outputPath);
-				else
-					distributeFOSoaid(spark, sourcePath, outputPath);
 			});
 	}
 
-	private static void distributeFOSoaid(SparkSession spark, String sourcePath, String outputPath) {
-		Dataset<FOSDataModel> fosDataset = readPath(spark, sourcePath, FOSDataModel.class);
-
-		fosDataset
-			.groupByKey((MapFunction<FOSDataModel, String>) v -> v.getOaid().toLowerCase(), Encoders.STRING())
-			.mapGroups((MapGroupsFunction<String, FOSDataModel, Result>) (k, it) -> {
-				return getResult(ModelSupport.getIdPrefix(Result.class) + "|" + k, it);
-			}, Encoders.bean(Result.class))
-			.write()
-			.mode(SaveMode.Overwrite)
-			.option("compression", "gzip")
-			.json(outputPath + "/fos");
-	}
-
-	@NotNull
-	private static Result getResult(String k, Iterator<FOSDataModel> it) {
-		Result r = new Result();
-		FOSDataModel first = it.next();
-		r.setId(k);
-
-		HashSet<String> level1 = new HashSet<>();
-		HashSet<String> level2 = new HashSet<>();
-		HashSet<String> level3 = new HashSet<>();
-		HashSet<String> level4 = new HashSet<>();
-		addLevels(level1, level2, level3, level4, first);
-		it.forEachRemaining(v -> addLevels(level1, level2, level3, level4, v));
-		List<Subject> sbjs = new ArrayList<>();
-		level1
-			.forEach(l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID)));
-		level2
-			.forEach(l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID)));
-		level3
-			.forEach(
-				l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID, true)));
-		level4
-			.forEach(
-				l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID, true)));
-		r.setSubject(sbjs);
-		r
-			.setDataInfo(
-				OafMapperUtils
-					.dataInfo(
-						false, null, true,
-						false,
-						OafMapperUtils
-							.qualifier(
-								ModelConstants.PROVENANCE_ENRICH,
-								null,
-								ModelConstants.DNET_PROVENANCE_ACTIONS,
-								ModelConstants.DNET_PROVENANCE_ACTIONS),
-						null));
-		return r;
-	}
-
 	private static void distributeFOSdois(SparkSession spark, String sourcePath, String outputPath) {
 		Dataset<FOSDataModel> fosDataset = readPath(spark, sourcePath, FOSDataModel.class);
 
 		fosDataset
 			.groupByKey((MapFunction<FOSDataModel, String>) v -> v.getDoi().toLowerCase(), Encoders.STRING())
-			.mapGroups(
-				(MapGroupsFunction<String, FOSDataModel, Result>) (k,
-					it) -> getResult(DHPUtils.generateUnresolvedIdentifier(k, DOI), it),
-				Encoders.bean(Result.class))
+			.mapGroups((MapGroupsFunction<String, FOSDataModel, Result>) (k, it) -> {
+				Result r = new Result();
+				FOSDataModel first = it.next();
+				r.setId(DHPUtils.generateUnresolvedIdentifier(k, DOI));
+
+				HashSet<String> level1 = new HashSet<>();
+				HashSet<String> level2 = new HashSet<>();
+				HashSet<String> level3 = new HashSet<>();
+				HashSet<String> level4 = new HashSet<>();
+				addLevels(level1, level2, level3, level4, first);
+				it.forEachRemaining(v -> addLevels(level1, level2, level3, level4, v));
+				List<Subject> sbjs = new ArrayList<>();
+				level1
+					.forEach(l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID)));
+				level2
+					.forEach(l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID)));
+				level3
+					.forEach(
+						l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID, true)));
+				level4
+					.forEach(
+						l -> add(sbjs, getSubject(l, FOS_CLASS_ID, FOS_CLASS_NAME, UPDATE_SUBJECT_FOS_CLASS_ID, true)));
+				r.setSubject(sbjs);
+				r
+					.setDataInfo(
+						OafMapperUtils
+							.dataInfo(
+								false, null, true,
+								false,
+								OafMapperUtils
+									.qualifier(
+										ModelConstants.PROVENANCE_ENRICH,
+										null,
+										ModelConstants.DNET_PROVENANCE_ACTIONS,
+										ModelConstants.DNET_PROVENANCE_ACTIONS),
+								null));
+				return r;
+			}, Encoders.bean(Result.class))
 			.write()
 			.mode(SaveMode.Overwrite)
 			.option("compression", "gzip")
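The reshaped distributeFOSdois inlines the former getResult helper: rows are grouped by lower-cased DOI, each group collapses into a single unresolved Result whose identifier comes from DHPUtils.generateUnresolvedIdentifier, and the four FOS levels are accumulated into de-duplicated HashSets before becoming Subject entries. The same groupByKey/mapGroups collapse in miniature, on plain strings (self-contained sketch, not project code):

import java.util.Arrays;

import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.MapGroupsFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

class GroupCollapseSketch {
	public static void main(String[] args) {
		SparkSession spark = SparkSession.builder().master("local[*]").appName("sketch").getOrCreate();
		Dataset<String> rows = spark
			.createDataset(Arrays.asList("10.1/A|math", "10.1/a|physics", "10.2/b|biology"), Encoders.STRING());

		rows
			// group by the lower-cased key before '|', mirroring v.getDoi().toLowerCase()
			.groupByKey((MapFunction<String, String>) r -> r.split("\\|")[0].toLowerCase(), Encoders.STRING())
			// fold each group into one output record, mirroring the Result assembly above
			.mapGroups((MapGroupsFunction<String, String, String>) (k, it) -> {
				StringBuilder sb = new StringBuilder(k).append(" ->");
				it.forEachRemaining(v -> sb.append(' ').append(v.split("\\|")[1]));
				return sb.toString();
			}, Encoders.STRING())
			.show(false);

		spark.stop();
	}
}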
@@ -1,92 +0,0 @@
-
-package eu.dnetlib.dhp.actionmanager.fosnodoi;
-
-import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.*;
-
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.mapred.SequenceFileOutputFormat;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.function.FilterFunction;
-import org.apache.spark.api.java.function.FlatMapFunction;
-import org.apache.spark.api.java.function.MapFunction;
-import org.apache.spark.sql.Encoders;
-import org.apache.spark.sql.SparkSession;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import eu.dnetlib.dhp.actionmanager.opencitations.model.COCI;
-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.schema.action.AtomicAction;
-import eu.dnetlib.dhp.schema.common.ModelConstants;
-import eu.dnetlib.dhp.schema.oaf.Relation;
-import eu.dnetlib.dhp.schema.oaf.Result;
-import eu.dnetlib.dhp.schema.oaf.utils.*;
-import scala.Tuple2;
-
-public class CreateActionSetSparkJob implements Serializable {
-
-	private static final Logger log = LoggerFactory.getLogger(CreateActionSetSparkJob.class);
-
-	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
-	public static void main(final String[] args) throws IOException, ParseException {
-
-		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
-			IOUtils
-				.toString(
-					Objects
-						.requireNonNull(
-							CreateActionSetSparkJob.class
-								.getResourceAsStream(
-									"/eu/dnetlib/dhp/actionmanager/fosnodoi/as_parameters.json"))));
-
-		parser.parseArgument(args);
-
-		Boolean isSparkSessionManaged = Optional
-			.ofNullable(parser.get("isSparkSessionManaged"))
-			.map(Boolean::valueOf)
-			.orElse(Boolean.TRUE);
-
-		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
-
-		final String inputPath = parser.get("sourcePath");
-		log.info("inputPath {}", inputPath);
-
-		final String outputPath = parser.get("outputPath");
-		log.info("outputPath {}", outputPath);
-
-		SparkConf conf = new SparkConf();
-		runWithSparkSession(
-			conf,
-			isSparkSessionManaged,
-			spark -> createActionSet(spark, inputPath, outputPath));
-
-	}
-
-	private static void createActionSet(SparkSession spark, String inputPath, String outputPath) {
-		spark
-			.read()
-			.textFile(inputPath)
-			.map(
-				(MapFunction<String, Result>) value -> OBJECT_MAPPER.readValue(value, Result.class),
-				Encoders.bean(Result.class))
-			.toJavaRDD()
-			.map(p -> new AtomicAction(p.getClass(), p))
-			.mapToPair(
-				aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
-					new Text(OBJECT_MAPPER.writeValueAsString(aa))))
-			.saveAsHadoopFile(
-				outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
-	}
-
-}
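Although this job is deleted, it shows the ActionSet packaging shared by every CreateActionSetSparkJob in this compare: each payload is wrapped in an AtomicAction and saved as a Hadoop sequence file whose key is the payload class name and whose value is the JSON-serialized action. Reduced to its core (fragment; rdd and OBJECT_MAPPER as in the code above):

rdd
	.map(p -> new AtomicAction(p.getClass(), p))
	.mapToPair(
		aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
			new Text(OBJECT_MAPPER.writeValueAsString(aa))))
	.saveAsHadoopFile(
		outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);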
@@ -22,14 +22,12 @@ import org.apache.spark.sql.SparkSession;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 
 import eu.dnetlib.dhp.actionmanager.opencitations.model.COCI;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.schema.action.AtomicAction;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
-import eu.dnetlib.dhp.schema.common.ModelSupport;
 import eu.dnetlib.dhp.schema.oaf.*;
 import eu.dnetlib.dhp.schema.oaf.utils.*;
 import eu.dnetlib.dhp.utils.DHPUtils;

@@ -39,12 +37,16 @@ public class CreateActionSetSparkJob implements Serializable {
 	public static final String OPENCITATIONS_CLASSID = "sysimport:crosswalk:opencitations";
 	public static final String OPENCITATIONS_CLASSNAME = "Imported from OpenCitations";
 
+	// DOI-to-DOI citations
+	public static final String COCI = "COCI";
+
+	// PMID-to-PMID citations
+	public static final String POCI = "POCI";
+
 	private static final String DOI_PREFIX = "50|doi_________::";
 
 	private static final String PMID_PREFIX = "50|pmid________::";
-	private static final String ARXIV_PREFIX = "50|arXiv_______::";
 
-	private static final String PMCID_PREFIX = "50|pmcid_______::";
 	private static final String TRUST = "0.91";
 
 	private static final Logger log = LoggerFactory.getLogger(CreateActionSetSparkJob.class);

@@ -77,30 +79,38 @@ public class CreateActionSetSparkJob implements Serializable {
 		final String outputPath = parser.get("outputPath");
 		log.info("outputPath {}", outputPath);
 
+		final boolean shouldDuplicateRels = Optional
+			.ofNullable(parser.get("shouldDuplicateRels"))
+			.map(Boolean::valueOf)
+			.orElse(Boolean.FALSE);
+
 		SparkConf conf = new SparkConf();
 		runWithSparkSession(
 			conf,
 			isSparkSessionManaged,
-			spark -> extractContent(spark, inputPath, outputPath));
+			spark -> extractContent(spark, inputPath, outputPath, shouldDuplicateRels));
 
 	}
 
-	private static void extractContent(SparkSession spark, String inputPath, String outputPath) {
-		getTextTextJavaPairRDD(spark, inputPath)
-			.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class);// , GzipCodec.class);
+	private static void extractContent(SparkSession spark, String inputPath, String outputPath,
+		boolean shouldDuplicateRels) {
+
+		getTextTextJavaPairRDD(spark, inputPath, shouldDuplicateRels, COCI)
+			.union(getTextTextJavaPairRDD(spark, inputPath, shouldDuplicateRels, POCI))
+			.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);
 	}
 
-	private static JavaPairRDD<Text, Text> getTextTextJavaPairRDD(SparkSession spark, String inputPath) {
+	private static JavaPairRDD<Text, Text> getTextTextJavaPairRDD(SparkSession spark, String inputPath,
+		boolean shouldDuplicateRels, String prefix) {
 		return spark
 			.read()
-			.textFile(inputPath)
+			.textFile(inputPath + "/" + prefix + "/" + prefix + "_JSON/*")
 			.map(
 				(MapFunction<String, COCI>) value -> OBJECT_MAPPER.readValue(value, COCI.class),
 				Encoders.bean(COCI.class))
 			.flatMap(
 				(FlatMapFunction<COCI, Relation>) value -> createRelation(
-					value)
+					value, shouldDuplicateRels, prefix)
 					.iterator(),
 				Encoders.bean(Relation.class))
 			.filter((FilterFunction<Relation>) Objects::nonNull)

@@ -111,68 +121,34 @@ public class CreateActionSetSparkJob implements Serializable {
 					new Text(OBJECT_MAPPER.writeValueAsString(aa))));
 	}
 
-	private static List<Relation> createRelation(COCI value) throws JsonProcessingException {
+	private static List<Relation> createRelation(COCI value, boolean duplicate, String p) {
 
 		List<Relation> relationList = new ArrayList<>();
+		String prefix;
 		String citing;
 		String cited;
 
-		switch (value.getCiting_pid()) {
-			case "doi":
-				citing = DOI_PREFIX
+		switch (p) {
+			case COCI:
+				prefix = DOI_PREFIX;
+				citing = prefix
 					+ IdentifierFactory
 						.md5(PidCleaner.normalizePidValue(PidType.doi.toString(), value.getCiting()));
-				break;
-			case "pmid":
-				citing = PMID_PREFIX
-					+ IdentifierFactory
-						.md5(PidCleaner.normalizePidValue(PidType.pmid.toString(), value.getCiting()));
-				break;
-			case "arxiv":
-				citing = ARXIV_PREFIX
-					+ IdentifierFactory
-						.md5(PidCleaner.normalizePidValue(PidType.arXiv.toString(), value.getCiting()));
-				break;
-			case "pmcid":
-				citing = PMCID_PREFIX
-					+ IdentifierFactory
-						.md5(PidCleaner.normalizePidValue(PidType.pmc.toString(), value.getCiting()));
-				break;
-			case "isbn":
-			case "issn":
-				return relationList;
-
-			default:
-				throw new IllegalStateException("Invalid prefix: " + new ObjectMapper().writeValueAsString(value));
-		}
-
-		switch (value.getCited_pid()) {
-			case "doi":
-				cited = DOI_PREFIX
+				cited = prefix
 					+ IdentifierFactory
 						.md5(PidCleaner.normalizePidValue(PidType.doi.toString(), value.getCited()));
 				break;
-			case "pmid":
-				cited = PMID_PREFIX
+			case POCI:
+				prefix = PMID_PREFIX;
+				citing = prefix
+					+ IdentifierFactory
+						.md5(PidCleaner.normalizePidValue(PidType.pmid.toString(), value.getCiting()));
+				cited = prefix
 					+ IdentifierFactory
 						.md5(PidCleaner.normalizePidValue(PidType.pmid.toString(), value.getCited()));
 				break;
-			case "arxiv":
-				cited = ARXIV_PREFIX
-					+ IdentifierFactory
-						.md5(PidCleaner.normalizePidValue(PidType.arXiv.toString(), value.getCited()));
-				break;
-			case "pmcid":
-				cited = PMCID_PREFIX
-					+ IdentifierFactory
-						.md5(PidCleaner.normalizePidValue(PidType.pmc.toString(), value.getCited()));
-				break;
-			case "isbn":
-			case "issn":
-				return relationList;
 			default:
-				throw new IllegalStateException("Invalid prefix: " + new ObjectMapper().writeValueAsString(value));
+				throw new IllegalStateException("Invalid prefix: " + p);
 		}
 
 		if (!citing.equals(cited)) {

@@ -181,6 +157,15 @@ public class CreateActionSetSparkJob implements Serializable {
 					getRelation(
 						citing,
 						cited, ModelConstants.CITES));
+
+			if (duplicate && value.getCiting().endsWith(".refs")) {
+				citing = prefix + IdentifierFactory
+					.md5(
+						CleaningFunctions
+							.normalizePidValue(
+								"doi", value.getCiting().substring(0, value.getCiting().indexOf(".refs"))));
+				relationList.add(getRelation(citing, cited, ModelConstants.CITES));
+			}
 		}
 
 		return relationList;
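After this change the citation type is decided by the dump prefix rather than by per-row pid fields: COCI rows are DOI-to-DOI and POCI rows PMID-to-PMID, and both endpoints of a citation are built the same way, as prefix plus the md5 of the normalized pid. For example (sketch reusing the calls shown above; the DOI itself is made up):

// OpenAIRE-style identifier for a citing DOI, as in the COCI branch.
String citing = "50|doi_________::"
	+ IdentifierFactory.md5(PidCleaner.normalizePidValue(PidType.doi.toString(), "10.1000/example"));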
@@ -12,7 +12,10 @@ import java.util.zip.ZipInputStream;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

@@ -34,17 +37,17 @@ public class GetOpenCitationsRefs implements Serializable {
 
 		parser.parseArgument(args);
 
-		// final String[] inputFile = parser.get("inputFile").split(";");
-		// log.info("inputFile {}", Arrays.asList(inputFile));
+		final String[] inputFile = parser.get("inputFile").split(";");
+		log.info("inputFile {}", Arrays.asList(inputFile));
 
-		final String inputPath = parser.get("inputPath");
-		log.info("inputPath {}", inputPath);
+		final String workingPath = parser.get("workingPath");
+		log.info("workingPath {}", workingPath);
 
 		final String hdfsNameNode = parser.get("hdfsNameNode");
 		log.info("hdfsNameNode {}", hdfsNameNode);
 
-		final String outputPath = parser.get("outputPath");
-		log.info("outputPath {}", outputPath);
+		final String prefix = parser.get("prefix");
+		log.info("prefix {}", prefix);
 
 		Configuration conf = new Configuration();
 		conf.set("fs.defaultFS", hdfsNameNode);

@@ -53,42 +56,41 @@ public class GetOpenCitationsRefs implements Serializable {
 
 		GetOpenCitationsRefs ocr = new GetOpenCitationsRefs();
 
-		ocr.doExtract(inputPath, outputPath, fileSystem);
+		for (String file : inputFile) {
+			ocr.doExtract(workingPath + "/Original/" + file, workingPath, fileSystem, prefix);
+		}
 
 	}
 
-	private void doExtract(String inputPath, String outputPath, FileSystem fileSystem)
+	private void doExtract(String inputFile, String workingPath, FileSystem fileSystem, String prefix)
 		throws IOException {
 
-		RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
-			.listFiles(
-				new Path(inputPath), true);
-		while (fileStatusListIterator.hasNext()) {
-			LocatedFileStatus fileStatus = fileStatusListIterator.next();
-			// do stuff with the file like ...
-			FSDataInputStream oc_zip = fileSystem.open(fileStatus.getPath());
-			try (ZipInputStream zis = new ZipInputStream(oc_zip)) {
-				ZipEntry entry = null;
-				while ((entry = zis.getNextEntry()) != null) {
+		final Path path = new Path(inputFile);
 
-					if (!entry.isDirectory()) {
-						String fileName = entry.getName();
-						// fileName = fileName.substring(0, fileName.indexOf("T")) + "_" + count;
-						fileName = fileName.substring(0, fileName.lastIndexOf("."));
-						// count++;
-						try (
-							FSDataOutputStream out = fileSystem
-								.create(new Path(outputPath + "/" + fileName + ".gz"));
-							GZIPOutputStream gzipOs = new GZIPOutputStream(new BufferedOutputStream(out))) {
+		FSDataInputStream oc_zip = fileSystem.open(path);
 
-							IOUtils.copy(zis, gzipOs);
+		// int count = 1;
+		try (ZipInputStream zis = new ZipInputStream(oc_zip)) {
+			ZipEntry entry = null;
+			while ((entry = zis.getNextEntry()) != null) {
+
+				if (!entry.isDirectory()) {
+					String fileName = entry.getName();
+					// fileName = fileName.substring(0, fileName.indexOf("T")) + "_" + count;
+					fileName = fileName.substring(0, fileName.lastIndexOf("."));
+					// count++;
+					try (
+						FSDataOutputStream out = fileSystem
+							.create(new Path(workingPath + "/" + prefix + "/" + fileName + ".gz"));
+						GZIPOutputStream gzipOs = new GZIPOutputStream(new BufferedOutputStream(out))) {
+
+						IOUtils.copy(zis, gzipOs);
 
-						}
 					}
 				}
-
 			}
 
 		}
 
 	}
 
 }
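doExtract streams every zip entry straight into its own gzip file under workingPath/prefix, so no entry is ever buffered in full. The same copy loop outside HDFS, as a self-contained local-filesystem sketch:

import java.io.BufferedOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

import org.apache.commons.io.IOUtils;

class ZipToGzipSketch {
	public static void main(String[] args) throws IOException {
		try (ZipInputStream zis = new ZipInputStream(new FileInputStream("archive.zip"))) {
			ZipEntry entry;
			while ((entry = zis.getNextEntry()) != null) {
				if (entry.isDirectory())
					continue;
				String name = entry.getName();
				name = name.substring(0, name.lastIndexOf('.')); // drop the original extension
				try (GZIPOutputStream gz = new GZIPOutputStream(
					new BufferedOutputStream(new FileOutputStream(name + ".gz")))) {
					IOUtils.copy(zis, gz); // reads only up to the end of the current entry
				}
			}
		}
	}
}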
@@ -1,171 +0,0 @@
-
-package eu.dnetlib.dhp.actionmanager.opencitations;
-
-import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.Arrays;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.stream.Collectors;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipInputStream;
-
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.function.FlatMapFunction;
-import org.apache.spark.api.java.function.ForeachFunction;
-import org.apache.spark.api.java.function.MapFunction;
-import org.apache.spark.sql.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import eu.dnetlib.dhp.actionmanager.opencitations.model.COCI;
-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import scala.Tuple2;
-
-/**
- * @author miriam.baglioni
- * @Date 29/02/24
- */
-public class MapOCIdsInPids implements Serializable {
-
-	private static final Logger log = LoggerFactory.getLogger(CreateActionSetSparkJob.class);
-	private static final String DELIMITER = ",";
-
-	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
-	public static void main(final String[] args) throws IOException, ParseException {
-
-		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
-			IOUtils
-				.toString(
-					Objects
-						.requireNonNull(
-							MapOCIdsInPids.class
-								.getResourceAsStream(
-									"/eu/dnetlib/dhp/actionmanager/opencitations/remap_parameters.json"))));
-
-		parser.parseArgument(args);
-
-		Boolean isSparkSessionManaged = Optional
-			.ofNullable(parser.get("isSparkSessionManaged"))
-			.map(Boolean::valueOf)
-			.orElse(Boolean.TRUE);
-
-		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
-
-		final String inputPath = parser.get("inputPath");
-		log.info("inputPath {}", inputPath);
-
-		final String outputPath = parser.get("outputPath");
-		log.info("outputPath {}", outputPath);
-
-		final String nameNode = parser.get("nameNode");
-		log.info("nameNode {}", nameNode);
-
-		unzipCorrespondenceFile(inputPath, nameNode);
-		SparkConf conf = new SparkConf();
-		runWithSparkSession(
-			conf,
-			isSparkSessionManaged,
-			spark -> mapIdentifiers(spark, inputPath, outputPath));
-
-	}
-
-	private static void unzipCorrespondenceFile(String inputPath, String hdfsNameNode) throws IOException {
-		Configuration conf = new Configuration();
-		conf.set("fs.defaultFS", hdfsNameNode);
-
-		final Path path = new Path(inputPath + "/correspondence/omid.zip");
-		FileSystem fileSystem = FileSystem.get(conf);
-
-		FSDataInputStream project_zip = fileSystem.open(path);
-
-		try (ZipInputStream zis = new ZipInputStream(project_zip)) {
-			ZipEntry entry = null;
-			while ((entry = zis.getNextEntry()) != null) {
-
-				if (!entry.isDirectory()) {
-					String fileName = entry.getName();
-					byte buffer[] = new byte[1024];
-					int count;
-
-					try (
-						FSDataOutputStream out = fileSystem
-							.create(new Path(inputPath + "/correspondence/omid.csv"))) {
-
-						while ((count = zis.read(buffer, 0, buffer.length)) != -1)
-							out.write(buffer, 0, count);
-
-					}
-
-				}
-
-			}
-
-		}
-
-	}
-
-	private static void mapIdentifiers(SparkSession spark, String inputPath, String outputPath) {
-		Dataset<COCI> coci = spark
-			.read()
-			.textFile(inputPath + "/JSON")
-			.map(
-				(MapFunction<String, COCI>) value -> OBJECT_MAPPER.readValue(value, COCI.class),
-				Encoders.bean(COCI.class));
-
-		Dataset<Tuple2<String, String>> correspondenceData = spark
-			.read()
-			.format("csv")
-			.option("sep", DELIMITER)
-			.option("inferSchema", "true")
-			.option("header", "true")
-			.option("quotes", "\"")
-			.load(inputPath + "/correspondence/omid.csv")
-			.repartition(5000)
-			.flatMap((FlatMapFunction<Row, Tuple2<String, String>>) r -> {
-				String ocIdentifier = r.getAs("omid");
-				String[] correspondentIdentifiers = ((String) r.getAs("id")).split(" ");
-				return Arrays
-					.stream(correspondentIdentifiers)
-					.map(ci -> new Tuple2<String, String>(ocIdentifier, ci))
-					.collect(Collectors.toList())
-					.iterator();
-			}, Encoders.tuple(Encoders.STRING(), Encoders.STRING()));
-
-		Dataset<COCI> mappedCitingDataset = coci
-			.joinWith(correspondenceData, coci.col("citing").equalTo(correspondenceData.col("_1")))
-			.map((MapFunction<Tuple2<COCI, Tuple2<String, String>>, COCI>) t2 -> {
-				String correspondent = t2._2()._2();
-				t2._1().setCiting_pid(correspondent.substring(0, correspondent.indexOf(":")));
-				t2._1().setCiting(correspondent.substring(correspondent.indexOf(":") + 1));
-				return t2._1();
-			}, Encoders.bean(COCI.class));
-
-		mappedCitingDataset
-			.joinWith(correspondenceData, mappedCitingDataset.col("cited").equalTo(correspondenceData.col("_1")))
-			.map((MapFunction<Tuple2<COCI, Tuple2<String, String>>, COCI>) t2 -> {
-				String correspondent = t2._2()._2();
-				t2._1().setCited_pid(correspondent.substring(0, correspondent.indexOf(":")));
-				t2._1().setCited(correspondent.substring(correspondent.indexOf(":") + 1));
-				return t2._1();
-			}, Encoders.bean(COCI.class))
-			.write()
-			.mode(SaveMode.Append)
-			.option("compression", "gzip")
-			.json(outputPath);
-
-	}
-
-}
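The deleted MapOCIdsInPids job resolved OpenCitations OMIDs through the omid.csv correspondence file, splitting each correspondent identifier at its first colon into a pid type and a pid value. That split in isolation (the sample identifier is invented):

class OmidSplitSketch {
	public static void main(String[] args) {
		String correspondent = "doi:10.1000/example"; // hypothetical omid.csv entry
		String pidType = correspondent.substring(0, correspondent.indexOf(":"));
		String pidValue = correspondent.substring(correspondent.indexOf(":") + 1);
		System.out.println(pidType + " / " + pidValue); // doi / 10.1000/example
	}
}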
@@ -12,9 +12,11 @@ import java.util.Optional;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.function.FilterFunction;
 import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.sql.*;
 import org.slf4j.Logger;

@@ -40,21 +42,19 @@ public class ReadCOCI implements Serializable {
 		final String outputPath = parser.get("outputPath");
 		log.info("outputPath: {}", outputPath);
 
-		final String hdfsNameNode = parser.get("hdfsNameNode");
-		log.info("hdfsNameNode {}", hdfsNameNode);
+		final String[] inputFile = parser.get("inputFile").split(";");
+		log.info("inputFile {}", Arrays.asList(inputFile));
 
 		Boolean isSparkSessionManaged = isSparkSessionManaged(parser);
 		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
 
-		final String workingPath = parser.get("inputPath");
+		final String workingPath = parser.get("workingPath");
 		log.info("workingPath {}", workingPath);
 
+		final String format = parser.get("format");
+		log.info("format {}", format);
+
 		SparkConf sconf = new SparkConf();
 
-		Configuration conf = new Configuration();
-		conf.set("fs.defaultFS", hdfsNameNode);
-
-		FileSystem fileSystem = FileSystem.get(conf);
 		final String delimiter = Optional
 			.ofNullable(parser.get("delimiter"))
 			.orElse(DEFAULT_DELIMITER);

@@ -66,21 +66,20 @@ public class ReadCOCI implements Serializable {
 				doRead(
 					spark,
 					workingPath,
-					fileSystem,
+					inputFile,
 					outputPath,
-					delimiter);
+					delimiter,
+					format);
 			});
 	}
 
-	private static void doRead(SparkSession spark, String workingPath, FileSystem fileSystem,
+	private static void doRead(SparkSession spark, String workingPath, String[] inputFiles,
 		String outputPath,
-		String delimiter) throws IOException {
-		RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
-			.listFiles(
-				new Path(workingPath), true);
-		while (fileStatusListIterator.hasNext()) {
-			LocatedFileStatus fileStatus = fileStatusListIterator.next();
-			log.info("extracting file {}", fileStatus.getPath().toString());
+		String delimiter, String format) {
+
+		for (String inputFile : inputFiles) {
+			String pString = workingPath + "/" + inputFile + ".gz";
+
 			Dataset<Row> cociData = spark
 				.read()
 				.format("csv")

@@ -88,26 +87,26 @@ public class ReadCOCI implements Serializable {
 				.option("inferSchema", "true")
 				.option("header", "true")
 				.option("quotes", "\"")
-				.load(fileStatus.getPath().toString())
+				.load(pString)
 				.repartition(100);
 
 			cociData.map((MapFunction<Row, COCI>) row -> {
 
 				COCI coci = new COCI();
+				if (format.equals("COCI")) {
 					coci.setCiting(row.getString(1));
 					coci.setCited(row.getString(2));
+				} else {
+					coci.setCiting(String.valueOf(row.getInt(1)));
+					coci.setCited(String.valueOf(row.getInt(2)));
+				}
 				coci.setOci(row.getString(0));
 
 				return coci;
 			}, Encoders.bean(COCI.class))
-				.filter((FilterFunction<COCI>) c -> c != null)
 				.write()
-				.mode(SaveMode.Append)
+				.mode(SaveMode.Overwrite)
 				.option("compression", "gzip")
-				.json(outputPath);
-			fileSystem.rename(fileStatus.getPath(), new Path("/tmp/miriam/OC/DONE"));
+				.json(outputPath + inputFile);
 		}
 
 	}
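The new format parameter exists because, with inferSchema enabled, the two dumps type their citing/cited columns differently: COCI carries DOIs, which stay strings, while POCI carries bare PMIDs, which Spark infers as integers, hence row.getInt wrapped in String.valueOf. The row mapping on its own (fragment of the logic above):

COCI coci = new COCI();
if (format.equals("COCI")) {
	coci.setCiting(row.getString(1));
	coci.setCited(row.getString(2));
} else { // POCI: numeric PMIDs are inferred as integers
	coci.setCiting(String.valueOf(row.getInt(1)));
	coci.setCited(String.valueOf(row.getInt(2)));
}
coci.setOci(row.getString(0));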
@@ -9,10 +9,8 @@ public class COCI implements Serializable {
 	private String oci;
 
 	private String citing;
-	private String citing_pid;
 
 	private String cited;
-	private String cited_pid;
 
 	public String getOci() {
 		return oci;

@@ -27,8 +25,6 @@ public class COCI implements Serializable {
 	}
 
 	public void setCiting(String citing) {
-		if (citing != null && citing.startsWith("omid:"))
-			citing = citing.substring(5);
 		this.citing = citing;
 	}
 

@@ -37,24 +33,7 @@ public class COCI implements Serializable {
 	}
 
 	public void setCited(String cited) {
-		if (cited != null && cited.startsWith("omid:"))
-			cited = cited.substring(5);
 		this.cited = cited;
 	}
 
-	public String getCiting_pid() {
-		return citing_pid;
-	}
-
-	public void setCiting_pid(String citing_pid) {
-		this.citing_pid = citing_pid;
-	}
-
-	public String getCited_pid() {
-		return cited_pid;
-	}
-
-	public void setCited_pid(String cited_pid) {
-		this.cited_pid = cited_pid;
-	}
 }
@@ -1,20 +1,12 @@
 
 package eu.dnetlib.dhp.actionmanager.project;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import eu.dnetlib.dhp.actionmanager.project.utils.model.CSVProgramme;
-import eu.dnetlib.dhp.actionmanager.project.utils.model.CSVProject;
-import eu.dnetlib.dhp.actionmanager.project.utils.model.JsonTopic;
-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.common.HdfsSupport;
-import eu.dnetlib.dhp.schema.action.AtomicAction;
-import eu.dnetlib.dhp.schema.common.ModelSupport;
-import eu.dnetlib.dhp.schema.oaf.H2020Classification;
-import eu.dnetlib.dhp.schema.oaf.H2020Programme;
-import eu.dnetlib.dhp.schema.oaf.OafEntity;
-import eu.dnetlib.dhp.schema.oaf.Project;
-import eu.dnetlib.dhp.schema.oaf.utils.MergeUtils;
-import eu.dnetlib.dhp.utils.DHPUtils;
+import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
+
+import java.util.Arrays;
+import java.util.Objects;
+import java.util.Optional;
+
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;

@@ -26,14 +18,24 @@ import org.apache.spark.sql.Encoders;
 import org.apache.spark.sql.SparkSession;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import eu.dnetlib.dhp.actionmanager.project.utils.model.CSVProgramme;
+import eu.dnetlib.dhp.actionmanager.project.utils.model.CSVProject;
+import eu.dnetlib.dhp.actionmanager.project.utils.model.EXCELTopic;
+import eu.dnetlib.dhp.actionmanager.project.utils.model.JsonTopic;
+import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.common.HdfsSupport;
+import eu.dnetlib.dhp.schema.action.AtomicAction;
+import eu.dnetlib.dhp.schema.common.ModelSupport;
+import eu.dnetlib.dhp.schema.oaf.H2020Classification;
+import eu.dnetlib.dhp.schema.oaf.H2020Programme;
+import eu.dnetlib.dhp.schema.oaf.OafEntity;
+import eu.dnetlib.dhp.schema.oaf.Project;
+import eu.dnetlib.dhp.utils.DHPUtils;
 import scala.Tuple2;
 
-import java.util.Arrays;
-import java.util.Objects;
-import java.util.Optional;
-
-import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
 
 /**
  * Class that makes the ActionSet. To prepare the AS two joins are needed
  *

@@ -158,11 +160,9 @@ public class SparkAtomicActionJob {
 				(MapFunction<Project, String>) OafEntity::getId,
 				Encoders.STRING())
 			.mapGroups((MapGroupsFunction<String, Project, Project>) (s, it) -> {
-				Project merge = it.next();
-				while (it.hasNext()) {
-					merge = MergeUtils.mergeProject(merge, it.next());
-				}
-				return merge;
+				Project first = it.next();
+				it.forEachRemaining(first::mergeFrom);
+				return first;
 			}, Encoders.bean(Project.class))
 			.toJavaRDD()
 			.map(p -> new AtomicAction(Project.class, p))
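The mapGroups body now folds all duplicate Project rows into the first one via mergeFrom instead of looping with MergeUtils.mergeProject. The generic shape of that reduction, in isolation (sketch):

import java.util.Iterator;
import java.util.function.BinaryOperator;

class IteratorFoldSketch {

	// mergeFrom plays the role of the combiner in the hunk above; the group is
	// never empty because it contains at least the row that produced the key.
	static <T> T reduce(Iterator<T> it, BinaryOperator<T> combiner) {
		T acc = it.next();
		while (it.hasNext())
			acc = combiner.apply(acc, it.next());
		return acc;
	}
}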
@@ -1,195 +0,0 @@

package eu.dnetlib.dhp.actionmanager.transformativeagreement;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.io.Serializable;
import java.util.*;

import org.apache.commons.cli.ParseException;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.actionmanager.transformativeagreement.model.TransformativeAgreementModel;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.schema.oaf.Country;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.dhp.schema.oaf.Result;
import eu.dnetlib.dhp.schema.oaf.utils.*;
import scala.Tuple2;

public class CreateActionSetSparkJob implements Serializable {

	private static final Logger log = LoggerFactory.getLogger(CreateActionSetSparkJob.class);

	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	private static final String IREL_PROJECT = "40|100018998___::1e5e62235d094afd01cd56e65112fc63";
	private static final String TRANSFORMATIVE_AGREEMENT = "openapc::transformativeagreement";

	public static void main(final String[] args) throws IOException, ParseException {

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
			IOUtils
				.toString(
					Objects
						.requireNonNull(
							CreateActionSetSparkJob.class
								.getResourceAsStream(
									"/eu/dnetlib/dhp/actionmanager/transformativeagreement/as_parameters.json"))));

		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);

		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String inputPath = parser.get("inputPath");
		log.info("inputPath {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath {}", outputPath);

		SparkConf conf = new SparkConf();
		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> createActionSet(spark, inputPath, outputPath));
	}

	private static void createActionSet(SparkSession spark, String inputPath, String outputPath) {
		JavaRDD<AtomicAction> relations = spark
			.read()
			.textFile(inputPath)
			.map(
				(MapFunction<String, TransformativeAgreementModel>) value -> OBJECT_MAPPER
					.readValue(value, TransformativeAgreementModel.class),
				Encoders.bean(TransformativeAgreementModel.class))
			.flatMap(
				(FlatMapFunction<TransformativeAgreementModel, Relation>) value -> createRelation(
					value)
						.iterator(),
				Encoders.bean(Relation.class))
			.filter((FilterFunction<Relation>) Objects::nonNull)
			.toJavaRDD()
			.map(p -> new AtomicAction(p.getClass(), p));
		// TODO relations on stand-by until we know whether we need to create them or not;
		// in case we do, just make a union before saving the sequence file
		spark
			.read()
			.textFile(inputPath)
			.map(
				(MapFunction<String, TransformativeAgreementModel>) value -> OBJECT_MAPPER
					.readValue(value, TransformativeAgreementModel.class),
				Encoders.bean(TransformativeAgreementModel.class))
			.map(
				(MapFunction<TransformativeAgreementModel, Result>) value -> createResult(
					value),
				Encoders.bean(Result.class))
			.filter((FilterFunction<Result>) r -> r != null)
			.toJavaRDD()
			.map(p -> new AtomicAction(p.getClass(), p))
			.mapToPair(
				aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
					new Text(OBJECT_MAPPER.writeValueAsString(aa))))
			.saveAsHadoopFile(
				outputPath, Text.class, Text.class, SequenceFileOutputFormat.class, GzipCodec.class);

	}

	private static Result createResult(TransformativeAgreementModel value) {
		Result r = new Result();
		r
			.setId(
				"50|doi_________::"
					+ IdentifierFactory
						.md5(PidCleaner.normalizePidValue(PidType.doi.toString(), value.getDoi())));
		r.setTransformativeAgreement(value.getAgreement());
		Country country = new Country();
		country.setClassid(value.getCountry());
		country.setClassname(value.getCountry());
		country
			.setDataInfo(
				OafMapperUtils
					.dataInfo(
						false, ModelConstants.SYSIMPORT_ACTIONSET, false, false,
						OafMapperUtils
							.qualifier(
								"openapc::transformativeagreement",
								"Harvested from Transformative Agreement file from OpenAPC",
								ModelConstants.DNET_PROVENANCE_ACTIONS, ModelConstants.DNET_PROVENANCE_ACTIONS),
						"0.9"));
		country.setSchemeid(ModelConstants.DNET_COUNTRY_TYPE);
		country.setSchemename(ModelConstants.DNET_COUNTRY_TYPE);
		r.setCountry(Arrays.asList(country));
		return r;
	}

	private static List<Relation> createRelation(TransformativeAgreementModel value) {

		List<Relation> relationList = new ArrayList<>();

		if (value.getAgreement().startsWith("IReL")) {
			String paper;

			paper = "50|doi_________::"
				+ IdentifierFactory
					.md5(PidCleaner.normalizePidValue(PidType.doi.toString(), value.getDoi()));

			relationList
				.add(
					getRelation(
						paper,
						IREL_PROJECT, ModelConstants.IS_PRODUCED_BY));

			relationList.add(getRelation(IREL_PROJECT, paper, ModelConstants.PRODUCES));
		}
		return relationList;
	}

	public static Relation getRelation(
		String source,
		String target,
		String relClass) {

		return OafMapperUtils
			.getRelation(
				source,
				target,
				ModelConstants.RESULT_PROJECT,
				ModelConstants.OUTCOME,
				relClass,
				Arrays
					.asList(
						OafMapperUtils.keyValue(ModelConstants.OPEN_APC_ID, ModelConstants.OPEN_APC_NAME)),
				OafMapperUtils
					.dataInfo(
						false, null, false, false,
						OafMapperUtils
							.qualifier(
								TRANSFORMATIVE_AGREEMENT, "Transformative Agreement",
								ModelConstants.DNET_PROVENANCE_ACTIONS, ModelConstants.DNET_PROVENANCE_ACTIONS),
						"0.9"),
				null);
	}

}
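The pairing written by mapToPair above follows the action-set serialization convention visible in this job: the key is the payload class name, the value the JSON-serialized action. A minimal sketch of just that step; ActionSetPairing is a stand-in class name:

import org.apache.hadoop.io.Text;

import com.fasterxml.jackson.databind.ObjectMapper;

import scala.Tuple2;

class ActionSetPairing {
	private static final ObjectMapper MAPPER = new ObjectMapper();

	static Tuple2<Text, Text> toPair(Object payload) throws Exception {
		// the class-name key lets a downstream consumer route each record to the right deserializer
		return new Tuple2<>(
			new Text(payload.getClass().getCanonicalName()),
			new Text(MAPPER.writeValueAsString(payload)));
	}
}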
@@ -1,51 +0,0 @@

package eu.dnetlib.dhp.actionmanager.transformativeagreement.model;

import java.io.Serializable;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;

/**
 * @author miriam.baglioni
 * @Date 18/12/23
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class TransformativeAgreementModel implements Serializable {
	private String institution;
	private String doi;
	private String agreement;
	private String country;

	public String getCountry() {
		return country;
	}

	public void setCountry(String country) {
		this.country = country;
	}

	public String getInstitution() {
		return institution;
	}

	public void setInstitution(String institution) {
		this.institution = institution;
	}

	public String getDoi() {
		return doi;
	}

	public void setDoi(String doi) {
		this.doi = doi;
	}

	public String getAgreement() {
		return agreement;
	}

	public void setAgreement(String agreement) {
		this.agreement = agreement;
	}
}
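A hedged sketch of how one line of the transformative-agreement dump maps onto this model through the OBJECT_MAPPER used by the job above; the field values are invented for illustration:

import com.fasterxml.jackson.databind.ObjectMapper;

class TaModelDemo {
	static TransformativeAgreementModel parseLine(String line) throws Exception {
		// @JsonIgnoreProperties(ignoreUnknown = true) silently drops unknown fields such as "extra"
		return new ObjectMapper().readValue(line, TransformativeAgreementModel.class);
	}

	public static void main(String[] args) throws Exception {
		String line = "{\"institution\":\"X\",\"doi\":\"10.1234/abc\",\"agreement\":\"IReL\",\"country\":\"IE\",\"extra\":1}";
		System.out.println(parseLine(line).getAgreement()); // prints: IReL
	}
}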
@@ -1,244 +0,0 @@

package eu.dnetlib.dhp.collection.orcid;

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.BlockingQueue;

import javax.swing.*;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.http.HttpHeaders;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.common.collection.HttpClientParams;

public class ORCIDWorker extends Thread {

	final static Logger log = LoggerFactory.getLogger(ORCIDWorker.class);

	public static String JOB_COMPLETE = "JOB_COMPLETE";

	private static final String userAgent = "Mozilla/5.0 (compatible; OAI; +http://www.openaire.eu)";

	private final BlockingQueue<String> queue;

	private boolean hasComplete = false;

	private final SequenceFile.Writer employments;

	private final SequenceFile.Writer summary;
	private final SequenceFile.Writer works;

	private final String token;

	private final String id;

	public static ORCIDWorkerBuilder builder() {
		return new ORCIDWorkerBuilder();
	}

	public ORCIDWorker(String id, BlockingQueue<String> myqueue, SequenceFile.Writer employments,
		SequenceFile.Writer summary, SequenceFile.Writer works, String token) {
		this.id = id;
		this.queue = myqueue;
		this.employments = employments;
		this.summary = summary;
		this.works = works;
		this.token = token;
	}

	public static String retrieveURL(final String id, final String apiUrl, String token) {
		try {
			final HttpURLConnection urlConn = getHttpURLConnection(apiUrl, token);
			if (urlConn.getResponseCode() > 199 && urlConn.getResponseCode() < 300) {
				InputStream input = urlConn.getInputStream();
				return IOUtils.toString(input);
			} else {
				log
					.error(
						"Thread {} UNABLE TO DOWNLOAD FROM THIS URL {} , status code {}", id, apiUrl,
						urlConn.getResponseCode());
			}
		} catch (Exception e) {
			log.error("Thread {} Error on retrieving URL {} {}", id, apiUrl, e);
		}
		return null;
	}

	@NotNull
	private static HttpURLConnection getHttpURLConnection(String apiUrl, String token) throws IOException {
		final HttpURLConnection urlConn = (HttpURLConnection) new URL(apiUrl).openConnection();
		final HttpClientParams clientParams = new HttpClientParams();
		urlConn.setInstanceFollowRedirects(false);
		urlConn.setReadTimeout(clientParams.getReadTimeOut() * 1000);
		urlConn.setConnectTimeout(clientParams.getConnectTimeOut() * 1000);
		urlConn.addRequestProperty(HttpHeaders.USER_AGENT, userAgent);
		urlConn.addRequestProperty(HttpHeaders.AUTHORIZATION, String.format("Bearer %s", token));
		return urlConn;
	}

	private static String generateSummaryURL(final String orcidId) {
		return "https://api.orcid.org/v3.0/" + orcidId + "/record";
	}

	private static String generateWorksURL(final String orcidId) {
		return "https://api.orcid.org/v3.0/" + orcidId + "/works";
	}

	private static String generateEmploymentsURL(final String orcidId) {
		return "https://api.orcid.org/v3.0/" + orcidId + "/employments";
	}

	private static void writeResultToSequenceFile(String id, String url, String token, String orcidId,
		SequenceFile.Writer file) throws IOException {
		final String response = retrieveURL(id, url, token);
		if (response != null) {
			if (orcidId == null) {
				log.error("Thread {} {} {}", id, orcidId, response);
				throw new RuntimeException("null items ");
			}

			if (file == null) {
				log.error("Thread {} file is null for {} URL:{}", id, url, orcidId);
			} else {
				file.append(new Text(orcidId), new Text(response));
				file.hflush();
			}

		} else
			log.error("Thread {} response is null for {} URL:{}", id, url, orcidId);

	}

	@Override
	public void run() {
		final Text key = new Text();
		final Text value = new Text();
		long start;
		long total_time;
		String orcidId = "";
		int requests = 0;
		if (summary == null || employments == null || works == null)
			throw new RuntimeException("Null files");

		while (!hasComplete) {
			try {

				orcidId = queue.take();

				if (orcidId.equalsIgnoreCase(JOB_COMPLETE)) {
					hasComplete = true;
				} else {
					start = System.currentTimeMillis();
					writeResultToSequenceFile(id, generateSummaryURL(orcidId), token, orcidId, summary);
					total_time = System.currentTimeMillis() - start;
					requests++;
					if (total_time < 1000) {
						// I know sleeping in a thread is bad, but we need to stay at 24 requests
						// per second, hence the time between two HTTP requests in a thread
						// must be at least 1 second
						Thread.sleep(1000L - total_time);
					}
					start = System.currentTimeMillis();
					writeResultToSequenceFile(id, generateWorksURL(orcidId), token, orcidId, works);
					total_time = System.currentTimeMillis() - start;
					requests++;
					if (total_time < 1000) {
						// I know sleeping in a thread is bad, but we need to stay at 24 requests
						// per second, hence the time between two HTTP requests in a thread
						// must be at least 1 second
						Thread.sleep(1000L - total_time);
					}
					start = System.currentTimeMillis();
					writeResultToSequenceFile(id, generateEmploymentsURL(orcidId), token, orcidId, employments);
					total_time = System.currentTimeMillis() - start;
					requests++;
					if (total_time < 1000) {
						// I know sleeping in a thread is bad, but we need to stay at 24 requests
						// per second, hence the time between two HTTP requests in a thread
						// must be at least 1 second
						Thread.sleep(1000L - total_time);
					}
					if (requests % 30 == 0) {
						log.info("Thread {} Downloaded {}", id, requests);
					}
				}

			} catch (Throwable e) {

				log.error("Thread {} Unable to save ORCID: {} item error", id, orcidId, e);

			}

		}
		try {
			works.close();
			summary.close();
			employments.close();
		} catch (Throwable e) {
			throw new RuntimeException(e);
		}

		log.info("Thread {} COMPLETE ", id);
		log.info("Thread {} Downloaded {}", id, requests);

	}

	public static class ORCIDWorkerBuilder {

		private String id;
		private SequenceFile.Writer employments;
		private SequenceFile.Writer summary;
		private SequenceFile.Writer works;
		private BlockingQueue<String> queue;

		private String token;

		public ORCIDWorkerBuilder withId(final String id) {
			this.id = id;
			return this;
		}

		public ORCIDWorkerBuilder withEmployments(final SequenceFile.Writer sequenceFile) {
			this.employments = sequenceFile;
			return this;
		}

		public ORCIDWorkerBuilder withSummary(final SequenceFile.Writer sequenceFile) {
			this.summary = sequenceFile;
			return this;
		}

		public ORCIDWorkerBuilder withWorks(final SequenceFile.Writer sequenceFile) {
			this.works = sequenceFile;
			return this;
		}

		public ORCIDWorkerBuilder withAccessToken(final String accessToken) {
			this.token = accessToken;
			return this;
		}

		public ORCIDWorkerBuilder withBlockingQueue(final BlockingQueue<String> queue) {
			this.queue = queue;
			return this;
		}

		public ORCIDWorker build() {
			if (this.summary == null || this.works == null || this.employments == null || StringUtils.isEmpty(token)
				|| queue == null)
				throw new RuntimeException("Unable to build missing required params");
			return new ORCIDWorker(id, queue, employments, summary, works, token);
		}

	}

}
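The three identical sleep blocks in run() implement a per-thread throttle: if a request completed in under a second, the thread sleeps off the remainder, so a single worker never exceeds one request per second. A condensed sketch of the idea; with the 22 workers started by the collector below, this caps the aggregate rate at 22 requests per second, just under the 24 the comment allows:

class Throttle {
	// Pad each iteration up to one full second so a single thread never exceeds 1 request/s.
	static void throttled(Runnable httpCall) throws InterruptedException {
		final long start = System.currentTimeMillis();
		httpCall.run();
		final long elapsed = System.currentTimeMillis() - start;
		if (elapsed < 1000) {
			Thread.sleep(1000L - elapsed); // sleep only for the remainder of the second
		}
	}
}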
@@ -1,171 +0,0 @@

package eu.dnetlib.dhp.collection.orcid;

import static eu.dnetlib.dhp.utils.DHPUtils.getHadoopConfiguration;

import java.io.*;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.collection.HttpClientParams;

public class OrcidGetUpdatesFile {

	private static Logger log = LoggerFactory.getLogger(OrcidGetUpdatesFile.class);

	public static void main(String[] args) throws Exception {

		ArgumentApplicationParser parser = new ArgumentApplicationParser(
			IOUtils
				.toString(
					Objects
						.requireNonNull(
							OrcidGetUpdatesFile.class
								.getResourceAsStream(
									"/eu/dnetlib/dhp/collection/orcid/download_orcid_update_parameter.json")))

		);
		parser.parseArgument(args);

		final String namenode = parser.get("namenode");
		log.info("got variable namenode: {}", namenode);

		final String master = parser.get("master");
		log.info("got variable master: {}", master);

		final String targetPath = parser.get("targetPath");
		log.info("got variable targetPath: {}", targetPath);

		final String apiURL = parser.get("apiURL");
		log.info("got variable apiURL: {}", apiURL);

		final String accessToken = parser.get("accessToken");
		log.info("got variable accessToken: {}", accessToken);

		final String graphPath = parser.get("graphPath");
		log.info("got variable graphPath: {}", graphPath);

		final SparkSession spark = SparkSession
			.builder()
			.appName(OrcidGetUpdatesFile.class.getName())
			.master(master)
			.getOrCreate();

		final String latestDate = spark
			.read()
			.load(graphPath + "/Authors")
			.selectExpr("max(lastModifiedDate)")
			.first()
			.getString(0);

		log.info("latest date is {}", latestDate);

		final FileSystem fileSystem = FileSystem.get(getHadoopConfiguration(namenode));

		new OrcidGetUpdatesFile().readTar(fileSystem, accessToken, apiURL, targetPath, latestDate);

	}

	private SequenceFile.Writer createFile(Path aPath, FileSystem fileSystem) throws IOException {
		return SequenceFile
			.createWriter(
				fileSystem.getConf(),
				SequenceFile.Writer.file(aPath),
				SequenceFile.Writer.keyClass(Text.class),
				SequenceFile.Writer.valueClass(Text.class));
	}

	private ORCIDWorker createWorker(final String id, final String targetPath, final BlockingQueue<String> queue,
		final String accessToken, FileSystem fileSystem) throws Exception {
		return ORCIDWorker
			.builder()
			.withId(id)
			.withEmployments(createFile(new Path(String.format("%s/employments_%s", targetPath, id)), fileSystem))
			.withSummary(createFile(new Path(String.format("%s/summary_%s", targetPath, id)), fileSystem))
			.withWorks(createFile(new Path(String.format("%s/works_%s", targetPath, id)), fileSystem))
			.withAccessToken(accessToken)
			.withBlockingQueue(queue)
			.build();
	}

	public void readTar(FileSystem fileSystem, final String accessToken, final String apiURL, final String targetPath,
		final String startDate) throws Exception {
		final HttpURLConnection urlConn = (HttpURLConnection) new URL(apiURL).openConnection();
		final HttpClientParams clientParams = new HttpClientParams();
		urlConn.setInstanceFollowRedirects(false);
		urlConn.setReadTimeout(clientParams.getReadTimeOut() * 1000);
		urlConn.setConnectTimeout(clientParams.getConnectTimeOut() * 1000);
		if (urlConn.getResponseCode() > 199 && urlConn.getResponseCode() < 300) {
			InputStream input = urlConn.getInputStream();

			Path hdfsWritePath = new Path("/tmp/orcid_updates.tar.gz");
			final FSDataOutputStream fsDataOutputStream = fileSystem.create(hdfsWritePath, true);
			IOUtils.copy(input, fsDataOutputStream);
			fsDataOutputStream.flush();
			fsDataOutputStream.close();
			FSDataInputStream updateFile = fileSystem.open(hdfsWritePath);
			TarArchiveInputStream tais = new TarArchiveInputStream(new GzipCompressorInputStream(
				new BufferedInputStream(
					updateFile.getWrappedStream())));
			TarArchiveEntry entry;

			BlockingQueue<String> queue = new ArrayBlockingQueue<String>(3000);
			final List<ORCIDWorker> workers = new ArrayList<>();
			for (int i = 0; i < 22; i++) {
				workers.add(createWorker("" + i, targetPath, queue, accessToken, fileSystem));
			}
			workers.forEach(Thread::start);

			while ((entry = tais.getNextTarEntry()) != null) {

				if (entry.isFile()) {

					BufferedReader br = new BufferedReader(new InputStreamReader(tais));
					System.out.println(br.readLine());
					br
						.lines()
						.map(l -> l.split(","))
						.filter(s -> StringUtils.compare(s[3].substring(0, 10), startDate) > 0)
						.map(s -> s[0])
						.forEach(s -> {
							try {
								queue.put(s);
							} catch (InterruptedException e) {
								throw new RuntimeException(e);
							}
						});

				}
			}

			for (int i = 0; i < 22; i++) {
				queue.put(ORCIDWorker.JOB_COMPLETE);
			}
			for (ORCIDWorker worker : workers) {
				worker.join();
			}
		}

	}
}
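The shutdown in readTar relies on a poison-pill protocol: the producer enqueues exactly one JOB_COMPLETE marker per worker, so each consumer sees one marker, exits its loop, and join() then completes. A self-contained sketch of the pattern:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

class PoisonPillDemo {
	static final String JOB_COMPLETE = "JOB_COMPLETE";

	public static void main(String[] args) throws InterruptedException {
		final BlockingQueue<String> queue = new ArrayBlockingQueue<>(100);
		final int workers = 3;
		final Thread[] threads = new Thread[workers];
		for (int i = 0; i < workers; i++) {
			threads[i] = new Thread(() -> {
				try {
					String item;
					while (!(item = queue.take()).equals(JOB_COMPLETE)) { // stop on the pill
						System.out.println(Thread.currentThread().getName() + " -> " + item);
					}
				} catch (InterruptedException e) {
					Thread.currentThread().interrupt();
				}
			});
			threads[i].start();
		}
		for (int i = 0; i < 10; i++) {
			queue.put("orcid-" + i); // produce work items
		}
		for (int i = 0; i < workers; i++) {
			queue.put(JOB_COMPLETE); // one pill per worker
		}
		for (Thread t : threads) {
			t.join(); // every worker has drained its share and exited
		}
	}
}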
@@ -1,15 +1,11 @@

 package eu.dnetlib.dhp.collection.orcid;

-import java.util.*;
-import java.util.stream.Collectors;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;

 import org.apache.commons.lang3.StringUtils;
-import org.dom4j.Document;
-import org.dom4j.DocumentFactory;
-import org.dom4j.DocumentHelper;
-import org.dom4j.Node;
-import org.jetbrains.annotations.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -44,8 +40,8 @@ public class OrcidParser {
 	private static final String NS_ERROR = "error";
 	private static final String NS_HISTORY = "history";
 	private static final String NS_HISTORY_URL = "http://www.orcid.org/ns/history";
-	private static final String NS_EMPLOYMENT = "employment";
-	private static final String NS_EMPLOYMENT_URL = "http://www.orcid.org/ns/employment";
+	private static final String NS_BULK_URL = "http://www.orcid.org/ns/bulk";
+	private static final String NS_BULK = "bulk";
 	private static final String NS_EXTERNAL = "external-identifier";
 	private static final String NS_EXTERNAL_URL = "http://www.orcid.org/ns/external-identifier";

@@ -65,7 +61,6 @@ public class OrcidParser {
 		ap.declareXPathNameSpace(NS_WORK, NS_WORK_URL);
 		ap.declareXPathNameSpace(NS_EXTERNAL, NS_EXTERNAL_URL);
 		ap.declareXPathNameSpace(NS_ACTIVITIES, NS_ACTIVITIES_URL);
-		ap.declareXPathNameSpace(NS_EMPLOYMENT, NS_EMPLOYMENT_URL);
 	}

 	public Author parseSummary(final String xml) {

@@ -75,15 +70,13 @@ public class OrcidParser {
 			generateParsedDocument(xml);
 			List<VtdUtilityParser.Node> recordNodes = VtdUtilityParser
 				.getTextValuesWithAttributes(
-					ap, vn, "//record:record", Collections.singletonList("path"));
+					ap, vn, "//record:record", Arrays.asList("path"));
 			if (!recordNodes.isEmpty()) {
 				final String oid = (recordNodes.get(0).getAttributes().get("path")).substring(1);
 				author.setOrcid(oid);
 			} else {
 				return null;
 			}
-			final String ltm = VtdUtilityParser.getSingleValue(ap, vn, "//common:last-modified-date");
-			author.setLastModifiedDate(ltm);
 			List<VtdUtilityParser.Node> personNodes = VtdUtilityParser
 				.getTextValuesWithAttributes(
 					ap, vn, "//person:name", Arrays.asList("visibility"));

@@ -136,64 +129,6 @@ public class OrcidParser {
 		}
 	}

-	public List<Work> parseWorks(final String xml) {
-
-		try {
-			String oid;
-
-			generateParsedDocument(xml);
-			List<VtdUtilityParser.Node> workNodes = VtdUtilityParser
-				.getTextValuesWithAttributes(ap, vn, "//activities:works", Arrays.asList("path", "visibility"));
-			if (!workNodes.isEmpty()) {
-				oid = (workNodes.get(0).getAttributes().get("path")).split("/")[1];
-
-			} else {
-				return null;
-			}
-			final List<Work> works = new ArrayList<>();
-			ap.selectXPath("//work:work-summary");
-
-			while (ap.evalXPath() != -1) {
-				final Work work = new Work();
-				work.setOrcid(oid);
-				final AutoPilot ap1 = new AutoPilot(ap.getNav());
-				ap1.selectXPath("./work:title/common:title");
-				while (ap1.evalXPath() != -1) {
-					int it = vn.getText();
-					work.setTitle(vn.toNormalizedString(it));
-				}
-				ap1.selectXPath(".//common:external-id");
-				while (ap1.evalXPath() != -1) {
-					final Pid pid = new Pid();
-
-					final AutoPilot ap2 = new AutoPilot(ap1.getNav());
-
-					ap2.selectXPath("./common:external-id-type");
-					while (ap2.evalXPath() != -1) {
-						int it = vn.getText();
-						pid.setSchema(vn.toNormalizedString(it));
-					}
-					ap2.selectXPath("./common:external-id-value");
-					while (ap2.evalXPath() != -1) {
-						int it = vn.getText();
-						pid.setValue(vn.toNormalizedString(it));
-					}
-
-					work.addPid(pid);
-				}
-
-				works.add(work);
-			}
-			return works;
-
-		} catch (Throwable e) {
-			log.error("Error on parsing {}", xml);
-			log.error(e.getMessage());
-			return null;
-		}
-
-	}
-
 	public Work parseWork(final String xml) {

 		try {

@@ -241,15 +176,11 @@ public class OrcidParser {
 	}

 	private String extractEmploymentDate(final String xpath) throws Exception {
-		return extractEmploymentDate(xpath, ap);
-	}
-
-	private String extractEmploymentDate(final String xpath, AutoPilot pp) throws Exception {
+		ap.selectXPath(xpath);

-		pp.selectXPath(xpath);
 		StringBuilder sb = new StringBuilder();
-		while (pp.evalXPath() != -1) {
-			final AutoPilot ap1 = new AutoPilot(pp.getNav());
+		while (ap.evalXPath() != -1) {
+			final AutoPilot ap1 = new AutoPilot(ap.getNav());
 			ap1.selectXPath("./common:year");
 			while (ap1.evalXPath() != -1) {
 				int it = vn.getText();

@@ -272,104 +203,6 @@ public class OrcidParser {

 	}

-	public List<Employment> parseEmployments(final String xml) {
-		try {
-			String oid;
-			Map<String, String> nsContext = getNameSpaceMap();
-			DocumentFactory.getInstance().setXPathNamespaceURIs(nsContext);
-			Document doc = DocumentHelper.parseText(xml);
-			oid = doc.valueOf("//activities:employments/@path");
-			if (oid == null || StringUtils.isEmpty(oid))
-				return null;
-			final String orcid = oid.split("/")[1];
-
-			List<Node> nodes = doc.selectNodes("//employment:employment-summary");
-			return nodes.stream().map(n -> {
-				final Employment e = new Employment();
-				e.setOrcid(orcid);
-
-				final String depName = n.valueOf(".//common:department-name");
-				if (StringUtils.isNotBlank(depName))
-					e.setDepartmentName(depName);
-				final String roleTitle = n.valueOf(".//common:role-title");
-				e.setRoleTitle(roleTitle);
-				final String organizationName = n.valueOf(".//common:organization/common:name");
-				if (StringUtils.isEmpty(e.getDepartmentName()))
-					e.setDepartmentName(organizationName);
-				final Pid p = new Pid();
-				final String pid = n
-					.valueOf(
-						"./common:organization/common:disambiguated-organization/common:disambiguated-organization-identifier");
-				p.setValue(pid);
-				final String pidType = n
-					.valueOf("./common:organization/common:disambiguated-organization/common:disambiguation-source");
-				p.setSchema(pidType);
-				e.setAffiliationId(p);
-
-				final StringBuilder aDate = new StringBuilder();
-				final String sy = n.valueOf("./common:start-date/common:year");
-				if (StringUtils.isNotBlank(sy)) {
-					aDate.append(sy);
-					final String sm = n.valueOf("./common:start-date/common:month");
-					final String sd = n.valueOf("./common:start-date/common:day");
-					aDate.append("-");
-					if (StringUtils.isNotBlank(sm))
-						aDate.append(sm);
-					else
-						aDate.append("01");
-					aDate.append("-");
-					if (StringUtils.isNotBlank(sd))
-						aDate.append(sd);
-					else
-						aDate.append("01");
-					e.setEndDate(aDate.toString());
-				}
-
-				final String ey = n.valueOf("./common:end-date/common:year");
-				if (StringUtils.isNotBlank(ey)) {
-					aDate.append(ey);
-					final String em = n.valueOf("./common:end-date/common:month");
-					final String ed = n.valueOf("./common:end-date/common:day");
-					aDate.append("-");
-					if (StringUtils.isNotBlank(em))
-						aDate.append(em);
-					else
-						aDate.append("01");
-					aDate.append("-");
-					if (StringUtils.isNotBlank(ed))
-						aDate.append(ed);
-					else
-						aDate.append("01");
-					e.setEndDate(aDate.toString());
-				}
-
-				return e;
-
-			}).collect(Collectors.toList());
-		} catch (Throwable e) {
-			log.error("Error on parsing {}", xml);
-			log.error(e.getMessage());
-			return null;
-		}
-	}
-
-	@NotNull
-	private static Map<String, String> getNameSpaceMap() {
-		Map<String, String> nsContext = new HashMap<>();
-		nsContext.put(NS_COMMON, NS_COMMON_URL);
-		nsContext.put(NS_PERSON, NS_PERSON_URL);
-		nsContext.put(NS_DETAILS, NS_DETAILS_URL);
-		nsContext.put(NS_OTHER, NS_OTHER_URL);
-		nsContext.put(NS_RECORD, NS_RECORD_URL);
-		nsContext.put(NS_ERROR, NS_ERROR_URL);
-		nsContext.put(NS_HISTORY, NS_HISTORY_URL);
-		nsContext.put(NS_WORK, NS_WORK_URL);
-		nsContext.put(NS_EXTERNAL, NS_EXTERNAL_URL);
-		nsContext.put(NS_ACTIVITIES, NS_ACTIVITIES_URL);
-		nsContext.put(NS_EMPLOYMENT, NS_EMPLOYMENT_URL);
-		return nsContext;
-	}
-
 	public Employment parseEmployment(final String xml) {
 		try {
 			final Employment employment = new Employment();
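The removed parseEmployments assembles partial start and end dates by padding a missing month or day with "01". A stand-alone sketch of that convention; buildDate is an invented helper name, not part of the parser:

class EmploymentDates {
	// Year is required; month and day fall back to "01": buildDate("2018", "", "") -> "2018-01-01".
	static String buildDate(final String year, final String month, final String day) {
		if (year == null || year.isEmpty()) {
			return null; // no year, no date
		}
		final StringBuilder sb = new StringBuilder(year);
		sb.append('-').append(month == null || month.isEmpty() ? "01" : month);
		sb.append('-').append(day == null || day.isEmpty() ? "01" : day);
		return sb.toString();
	}
}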
@@ -18,8 +18,6 @@ public class Author extends ORCIDItem {

 	private String biography;

-	private String lastModifiedDate;
-
 	public String getBiography() {
 		return biography;
 	}

@@ -76,14 +74,6 @@ public class Author extends ORCIDItem {
 		this.otherPids = otherPids;
 	}

-	public String getLastModifiedDate() {
-		return lastModifiedDate;
-	}
-
-	public void setLastModifiedDate(String lastModifiedDate) {
-		this.lastModifiedDate = lastModifiedDate;
-	}
-
 	public void addOtherPid(final Pid pid) {

 		if (otherPids == null)
@@ -52,6 +52,8 @@ public class RestIterator implements Iterator<String> {

 	private final String BASIC = "basic";

+	private final JsonUtils jsonUtils;
+
 	private final String baseUrl;
 	private final String resumptionType;
 	private final String resumptionParam;

@@ -104,6 +106,7 @@ public class RestIterator implements Iterator<String> {
 		final String resultOutputFormat) {

 		this.clientParams = clientParams;
+		this.jsonUtils = new JsonUtils();
 		this.baseUrl = baseUrl;
 		this.resumptionType = resumptionType;
 		this.resumptionParam = resumptionParam;

@@ -123,7 +126,6 @@ public class RestIterator implements Iterator<String> {
 		} catch (Exception e) {
 			throw new IllegalStateException("xml transformation init failed: " + e.getMessage());
 		}
-
 		initQueue();
 	}

@@ -188,7 +190,7 @@ public class RestIterator implements Iterator<String> {
 		String resultJson;
 		String resultXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
 		String nextQuery = "";
-		String emptyXml = resultXml + "<" + JsonUtils.XML_WRAP_TAG + "></" + JsonUtils.XML_WRAP_TAG + ">";
+		String emptyXml = resultXml + "<" + JsonUtils.wrapName + "></" + JsonUtils.wrapName + ">";
 		Node resultNode = null;
 		NodeList nodeList = null;
 		String qUrlArgument = "";

@@ -229,7 +231,7 @@ public class RestIterator implements Iterator<String> {
 			resultStream = theHttpInputStream;
 			if ("json".equals(resultOutputFormat)) {
 				resultJson = IOUtils.toString(resultStream, StandardCharsets.UTF_8);
-				resultXml = JsonUtils.convertToXML(resultJson);
+				resultXml = jsonUtils.convertToXML(resultJson);
 				resultStream = IOUtils.toInputStream(resultXml, UTF_8);
 			}
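The emptyXml value above lets the iterator recognize a JSON page with no records: such a page converts to nothing but the XML header plus the bare wrapper element. A hedged sketch of that check; the surrounding paging logic is not shown in this hunk, and the equality test is illustrative:

class EmptyPageCheck {
	// An empty JSON page converts to just the XML header plus the bare wrapper element.
	static boolean isEmptyPage(final String resultXml) {
		final String header = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
		final String emptyXml = header + "<" + JsonUtils.wrapName + "></" + JsonUtils.wrapName + ">";
		return emptyXml.equals(resultXml);
	}
}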
@@ -3,142 +3,82 @@ package eu.dnetlib.dhp.collection.plugin.utils;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.json.JSONArray;
-import org.json.JSONObject;

 public class JsonUtils {
-	public static final String XML_WRAP_TAG = "recordWrap";
-	private static final String XML_HEADER = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
-	private static final String INVALID_XMLTAG_CHARS = "!\"#$%&'()*+,/;<=>?@[\\]^`{|}~,";

 	private static final Log log = LogFactory.getLog(JsonUtils.class);

+	public static final String wrapName = "recordWrap";
+
 	/**
-	 * cleanup in JSON-KeyName
+	 * convert in JSON-KeyName 'whitespace(s)' to '_' and '/' to '_', '(' and ')' to ''
 	 * check W3C XML syntax: https://www.w3.org/TR/2006/REC-xml11-20060816/#sec-starttags for valid tag names
 	 * and work-around for the JSON to XML converting of org.json.XML-package.
 	 *
-	 * @param input
-	 * @return converted json object
+	 * known bugs: doesn't prevent "key name":" ["sexy name",": penari","erotic dance"],
+	 *
+	 * @param jsonInput
+	 * @return convertedJsonKeynameOutput
 	 */
-	public static JSONObject cleanJsonObject(final JSONObject input) {
-		if (null == input) {
-			return null;
-		}
-
-		JSONObject result = new JSONObject();
-
-		for (String key : input.keySet()) {
-			Object value = input.opt(key);
-			if (value != null) {
-				result.put(cleanKey(key), cleanValue(value));
-			}
-		}
-
-		return result;
-	}
-
-	private static Object cleanValue(Object object) {
-		if (object instanceof JSONObject) {
-			return cleanJsonObject((JSONObject) object);
-		} else if (object instanceof JSONArray) {
-			JSONArray array = (JSONArray) object;
-			JSONArray res = new JSONArray();
-
-			for (int i = array.length() - 1; i >= 0; i--) {
-				res.put(i, cleanValue(array.opt(i)));
-			}
-			return res;
-		} else if (object instanceof String) {
-			String value = (String) object;
-
-			// XML 1.0 allowed characters
-			// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
-
-			return value
-				.codePoints()
-				.filter(
-					cp -> cp == 0x9 || cp == 0xA || cp == 0xD || (cp >= 0x20 && cp <= 0xD7FF)
-						|| (cp >= 0xE000 && cp <= 0xFFFD)
-						|| (cp >= 0x10000 && cp <= 0x10FFFF))
-				.collect(
-					StringBuilder::new,
-					StringBuilder::appendCodePoint,
-					StringBuilder::append)
-				.toString();
-		}
-
-		return object;
-	}
-
-	private static String cleanKey(String key) {
-		if (key == null || key.isEmpty()) {
-			return key;
-		}
-
-		// xml tag cannot begin with "-", ".", or a numeric digit.
-		switch (key.charAt(0)) {
-			case '-':
-			case '.':
-				key = "_" + key.substring(1);
-				break;
-		}
-
-		if (Character.isDigit(key.charAt(0))) {
-			if (key.matches("^[0-9]+$")) {
-				// add prefix 'm_' to key names made of numbers only
-				key = "m_" + key;
-			} else {
-				// add prefix 'n_' to key names that start with a number
-				key = "n_" + key;
-			}
-		}
-
-		StringBuilder res = new StringBuilder(key.length());
-		for (int i = 0; i < key.length(); i++) {
-			char c = key.charAt(i);
-
-			// sequences of whitespace are rendered as a single '_'
-			if (Character.isWhitespace(c)) {
-				while (i + 1 < key.length() && Character.isWhitespace(key.charAt(i + 1))) {
-					i++;
-				}
-				res.append('_');
-			}
-			// remove invalid chars for xml tags, with the exception of '=' and '/'
-			else if (INVALID_XMLTAG_CHARS.indexOf(c) >= 0) {
-				switch (c) {
-					case '=':
-						res.append('-');
-						break;
-					case '/':
-						res.append('_');
-						break;
-					default:
-						break;
-				}
-				// nothing
-			}
-			// all other chars are kept
-			else {
-				res.append(c);
-			}
-		}
-
-		return res.toString();
-	}
-
-	static public String convertToXML(final String jsonRecord) {
-		if (log.isTraceEnabled()) {
-			log.trace("input json: " + jsonRecord);
-		}
-
-		JSONObject jsonObject = cleanJsonObject(new org.json.JSONObject(jsonRecord));
-		String res = XML_HEADER + org.json.XML.toString(jsonObject, XML_WRAP_TAG); // wrap xml in single root element
-
-		if (log.isTraceEnabled()) {
-			log.trace("output xml: " + res);
-		}
-		return res;
-	}
+	public String syntaxConvertJsonKeyNames(String jsonInput) {
+
+		log.trace("before convertJsonKeyNames: " + jsonInput);
+		// pre-clean json - rid spaces of element names (misinterpreted as elements with attributes in xml)
+		// replace ' 's in JSON names with '_'
+		while (jsonInput.matches(".*\"([^\"]*)\\s+([^\"]*)\":.*")) {
+			jsonInput = jsonInput.replaceAll("\"([^\"]*)\\s+([^\"]*)\":", "\"$1_$2\":");
+		}
+
+		// replace forward-slash (sign '/') in JSON names with '_'
+		while (jsonInput.matches(".*\"([^\"]*)/([^\"]*)\":.*")) {
+			jsonInput = jsonInput.replaceAll("\"([^\"]*)/([^\"]*)\":", "\"$1_$2\":");
+		}
+
+		// replace '(' in JSON names with ''
+		while (jsonInput.matches(".*\"([^\"]*)[(]([^\"]*)\":.*")) {
+			jsonInput = jsonInput.replaceAll("\"([^\"]*)[(]([^\"]*)\":", "\"$1$2\":");
+		}
+
+		// replace ')' in JSON names with ''
+		while (jsonInput.matches(".*\"([^\"]*)[)]([^\"]*)\":.*")) {
+			jsonInput = jsonInput.replaceAll("\"([^\"]*)[)]([^\"]*)\":", "\"$1$2\":");
+		}
+
+		// add prefix 'n_' to JSON key names that start with a number
+		while (jsonInput.matches(".*\"([^\"][0-9])([^\"]*)\":.*")) {
+			jsonInput = jsonInput.replaceAll("\"([^\"][0-9])([^\"]*)\":", "\"n_$1$2\":");
+		}
+		// add prefix 'm_' to JSON key names made of numbers only
+		while (jsonInput.matches(".*\"([0-9]+)\":.*")) {
+			jsonInput = jsonInput.replaceAll("\"([0-9]+)\":", "\"m_$1\":");
+		}
+
+		// remove ':' between numbers, as in '2018-08-28T11:05:00Z', in JSON key names
+		while (jsonInput.matches(".*\"([^\"]*[0-9]):([0-9][^\"]*)\":.*")) {
+			jsonInput = jsonInput.replaceAll("\"([^\"]*[0-9]):([0-9][^\"]*)\":", "\"$1$2\":");
+		}
+
+		// replace ',' in JSON key names with '.' to prevent ',' in xml tag names
+		// while (jsonInput.matches(".*\"([^\"]*),([^\"]*)\":.*")) {
+		// 	jsonInput = jsonInput.replaceAll("\"([^\"]*),([^\"]*)\":", "\"$1.$2\":");
+		// }
+
+		// replace '=' in JSON key names with '-'
+		while (jsonInput.matches(".*\"([^\"]*)=([^\"]*)\":.*")) {
+			jsonInput = jsonInput.replaceAll("\"([^\"]*)=([^\"]*)\":", "\"$1-$2\":");
+		}
+
+		log.trace("after syntaxConvertJsonKeyNames: " + jsonInput);
+		return jsonInput;
+	}
+
+	public String convertToXML(final String jsonRecord) {
+		String resultXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
+		org.json.JSONObject jsonObject = new org.json.JSONObject(syntaxConvertJsonKeyNames(jsonRecord));
+		resultXml += org.json.XML.toString(jsonObject, wrapName); // wrap xml in single root element
+		log.trace("before inputStream: " + resultXml);
+		resultXml = XmlCleaner.cleanAllEntities(resultXml);
+		log.trace("after cleaning: " + resultXml);
+		return resultXml;
+	}
 }
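A short usage sketch for the instance-based converter on the new side, showing how the key rules above rewrite names that would be illegal XML tags; the input is invented, and the exact element layout produced by org.json.XML may differ in detail:

class JsonUtilsDemo {
	public static void main(String[] args) {
		final JsonUtils jsonUtils = new JsonUtils();
		// key rewriting per the rules above: "2018 stats" -> "2018_stats" -> "n_2018_stats",
		// "cites/paper" -> "cites_paper"
		final String json = "{\"2018 stats\":{\"cites/paper\":3}}";
		System.out.println(jsonUtils.convertToXML(json));
		// roughly: <?xml ...?><recordWrap><n_2018_stats><cites_paper>3</cites_paper></n_2018_stats></recordWrap>
	}
}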
@@ -48,7 +48,7 @@ public class XSLTTransformationFunction implements MapFunction<MetadataRecord, MetadataRecord> {
 	@Override
 	public MetadataRecord call(MetadataRecord value) {
 		aggregationCounter.getTotalItems().add(1);
-
+		try {
 			Processor processor = new Processor(false);

 			processor.registerExtensionFunction(cleanFunction);

@@ -60,18 +60,11 @@ public class XSLTTransformationFunction implements MapFunction<MetadataRecord, MetadataRecord> {
 			comp.setParameter(datasourceIDParam, new XdmAtomicValue(value.getProvenance().getDatasourceId()));
 			QName datasourceNameParam = new QName(DATASOURCE_NAME_PARAM);
 			comp.setParameter(datasourceNameParam, new XdmAtomicValue(value.getProvenance().getDatasourceName()));
-		XsltExecutable xslt;
-		XdmNode source;
-		try {
-			xslt = comp
-				.compile(new StreamSource(IOUtils.toInputStream(transformationRule, StandardCharsets.UTF_8)));
-			source = processor
-				.newDocumentBuilder()
-				.build(new StreamSource(IOUtils.toInputStream(value.getBody(), StandardCharsets.UTF_8)));
-		} catch (Throwable e) {
-			throw new RuntimeException("Error on parsing xslt", e);
-		}
-		try {
+			XsltExecutable xslt = comp
+				.compile(new StreamSource(IOUtils.toInputStream(transformationRule, StandardCharsets.UTF_8)));
+			XdmNode source = processor
+				.newDocumentBuilder()
+				.build(new StreamSource(IOUtils.toInputStream(value.getBody(), StandardCharsets.UTF_8)));
 			XsltTransformer trans = xslt.load();
 			trans.setInitialContextNode(source);
 			final StringWriter output = new StringWriter();
@@ -17,12 +17,6 @@
 		"paramDescription": "the path to get the input data from Pubmed",
 		"paramRequired": true
 	},
-	{
-		"paramName": "oip",
-		"paramLongName": "openapcInputPath",
-		"paramDescription": "the path to get the input data from OpenAPC",
-		"paramRequired": true
-	},
 	{
 		"paramName": "o",
 		"paramLongName": "outputPath",
@@ -31,7 +31,6 @@ spark2SqlQueryExecutionListeners=com.cloudera.spark.lineage.NavigatorQueryListener
 # The following is needed as a property of a workflow
 oozie.wf.application.path=${oozieTopWfApplicationPath}

-crossrefInputPath=/data/bip-affiliations/crossref-data.json
+crossrefInputPath=/data/bip-affiliations/data.json
 pubmedInputPath=/data/bip-affiliations/pubmed-data.json
-openapcInputPath=/data/bip-affiliations/openapc-data.json
 outputPath=/tmp/crossref-affiliations-output-v5
@@ -9,10 +9,6 @@
 		<name>pubmedInputPath</name>
 		<description>the path where to find the inferred affiliation relations from Pubmed</description>
 	</property>
-	<property>
-		<name>openapcInputPath</name>
-		<description>the path where to find the inferred affiliation relations from OpenAPC</description>
-	</property>
 	<property>
 		<name>outputPath</name>
 		<description>the path where to store the actionset</description>

@@ -106,7 +102,6 @@
 			</spark-opts>
 			<arg>--crossrefInputPath</arg><arg>${crossrefInputPath}</arg>
 			<arg>--pubmedInputPath</arg><arg>${pubmedInputPath}</arg>
-			<arg>--openapcInputPath</arg><arg>${openapcInputPath}</arg>
 			<arg>--outputPath</arg><arg>${outputPath}</arg>
 		</spark>
 		<ok to="End"/>
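One way to read the removals above, as a hedged sketch: the Oozie <arg> pair in the workflow, the paramLongName entry in the parameters resource, and the parser.get(...) call in the job must all agree on the parameter name, so retiring a parameter touches all three places. The values below are illustrative only and mirror the ArgumentApplicationParser pattern used by the jobs in this diff:

class ParamFlowDemo {
	// parametersJsonDefinition stands for the *_parameters.json resource content shown above.
	static String readOpenapcPath(final String parametersJsonDefinition) throws Exception {
		final String[] args = { "--openapcInputPath", "/data/bip-affiliations/openapc-data.json" };
		final ArgumentApplicationParser parser = new ArgumentApplicationParser(parametersJsonDefinition);
		parser.parseArgument(args);
		return parser.get("openapcInputPath"); // must match the "paramLongName" entry
	}
}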
@@ -16,10 +16,5 @@
 		"paramLongName": "outputPath",
 		"paramDescription": "the path of the new ActionSet",
 		"paramRequired": true
-	}, {
-		"paramName": "fd",
-		"paramLongName": "distributeDoi",
-		"paramDescription": "the path of the new ActionSet",
-		"paramRequired": false
 	}
 ]
@@ -1,20 +0,0 @@
[
	{
		"paramName": "sp",
		"paramLongName": "sourcePath",
		"paramDescription": "the zipped opencitations file",
		"paramRequired": true
	},
	{
		"paramName": "op",
		"paramLongName": "outputPath",
		"paramDescription": "the working path",
		"paramRequired": true
	},
	{
		"paramName": "issm",
		"paramLongName": "isSparkSessionManaged",
		"paramDescription": "the hdfs name node",
		"paramRequired": false
	}
]
@@ -1,30 +0,0 @@
<configuration>
	<property>
		<name>jobTracker</name>
		<value>yarnRM</value>
	</property>
	<property>
		<name>nameNode</name>
		<value>hdfs://nameservice1</value>
	</property>
	<property>
		<name>oozie.use.system.libpath</name>
		<value>true</value>
	</property>
	<property>
		<name>hiveMetastoreUris</name>
		<value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
	</property>
	<property>
		<name>hiveJdbcUrl</name>
		<value>jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000</value>
	</property>
	<property>
		<name>hiveDbName</name>
		<value>openaire</value>
	</property>
	<property>
		<name>oozie.launcher.mapreduce.user.classpath.first</name>
		<value>true</value>
	</property>
</configuration>
@@ -1,153 +0,0 @@

<workflow-app name="FOS no doi" xmlns="uri:oozie:workflow:0.5">
	<parameters>
		<property>
			<name>fosPath</name>
			<description>the input path of the resources to be extended</description>
		</property>
		<property>
			<name>outputPath</name>
			<description>the path where to store the actionset</description>
		</property>
		<property>
			<name>sparkDriverMemory</name>
			<description>memory for driver process</description>
		</property>
		<property>
			<name>sparkExecutorMemory</name>
			<description>memory for individual executor</description>
		</property>
		<property>
			<name>sparkExecutorCores</name>
			<description>number of cores used by single executor</description>
		</property>
		<property>
			<name>oozieActionShareLibForSpark2</name>
			<description>oozie action sharelib for spark 2.*</description>
		</property>
		<property>
			<name>spark2ExtraListeners</name>
			<value>com.cloudera.spark.lineage.NavigatorAppListener</value>
			<description>spark 2.* extra listeners classname</description>
		</property>
		<property>
			<name>spark2SqlQueryExecutionListeners</name>
			<value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
			<description>spark 2.* sql query execution listeners classname</description>
		</property>
		<property>
			<name>spark2YarnHistoryServerAddress</name>
			<description>spark 2.* yarn history server address</description>
		</property>
		<property>
			<name>spark2EventLogDir</name>
			<description>spark 2.* event log dir location</description>
		</property>
	</parameters>

	<global>
		<job-tracker>${jobTracker}</job-tracker>
		<name-node>${nameNode}</name-node>
		<configuration>
			<property>
				<name>mapreduce.job.queuename</name>
				<value>${queueName}</value>
			</property>
			<property>
				<name>oozie.launcher.mapred.job.queue.name</name>
				<value>${oozieLauncherQueueName}</value>
			</property>
			<property>
				<name>oozie.action.sharelib.for.spark</name>
				<value>${oozieActionShareLibForSpark2}</value>
			</property>

		</configuration>
	</global>
	<start to="getFOS"/>

	<kill name="Kill">
		<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
	</kill>

	<action name="getFOS">
		<spark xmlns="uri:oozie:spark-action:0.2">
			<master>yarn</master>
			<mode>cluster</mode>
			<name>Gets Data from FOS csv file</name>
			<class>eu.dnetlib.dhp.actionmanager.createunresolvedentities.GetFOSSparkJob</class>
			<jar>dhp-aggregation-${projectVersion}.jar</jar>
			<spark-opts>
				--executor-memory=${sparkExecutorMemory}
				--executor-cores=${sparkExecutorCores}
				--driver-memory=${sparkDriverMemory}
				--conf spark.extraListeners=${spark2ExtraListeners}
				--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
				--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
				--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
				--conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
			</spark-opts>
			<arg>--sourcePath</arg><arg>${fosPath}</arg>
			<arg>--outputPath</arg><arg>${workingDir}/input/fos</arg>
			<arg>--delimiter</arg><arg>${delimiter}</arg>
		</spark>
		<ok to="prepareFos"/>
		<error to="Kill"/>
	</action>

	<action name="prepareFos">
		<spark xmlns="uri:oozie:spark-action:0.2">
			<master>yarn</master>
			<mode>cluster</mode>
			<name>Produces the results from FOS</name>
			<class>eu.dnetlib.dhp.actionmanager.createunresolvedentities.PrepareFOSSparkJob</class>
			<jar>dhp-aggregation-${projectVersion}.jar</jar>
			<spark-opts>
				--executor-memory=${sparkExecutorMemory}
				--executor-cores=${sparkExecutorCores}
				--driver-memory=${sparkDriverMemory}
				--conf spark.extraListeners=${spark2ExtraListeners}
				--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
				--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
				--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
				--conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
			</spark-opts>
			<arg>--sourcePath</arg><arg>${workingDir}/input/fos</arg>
			<arg>--outputPath</arg><arg>${workingDir}/prepared</arg>
|
|
||||||
<arg>--distributeDoi</arg><arg>false</arg>
|
|
||||||
</spark>
|
|
||||||
<ok to="produceActionSet"/>
|
|
||||||
<error to="Kill"/>
|
|
||||||
</action>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<action name="produceActionSet">
|
|
||||||
<spark xmlns="uri:oozie:spark-action:0.2">
|
|
||||||
<master>yarn</master>
|
|
||||||
<mode>cluster</mode>
|
|
||||||
<name>Save the action set grouping results with the same id</name>
|
|
||||||
<class>eu.dnetlib.dhp.actionmanager.fosnodoi.CreateActionSetSparkJob</class>
|
|
||||||
<jar>dhp-aggregation-${projectVersion}.jar</jar>
|
|
||||||
<spark-opts>
|
|
||||||
--executor-memory=${sparkExecutorMemory}
|
|
||||||
--executor-cores=${sparkExecutorCores}
|
|
||||||
--driver-memory=${sparkDriverMemory}
|
|
||||||
--conf spark.extraListeners=${spark2ExtraListeners}
|
|
||||||
--conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
|
|
||||||
--conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
|
|
||||||
--conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
|
|
||||||
--conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
|
|
||||||
</spark-opts>
|
|
||||||
<arg>--sourcePath</arg><arg>${workingDir}/prepared/fos</arg>
|
|
||||||
<arg>--outputPath</arg><arg>${outputPath}</arg>
|
|
||||||
</spark>
|
|
||||||
<ok to="End"/>
|
|
||||||
<error to="Kill"/>
|
|
||||||
</action>
|
|
||||||
|
|
||||||
<end name="End"/>
|
|
||||||
</workflow-app>
|
|
@@ -1,13 +1,13 @@
 [
   {
-    "paramName": "ip",
-    "paramLongName": "inputPath",
+    "paramName": "if",
+    "paramLongName": "inputFile",
     "paramDescription": "the zipped opencitations file",
     "paramRequired": true
   },
   {
-    "paramName": "op",
-    "paramLongName": "outputPath",
+    "paramName": "wp",
+    "paramLongName": "workingPath",
     "paramDescription": "the working path",
     "paramRequired": true
   },
@@ -16,5 +16,11 @@
     "paramLongName": "hdfsNameNode",
     "paramDescription": "the hdfs name node",
     "paramRequired": true
+  },
+  {
+    "paramName": "p",
+    "paramLongName": "prefix",
+    "paramDescription": "COCI or POCI",
+    "paramRequired": true
   }
 ]
@@ -1,7 +1,7 @@
 [
   {
-    "paramName": "ip",
-    "paramLongName": "inputPath",
+    "paramName": "wp",
+    "paramLongName": "workingPath",
     "paramDescription": "the zipped opencitations file",
     "paramRequired": true
   },
@@ -24,9 +24,15 @@
     "paramLongName": "outputPath",
     "paramDescription": "the hdfs name node",
     "paramRequired": true
-  }, {
-    "paramName": "nn",
-    "paramLongName": "hdfsNameNode",
+  },
+  {
+    "paramName": "if",
+    "paramLongName": "inputFile",
+    "paramDescription": "the hdfs name node",
+    "paramRequired": true
+  }, {
+    "paramName": "f",
+    "paramLongName": "format",
     "paramDescription": "the hdfs name node",
     "paramRequired": true
   }
@@ -27,9 +27,7 @@
            <case to="download">${wf:conf('resumeFrom') eq 'DownloadDump'}</case>
            <case to="extract">${wf:conf('resumeFrom') eq 'ExtractContent'}</case>
            <case to="read">${wf:conf('resumeFrom') eq 'ReadContent'}</case>
-           <case to="remap">${wf:conf('resumeFrom') eq 'MapContent'}</case>
-           <case to="create_actionset">${wf:conf('resumeFrom') eq 'CreateAS'}</case>
-           <default to="deleteoutputpath"/> <!-- first action to be done when downloadDump is to be performed -->
+           <default to="create_actionset"/> <!-- first action to be done when downloadDump is to be performed -->
        </switch>
    </decision>
@@ -37,15 +35,6 @@
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>

-   <action name="deleteoutputpath">
-       <fs>
-           <delete path='${inputPath}'/>
-           <mkdir path='${inputPath}'/>
-       </fs>
-       <ok to="download"/>
-       <error to="Kill"/>
-   </action>
    <action name="download">
        <shell xmlns="uri:oozie:shell-action:0.2">
            <job-tracker>${jobTracker}</job-tracker>
@@ -58,28 +47,7 @@
            </configuration>
            <exec>download.sh</exec>
            <argument>${filelist}</argument>
-           <argument>${inputPath}/Original</argument>
-           <env-var>HADOOP_USER_NAME=${wf:user()}</env-var>
-           <file>download.sh</file>
-           <capture-output/>
-       </shell>
-       <ok to="download_correspondence"/>
-       <error to="Kill"/>
-   </action>
-   <!-- downloads the correspondence from the omid and the pid (doi, pmid etc)-->
-   <action name="download_correspondence">
-       <shell xmlns="uri:oozie:shell-action:0.2">
-           <job-tracker>${jobTracker}</job-tracker>
-           <name-node>${nameNode}</name-node>
-           <configuration>
-               <property>
-                   <name>mapred.job.queue.name</name>
-                   <value>${queueName}</value>
-               </property>
-           </configuration>
-           <exec>download_corr.sh</exec>
-           <argument>${filecorrespondence}</argument>
-           <argument>${inputPath}/correspondence</argument>
+           <argument>${workingPath}/${prefix}/Original</argument>
            <env-var>HADOOP_USER_NAME=${wf:user()}</env-var>
            <file>download.sh</file>
            <capture-output/>
@@ -92,19 +60,9 @@
        <java>
            <main-class>eu.dnetlib.dhp.actionmanager.opencitations.GetOpenCitationsRefs</main-class>
            <arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
-           <arg>--inputPath</arg><arg>${inputPath}/Original</arg>
-           <arg>--outputPath</arg><arg>${inputPath}/Extracted</arg>
-       </java>
-       <ok to="read"/>
-       <error to="Kill"/>
-   </action>
-
-   <action name="extract_correspondence">
-       <java>
-           <main-class>eu.dnetlib.dhp.actionmanager.opencitations.GetOpenCitationsRefs</main-class>
-           <arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
-           <arg>--inputPath</arg><arg>${inputPath}/correspondence</arg>
-           <arg>--outputPath</arg><arg>${inputPath}/correspondence_extracted</arg>
+           <arg>--inputFile</arg><arg>${inputFile}</arg>
+           <arg>--workingPath</arg><arg>${workingPath}/${prefix}</arg>
+           <arg>--prefix</arg><arg>${prefix}</arg>
        </java>
        <ok to="read"/>
        <error to="Kill"/>
@@ -127,35 +85,11 @@
            --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
            --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
        </spark-opts>
-       <arg>--inputPath</arg><arg>${inputPath}/Extracted</arg>
-       <arg>--outputPath</arg><arg>${inputPath}/JSON</arg>
+       <arg>--workingPath</arg><arg>${workingPath}/${prefix}/${prefix}</arg>
+       <arg>--outputPath</arg><arg>${workingPath}/${prefix}/${prefix}_JSON/</arg>
        <arg>--delimiter</arg><arg>${delimiter}</arg>
-       <arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
-       </spark>
-       <ok to="remap"/>
-       <error to="Kill"/>
-   </action>
-
-   <action name="remap">
-       <spark xmlns="uri:oozie:spark-action:0.2">
-           <master>yarn</master><mode>cluster</mode>
-           <name>Produces the AS for OC</name>
-           <class>eu.dnetlib.dhp.actionmanager.opencitations.MapOCIdsInPids</class>
-           <jar>dhp-aggregation-${projectVersion}.jar</jar>
-           <spark-opts>
-               --executor-memory=${sparkExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkDriverMemory}
-               --conf spark.extraListeners=${spark2ExtraListeners}
-               --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
-               --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
-               --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
-               --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
-           </spark-opts>
-           <arg>--inputPath</arg><arg>${inputPath}</arg>
-           <arg>--outputPath</arg><arg>${outputPathExtraction}</arg>
-           <arg>--nameNode</arg><arg>${nameNode}</arg>
+       <arg>--inputFile</arg><arg>${inputFileCoci}</arg>
+       <arg>--format</arg><arg>${prefix}</arg>
        </spark>
        <ok to="create_actionset"/>
        <error to="Kill"/>
@@ -178,7 +112,7 @@
            --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
            --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
        </spark-opts>
-       <arg>--inputPath</arg><arg>${outputPathExtraction}</arg>
+       <arg>--inputPath</arg><arg>${workingPath}</arg>
        <arg>--outputPath</arg><arg>${outputPath}</arg>
        </spark>
        <ok to="End"/>
@@ -1,25 +0,0 @@ (file deleted)
-[
-  {"paramName": "ip",   "paramLongName": "inputPath",            "paramDescription": "the zipped opencitations file", "paramRequired": true},
-  {"paramName": "op",   "paramLongName": "outputPath",           "paramDescription": "the working path",              "paramRequired": true},
-  {"paramName": "issm", "paramLongName": "isSparkSessionManged", "paramDescription": "the hdfs name node",            "paramRequired": false},
-  {"paramName": "nn",   "paramLongName": "nameNode",             "paramDescription": "the hdfs name node",            "paramRequired": true}
-]
@@ -1,20 +0,0 @@ (file deleted)
-[
-  {"paramName": "ip",   "paramLongName": "inputPath",             "paramDescription": "the zipped opencitations file", "paramRequired": true},
-  {"paramName": "op",   "paramLongName": "outputPath",            "paramDescription": "the working path",              "paramRequired": true},
-  {"paramName": "issm", "paramLongName": "isSparkSessionManaged", "paramDescription": "the hdfs name node",             "paramRequired": false}
-]
@@ -1,30 +0,0 @@ (file deleted)
-[
-  {"paramName": "issm", "paramLongName": "isSparkSessionManaged", "paramDescription": "the hdfs name node", "paramRequired": false},
-  {"paramName": "d",    "paramLongName": "delimiter",             "paramDescription": "the hdfs name node", "paramRequired": false},
-  {"paramName": "op",   "paramLongName": "outputPath",            "paramDescription": "the hdfs name node", "paramRequired": true},
-  {"paramName": "if",   "paramLongName": "inputFile",             "paramDescription": "the hdfs name node", "paramRequired": true}
-]
@@ -1,58 +0,0 @@ (file deleted)
-<configuration>
-    <property><name>jobTracker</name><value>yarnRM</value></property>
-    <property><name>nameNode</name><value>hdfs://nameservice1</value></property>
-    <property><name>oozie.use.system.libpath</name><value>true</value></property>
-    <property><name>oozie.action.sharelib.for.spark</name><value>spark2</value></property>
-    <property><name>hive_metastore_uris</name><value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value></property>
-    <property><name>spark2YarnHistoryServerAddress</name><value>http://iis-cdh5-test-gw.ocean.icm.edu.pl:18089</value></property>
-    <property><name>spark2ExtraListeners</name><value>com.cloudera.spark.lineage.NavigatorAppListener</value></property>
-    <property><name>spark2SqlQueryExecutionListeners</name><value>com.cloudera.spark.lineage.NavigatorQueryListener</value></property>
-    <property><name>oozie.launcher.mapreduce.user.classpath.first</name><value>true</value></property>
-    <property><name>sparkExecutorNumber</name><value>4</value></property>
-    <property><name>spark2EventLogDir</name><value>/user/spark/spark2ApplicationHistory</value></property>
-    <property><name>sparkDriverMemory</name><value>15G</value></property>
-    <property><name>sparkExecutorMemory</name><value>6G</value></property>
-    <property><name>sparkExecutorCores</name><value>1</value></property>
-</configuration>
@@ -1,2 +0,0 @@ (file deleted)
-#!/bin/bash
-curl -L $1 | hdfs dfs -put - $2
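Note: the two-line helper removed above simply streamed a remote file into HDFS without touching the launcher's local disk. A minimal sketch of how such a helper is typically invoked, either by hand or from an Oozie shell action; the URL and HDFS target below are illustrative placeholders, not values taken from this repository:

    #!/bin/bash
    # hypothetical invocation of the removed download.sh helper
    set -euo pipefail
    DUMP_URL="https://example.org/opencitations/dump.zip"      # placeholder URL
    HDFS_TARGET="/tmp/opencitations/Original/dump.zip"         # placeholder HDFS path
    # equivalent to: ./download.sh "$DUMP_URL" "$HDFS_TARGET"
    curl -L "$DUMP_URL" | hdfs dfs -put - "$HDFS_TARGET"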
@@ -1,82 +0,0 @@ (file deleted)
-<workflow-app name="Transfomative Agreement Integration" xmlns="uri:oozie:workflow:0.5">
-    <global>
-        <job-tracker>${jobTracker}</job-tracker>
-        <name-node>${nameNode}</name-node>
-        <configuration>
-            <property><name>mapreduce.job.queuename</name><value>${queueName}</value></property>
-            <property><name>oozie.launcher.mapred.job.queue.name</name><value>${oozieLauncherQueueName}</value></property>
-            <property><name>oozie.action.sharelib.for.spark</name><value>${oozieActionShareLibForSpark2}</value></property>
-        </configuration>
-    </global>
-    <start to="resume_from"/>
-    <decision name="resume_from">
-        <switch>
-            <case to="download">${wf:conf('resumeFrom') eq 'DownloadDump'}</case>
-            <default to="create_actionset"/> <!-- first action to be done when downloadDump is to be performed -->
-        </switch>
-    </decision>
-    <kill name="Kill">
-        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
-    </kill>
-    <action name="download">
-        <shell xmlns="uri:oozie:shell-action:0.2">
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <configuration>
-                <property><name>mapred.job.queue.name</name><value>${queueName}</value></property>
-            </configuration>
-            <exec>download.sh</exec>
-            <argument>${inputFile}</argument>
-            <argument>${workingDir}/transformativeagreement/transformativeAgreement.json</argument>
-            <env-var>HADOOP_USER_NAME=${wf:user()}</env-var>
-            <file>download.sh</file>
-            <capture-output/>
-        </shell>
-        <ok to="create_actionset"/>
-        <error to="Kill"/>
-    </action>
-    <action name="create_actionset">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <master>yarn</master><mode>cluster</mode>
-            <name>Produces the AS for the Transformative Agreement</name>
-            <class>eu.dnetlib.dhp.actionmanager.transformativeagreement.CreateActionSetSparkJob</class>
-            <jar>dhp-aggregation-${projectVersion}.jar</jar>
-            <spark-opts>
-                --executor-memory=${sparkExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkDriverMemory}
-                --conf spark.extraListeners=${spark2ExtraListeners}
-                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
-                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
-                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
-                --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
-            </spark-opts>
-            <arg>--inputPath</arg><arg>${workingDir}/transformativeagreement/</arg>
-            <arg>--outputPath</arg><arg>${outputPath}</arg>
-        </spark>
-        <ok to="End"/>
-        <error to="Kill"/>
-    </action>
-    <end name="End"/>
-</workflow-app>
@@ -1,26 +0,0 @@ (file deleted)
-[
-  {"paramName": "m", "paramLongName": "master",     "paramDescription": "the master name",                            "paramRequired": true},
-  {"paramName": "t", "paramLongName": "targetPath", "paramDescription": "the target PATH of the DF tables",           "paramRequired": true},
-  {"paramName": "g", "paramLongName": "graphPath",  "paramDescription": "the PATH of the current graph path",         "paramRequired": true},
-  {"paramName": "u", "paramLongName": "updatePath", "paramDescription": "the PATH of the current graph update path",  "paramRequired": true}
-]
@@ -1,37 +0,0 @@ (file deleted)
-[
-  {"paramName": "n",  "paramLongName": "namenode",    "paramDescription": "the Name Node URI",                         "paramRequired": true},
-  {"paramName": "m",  "paramLongName": "master",      "paramDescription": "the master name",                           "paramRequired": true},
-  {"paramName": "t",  "paramLongName": "targetPath",  "paramDescription": "the target PATH where download the files",  "paramRequired": true},
-  {"paramName": "a",  "paramLongName": "apiURL",      "paramDescription": "the URL to download the tar file",          "paramRequired": true},
-  {"paramName": "g",  "paramLongName": "graphPath",   "paramDescription": "the path of the input graph",               "paramRequired": true},
-  {"paramName": "at", "paramLongName": "accessToken", "paramDescription": "the accessToken to contact API",            "paramRequired": true}
-]
@@ -16,12 +16,6 @@
     "paramLongName": "sourcePath",
     "paramDescription": "the PATH of the ORCID sequence file",
     "paramRequired": true
-  },
-  {
-    "paramName": "fu",
-    "paramLongName": "fromUpdate",
-    "paramDescription": "whether we have to generate table from dump or from update",
-    "paramRequired": false
   }
 ]
@@ -1,23 +0,0 @@ (file deleted)
-<configuration>
-    <property><name>jobTracker</name><value>yarnRM</value></property>
-    <property><name>nameNode</name><value>hdfs://nameservice1</value></property>
-    <property><name>oozie.use.system.libpath</name><value>true</value></property>
-    <property><name>oozie.action.sharelib.for.spark</name><value>spark2</value></property>
-    <property><name>oozie.launcher.mapreduce.user.classpath.first</name><value>true</value></property>
-</configuration>
@@ -1,114 +0,0 @@ (file deleted)
-<workflow-app name="download_Update_ORCID" xmlns="uri:oozie:workflow:0.5">
-    <parameters>
-        <property><name>graphPath</name><description>the path to store the original ORCID dump</description></property>
-        <property><name>targetPath</name><description>the path to store the original ORCID dump</description></property>
-        <property><name>apiURL</name><value>http://74804fb637bd8e2fba5b-e0a029c2f87486cddec3b416996a6057.r3.cf1.rackcdn.com/last_modified.csv.tar</value><description>The URL of the update CSV list </description></property>
-        <property><name>accessToken</name><description>The access token</description></property>
-    </parameters>
-    <start to="startUpdate"/>
-    <kill name="Kill">
-        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
-    </kill>
-    <action name="startUpdate">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <master>yarn</master><mode>cluster</mode>
-            <name>Check Latest Orcid and Download updates</name>
-            <class>eu.dnetlib.dhp.collection.orcid.OrcidGetUpdatesFile</class>
-            <jar>dhp-aggregation-${projectVersion}.jar</jar>
-            <spark-opts>
-                --executor-memory=${sparkExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkDriverMemory}
-                --conf spark.executor.memoryOverhead=2g
-                --conf spark.sql.shuffle.partitions=3000
-                --conf spark.extraListeners=${spark2ExtraListeners}
-                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
-                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
-                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
-            </spark-opts>
-            <arg>--master</arg><arg>yarn</arg>
-            <arg>--namenode</arg><arg>${nameNode}</arg>
-            <arg>--graphPath</arg><arg>${graphPath}</arg>
-            <arg>--targetPath</arg><arg>${targetPath}</arg>
-            <arg>--apiURL</arg><arg>${apiURL}</arg>
-            <arg>--accessToken</arg><arg>${accessToken}</arg>
-        </spark>
-        <ok to="generateTables"/>
-        <error to="Kill"/>
-    </action>
-    <action name="generateTables">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <master>yarn</master><mode>cluster</mode>
-            <name>Generate ORCID Tables</name>
-            <class>eu.dnetlib.dhp.collection.orcid.SparkGenerateORCIDTable</class>
-            <jar>dhp-aggregation-${projectVersion}.jar</jar>
-            <spark-opts><!-- same Spark options as in startUpdate --></spark-opts>
-            <arg>--sourcePath</arg><arg>${targetPath}</arg>
-            <arg>--targetPath</arg><arg>${targetPath}/updateTable</arg>
-            <arg>--fromUpdate</arg><arg>true</arg>
-            <arg>--master</arg><arg>yarn</arg>
-        </spark>
-        <ok to="updateTable"/>
-        <error to="Kill"/>
-    </action>
-    <action name="updateTable">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <master>yarn</master><mode>cluster</mode>
-            <name>Update ORCID Tables</name>
-            <class>eu.dnetlib.dhp.collection.orcid.SparkApplyUpdate</class>
-            <jar>dhp-aggregation-${projectVersion}.jar</jar>
-            <spark-opts><!-- same Spark options as in startUpdate --></spark-opts>
-            <arg>--graphPath</arg><arg>${graphPath}</arg>
-            <arg>--updatePath</arg><arg>${targetPath}/updateTable</arg>
-            <arg>--targetPath</arg><arg>${targetPath}/newTable</arg>
-            <arg>--master</arg><arg>yarn</arg>
-        </spark>
-        <ok to="End"/>
-        <error to="Kill"/>
-    </action>
-    <end name="End"/>
-</workflow-app>
@@ -108,7 +108,7 @@ INSERT INTO dsm_apiparams(
     'api_________::openaire____::base_search::dump@@acceptedNormTypes',
     'api_________::openaire____::base_search::dump',
     'acceptedNormTypes',
-    '1,11,111,121,14,15,18,181,182,183,1A,6,7'
+    '1,11,111,121,13,14,15,18,181,182,183,1A,6,7'
 );

 COMMIT;
@@ -59,13 +59,16 @@ base_dc:global_id (I used oai:identifier)
 base_dc:collection/text()

 base_dc:continent
-base_dc:country
 base_dc:year (I used dc:date)
 dc:coverage
 dc:language (I used base_dc:lang)
 base_dc:link (I used dc:identifier)
 -->
+
+			<xsl:variable name="varBaseNormType" select="vocabulary:clean(//base_dc:typenorm, 'base:normalized_types')" />
+
 			<metadata>
 				<xsl:call-template name="allElements">
 					<xsl:with-param name="sourceElement" select="//dc:title" />
@@ -108,13 +111,9 @@ base_dc:link (I used dc:identifier)
 					<xsl:with-param name="targetElement" select="'dc:format'" />
 				</xsl:call-template>

-				<xsl:for-each select="//base_dc:typenorm">
-					<dc:type>
-						<xsl:value-of select="vocabulary:clean(., 'base:normalized_types')" />
-					</dc:type>
-				</xsl:for-each>
+				<dc:type>
+					<xsl:value-of select="$varBaseNormType" />
+				</dc:type>

 				<xsl:call-template name="allElements">
 					<xsl:with-param name="sourceElement" select="//dc:type" />
 					<xsl:with-param name="targetElement" select="'dc:type'" />
@@ -162,150 +161,16 @@ base_dc:link (I used dc:identifier)
 					</xsl:if>
 				</xsl:for-each>

-				<xsl:choose>
-					<!-- I used an inline mapping because the field typenorm could be repeated and I have to specify a list of priority -->
-					<xsl:when test="//base_dc:typenorm = '111'"><dr:CobjCategory type="publication">0013</dr:CobjCategory></xsl:when>   <!-- Book part -->
-					<xsl:when test="//base_dc:typenorm = '11'"><dr:CobjCategory type="publication">0002</dr:CobjCategory></xsl:when>    <!-- Book -->
-					<xsl:when test="//base_dc:typenorm = '121'"><dr:CobjCategory type="publication">0001</dr:CobjCategory></xsl:when>   <!-- Article contribution -->
-					<xsl:when test="//base_dc:typenorm = '12'"><dr:CobjCategory type="publication">0043</dr:CobjCategory></xsl:when>    <!-- Journal/Newspaper -->
-					<xsl:when test="//base_dc:typenorm = '14'"><dr:CobjCategory type="publication">0017</dr:CobjCategory></xsl:when>    <!-- Report -->
-					<xsl:when test="//base_dc:typenorm = '15'"><dr:CobjCategory type="publication">0015</dr:CobjCategory></xsl:when>    <!-- Review -->
-					<xsl:when test="//base_dc:typenorm = '17'"><dr:CobjCategory type="publication">0010</dr:CobjCategory></xsl:when>    <!-- Lecture -->
-					<xsl:when test="//base_dc:typenorm = '181'"><dr:CobjCategory type="publication">0008</dr:CobjCategory></xsl:when>   <!-- Bachelor's thesis -->
-					<xsl:when test="//base_dc:typenorm = '182'"><dr:CobjCategory type="publication">0007</dr:CobjCategory></xsl:when>   <!-- Master's thesis -->
-					<xsl:when test="//base_dc:typenorm = '183'"><dr:CobjCategory type="publication">0006</dr:CobjCategory></xsl:when>   <!-- Doctoral and postdoctoral thesis -->
-					<xsl:when test="//base_dc:typenorm = '18'"><dr:CobjCategory type="publication">0044</dr:CobjCategory></xsl:when>    <!-- Thesis -->
-					<xsl:when test="//base_dc:typenorm = '1A'"><dr:CobjCategory type="publication">0019</dr:CobjCategory></xsl:when>    <!-- Patent -->
-					<xsl:when test="//base_dc:typenorm = '1'"><dr:CobjCategory type="publication">0001</dr:CobjCategory></xsl:when>     <!-- Text -->
-					<xsl:when test="//base_dc:typenorm = '6'"><dr:CobjCategory type="software">0029</dr:CobjCategory></xsl:when>        <!-- Software -->
-					<xsl:when test="//base_dc:typenorm = '7'"><dr:CobjCategory type="dataset">0021</dr:CobjCategory></xsl:when>         <!-- Dataset -->
-					<xsl:when test="//base_dc:typenorm = '51'"><dr:CobjCategory type="other">0025</dr:CobjCategory></xsl:when>          <!-- Still image -->
-					<xsl:when test="//base_dc:typenorm = '52'"><dr:CobjCategory type="other">0024</dr:CobjCategory></xsl:when>          <!-- Moving image/Video -->
-					<xsl:when test="//base_dc:typenorm = '5'"><dr:CobjCategory type="other">0033</dr:CobjCategory></xsl:when>           <!-- Image/Video -->
-					<xsl:when test="//base_dc:typenorm = '4'"><dr:CobjCategory type="other">0030</dr:CobjCategory></xsl:when>           <!-- Audio -->
-					<xsl:when test="//base_dc:typenorm = '2'"><dr:CobjCategory type="other">0020</dr:CobjCategory></xsl:when>           <!-- Musical notation -->
-					<xsl:when test="//base_dc:typenorm = '3'"><dr:CobjCategory type="other">0020</dr:CobjCategory></xsl:when>           <!-- Map -->
-					<xsl:when test="//base_dc:typenorm = '122'"><dr:CobjCategory type="publication">0038</dr:CobjCategory></xsl:when>   <!-- Other non-article -->
-					<xsl:when test="//base_dc:typenorm = '16'"><dr:CobjCategory type="publication">0038</dr:CobjCategory></xsl:when>    <!-- Course material -->
-					<xsl:when test="//base_dc:typenorm = '19'"><dr:CobjCategory type="publication">0038</dr:CobjCategory></xsl:when>    <!-- Manuscript -->
-					<xsl:when test="//base_dc:typenorm = '13'"><dr:CobjCategory type="publication">0004</dr:CobjCategory></xsl:when>    <!-- Conference object -->
-					<xsl:when test="//base_dc:typenorm = 'F'"><dr:CobjCategory type="other">0000</dr:CobjCategory></xsl:when>           <!-- Unknown -->
-					<xsl:otherwise><dr:CobjCategory type="other">0000</dr:CobjCategory></xsl:otherwise>
-				</xsl:choose>
+				<dr:CobjCategory>
+					<xsl:variable name="varCobjCategory" select="vocabulary:clean($varBaseNormType, 'dnet:publication_resource')" />
+					<xsl:variable name="varSuperType" select="vocabulary:clean($varCobjCategory, 'dnet:result_typologies')" />
+					<xsl:attribute name="type" select="$varSuperType" />
+					<xsl:value-of select="$varCobjCategory" />
+				</dr:CobjCategory>

 				<oaf:accessrights>
 					<xsl:choose>
-						<xsl:when test="//base_dc:oa[.='0']">CLOSED</xsl:when>
 						<xsl:when test="//base_dc:oa[.='1']">OPEN</xsl:when>
-						<xsl:when test="//base_dc:oa[.='2']">UNKNOWN</xsl:when>
 						<xsl:when test="//base_dc:rightsnorm">
 							<xsl:value-of select="vocabulary:clean(//base_dc:rightsnorm, 'dnet:access_modes')" />
 						</xsl:when>
@@ -342,7 +207,7 @@ base_dc:link (I used dc:identifier)
 				<oaf:identifier identifierType="oai-original">
 					<xsl:value-of
-						select="//oai:header/oai:identifier" />
+						select="//*[local-name() = 'about']/*[local-name() = 'provenance']//*[local-name() = 'originDescription' and not(./*[local-name() = 'originDescription'])]/*[local-name() = 'identifier']" />
 				</oaf:identifier>

 				<oaf:hostedBy>
@@ -388,8 +253,13 @@ base_dc:link (I used dc:identifier)
 							<xsl:value-of select="concat('ror_________::https://ror.org/', normalize-space(.))" />
 						</xsl:otherwise>
 					</xsl:choose>
 				</oaf:relation>
 			</xsl:for-each>
+
+			<xsl:for-each select="//base_dc:country">
+				<oaf:country><xsl:value-of select="vocabulary:clean(., 'dnet:countries')" /></oaf:country>
+			</xsl:for-each>
+
 			</metadata>
 			<xsl:copy-of select="//*[local-name() = 'about']" />
 		</record>
@@ -42,7 +42,6 @@
 base_dc:global_id (I used oai:identifier)
 base_dc:collection/text()
 base_dc:continent
-base_dc:country
 dc:coverage
 dc:source
 dc:relation
@@ -51,6 +50,10 @@
 base_dc:link (I used dc:identifier)
 -->
+
+				<xsl:variable name="varBaseNormType" select="vocabulary:clean(//base_dc:typenorm, 'base:normalized_types')" />
+
 				<metadata>
 					<datacite:resource>
@@ -82,17 +85,14 @@
 						<datacite:identifier alternateIdentifierType="oai-original">
 							<xsl:value-of
-								select="//oai:header/oai:identifier" />
+								select="//*[local-name() = 'about']/*[local-name() = 'provenance']//*[local-name() = 'originDescription' and not(./*[local-name() = 'originDescription'])]/*[local-name() = 'identifier']" />
 						</datacite:identifier>
 					</datacite:alternateIdentifiers>

 					<datacite:relatedIdentifiers />

-					<xsl:for-each select="//base_dc:typenorm">
-						<datacite:resourceType><xsl:value-of select="vocabulary:clean(., 'base:normalized_types')" /></datacite:resourceType>
-					</xsl:for-each>
+					<datacite:resourceType><xsl:value-of select="$varBaseNormType" /></datacite:resourceType>

 					<datacite:titles>
 						<xsl:for-each select="//dc:title">
@@ -153,18 +153,14 @@
 						</xsl:for-each>
 					</datacite:subjects>

-					<xsl:for-each select="//dc:publisher">
-						<datacite:publisher>
-							<xsl:value-of select="normalize-space(.)" />
-						</datacite:publisher>
-					</xsl:for-each>
-
-					<xsl:for-each select="//base_dc:year">
-						<datacite:publicationYear>
-							<xsl:value-of select="normalize-space(.)" />
-						</datacite:publicationYear>
-					</xsl:for-each>
+					<datacite:publisher>
+						<xsl:value-of select="normalize-space(//dc:publisher)" />
+					</datacite:publisher>
+
+					<datacite:publicationYear>
+						<xsl:value-of select="normalize-space(//base_dc:year)" />
+					</datacite:publicationYear>

 					<datacite:formats>
 						<xsl:for-each select="//dc:format">
 							<datacite:format>
@@ -178,9 +174,6 @@
 						</datacite:language>

 					<oaf:accessrights>
-						<xsl:if test="//base_dc:oa[.='0']">
-							<datacite:rights rightsURI="http://purl.org/coar/access_right/c_16ec">restricted access</datacite:rights>
-						</xsl:if>
 						<xsl:if test="//base_dc:oa[.='1']">
 							<datacite:rights rightsURI="http://purl.org/coar/access_right/c_abf2">open access</datacite:rights>
 						</xsl:if>
@@ -191,162 +184,29 @@

 					</datacite:resource>

 					<xsl:for-each select="//dc:relation">
 						<xsl:if test="matches(normalize-space(.), '(info:eu-repo/grantagreement/ec/fp7/)(\d\d\d\d\d\d)(.*)', 'i')">
 							<oaf:projectid>
 								<xsl:value-of select="concat($varFP7, replace(normalize-space(.), '(info:eu-repo/grantagreement/ec/fp7/)(\d\d\d\d\d\d)(.*)', '$2', 'i'))" />
 							</oaf:projectid>
 						</xsl:if>
 						<xsl:if test="matches(normalize-space(.), '(info:eu-repo/grantagreement/ec/h2020/)(\d\d\d\d\d\d)(.*)', 'i')">
 							<oaf:projectid>
 								<xsl:value-of select="concat($varH2020, replace(normalize-space(.), '(info:eu-repo/grantagreement/ec/h2020/)(\d\d\d\d\d\d)(.*)', '$2', 'i'))" />
 							</oaf:projectid>
 						</xsl:if>
 					</xsl:for-each>

-					<xsl:choose>
-						<!-- I used an inline mapping because the field typenorm could be repeated and I have to specify a list of priority -->
-						<xsl:when test="//base_dc:typenorm = '111'"><dr:CobjCategory type="publication">0013</dr:CobjCategory></xsl:when>   <!-- Book part -->
-						<xsl:when test="//base_dc:typenorm = '11'"><dr:CobjCategory type="publication">0002</dr:CobjCategory></xsl:when>    <!-- Book -->
-						<xsl:when test="//base_dc:typenorm = '121'"><dr:CobjCategory type="publication">0001</dr:CobjCategory></xsl:when>   <!-- Article contribution -->
-						<xsl:when test="//base_dc:typenorm = '12'"><dr:CobjCategory type="publication">0043</dr:CobjCategory></xsl:when>    <!-- Journal/Newspaper -->
-						<xsl:when test="//base_dc:typenorm = '14'"><dr:CobjCategory type="publication">0017</dr:CobjCategory></xsl:when>    <!-- Report -->
-						<xsl:when test="//base_dc:typenorm = '15'"><dr:CobjCategory type="publication">0015</dr:CobjCategory></xsl:when>    <!-- Review -->
-						<xsl:when test="//base_dc:typenorm = '17'"><dr:CobjCategory type="publication">0010</dr:CobjCategory></xsl:when>    <!-- Lecture -->
-						<xsl:when test="//base_dc:typenorm = '181'"><dr:CobjCategory type="publication">0008</dr:CobjCategory></xsl:when>   <!-- Bachelor's thesis -->
-						<xsl:when test="//base_dc:typenorm = '182'"><dr:CobjCategory type="publication">0007</dr:CobjCategory></xsl:when>   <!-- Master's thesis -->
-						<xsl:when test="//base_dc:typenorm = '183'"><dr:CobjCategory type="publication">0006</dr:CobjCategory></xsl:when>   <!-- Doctoral and postdoctoral thesis -->
-						<xsl:when test="//base_dc:typenorm = '18'"><dr:CobjCategory type="publication">0044</dr:CobjCategory></xsl:when>    <!-- Thesis -->
-						<xsl:when test="//base_dc:typenorm = '1A'"><dr:CobjCategory type="publication">0019</dr:CobjCategory></xsl:when>    <!-- Patent -->
-						<xsl:when test="//base_dc:typenorm = '1'"><dr:CobjCategory type="publication">0001</dr:CobjCategory></xsl:when>     <!-- Text -->
-						<xsl:when test="//base_dc:typenorm = '6'"><dr:CobjCategory type="software">0029</dr:CobjCategory></xsl:when>        <!-- Software -->
-						<xsl:when test="//base_dc:typenorm = '7'"><dr:CobjCategory type="dataset">0021</dr:CobjCategory></xsl:when>         <!-- Dataset -->
-						<xsl:when test="//base_dc:typenorm = '51'"><dr:CobjCategory type="other">0025</dr:CobjCategory></xsl:when>          <!-- Still image -->
-						<xsl:when test="//base_dc:typenorm = '52'"><dr:CobjCategory type="other">0024</dr:CobjCategory></xsl:when>          <!-- Moving image/Video -->
-						<xsl:when test="//base_dc:typenorm = '5'"><dr:CobjCategory type="other">0033</dr:CobjCategory></xsl:when>           <!-- Image/Video -->
-						<xsl:when test="//base_dc:typenorm = '4'"><dr:CobjCategory type="other">0030</dr:CobjCategory></xsl:when>           <!-- Audio -->
-						<xsl:when test="//base_dc:typenorm = '2'"><dr:CobjCategory type="other">0020</dr:CobjCategory></xsl:when>           <!-- Musical notation -->
-						<xsl:when test="//base_dc:typenorm = '3'"><dr:CobjCategory type="other">0020</dr:CobjCategory></xsl:when>           <!-- Map -->
-						<xsl:when test="//base_dc:typenorm = '122'"><dr:CobjCategory type="publication">0038</dr:CobjCategory></xsl:when>   <!-- Other non-article -->
-						<xsl:when test="//base_dc:typenorm = '16'"><dr:CobjCategory type="publication">0038</dr:CobjCategory></xsl:when>    <!-- Course material -->
-						<xsl:when test="//base_dc:typenorm = '19'"><dr:CobjCategory type="publication">0038</dr:CobjCategory></xsl:when>    <!-- Manuscript -->
-						<xsl:when test="//base_dc:typenorm = '13'"><dr:CobjCategory type="publication">0004</dr:CobjCategory></xsl:when>    <!-- Conference object -->
-						<xsl:when test="//base_dc:typenorm = 'F'"><dr:CobjCategory type="other">0000</dr:CobjCategory></xsl:when>           <!-- Unknown -->
-						<xsl:otherwise><dr:CobjCategory type="other">0000</dr:CobjCategory></xsl:otherwise>
-					</xsl:choose>
+					<dr:CobjCategory>
+						<xsl:variable name="varCobjCategory" select="vocabulary:clean($varBaseNormType, 'dnet:publication_resource')" />
+						<xsl:variable name="varSuperType" select="vocabulary:clean($varCobjCategory, 'dnet:result_typologies')" />
+						<xsl:attribute name="type" select="$varSuperType" />
+						<xsl:value-of select="$varCobjCategory" />
+					</dr:CobjCategory>

 					<oaf:accessrights>
 						<xsl:choose>
-							<xsl:when test="//base_dc:oa[.='0']">CLOSED</xsl:when>
 							<xsl:when test="//base_dc:oa[.='1']">OPEN</xsl:when>
-							<xsl:when test="//base_dc:oa[.='2']">UNKNOWN</xsl:when>
 							<xsl:when test="//base_dc:rightsnorm">
 								<xsl:value-of select="vocabulary:clean(//base_dc:rightsnorm, 'dnet:access_modes')" />
 							</xsl:when>
@@ -384,7 +244,7 @@
 				<oaf:identifier identifierType="oai-original">
 					<xsl:value-of
-						select="//oai:header/oai:identifier" />
+						select="//*[local-name() = 'about']/*[local-name() = 'provenance']//*[local-name() = 'originDescription' and not(./*[local-name() = 'originDescription'])]/*[local-name() = 'identifier']" />
 				</oaf:identifier>

 				<oaf:hostedBy>
@@ -429,6 +289,11 @@
 						</xsl:choose>
 					</oaf:relation>
 				</xsl:for-each>
+
+				<xsl:for-each select="//base_dc:country">
+					<oaf:country><xsl:value-of select="vocabulary:clean(., 'dnet:countries')" /></oaf:country>
+				</xsl:for-each>
+
 			</metadata>
 			<xsl:copy-of select="//*[local-name() = 'about']" />
 		</record>
@@ -1048,5 +1048,10 @@
     "openaire_id": "re3data_____::r3d100010399",
     "datacite_name": "ZEW Forschungsdatenzentrum",
     "official_name": "ZEW Forschungsdatenzentrum"
+  },
+  "HBP.NEUROINF": {
+    "openaire_id": "fairsharing_::2975",
+    "datacite_name": "EBRAINS",
+    "official_name": "EBRAINS"
   }
 }
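Note: the hunk above appends one more entry to the datasource mapping JSON. A quick, optional way to sanity-check such an entry after editing is a jq lookup; the file name below is a placeholder for wherever the mapping file actually lives in the repository:

    # placeholder file name; substitute the real mapping file path
    jq '."HBP.NEUROINF"' hostedby_map.json
    # expected output:
    # { "openaire_id": "fairsharing_::2975", "datacite_name": "EBRAINS", "official_name": "EBRAINS" }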
@@ -1,4 +1,4 @@
 <workflow-app name="Transform_BioEntity_Workflow" xmlns="uri:oozie:workflow:0.5">
     <parameters>
         <property>
             <name>sourcePath</name>
@@ -8,19 +8,40 @@
             <name>database</name>
             <description>the PDB Database Working Path</description>
         </property>

         <property>
-            <name>targetPath</name>
-            <description>the Target Working dir path</description>
+            <name>mdStoreOutputId</name>
+            <description>the identifier of the cleaned MDStore</description>
+        </property>
+        <property>
+            <name>mdStoreManagerURI</name>
+            <description>the path of the cleaned mdstore</description>
         </property>
     </parameters>

-    <start to="ConvertDB"/>
+    <start to="StartTransaction"/>

     <kill name="Kill">
         <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
     </kill>

+    <action name="StartTransaction">
+        <java>
+            <configuration>
+                <property>
+                    <name>oozie.launcher.mapreduce.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+            </configuration>
+            <main-class>eu.dnetlib.dhp.aggregation.mdstore.MDStoreActionNode</main-class>
+            <arg>--action</arg><arg>NEW_VERSION</arg>
+            <arg>--mdStoreID</arg><arg>${mdStoreOutputId}</arg>
+            <arg>--mdStoreManagerURI</arg><arg>${mdStoreManagerURI}</arg>
+            <capture-output/>
+        </java>
+        <ok to="ConvertDB"/>
+        <error to="RollBack"/>
+    </action>
     <action name="ConvertDB">
         <spark xmlns="uri:oozie:spark-action:0.2">
             <master>yarn</master>
@@ -41,11 +62,48 @@
             <arg>--master</arg><arg>yarn</arg>
             <arg>--dbPath</arg><arg>${sourcePath}</arg>
             <arg>--database</arg><arg>${database}</arg>
-            <arg>--targetPath</arg><arg>${targetPath}</arg>
+            <arg>--mdstoreOutputVersion</arg><arg>${wf:actionData('StartTransaction')['mdStoreVersion']}</arg>
         </spark>
-        <ok to="End"/>
-        <error to="Kill"/>
+        <ok to="CommitVersion"/>
+        <error to="RollBack"/>
     </action>
-    <end name="End"/>
+    <action name="CommitVersion">
+        <java>
+            <configuration>
+                <property>
+                    <name>oozie.launcher.mapreduce.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+            </configuration>
+            <main-class>eu.dnetlib.dhp.aggregation.mdstore.MDStoreActionNode</main-class>
+            <arg>--action</arg><arg>COMMIT</arg>
+            <arg>--namenode</arg><arg>${nameNode}</arg>
+            <arg>--mdStoreVersion</arg><arg>${wf:actionData('StartTransaction')['mdStoreVersion']}</arg>
+            <arg>--mdStoreManagerURI</arg><arg>${mdStoreManagerURI}</arg>
+        </java>
+        <ok to="End"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="RollBack">
+        <java>
+            <configuration>
+                <property>
+                    <name>oozie.launcher.mapreduce.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+            </configuration>
+            <main-class>eu.dnetlib.dhp.aggregation.mdstore.MDStoreActionNode</main-class>
+            <arg>--action</arg><arg>ROLLBACK</arg>
+            <arg>--mdStoreVersion</arg><arg>${wf:actionData('StartTransaction')['mdStoreVersion']}</arg>
+            <arg>--mdStoreManagerURI</arg><arg>${mdStoreManagerURI}</arg>
+        </java>
+        <ok to="Kill"/>
+        <error to="Kill"/>
+    </action>
+
+    <end name="End"/>
 </workflow-app>
|
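Note: both reworked workflows in this changeset wrap the Spark step in the MDStore versioning lifecycle: StartTransaction requests a NEW_VERSION from the MDStore manager and captures its description, the Spark action writes into that version's HDFS path, CommitVersion issues the COMMIT, and every failure edge routes through RollBack so a half-written version is never published. A minimal Scala sketch of how the Spark side resolves the captured version (names taken from the diffs on this page; an illustration, not verbatim repository code):

```scala
// Sketch only. The workflow passes ${wf:actionData('StartTransaction')['mdStoreVersion']}
// as --mdstoreOutputVersion: a JSON-serialized MDStoreVersion whose hdfsPath
// is the base directory the job must write under.
import eu.dnetlib.dhp.schema.mdstore.MDStoreVersion
import eu.dnetlib.dhp.utils.DHPUtils.MAPPER

def resolveOutputBasePath(mdstoreOutputVersion: String): String = {
  val version = MAPPER.readValue(mdstoreOutputVersion, classOf[MDStoreVersion])
  version.getHdfsPath // records are then saved under s"$hdfsPath/$MDSTORE_DATA_PATH"
}
```

The same resolution appears in the SparkTransformBioDatabaseToOAF and SparkEBILinksToOaf hunks further down.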
Bio-entity transformation parameters (JSON):

@@ -2,5 +2,5 @@
 {"paramName":"mt", "paramLongName":"master", "paramDescription": "should be local or yarn", "paramRequired": true},
 {"paramName":"db", "paramLongName":"database", "paramDescription": "should be PDB or UNIPROT", "paramRequired": true},
 {"paramName":"p", "paramLongName":"dbPath", "paramDescription": "the path of the database to transform", "paramRequired": true},
-{"paramName":"t", "paramLongName":"targetPath", "paramDescription": "the OAF target path ", "paramRequired": true}
+{"paramName":"mo", "paramLongName":"mdstoreOutputVersion", "paramDescription": "the oaf path ", "paramRequired": true}
 ]
EBI links transformation parameters (JSON):

@@ -1,5 +1,20 @@
 [
-{"paramName":"mt", "paramLongName":"master", "paramDescription": "should be local or yarn", "paramRequired": true},
-{"paramName":"s", "paramLongName":"sourcePath","paramDescription": "the source Path", "paramRequired": true},
-{"paramName":"t", "paramLongName":"targetPath","paramDescription": "the oaf path ", "paramRequired": true}
+  {
+    "paramName": "mt",
+    "paramLongName": "master",
+    "paramDescription": "should be local or yarn",
+    "paramRequired": true
+  },
+  {
+    "paramName": "s",
+    "paramLongName": "sourcePath",
+    "paramDescription": "the source Path",
+    "paramRequired": true
+  },
+  {
+    "paramName": "mo",
+    "paramLongName": "mdstoreOutputVersion",
+    "paramDescription": "the oaf path ",
+    "paramRequired": true
+  }
 ]
EBI links workflow (Oozie workflow):

@@ -9,34 +9,26 @@
         <description>the Working Path</description>
     </property>
     <property>
-        <name>targetPath</name>
-        <description>the OAF MDStore Path</description>
+        <name>mdStoreOutputId</name>
+        <description>the identifier of the cleaned MDStore</description>
     </property>
     <property>
-        <name>sparkDriverMemory</name>
-        <description>memory for driver process</description>
-    </property>
-    <property>
-        <name>sparkExecutorMemory</name>
-        <description>memory for individual executor</description>
-    </property>
-    <property>
-        <name>sparkExecutorCores</name>
-        <description>number of cores used by single executor</description>
+        <name>mdStoreManagerURI</name>
+        <description>the path of the cleaned mdstore</description>
     </property>
     <property>
         <name>resumeFrom</name>
-        <value>DownloadEBILinks</value>
+        <value>CreateEBIDataSet</value>
         <description>node to start</description>
     </property>
 </parameters>

-<start to="resume_from"/>
+<start to="StartTransaction"/>

 <decision name="resume_from">
     <switch>
         <case to="DownloadEBILinks">${wf:conf('resumeFrom') eq 'DownloadEBILinks'}</case>
-        <case to="CreateEBIDataSet">${wf:conf('resumeFrom') eq 'CreateEBIDataSet'}</case>
+        <case to="StartTransaction">${wf:conf('resumeFrom') eq 'CreateEBIDataSet'}</case>
         <default to="DownloadEBILinks"/>
     </switch>
 </decision>
@@ -77,9 +69,29 @@
         <move source="${sourcePath}/ebi_links_dataset" target="${sourcePath}/ebi_links_dataset_old"/>
         <move source="${workingPath}/links_final" target="${sourcePath}/ebi_links_dataset"/>
     </fs>
-    <ok to="CreateEBIDataSet"/>
+    <ok to="StartTransaction"/>
     <error to="Kill"/>
 </action>

+<action name="StartTransaction">
+    <java>
+        <configuration>
+            <property>
+                <name>oozie.launcher.mapreduce.user.classpath.first</name>
+                <value>true</value>
+            </property>
+        </configuration>
+        <main-class>eu.dnetlib.dhp.aggregation.mdstore.MDStoreActionNode</main-class>
+        <arg>--action</arg><arg>NEW_VERSION</arg>
+        <arg>--mdStoreID</arg><arg>${mdStoreOutputId}</arg>
+        <arg>--mdStoreManagerURI</arg><arg>${mdStoreManagerURI}</arg>
+        <capture-output/>
+    </java>
+    <ok to="CreateEBIDataSet"/>
+    <error to="RollBack"/>
+</action>
+
 <action name="CreateEBIDataSet">
     <spark xmlns="uri:oozie:spark-action:0.2">
         <master>yarn-cluster</master>
@@ -95,11 +107,49 @@
             ${sparkExtraOPT}
         </spark-opts>
         <arg>--sourcePath</arg><arg>${sourcePath}/ebi_links_dataset</arg>
-        <arg>--targetPath</arg><arg>${targetPath}</arg>
+        <arg>--mdstoreOutputVersion</arg><arg>${wf:actionData('StartTransaction')['mdStoreVersion']}</arg>
         <arg>--master</arg><arg>yarn</arg>
     </spark>
     <ok to="End"/>
     <error to="Kill"/>
 </action>

+<action name="CommitVersion">
+    <java>
+        <configuration>
+            <property>
+                <name>oozie.launcher.mapreduce.user.classpath.first</name>
+                <value>true</value>
+            </property>
+        </configuration>
+        <main-class>eu.dnetlib.dhp.aggregation.mdstore.MDStoreActionNode</main-class>
+        <arg>--action</arg><arg>COMMIT</arg>
+        <arg>--namenode</arg><arg>${nameNode}</arg>
+        <arg>--mdStoreVersion</arg><arg>${wf:actionData('StartTransaction')['mdStoreVersion']}</arg>
+        <arg>--mdStoreManagerURI</arg><arg>${mdStoreManagerURI}</arg>
+    </java>
+    <ok to="End"/>
+    <error to="Kill"/>
+</action>
+
+<action name="RollBack">
+    <java>
+        <configuration>
+            <property>
+                <name>oozie.launcher.mapreduce.user.classpath.first</name>
+                <value>true</value>
+            </property>
+        </configuration>
+        <main-class>eu.dnetlib.dhp.aggregation.mdstore.MDStoreActionNode</main-class>
+        <arg>--action</arg><arg>ROLLBACK</arg>
+        <arg>--mdStoreVersion</arg><arg>${wf:actionData('StartTransaction')['mdStoreVersion']}</arg>
+        <arg>--mdStoreManagerURI</arg><arg>${mdStoreManagerURI}</arg>
+    </java>
+    <ok to="Kill"/>
+    <error to="Kill"/>
+</action>
+
 <end name="End"/>

 </workflow-app>
SparkApplyUpdate.scala (deleted file):

@@ -1,120 +0,0 @@
-package eu.dnetlib.dhp.collection.orcid
-
-import eu.dnetlib.dhp.application.AbstractScalaApplication
-import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
-import org.slf4j.{Logger, LoggerFactory}
-
-class SparkApplyUpdate(propertyPath: String, args: Array[String], log: Logger)
-  extends AbstractScalaApplication(propertyPath, args, log: Logger) {
-
-  /** Here all the spark applications runs this method
-    * where the whole logic of the spark node is defined
-    */
-  override def run(): Unit = {
-    val graphPath: String = parser.get("graphPath")
-    log.info("found parameters graphPath: {}", graphPath)
-    val updatePath: String = parser.get("updatePath")
-    log.info("found parameters updatePath: {}", updatePath)
-    val targetPath: String = parser.get("targetPath")
-    log.info("found parameters targetPath: {}", targetPath)
-    applyTableUpdate(spark, graphPath, updatePath, targetPath)
-    checkUpdate(spark, graphPath, targetPath)
-    moveTable(spark, graphPath, targetPath)
-  }
-
-  private def moveTable(spark: SparkSession, graphPath: String, updatePath: String): Unit = {
-    spark.read
-      .load(s"$updatePath/Authors")
-      .repartition(1000)
-      .write
-      .mode(SaveMode.Overwrite)
-      .save(s"$graphPath/Authors")
-    spark.read
-      .load(s"$updatePath/Works")
-      .repartition(1000)
-      .write
-      .mode(SaveMode.Overwrite)
-      .save(s"$graphPath/Works")
-    spark.read
-      .load(s"$updatePath/Employments")
-      .repartition(1000)
-      .write
-      .mode(SaveMode.Overwrite)
-      .save(s"$graphPath/Employments")
-  }
-
-  private def updateDataset(
-    inputDataset: DataFrame,
-    idUpdate: DataFrame,
-    updateDataframe: DataFrame,
-    targetPath: String
-  ): Unit = {
-    inputDataset
-      .join(idUpdate, inputDataset("orcid").equalTo(idUpdate("orcid")), "leftanti")
-      .select(inputDataset("*"))
-      .unionByName(updateDataframe)
-      .write
-      .mode(SaveMode.Overwrite)
-      .save(targetPath)
-  }
-
-  private def checkUpdate(spark: SparkSession, graphPath: String, updatePath: String): Unit = {
-    val totalOriginalAuthors = spark.read.load(s"$graphPath/Authors").count
-    val totalOriginalWorks = spark.read.load(s"$graphPath/Works").count
-    val totalOriginalEmployments = spark.read.load(s"$graphPath/Employments").count
-    val totalUpdateAuthors = spark.read.load(s"$updatePath/Authors").count
-    val totalUpdateWorks = spark.read.load(s"$updatePath/Works").count
-    val totalUpdateEmployments = spark.read.load(s"$updatePath/Employments").count
-
-    log.info("totalOriginalAuthors: {}", totalOriginalAuthors)
-    log.info("totalOriginalWorks: {}", totalOriginalWorks)
-    log.info("totalOriginalEmployments: {}", totalOriginalEmployments)
-    log.info("totalUpdateAuthors: {}", totalUpdateAuthors)
-    log.info("totalUpdateWorks: {}", totalUpdateWorks)
-    log.info("totalUpdateEmployments: {}", totalUpdateEmployments)
-    if (
-      totalUpdateAuthors < totalOriginalAuthors || totalUpdateEmployments < totalOriginalEmployments || totalUpdateWorks < totalOriginalWorks
-    )
-      throw new RuntimeException("The updated Graph contains less elements of the original one")
-  }
-
-  private def applyTableUpdate(spark: SparkSession, graphPath: String, updatePath: String, targetPath: String): Unit = {
-    val orcidIDUpdate = spark.read.load(s"$updatePath/Authors").select("orcid")
-    updateDataset(
-      spark.read.load(s"$graphPath/Authors"),
-      orcidIDUpdate,
-      spark.read.load(s"$updatePath/Authors"),
-      s"$targetPath/Authors"
-    )
-    updateDataset(
-      spark.read.load(s"$graphPath/Employments"),
-      orcidIDUpdate,
-      spark.read.load(s"$updatePath/Employments"),
-      s"$targetPath/Employments"
-    )
-    updateDataset(
-      spark.read.load(s"$graphPath/Works"),
-      orcidIDUpdate,
-      spark.read.load(s"$updatePath/Works"),
-      s"$targetPath/Works"
-    )
-  }
-
-}
-
-object SparkApplyUpdate {
-
-  val log: Logger = LoggerFactory.getLogger(SparkGenerateORCIDTable.getClass)
-
-  def main(args: Array[String]): Unit = {
-    new SparkApplyUpdate("/eu/dnetlib/dhp/collection/orcid/apply_orcid_table_parameter.json", args, log)
-      .initialize()
-      .run()
-  }
-}
SparkGenerateORCIDTable.scala:

@@ -6,7 +6,6 @@ import org.apache.hadoop.io.Text
 import org.apache.spark.SparkContext
 import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
 import org.slf4j.{Logger, LoggerFactory}
-import scala.collection.JavaConverters._

 class SparkGenerateORCIDTable(propertyPath: String, args: Array[String], log: Logger)
   extends AbstractScalaApplication(propertyPath, args, log: Logger) {
@@ -19,16 +18,12 @@ class SparkGenerateORCIDTable(propertyPath: String, args: Array[String], log: Lo
     log.info("found parameters sourcePath: {}", sourcePath)
     val targetPath: String = parser.get("targetPath")
     log.info("found parameters targetPath: {}", targetPath)
-    val fromUpdate = "true".equals(parser.get("fromUpdate"))
-    val sourceSummaryPath = if (fromUpdate) s"$sourcePath/summary*" else sourcePath
-    val sourceEmploymentsPath = if (fromUpdate) s"$sourcePath/employments*" else sourcePath
-    val sourceWorksPath = if (fromUpdate) s"$sourcePath/works*" else sourcePath
-    extractORCIDTable(spark, sourceSummaryPath, targetPath, fromUpdate)
-    extractORCIDEmploymentsTable(spark, sourceEmploymentsPath, targetPath, fromUpdate)
-    extractORCIDWorksTable(spark, sourceWorksPath, targetPath, fromUpdate)
+    extractORCIDTable(spark, sourcePath, targetPath)
+    extractORCIDEmploymentsTable(spark, sourcePath, targetPath)
+    extractORCIDWorksTable(spark, sourcePath, targetPath)
   }

-  def extractORCIDTable(spark: SparkSession, sourcePath: String, targetPath: String, skipFilterByKey: Boolean): Unit = {
+  def extractORCIDTable(spark: SparkSession, sourcePath: String, targetPath: String): Unit = {
     val sc: SparkContext = spark.sparkContext
     import spark.implicits._
     val df = sc
@@ -37,8 +32,8 @@ class SparkGenerateORCIDTable(propertyPath: String, args: Array[String], log: Lo
       .toDF
       .as[(String, String)]
     implicit val orcidAuthor: Encoder[Author] = Encoders.bean(classOf[Author])
-    val newDf = if (!skipFilterByKey) df.filter(r => r._1.contains("summaries")) else df
-    newDf
+    // implicit val orcidPID:Encoder[Pid] = Encoders.bean(classOf[Pid])
+    df.filter(r => r._1.contains("summaries"))
       .map { r =>
         val p = new OrcidParser
         p.parseSummary(r._2)
@@ -49,12 +44,7 @@ class SparkGenerateORCIDTable(propertyPath: String, args: Array[String], log: Lo
       .save(s"$targetPath/Authors")
   }

-  def extractORCIDWorksTable(
-    spark: SparkSession,
-    sourcePath: String,
-    targetPath: String,
-    skipFilterByKey: Boolean
-  ): Unit = {
+  def extractORCIDWorksTable(spark: SparkSession, sourcePath: String, targetPath: String): Unit = {
     val sc: SparkContext = spark.sparkContext
     import spark.implicits._
     val df = sc
@@ -63,37 +53,19 @@ class SparkGenerateORCIDTable(propertyPath: String, args: Array[String], log: Lo
       .toDF
       .as[(String, String)]
     implicit val orcidWorkAuthor: Encoder[Work] = Encoders.bean(classOf[Work])
-    //We are in the case of parsing ORCID UPDATE
-    if (skipFilterByKey) {
-      df.flatMap { r =>
-        val p = new OrcidParser
-        p.parseWorks(r._2).asScala
-      }.filter(p => p != null)
-        .write
-        .mode(SaveMode.Overwrite)
-        .save(s"$targetPath/Works")
-    }
-    //We are in the case of parsing ORCID DUMP
-    else {
-      df.filter(r => r._1.contains("works"))
-        .map { r =>
-          val p = new OrcidParser
-          p.parseWork(r._2)
-        }
-        .filter(p => p != null)
-        .write
-        .mode(SaveMode.Overwrite)
-        .save(s"$targetPath/Works")
-    }
+    implicit val orcidPID: Encoder[Pid] = Encoders.bean(classOf[Pid])
+    df.filter(r => r._1.contains("works"))
+      .map { r =>
+        val p = new OrcidParser
+        p.parseWork(r._2)
+      }
+      .filter(p => p != null)
+      .write
+      .mode(SaveMode.Overwrite)
+      .save(s"$targetPath/Works")
   }

-  def extractORCIDEmploymentsTable(
-    spark: SparkSession,
-    sourcePath: String,
-    targetPath: String,
-    skipFilterByKey: Boolean
-  ): Unit = {
+  def extractORCIDEmploymentsTable(spark: SparkSession, sourcePath: String, targetPath: String): Unit = {
     val sc: SparkContext = spark.sparkContext
     import spark.implicits._
     val df = sc
@@ -102,27 +74,16 @@ class SparkGenerateORCIDTable(propertyPath: String, args: Array[String], log: Lo
       .toDF
       .as[(String, String)]
     implicit val orcidEmploymentAuthor: Encoder[Employment] = Encoders.bean(classOf[Employment])
-    if (skipFilterByKey) {
-      df.flatMap { r =>
-        val p = new OrcidParser
-        p.parseEmployments(r._2).asScala
-      }.filter(p => p != null)
-        .write
-        .mode(SaveMode.Overwrite)
-        .save(s"$targetPath/Employments")
-    }
-    //We are in the case of parsing ORCID DUMP
-    else {
-      df.filter(r => r._1.contains("employments"))
-        .map { r =>
-          val p = new OrcidParser
-          p.parseEmployment(r._2)
-        }
-        .filter(p => p != null)
-        .write
-        .mode(SaveMode.Overwrite)
-        .save(s"$targetPath/Employments")
-    }
+    implicit val orcidPID: Encoder[Pid] = Encoders.bean(classOf[Pid])
+    df.filter(r => r._1.contains("employments"))
+      .map { r =>
+        val p = new OrcidParser
+        p.parseEmployment(r._2)
+      }
+      .filter(p => p != null)
+      .write
+      .mode(SaveMode.Overwrite)
+      .save(s"$targetPath/Employments")
   }
 }
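Read together with the SparkApplyUpdate deletion above, this hunk backs out the ORCID incremental-update path: the skipFilterByKey flag and the plural parseWorks/parseEmployments calls disappear, leaving only the dump-mode flow in which the sequence-file key identifies the record type. A rough Scala sketch of the surviving pattern (assumes the file's implicit bean Encoders and OrcidParser are in scope; illustrative only, not the exact repository code):

```scala
// Illustrative sketch: ORCID dumps arrive as (fileName, xml) pairs, and the
// key says which record type the payload carries, so each extractor keeps
// only its own payloads before parsing.
import org.apache.spark.sql.{Dataset, SaveMode}

def extractWorks(df: Dataset[(String, String)], targetPath: String): Unit =
  df.filter(r => r._1.contains("works")) // key-based record selection
    .map { r =>
      val p = new OrcidParser            // parser used throughout the diff
      p.parseWork(r._2)                  // one Work per XML payload
    }                                    // requires implicit Encoder[Work]
    .filter(p => p != null)              // drop records that failed to parse
    .write
    .mode(SaveMode.Overwrite)
    .save(s"$targetPath/Works")
```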
BioDBToOAF.scala:

@@ -231,7 +231,7 @@ object BioDBToOAF {
   def uniprotToOAF(input: String): List[Oaf] = {
     implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
     lazy val json = parse(input)
-    val pid = (json \ "pid").extract[String]
+    val pid = (json \ "pid").extract[String].trim()

     val d = new Dataset
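The one-character change above guards identifier minting: any whitespace around the extracted pid would otherwise flow into the generated OAF identifiers. A self-contained json4s example of the behavior (the input value is made up for illustration):

```scala
// Whitespace around the extracted pid survives json4s extraction; only an
// explicit trim removes it before the value is used downstream.
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse

implicit val formats: DefaultFormats.type = DefaultFormats

val json = parse("""{ "pid": " Q9H0H5 " }""") // hypothetical record
val raw = (json \ "pid").extract[String]        // " Q9H0H5 "
val pid = (json \ "pid").extract[String].trim() // "Q9H0H5"
assert(raw != pid && pid == "Q9H0H5")
```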
SparkTransformBioDatabaseToOAF.scala:

@@ -2,12 +2,15 @@ package eu.dnetlib.dhp.sx.bio

 import eu.dnetlib.dhp.application.ArgumentApplicationParser
 import eu.dnetlib.dhp.collection.CollectionUtils
+import eu.dnetlib.dhp.common.Constants.{MDSTORE_DATA_PATH, MDSTORE_SIZE_PATH}
+import eu.dnetlib.dhp.schema.mdstore.MDStoreVersion
 import eu.dnetlib.dhp.schema.oaf.Oaf
 import eu.dnetlib.dhp.sx.bio.BioDBToOAF.ScholixResolved
 import org.apache.commons.io.IOUtils
 import org.apache.spark.SparkConf
 import org.apache.spark.sql.{Encoder, Encoders, SparkSession}
 import org.slf4j.{Logger, LoggerFactory}
+import eu.dnetlib.dhp.utils.DHPUtils.{MAPPER, writeHdfsFile}

 object SparkTransformBioDatabaseToOAF {

@@ -25,8 +28,13 @@ object SparkTransformBioDatabaseToOAF {

     val dbPath: String = parser.get("dbPath")
     log.info("dbPath: {}", database)
-    val targetPath: String = parser.get("targetPath")
-    log.info("targetPath: {}", database)
+    val mdstoreOutputVersion = parser.get("mdstoreOutputVersion")
+    log.info("mdstoreOutputVersion: {}", mdstoreOutputVersion)
+
+    val cleanedMdStoreVersion = MAPPER.readValue(mdstoreOutputVersion, classOf[MDStoreVersion])
+    val outputBasePath = cleanedMdStoreVersion.getHdfsPath
+    log.info("outputBasePath: {}", outputBasePath)

     val spark: SparkSession =
       SparkSession
@@ -43,24 +51,28 @@ object SparkTransformBioDatabaseToOAF {
       case "UNIPROT" =>
         CollectionUtils.saveDataset(
           spark.createDataset(sc.textFile(dbPath).flatMap(i => BioDBToOAF.uniprotToOAF(i))),
-          targetPath
+          s"$outputBasePath/$MDSTORE_DATA_PATH"
         )
       case "PDB" =>
         CollectionUtils.saveDataset(
           spark.createDataset(sc.textFile(dbPath).flatMap(i => BioDBToOAF.pdbTOOaf(i))),
-          targetPath
+          s"$outputBasePath/$MDSTORE_DATA_PATH"
         )
       case "SCHOLIX" =>
         CollectionUtils.saveDataset(
           spark.read.load(dbPath).as[ScholixResolved].map(i => BioDBToOAF.scholixResolvedToOAF(i)),
-          targetPath
+          s"$outputBasePath/$MDSTORE_DATA_PATH"
         )
       case "CROSSREF_LINKS" =>
         CollectionUtils.saveDataset(
           spark.createDataset(sc.textFile(dbPath).map(i => BioDBToOAF.crossrefLinksToOaf(i))),
-          targetPath
+          s"$outputBasePath/$MDSTORE_DATA_PATH"
         )
     }
+
+    val df = spark.read.text(s"$outputBasePath/$MDSTORE_DATA_PATH")
+    val mdStoreSize = df.count
+    writeHdfsFile(spark.sparkContext.hadoopConfiguration, s"$mdStoreSize", s"$outputBasePath/$MDSTORE_SIZE_PATH")
   }

 }
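Besides redirecting the output, the job now records how many records it wrote next to the data, presumably so the MDStore bookkeeping can pick the count up when the version is committed. A compact sketch of that tail, using only the helpers named in the diff:

```scala
// Sketch of the size-recording pattern from the diff: count the written
// records and persist the count as a small side file next to the data.
import eu.dnetlib.dhp.common.Constants.{MDSTORE_DATA_PATH, MDSTORE_SIZE_PATH}
import eu.dnetlib.dhp.utils.DHPUtils.writeHdfsFile
import org.apache.spark.sql.SparkSession

def recordMdStoreSize(spark: SparkSession, outputBasePath: String): Unit = {
  val mdStoreSize = spark.read.text(s"$outputBasePath/$MDSTORE_DATA_PATH").count
  writeHdfsFile(
    spark.sparkContext.hadoopConfiguration,
    s"$mdStoreSize",
    s"$outputBasePath/$MDSTORE_SIZE_PATH"
  )
}
```

SparkEBILinksToOaf below applies the identical pattern, so both MDStore-producing jobs in this changeset report their size the same way.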
SparkEBILinksToOaf.scala:

@@ -9,6 +9,9 @@ import org.apache.commons.io.IOUtils
 import org.apache.spark.SparkConf
 import org.apache.spark.sql._
 import org.slf4j.{Logger, LoggerFactory}
+import eu.dnetlib.dhp.common.Constants.{MDSTORE_DATA_PATH, MDSTORE_SIZE_PATH}
+import eu.dnetlib.dhp.schema.mdstore.MDStoreVersion
+import eu.dnetlib.dhp.utils.DHPUtils.{MAPPER, writeHdfsFile}

 object SparkEBILinksToOaf {

@@ -32,8 +35,13 @@ object SparkEBILinksToOaf {
     import spark.implicits._
     val sourcePath = parser.get("sourcePath")
     log.info(s"sourcePath -> $sourcePath")
-    val targetPath = parser.get("targetPath")
-    log.info(s"targetPath -> $targetPath")
+    val mdstoreOutputVersion = parser.get("mdstoreOutputVersion")
+    log.info("mdstoreOutputVersion: {}", mdstoreOutputVersion)
+
+    val cleanedMdStoreVersion = MAPPER.readValue(mdstoreOutputVersion, classOf[MDStoreVersion])
+    val outputBasePath = cleanedMdStoreVersion.getHdfsPath
+    log.info("outputBasePath: {}", outputBasePath)

     implicit val PMEncoder: Encoder[Oaf] = Encoders.kryo(classOf[Oaf])

     val ebLinks: Dataset[EBILinkItem] = spark.read
@@ -46,7 +54,10 @@ object SparkEBILinksToOaf {
         .flatMap(j => BioDBToOAF.parse_ebi_links(j.links))
         .filter(p => BioDBToOAF.EBITargetLinksFilter(p))
         .flatMap(p => BioDBToOAF.convertEBILinksToOaf(p)),
-      targetPath
+      s"$outputBasePath/$MDSTORE_DATA_PATH"
     )
+    val df = spark.read.text(s"$outputBasePath/$MDSTORE_DATA_PATH")
+    val mdStoreSize = df.count
+    writeHdfsFile(spark.sparkContext.hadoopConfiguration, s"$mdStoreSize", s"$outputBasePath/$MDSTORE_SIZE_PATH")
   }
 }
PrepareAffiliationRelationsTest.java:

@@ -78,6 +78,10 @@ public class PrepareAffiliationRelationsTest {
         .getResource("/eu/dnetlib/dhp/actionmanager/bipaffiliations/doi_to_ror.json")
         .getPath();

+    String pubmedAffiliationRelationsPath = getClass()
+        .getResource("/eu/dnetlib/dhp/actionmanager/bipaffiliations/doi_to_ror.json")
+        .getPath();
+
     String outputPath = workingDir.toString() + "/actionSet";

     PrepareAffiliationRelations
@@ -85,8 +89,7 @@ public class PrepareAffiliationRelationsTest {
         new String[] {
             "-isSparkSessionManaged", Boolean.FALSE.toString(),
             "-crossrefInputPath", crossrefAffiliationRelationPath,
-            "-pubmedInputPath", crossrefAffiliationRelationPath,
-            "-openapcInputPath", crossrefAffiliationRelationPath,
+            "-pubmedInputPath", pubmedAffiliationRelationsPath,
             "-outputPath", outputPath
         });

@@ -103,7 +106,7 @@ public class PrepareAffiliationRelationsTest {
     // );
     // }
     // count the number of relations
-    assertEquals(60, tmp.count());
+    assertEquals(40, tmp.count());

     Dataset<Relation> dataset = spark.createDataset(tmp.rdd(), Encoders.bean(Relation.class));
     dataset.createOrReplaceTempView("result");
@@ -114,7 +117,7 @@ public class PrepareAffiliationRelationsTest {
     // verify that we have equal number of bi-directional relations
     Assertions
         .assertEquals(
-            30, execVerification
+            20, execVerification
                 .filter(
                     "relClass='" + ModelConstants.HAS_AUTHOR_INSTITUTION + "'")
                 .collectAsList()
@@ -122,7 +125,7 @@ public class PrepareAffiliationRelationsTest {

     Assertions
         .assertEquals(
-            30, execVerification
+            20, execVerification
                 .filter(
                     "relClass='" + ModelConstants.IS_AUTHOR_INSTITUTION_OF + "'")
                 .collectAsList()
GetFosTest.java (deleted file):

@@ -1,104 +0,0 @@
-
-package eu.dnetlib.dhp.actionmanager.fosnodoi;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.sql.SparkSession;
-import org.junit.jupiter.api.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import eu.dnetlib.dhp.actionmanager.createunresolvedentities.GetFOSSparkJob;
-import eu.dnetlib.dhp.actionmanager.createunresolvedentities.PrepareTest;
-import eu.dnetlib.dhp.actionmanager.createunresolvedentities.ProduceTest;
-import eu.dnetlib.dhp.actionmanager.createunresolvedentities.model.FOSDataModel;
-
-/**
- * @author miriam.baglioni
- * @Date 13/02/23
- */
-public class GetFosTest {
-
-    private static final Logger log = LoggerFactory.getLogger(ProduceTest.class);
-
-    private static Path workingDir;
-    private static SparkSession spark;
-    private static LocalFileSystem fs;
-    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
-    @BeforeAll
-    public static void beforeAll() throws IOException {
-        workingDir = Files.createTempDirectory(PrepareTest.class.getSimpleName());
-
-        fs = FileSystem.getLocal(new Configuration());
-        log.info("using work dir {}", workingDir);
-
-        SparkConf conf = new SparkConf();
-        conf.setAppName(ProduceTest.class.getSimpleName());
-
-        conf.setMaster("local[*]");
-        conf.set("spark.driver.host", "localhost");
-        conf.set("hive.metastore.local", "true");
-        conf.set("spark.ui.enabled", "false");
-        conf.set("spark.sql.warehouse.dir", workingDir.toString());
-        conf.set("hive.metastore.warehouse.dir", workingDir.resolve("warehouse").toString());
-
-        spark = SparkSession
-            .builder()
-            .appName(PrepareTest.class.getSimpleName())
-            .config(conf)
-            .getOrCreate();
-    }
-
-    @AfterAll
-    public static void afterAll() throws IOException {
-        FileUtils.deleteDirectory(workingDir.toFile());
-        spark.stop();
-    }
-
-    @Test
-    @Disabled
-    void test3() throws Exception {
-        final String sourcePath = getClass()
-            .getResource("/eu/dnetlib/dhp/actionmanager/fosnodoi/fosnodoi.csv")
-            .getPath();
-
-        final String outputPath = workingDir.toString() + "/fos.json";
-        GetFOSSparkJob
-            .main(
-                new String[] {
-                    "--isSparkSessionManaged", Boolean.FALSE.toString(),
-                    "--sourcePath", sourcePath,
-
-                    "-outputPath", outputPath,
-                    "-delimiter", ","
-
-                });
-
-        final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
-
-        JavaRDD<FOSDataModel> tmp = sc
-            .textFile(outputPath)
-            .map(item -> OBJECT_MAPPER.readValue(item, FOSDataModel.class));
-
-        tmp.foreach(t -> Assertions.assertTrue(t.getOaid() != null));
-        tmp.foreach(t -> Assertions.assertTrue(t.getLevel1() != null));
-        tmp.foreach(t -> Assertions.assertTrue(t.getLevel2() != null));
-        tmp.foreach(t -> Assertions.assertTrue(t.getLevel3() != null));
-
-        tmp.foreach(t -> System.out.println(new ObjectMapper().writeValueAsString(t)));
-    }
-}
PrepareTest.java (fosnodoi, deleted file):

@@ -1,99 +0,0 @@
-
-package eu.dnetlib.dhp.actionmanager.fosnodoi;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.sql.SparkSession;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import eu.dnetlib.dhp.actionmanager.createunresolvedentities.PrepareFOSSparkJob;
-import eu.dnetlib.dhp.actionmanager.createunresolvedentities.PrepareSDGSparkJob;
-import eu.dnetlib.dhp.actionmanager.createunresolvedentities.ProduceTest;
-import eu.dnetlib.dhp.schema.oaf.Result;
-
-public class PrepareTest {
-
-    private static final Logger log = LoggerFactory.getLogger(ProduceTest.class);
-
-    private static Path workingDir;
-    private static SparkSession spark;
-    private static LocalFileSystem fs;
-    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
-    @BeforeAll
-    public static void beforeAll() throws IOException {
-        workingDir = Files.createTempDirectory(PrepareTest.class.getSimpleName());
-
-        fs = FileSystem.getLocal(new Configuration());
-        log.info("using work dir {}", workingDir);
-
-        SparkConf conf = new SparkConf();
-        conf.setAppName(ProduceTest.class.getSimpleName());
-
-        conf.setMaster("local[*]");
-        conf.set("spark.driver.host", "localhost");
-        conf.set("hive.metastore.local", "true");
-        conf.set("spark.ui.enabled", "false");
-        conf.set("spark.sql.warehouse.dir", workingDir.toString());
-        conf.set("hive.metastore.warehouse.dir", workingDir.resolve("warehouse").toString());
-
-        spark = SparkSession
-            .builder()
-            .appName(PrepareTest.class.getSimpleName())
-            .config(conf)
-            .getOrCreate();
-    }
-
-    @AfterAll
-    public static void afterAll() throws IOException {
-        FileUtils.deleteDirectory(workingDir.toFile());
-        spark.stop();
-    }
-
-    @Test
-    void fosPrepareTest() throws Exception {
-        final String sourcePath = getClass()
-            .getResource("/eu/dnetlib/dhp/actionmanager/fosnodoi/fosnodoi.json")
-            .getPath();
-
-        PrepareFOSSparkJob
-            .main(
-                new String[] {
-                    "--isSparkSessionManaged", Boolean.FALSE.toString(),
-                    "--sourcePath", sourcePath,
-
-                    "-outputPath", workingDir.toString() + "/work",
-                    "-distributeDoi", Boolean.FALSE.toString()
-
-                });
-
-        final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
-
-        JavaRDD<Result> tmp = sc
-            .textFile(workingDir.toString() + "/work/fos")
-            .map(item -> OBJECT_MAPPER.readValue(item, Result.class));
-
-        tmp.foreach(t -> System.out.println(new ObjectMapper().writeValueAsString(t)));
-    }
-}
CreateOpenCitationsASTest.java:

@@ -76,7 +76,7 @@ public class CreateOpenCitationsASTest {

     String inputPath = getClass()
         .getResource(
-            "/eu/dnetlib/dhp/actionmanager/opencitations/COCI/inputremap/jsonforas")
+            "/eu/dnetlib/dhp/actionmanager/opencitations/COCI")
         .getPath();

     CreateActionSetSparkJob
@@ -84,6 +84,8 @@ public class CreateOpenCitationsASTest {
         new String[] {
             "-isSparkSessionManaged",
             Boolean.FALSE.toString(),
+            "-shouldDuplicateRels",
+            Boolean.TRUE.toString(),
             "-inputPath",
             inputPath,
             "-outputPath",
@@ -97,10 +99,9 @@ public class CreateOpenCitationsASTest {
         .map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
         .map(aa -> ((Relation) aa.getPayload()));

-    Assertions.assertEquals(27, tmp.count());
-    tmp.foreach(r -> Assertions.assertEquals(1, r.getCollectedfrom().size()));
+    assertEquals(31, tmp.count());

-    tmp.foreach(r -> System.out.println(OBJECT_MAPPER.writeValueAsString(r)));
+    // tmp.foreach(r -> System.out.println(OBJECT_MAPPER.writeValueAsString(r)));

 }
RemapTest.java (deleted file):

@@ -1,90 +0,0 @@
-
-package eu.dnetlib.dhp.actionmanager.opencitations;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.sql.Encoders;
-import org.apache.spark.sql.SparkSession;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import eu.dnetlib.dhp.actionmanager.opencitations.model.COCI;
-
-/**
- * @author miriam.baglioni
- * @Date 07/03/24
- */
-public class RemapTest {
-    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
-    private static SparkSession spark;
-
-    private static Path workingDir;
-    private static final Logger log = LoggerFactory
-        .getLogger(RemapTest.class);
-
-    @BeforeAll
-    public static void beforeAll() throws IOException {
-        workingDir = Files
-            .createTempDirectory(RemapTest.class.getSimpleName());
-        log.info("using work dir {}", workingDir);
-
-        SparkConf conf = new SparkConf();
-        conf.setAppName(RemapTest.class.getSimpleName());
-
-        conf.setMaster("local[*]");
-        conf.set("spark.driver.host", "localhost");
-        conf.set("hive.metastore.local", "true");
-        conf.set("spark.ui.enabled", "false");
-        conf.set("spark.sql.warehouse.dir", workingDir.toString());
-        conf.set("hive.metastore.warehouse.dir", workingDir.resolve("warehouse").toString());
-
-        spark = SparkSession
-            .builder()
-            .appName(RemapTest.class.getSimpleName())
-            .config(conf)
-            .getOrCreate();
-    }
-
-    @AfterAll
-    public static void afterAll() throws IOException {
-        FileUtils.deleteDirectory(workingDir.toFile());
-        spark.stop();
-    }
-
-    @Test
-    void testRemap() throws Exception {
-        String inputPath = getClass()
-            .getResource(
-                "/eu/dnetlib/dhp/actionmanager/opencitations/COCI/inputremap")
-            .getPath();
-
-        MapOCIdsInPids
-            .main(
-                new String[] {
-                    "-isSparkSessionManged",
-                    Boolean.FALSE.toString(),
-                    "-inputPath",
-                    inputPath,
-                    "-outputPath",
-                    workingDir.toString() + "/out/",
-                    "-nameNode", "input1;input2;input3;input4;input5"
-                });
-
-    }
-}
CreateTAActionSetTest.java (deleted file):

@@ -1,324 +0,0 @@
-
-package eu.dnetlib.dhp.actionmanager.transformativeagreement;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.io.Text;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.sql.SparkSession;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import eu.dnetlib.dhp.actionmanager.opencitations.CreateActionSetSparkJob;
-import eu.dnetlib.dhp.actionmanager.opencitations.CreateOpenCitationsASTest;
-import eu.dnetlib.dhp.schema.action.AtomicAction;
-import eu.dnetlib.dhp.schema.common.ModelConstants;
-import eu.dnetlib.dhp.schema.oaf.Relation;
-import eu.dnetlib.dhp.schema.oaf.utils.CleaningFunctions;
-import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory;
-
-/**
- * @author miriam.baglioni
- * @Date 13/02/24
- */
-public class CreateTAActionSetTest {
-    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
-    private static SparkSession spark;
-
-    private static Path workingDir;
-    private static final Logger log = LoggerFactory
-        .getLogger(CreateOpenCitationsASTest.class);
-
-    @BeforeAll
-    public static void beforeAll() throws IOException {
-        workingDir = Files
-            .createTempDirectory(CreateTAActionSetTest.class.getSimpleName());
-        log.info("using work dir {}", workingDir);
-
-        SparkConf conf = new SparkConf();
-        conf.setAppName(CreateTAActionSetTest.class.getSimpleName());
-
-        conf.setMaster("local[*]");
-        conf.set("spark.driver.host", "localhost");
-        conf.set("hive.metastore.local", "true");
-        conf.set("spark.ui.enabled", "false");
-        conf.set("spark.sql.warehouse.dir", workingDir.toString());
-        conf.set("hive.metastore.warehouse.dir", workingDir.resolve("warehouse").toString());
-
-        spark = SparkSession
-            .builder()
-            .appName(CreateTAActionSetTest.class.getSimpleName())
-            .config(conf)
-            .getOrCreate();
-    }
-
-    @AfterAll
-    public static void afterAll() throws IOException {
-        FileUtils.deleteDirectory(workingDir.toFile());
-        spark.stop();
-    }
-
-    @Test
-    void createActionSet() throws Exception {
-
-        String inputPath = getClass()
-            .getResource(
-                "/eu/dnetlib/dhp/actionmanager/transformativeagreement/facts.json")
-            .getPath();
-
-        eu.dnetlib.dhp.actionmanager.transformativeagreement.CreateActionSetSparkJob
-            .main(
-                new String[] {
-                    "-isSparkSessionManaged",
-                    Boolean.FALSE.toString(),
-                    "-inputPath",
-                    inputPath,
-                    "-outputPath",
-                    workingDir.toString() + "/actionSet1"
-                });
-
-    }
-
-    @Test
-    void testNumberofRelations2() throws Exception {
-
-        String inputPath = getClass()
-            .getResource(
-                "/eu/dnetlib/dhp/actionmanager/opencitations/COCI")
-            .getPath();
-
-        eu.dnetlib.dhp.actionmanager.opencitations.CreateActionSetSparkJob
-            .main(
-                new String[] {
-                    "-isSparkSessionManaged",
-                    Boolean.FALSE.toString(),
-                    "-inputPath",
-                    inputPath,
-                    "-outputPath",
-                    workingDir.toString() + "/actionSet2"
-                });
-
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        JavaRDD<Relation> tmp = sc
-            .sequenceFile(workingDir.toString() + "/actionSet2", Text.class, Text.class)
-            .map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
-            .map(aa -> ((Relation) aa.getPayload()));
-
-        assertEquals(23, tmp.count());
-
-        // tmp.foreach(r -> System.out.println(OBJECT_MAPPER.writeValueAsString(r)));
-
-    }
-
-    @Test
-    void testRelationsCollectedFrom() throws Exception {
-
-        String inputPath = getClass()
-            .getResource(
-                "/eu/dnetlib/dhp/actionmanager/opencitations/COCI")
-            .getPath();
-
-        eu.dnetlib.dhp.actionmanager.opencitations.CreateActionSetSparkJob
-            .main(
-                new String[] {
-                    "-isSparkSessionManaged",
-                    Boolean.FALSE.toString(),
-                    "-inputPath",
-                    inputPath,
-                    "-outputPath",
-                    workingDir.toString() + "/actionSet3"
-                });
-
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        JavaRDD<Relation> tmp = sc
-            .sequenceFile(workingDir.toString() + "/actionSet3", Text.class, Text.class)
-            .map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
-            .map(aa -> ((Relation) aa.getPayload()));
-
-        tmp.foreach(r -> {
-            assertEquals(ModelConstants.OPENOCITATIONS_NAME, r.getCollectedfrom().get(0).getValue());
-            assertEquals(ModelConstants.OPENOCITATIONS_ID, r.getCollectedfrom().get(0).getKey());
-        });
-
-    }
-
-    @Test
-    void testRelationsDataInfo() throws Exception {
-
-        String inputPath = getClass()
-            .getResource(
-                "/eu/dnetlib/dhp/actionmanager/opencitations/COCI")
-            .getPath();
-
-        eu.dnetlib.dhp.actionmanager.opencitations.CreateActionSetSparkJob
-            .main(
-                new String[] {
-                    "-isSparkSessionManaged",
-                    Boolean.FALSE.toString(),
-                    "-inputPath",
-                    inputPath,
-                    "-outputPath",
-                    workingDir.toString() + "/actionSet4"
-                });
-
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        JavaRDD<Relation> tmp = sc
-            .sequenceFile(workingDir.toString() + "/actionSet4", Text.class, Text.class)
-            .map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
-            .map(aa -> ((Relation) aa.getPayload()));
-
-        tmp.foreach(r -> {
-            assertEquals(false, r.getDataInfo().getInferred());
-            assertEquals(false, r.getDataInfo().getDeletedbyinference());
-            assertEquals("0.91", r.getDataInfo().getTrust());
-            assertEquals(
-                eu.dnetlib.dhp.actionmanager.opencitations.CreateActionSetSparkJob.OPENCITATIONS_CLASSID,
-                r.getDataInfo().getProvenanceaction().getClassid());
-            assertEquals(
-                eu.dnetlib.dhp.actionmanager.opencitations.CreateActionSetSparkJob.OPENCITATIONS_CLASSNAME,
-                r.getDataInfo().getProvenanceaction().getClassname());
-            assertEquals(ModelConstants.DNET_PROVENANCE_ACTIONS, r.getDataInfo().getProvenanceaction().getSchemeid());
-            assertEquals(ModelConstants.DNET_PROVENANCE_ACTIONS, r.getDataInfo().getProvenanceaction().getSchemename());
-        });
-
-    }
-
-    @Test
-    void testRelationsSemantics() throws Exception {
-
-        String inputPath = getClass()
-            .getResource(
-                "/eu/dnetlib/dhp/actionmanager/opencitations/COCI")
-            .getPath();
-
-        eu.dnetlib.dhp.actionmanager.opencitations.CreateActionSetSparkJob
-            .main(
-                new String[] {
-                    "-isSparkSessionManaged",
-                    Boolean.FALSE.toString(),
-                    "-inputPath",
-                    inputPath,
-                    "-outputPath",
-                    workingDir.toString() + "/actionSet5"
-                });
-
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        JavaRDD<Relation> tmp = sc
-            .sequenceFile(workingDir.toString() + "/actionSet5", Text.class, Text.class)
-            .map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
-            .map(aa -> ((Relation) aa.getPayload()));
-
-        tmp.foreach(r -> {
-            assertEquals("citation", r.getSubRelType());
-            assertEquals("resultResult", r.getRelType());
-        });
-        assertEquals(23, tmp.filter(r -> r.getRelClass().equals("Cites")).count());
-        assertEquals(0, tmp.filter(r -> r.getRelClass().equals("IsCitedBy")).count());
-
-    }
-
-    @Test
-    void testRelationsSourceTargetPrefix() throws Exception {
-
-        String inputPath = getClass()
-            .getResource(
-                "/eu/dnetlib/dhp/actionmanager/opencitations/COCI")
-            .getPath();
-
-        eu.dnetlib.dhp.actionmanager.opencitations.CreateActionSetSparkJob
-            .main(
-                new String[] {
-                    "-isSparkSessionManaged",
-                    Boolean.FALSE.toString(),
-                    "-inputPath",
-                    inputPath,
-                    "-outputPath",
-                    workingDir.toString() + "/actionSet6"
-                });
-
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        JavaRDD<Relation> tmp = sc
-            .sequenceFile(workingDir.toString() + "/actionSet6", Text.class, Text.class)
-            .map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
-            .map(aa -> ((Relation) aa.getPayload()));
-
-        tmp.foreach(r -> {
-            assertEquals("50|doi_________::", r.getSource().substring(0, 17));
-            assertEquals("50|doi_________::", r.getTarget().substring(0, 17));
-        });
-
-    }
-
-    @Test
-    void testRelationsSourceTargetCouple() throws Exception {
-        final String doi1 = "50|doi_________::"
-            + IdentifierFactory.md5(CleaningFunctions.normalizePidValue("doi", "10.1007/s10854-015-3684-x"));
-        final String doi2 = "50|doi_________::"
-            + IdentifierFactory.md5(CleaningFunctions.normalizePidValue("doi", "10.1111/j.1551-2916.2008.02408.x"));
-        final String doi3 = "50|doi_________::"
-            + IdentifierFactory.md5(CleaningFunctions.normalizePidValue("doi", "10.1007/s10854-014-2114-9"));
-        final String doi4 = "50|doi_________::"
-            + IdentifierFactory.md5(CleaningFunctions.normalizePidValue("doi", "10.1016/j.ceramint.2013.09.069"));
-        final String doi5 = "50|doi_________::"
-            + IdentifierFactory.md5(CleaningFunctions.normalizePidValue("doi", "10.1007/s10854-009-9913-4"));
-        final String doi6 = "50|doi_________::"
-            + IdentifierFactory.md5(CleaningFunctions.normalizePidValue("doi", "10.1016/0038-1098(72)90370-5"));
-
-        String inputPath = getClass()
-            .getResource(
-                "/eu/dnetlib/dhp/actionmanager/opencitations/COCI")
-            .getPath();
-
-        CreateActionSetSparkJob
-            .main(
-                new String[] {
-                    "-isSparkSessionManaged",
-                    Boolean.FALSE.toString(),
-                    "-inputPath",
-                    inputPath,
-                    "-outputPath",
-                    workingDir.toString() + "/actionSet7"
-                });
-
-        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
-
-        JavaRDD<Relation> tmp = sc
-            .sequenceFile(workingDir.toString() + "/actionSet7", Text.class, Text.class)
-            .map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
-            .map(aa -> ((Relation) aa.getPayload()));
-
-        JavaRDD<Relation> check = tmp.filter(r -> r.getSource().equals(doi1) || r.getTarget().equals(doi1));
-
-        assertEquals(5, check.count());
-
-        // check.foreach(r -> {
-        //     if (r.getSource().equals(doi2) || r.getSource().equals(doi3) || r.getSource().equals(doi4) ||
-        //         r.getSource().equals(doi5) || r.getSource().equals(doi6)) {
-        //         assertEquals(ModelConstants.IS_CITED_BY, r.getRelClass());
-        //         assertEquals(doi1, r.getTarget());
-        //     }
-        // });
-
-        assertEquals(5, check.filter(r -> r.getSource().equals(doi1)).count());
-        check.filter(r -> r.getSource().equals(doi1)).foreach(r -> assertEquals(ModelConstants.CITES, r.getRelClass()));
-
-    }
-}
@@ -2,7 +2,6 @@
package eu.dnetlib.dhp.collection.orcid;

import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

@@ -10,12 +9,7 @@ import java.util.Objects;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Encoders;

@@ -33,7 +27,6 @@ import com.ximpleware.XPathParseException;

import eu.dnetlib.dhp.collection.orcid.model.Author;
import eu.dnetlib.dhp.collection.orcid.model.ORCIDItem;
import eu.dnetlib.dhp.collection.orcid.model.Work;
import eu.dnetlib.dhp.parser.utility.VtdException;

public class DownloadORCIDTest {

@@ -89,34 +82,6 @@ public class DownloadORCIDTest {
		});
	}

	@Test
	public void testParsingOrcidUpdateEmployments() throws Exception {
		final String xml = IOUtils
			.toString(
				Objects
					.requireNonNull(
						getClass().getResourceAsStream("/eu/dnetlib/dhp/collection/orcid/update_employments.xml")));

		final OrcidParser parser = new OrcidParser();
		final ObjectMapper mapper = new ObjectMapper();
		System.out.println(mapper.writeValueAsString(parser.parseEmployments(xml)));
	}

	@Test
	public void testParsingOrcidUpdateWorks() throws Exception {
		final String xml = IOUtils
			.toString(
				Objects
					.requireNonNull(
						getClass().getResourceAsStream("/eu/dnetlib/dhp/collection/orcid/update_work.xml")));

		final OrcidParser parser = new OrcidParser();
		final List<Work> works = parser.parseWorks(xml);

		final ObjectMapper mapper = new ObjectMapper();
		System.out.println(mapper.writeValueAsString(works));
	}

	@Test
	public void testParsingEmployments() throws Exception {

@@ -1,8 +1,6 @@

package eu.dnetlib.dhp.collection.plugin.base;

import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.IOException;

import org.apache.commons.io.IOUtils;

@@ -65,21 +63,6 @@ public class BaseTransfomationTest extends AbstractVocabularyTest {
		System.out.println(result.getBody());
	}

	@Test
	void testBase2ODF_wrong_date() throws Exception {

		final MetadataRecord mr = new MetadataRecord();
		mr.setProvenance(new Provenance("DSID", "DSNAME", "PREFIX"));
		mr.setBody(IOUtils.toString(getClass().getResourceAsStream("record_wrong_1.xml")));

		final XSLTTransformationFunction tr = loadTransformationRule("xml/base2oaf.transformationRule.xml");

		assertThrows(NullPointerException.class, () -> {
			final MetadataRecord result = tr.call(mr);
			System.out.println(result.getBody());
		});
	}

	private XSLTTransformationFunction loadTransformationRule(final String path) throws Exception {
		final String xslt = new SAXReader()
			.read(this.getClass().getResourceAsStream(path))

@@ -9,7 +9,6 @@ import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.common.collection.CollectorException;
import eu.dnetlib.dhp.common.collection.HttpClientParams;

/**

@@ -38,7 +37,7 @@ public class RestIteratorTest {

	@Disabled
	@Test
	public void test() throws CollectorException {
	public void test() {

		HttpClientParams clientParams = new HttpClientParams();

@@ -1,48 +0,0 @@

package eu.dnetlib.dhp.collection.plugin.utils;

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class JsonUtilsTest {

	static private String wrapped(String xml) {
		return "<?xml version=\"1.0\" encoding=\"UTF-8\"?><recordWrap>" + xml + "</recordWrap>";
	}

	@Test
	void keyStartWithDigit() {
		assertEquals(
			wrapped("<m_100><n_200v>null</n_200v></m_100>"),
			JsonUtils.convertToXML("{\"100\" : {\"200v\" : null}}"));
	}

	@Test
	void keyStartWithSpecialchars() {
		assertEquals(
			wrapped("<_parent><_nest1><_nest2>null</_nest2></_nest1></_parent>"),
			JsonUtils.convertToXML("{\" parent\" : {\"-nest1\" : {\".nest2\" : null}}}"));
	}

	@Test
	void encodeArray() {
		assertEquals(
			wrapped("<_parent.child>1</_parent.child><_parent.child>2</_parent.child>"),
			JsonUtils.convertToXML("{\" parent.child\":[1, 2]}"));
	}

	@Test
	void arrayOfObjects() {
		assertEquals(
			wrapped("<parent><id>1</id></parent><parent><id>2</id></parent>"),
			JsonUtils.convertToXML("{\"parent\": [{\"id\": 1}, {\"id\": 2}]}"));
	}

	@Test
	void removeControlCharacters() {
		assertEquals(
			wrapped("<m_100><n_200v>Test</n_200v></m_100>"),
			JsonUtils.convertToXML("{\"100\" : {\"200v\" : \"\\u0000\\u000cTest\"}}"));
	}
}

@@ -3,7 +3,6 @@ package eu.dnetlib.dhp.transformation;

import static eu.dnetlib.dhp.common.Constants.MDSTORE_DATA_PATH;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.IOException;
import java.nio.file.Path;

@@ -280,19 +279,6 @@ class TransformationJobTest extends AbstractVocabularyTest {
		// TODO Create significant Assert
	}

	@Test
	public void testInvalidXSLT() throws Exception {
		final MetadataRecord mr = new MetadataRecord();

		mr.setProvenance(new Provenance("openaire____::cnr_explora", "CNR ExploRA", "cnr_________"));
		mr.setBody(IOUtils.toString(getClass().getResourceAsStream("/eu/dnetlib/dhp/transform/input_cnr_explora.xml")));
		// We load the XSLT transformation rule from the classpath
		final XSLTTransformationFunction tr = loadTransformationRule("/eu/dnetlib/dhp/transform/invalid.xslt");

		assertThrows(RuntimeException.class, () -> tr.call(mr));
	}

	private XSLTTransformationFunction loadTransformationRule(final String path) throws Exception {
		final String trValue = IOUtils.toString(this.getClass().getResourceAsStream(path));
		final LongAccumulator la = new LongAccumulator();

@@ -1,19 +0,0 @@
DOI,OAID,level1,level2,level3,level4,score_for_L3,score_for_L4
N/A,78975075580c::e680668c98366c9cd6349afc62486a7f,03 medical and health sciences,0301 basic medicine,030104 developmental biology,N/A,0.5,0.0
N/A,78975075580c::e680668c98366c9cd6349afc62486a7f,03 medical and health sciences,0303 health sciences,030304 developmental biology,N/A,0.5,0.0
N/A,od______2806::a1da9d2678b12969a9ab5f50b5e71d0a,05 social sciences,0501 psychology and cognitive sciences,050109 social psychology,05010904 Group processes/Collective identity,0.5589094161987305,0.5166763067245483
N/A,od______2806::a1da9d2678b12969a9ab5f50b5e71d0a,05 social sciences,0501 psychology and cognitive sciences,050105 experimental psychology,05010501 Emotion/Affective science,0.44109055399894714,0.4833236634731293
N/A,doajarticles::76535d77fd2a5fe9810aefafffb8ef6c,05 social sciences,0502 economics and business,050203 business & management,05020302 Supply chain management/Business terms,0.5459638833999634,0.5460261106491089
N/A,doajarticles::76535d77fd2a5fe9810aefafffb8ef6c,05 social sciences,0502 economics and business,050211 marketing,05021102 Services marketing/Retailing,0.4540362060070038,0.4539738595485687
N/A,od_______156::a3a0119c6d9d3a66943f8da042e97a5e,01 natural sciences,0105 earth and related environmental sciences,010504 meteorology & atmospheric sciences,01050407 Geomagnetism/Ionosphere,0.5131047964096069,0.4990350902080536
N/A,od_______156::a3a0119c6d9d3a66943f8da042e97a5e,01 natural sciences,0105 earth and related environmental sciences,010502 geochemistry & geophysics,01050203 Seismology/Seismology measurement,0.4868951737880707,0.500964879989624
N/A,od______2806::4b9a664dd6b8b04204cb613e7bc9c873,03 medical and health sciences,0302 clinical medicine,030220 oncology & carcinogenesis,03022002 Medical imaging/Medical physics,0.5068133473396301,0.10231181626910052
N/A,od______2806::4b9a664dd6b8b04204cb613e7bc9c873,03 medical and health sciences,0302 clinical medicine,030204 cardiovascular system & hematology,N/A,0.49318668246269226,0.0
N/A,od______3341::ef754de29464abf9bc9b99664630ce74,03 medical and health sciences,0302 clinical medicine,030220 oncology & carcinogenesis,03022012 Oncology/Infectious causes of cancer,0.5,0.5
N/A,od______3341::ef754de29464abf9bc9b99664630ce74,03 medical and health sciences,0302 clinical medicine,030220 oncology & carcinogenesis,03022012 Oncology/Infectious causes of cancer,0.5,0.5
N/A,od______3978::6704dcced0fe3dd6fbf985dc2507f61c,03 medical and health sciences,0302 clinical medicine,030217 neurology & neurosurgery,03021702 Aging-associated diseases/Cognitive disorders,0.5134317874908447,0.09614889098529535
N/A,od______3978::6704dcced0fe3dd6fbf985dc2507f61c,03 medical and health sciences,0301 basic medicine,030104 developmental biology,N/A,0.48656824231147766,0.0
N/A,dedup_wf_001::b77264819800b90c0328c4d17eea5c1a,02 engineering and technology,0209 industrial biotechnology,020901 industrial engineering & automation,02090105 Control theory/Advanced driver assistance systems,0.5178514122962952,0.5198937654495239
N/A,dedup_wf_001::b77264819800b90c0328c4d17eea5c1a,02 engineering and technology,"0202 electrical engineering, electronic engineering, information engineering",020201 artificial intelligence & image processing,02020108 Fuzzy logic/Artificial neural networks/Computational neuroscience,0.48214852809906006,0.4801062345504761
N/A,od______2806::a938609e9f36ada6629a1bcc50c88230,03 medical and health sciences,0302 clinical medicine,030217 neurology & neurosurgery,03021708 Neurotrauma/Stroke,0.5014800429344177,0.5109656453132629
N/A,od______2806::a938609e9f36ada6629a1bcc50c88230,02 engineering and technology,0206 medical engineering,020601 biomedical engineering,02060102 Medical terminology/Patient,0.4985199570655823,0.4890343248844147

@@ -1,18 +0,0 @@
{"doi":"n/a","oaid":"od______3341::ef754de29464abf9bc9b99664630ce74","level1":"03 medical and health sciences","level2":"0302 clinical medicine","level3":"030220 oncology & carcinogenesis","level4":"03022012 Oncology/Infectious causes of cancer","scoreL3":"0.5","scoreL4":"0.5"}
{"doi":"n/a","oaid":"78975075580c::e680668c98366c9cd6349afc62486a7f","level1":"03 medical and health sciences","level2":"0301 basic medicine","level3":"030104 developmental biology","level4":"N/A","scoreL3":"0.5","scoreL4":"0.0"}
{"doi":"n/a","oaid":"od______3341::ef754de29464abf9bc9b99664630ce74","level1":"03 medical and health sciences","level2":"0302 clinical medicine","level3":"030220 oncology & carcinogenesis","level4":"03022012 Oncology/Infectious causes of cancer","scoreL3":"0.5","scoreL4":"0.5"}
{"doi":"n/a","oaid":"78975075580c::e680668c98366c9cd6349afc62486a7f","level1":"03 medical and health sciences","level2":"0303 health sciences","level3":"030304 developmental biology","level4":"N/A","scoreL3":"0.5","scoreL4":"0.0"}
{"doi":"n/a","oaid":"od______3978::6704dcced0fe3dd6fbf985dc2507f61c","level1":"03 medical and health sciences","level2":"0302 clinical medicine","level3":"030217 neurology & neurosurgery","level4":"03021702 Aging-associated diseases/Cognitive disorders","scoreL3":"0.5134317874908447","scoreL4":"0.09614889098529535"}
{"doi":"n/a","oaid":"od______2806::a1da9d2678b12969a9ab5f50b5e71d0a","level1":"05 social sciences","level2":"0501 psychology and cognitive sciences","level3":"050109 social psychology","level4":"05010904 Group processes/Collective identity","scoreL3":"0.5589094161987305","scoreL4":"0.5166763067245483"}
{"doi":"n/a","oaid":"od______3978::6704dcced0fe3dd6fbf985dc2507f61c","level1":"03 medical and health sciences","level2":"0301 basic medicine","level3":"030104 developmental biology","level4":"N/A","scoreL3":"0.48656824231147766","scoreL4":"0.0"}
{"doi":"n/a","oaid":"od______2806::a1da9d2678b12969a9ab5f50b5e71d0a","level1":"05 social sciences","level2":"0501 psychology and cognitive sciences","level3":"050105 experimental psychology","level4":"05010501 Emotion/Affective science","scoreL3":"0.44109055399894714","scoreL4":"0.4833236634731293"}
{"doi":"n/a","oaid":"dedup_wf_001::b77264819800b90c0328c4d17eea5c1a","level1":"02 engineering and technology","level2":"0209 industrial biotechnology","level3":"020901 industrial engineering & automation","level4":"02090105 Control theory/Advanced driver assistance systems","scoreL3":"0.5178514122962952","scoreL4":"0.5198937654495239"}
{"doi":"n/a","oaid":"doajarticles::76535d77fd2a5fe9810aefafffb8ef6c","level1":"05 social sciences","level2":"0502 economics and business","level3":"050203 business & management","level4":"05020302 Supply chain management/Business terms","scoreL3":"0.5459638833999634","scoreL4":"0.5460261106491089"}
{"doi":"n/a","oaid":"doajarticles::76535d77fd2a5fe9810aefafffb8ef6c","level1":"05 social sciences","level2":"0502 economics and business","level3":"050211 marketing","level4":"05021102 Services marketing/Retailing","scoreL3":"0.4540362060070038","scoreL4":"0.4539738595485687"}
{"doi":"n/a","oaid":"dedup_wf_001::b77264819800b90c0328c4d17eea5c1a","level1":"02 engineering and technology","level2":"0202 electrical engineering, electronic engineering, information engineering","level3":"020201 artificial intelligence & image processing","level4":"02020108 Fuzzy logic/Artificial neural networks/Computational neuroscience","scoreL3":"0.48214852809906006","scoreL4":"0.4801062345504761"}
{"doi":"n/a","oaid":"od_______156::a3a0119c6d9d3a66943f8da042e97a5e","level1":"01 natural sciences","level2":"0105 earth and related environmental sciences","level3":"010504 meteorology & atmospheric sciences","level4":"01050407 Geomagnetism/Ionosphere","scoreL3":"0.5131047964096069","scoreL4":"0.4990350902080536"}
{"doi":"n/a","oaid":"od______2806::a938609e9f36ada6629a1bcc50c88230","level1":"03 medical and health sciences","level2":"0302 clinical medicine","level3":"030217 neurology & neurosurgery","level4":"03021708 Neurotrauma/Stroke","scoreL3":"0.5014800429344177","scoreL4":"0.5109656453132629"}
{"doi":"n/a","oaid":"od_______156::a3a0119c6d9d3a66943f8da042e97a5e","level1":"01 natural sciences","level2":"0105 earth and related environmental sciences","level3":"010502 geochemistry & geophysics","level4":"01050203 Seismology/Seismology measurement","scoreL3":"0.4868951737880707","scoreL4":"0.500964879989624"}
{"doi":"n/a","oaid":"od______2806::a938609e9f36ada6629a1bcc50c88230","level1":"02 engineering and technology","level2":"0206 medical engineering","level3":"020601 biomedical engineering","level4":"02060102 Medical terminology/Patient","scoreL3":"0.4985199570655823","scoreL4":"0.4890343248844147"}
{"doi":"n/a","oaid":"od______2806::4b9a664dd6b8b04204cb613e7bc9c873","level1":"03 medical and health sciences","level2":"0302 clinical medicine","level3":"030220 oncology & carcinogenesis","level4":"03022002 Medical imaging/Medical physics","scoreL3":"0.5068133473396301","scoreL4":"0.10231181626910052"}
{"doi":"n/a","oaid":"od______2806::4b9a664dd6b8b04204cb613e7bc9c873","level1":"03 medical and health sciences","level2":"0302 clinical medicine","level3":"030204 cardiovascular system & hematology","level4":"N/A","scoreL3":"0.49318668246269226","scoreL4":"0.0"}

File diff suppressed because one or more lines are too long
@@ -1,31 +0,0 @@
{"cited":"br/061201599020", "citing":"br/06203041400","oci":"oci:06701327944-06504326071"}
{"cited":"br/061201599020","citing":"br/06502272390","oci":"oci:06502272390-061301355525"}
{"cited":"br/061201599020", "citing":"br/06120941789","oci":"oci:0670804699-067055659"}
{"cited":"br/06210273177","citing":"br/06203041400","oci":"oci:061502003994-062201281456"}
{"cited":"br/06210273177", "citing":"br/06502272390","oci":"oci:06502272390-0660806688"}
{"cited":"br/06210273177", "citing":"br/06120941789","oci":"oci:06502307119-0620223645"}
{"cited":"br/0660613430","citing":"br/06203041400","oci":"oci:061502004011-061902692285"}
{"cited":"br/0660613430", "citing":"br/06502272390","oci":"oci:0660549063-0610398792"}
{"cited":"br/0660613430", "citing":"br/06120941789","oci":"oci:06420189324-06301543046"}
{"cited":"br/062602732073","citing":"br/06203041400","oci":"oci:06380130275-061502004367"}
{"cited":"br/062602732073","citing":"br/06502272390","oci":"oci:062403449086-062501448395"}
{"cited":"br/062602732073","citing":"br/06120941789","oci":"oci:06420189328-061202007182"}
{"cited":"br/061103703697","citing":"br/06203041400","oci":"oci:062603906965-061701362658"}
{"cited":"br/061103703697", "citing":"br/06502272390","oci":"oci:0670294309-06104327031"}
{"cited":"br/061103703697","citing":"br/06120941789","oci":"oci:061702060228-061301712529"}
{"cited":"br/06230199640", "citing":"br/0670517081","oci":"oci:06901104174-06503692526"}
{"cited":"br/061703513967","citing":"br/061702310822","oci":"oci:061702310822-061703513967"}
{"cited":"br/062104002953","citing":"br/061702311472","oci":"oci:061702311472-062104002953"}
{"cited":"br/061101204417","citing":"br/062102701590","oci":"oci:062102701590-061101204417"}
{"cited":"br/062403787088","citing":"br/061401499173","oci":"oci:061401499173-062403787088"}
{"cited":"br/061203576338","citing":"br/06110279619","oci":"oci:06110279619-061203576338"}
{"cited":"br/061601962207","citing":"br/061502004018","oci":"oci:061502004018-061601962207"}
{"cited":"br/06101014588", "citing":"br/061502004027","oci":"oci:061502004027-06101014588"}
{"cited":"br/06704040804", "citing":"br/06220799044","oci":"oci:06220799044-06704040804"}
{"cited":"br/061401105151","citing":"br/061502004037","oci":"oci:061502004037-061401105151"}
{"cited":"br/0640821079", "citing":"br/061702311537","oci":"oci:061702311537-0640821079"}
{"cited":"br/06604165310", "citing":"br/062501970289","oci":"oci:062501970289-06604165310"}
{"cited":"br/061501351689","citing":"br/061203895786","oci":"oci:061203895786-061501351689"}
{"cited":"br/06202223692", "citing":"br/06110298832","oci":"oci:06110298832-06202223692"}
{"cited":"br/06104310727", "citing":"br/0660439086","oci":"oci:0660439086-06104310727"}
{"cited":"br/06150216214", "citing":"br/06340150329","oci":"oci:06340150329-06150216214"}

@@ -1,48 +0,0 @@
omid,id
br/061201599020,doi:10.1142/s0219887817501687
br/06203041400,doi:10.1111/j.1523-5378.2005.00327.x pmid:16104945
br/06210273177,doi:10.1090/qam/20394
br/06502272390,pmid:32235596 doi:10.3390/nano10040644
br/0660613430,doi:10.1007/bf00470411
br/06120941789,doi:10.1098/rspa.2006.1747
br/062602732073,doi:10.1007/978-3-642-38844-6_25
br/06230199640,pmid:25088780 doi:10.1016/j.ymeth.2014.07.008
br/061103703697,pmid:2682767
br/0670517081,doi:10.1016/j.foodpol.2021.102189
br/06502310477,doi:10.1142/s0218127416500450
br/06520113284,doi:10.1109/cfasta57821.2023.10243367
br/062303652439,pmid:5962654 doi:10.1016/0020-708x(66)90001-9
br/06250691436,doi:10.1042/bst20150052 pmid:26009172
br/061201665577,doi:10.1097/00115550-200205000-00018
br/06503490336,pmid:34689254 doi:10.1007/s10072-021-05687-0
br/06220615942,pmid:25626134 doi:10.1016/j.jcis.2015.01.008
br/061103389243,doi:10.4324/9780203702819-10
br/062303011271,doi:10.1109/icassp.2011.5946250
br/061302926083,doi:10.4018/978-1-6684-3937-1.ch002
br/061402485360,doi:10.1109/iciict.2015.7396079
br/06410101083,doi:10.1016/j.autcon.2023.104828
br/062202243386,doi:10.1016/0001-8791(81)90022-1
br/06170421486,doi:10.1130/0016-7606(2003)115<0166:dsagmf>2.0.co;2
br/061201983865,doi:10.4324/9781315109008 isbn:9781315109008
br/061701697230,doi:10.1016/j.trd.2012.07.006
br/061201137111,doi:10.1109/access.2020.2971656
br/06120436283,pmid:2254430 doi:10.1128/jcm.28.11.2551-2554.1990
br/061903968916,doi:10.1111/j.1742-1241.1988.tb08627.x
br/06201583482,doi:10.1016/0016-5085(78)93139-6
br/06130338317,doi:10.2134/agronj1952.00021962004400080013x
br/062601538320,doi:10.1371/journal.pone.0270593 pmid:35789338
br/062401098626,pmid:22385804 doi:10.1016/j.talanta.2011.12.034
br/06190436492,doi:10.1039/c7dt01499f pmid:28644489
br/06202819247,doi:10.1007/978-3-319-45823-6_57
br/0648013560,doi:10.1080/14772000.2012.705356
br/0690214059,doi:10.2752/175630608x329217
br/06601640415,doi:10.1080/18128600508685647
br/061503394761,doi:10.1002/0471443395.img018
br/061702861849,pmid:31203682 doi:10.1080/10428194.2019.1627538
br/06450133713,doi:10.1093/acprof:oso/9780199670888.003.0008
br/0628074892,doi:10.1097/hnp.0000000000000597
br/061601032219,doi:10.1002/bdm.2102
br/06602079930,doi:10.1101/2020.08.25.267500
br/0604192147,doi:10.11501/3307395
br/061101933800,doi:10.1142/s0217732398002242
br/06504184118,pmid:10091417

@@ -1,27 +0,0 @@
{"oci":"oci:06701327944-06504326071","citing":"16104945","citing_pid":"pmid","cited":"10.1142/s0219887817501687","cited_pid":"doi"}
{"oci":"oci:06701327944-06504326071","citing":"10.1111/j.1523-5378.2005.00327.x","citing_pid":"doi","cited":"10.1142/s0219887817501687","cited_pid":"doi"}
{"oci":"oci:06502272390-061301355525","citing":"10.3390/nano10040644","citing_pid":"doi","cited":"10.1142/s0219887817501687","cited_pid":"doi"}
{"oci":"oci:06502272390-061301355525","citing":"32235596","citing_pid":"pmid","cited":"10.1142/s0219887817501687","cited_pid":"doi"}
{"oci":"oci:0670804699-067055659","citing":"10.1098/rspa.2006.1747","citing_pid":"doi","cited":"10.1142/s0219887817501687","cited_pid":"doi"}
{"oci":"oci:061502003994-062201281456","citing":"16104945","citing_pid":"pmid","cited":"10.1090/qam/20394","cited_pid":"doi"}
{"oci":"oci:061502003994-062201281456","citing":"10.1111/j.1523-5378.2005.00327.x","citing_pid":"doi","cited":"10.1090/qam/20394","cited_pid":"doi"}
{"oci":"oci:06502272390-0660806688","citing":"10.3390/nano10040644","citing_pid":"doi","cited":"10.1090/qam/20394","cited_pid":"doi"}
{"oci":"oci:06502272390-0660806688","citing":"32235596","citing_pid":"pmid","cited":"10.1090/qam/20394","cited_pid":"doi"}
{"oci":"oci:06502307119-0620223645","citing":"10.1098/rspa.2006.1747","citing_pid":"doi","cited":"10.1090/qam/20394","cited_pid":"doi"}
{"oci":"oci:061502004011-061902692285","citing":"16104945","citing_pid":"pmid","cited":"10.1007/bf00470411","cited_pid":"doi"}
{"oci":"oci:061502004011-061902692285","citing":"10.1111/j.1523-5378.2005.00327.x","citing_pid":"doi","cited":"10.1007/bf00470411","cited_pid":"doi"}
{"oci":"oci:0660549063-0610398792","citing":"10.3390/nano10040644","citing_pid":"doi","cited":"10.1007/bf00470411","cited_pid":"doi"}
{"oci":"oci:0660549063-0610398792","citing":"32235596","citing_pid":"pmid","cited":"10.1007/bf00470411","cited_pid":"doi"}
{"oci":"oci:06420189324-06301543046","citing":"10.1098/rspa.2006.1747","citing_pid":"doi","cited":"10.1007/bf00470411","cited_pid":"doi"}
{"oci":"oci:06380130275-061502004367","citing":"16104945","citing_pid":"pmid","cited":"10.1007/978-3-642-38844-6_25","cited_pid":"doi"}
{"oci":"oci:06380130275-061502004367","citing":"10.1111/j.1523-5378.2005.00327.x","citing_pid":"doi","cited":"10.1007/978-3-642-38844-6_25","cited_pid":"doi"}
{"oci":"oci:062403449086-062501448395","citing":"10.3390/nano10040644","citing_pid":"doi","cited":"10.1007/978-3-642-38844-6_25","cited_pid":"doi"}
{"oci":"oci:062403449086-062501448395","citing":"32235596","citing_pid":"pmid","cited":"10.1007/978-3-642-38844-6_25","cited_pid":"doi"}
{"oci":"oci:06420189328-061202007182","citing":"10.1098/rspa.2006.1747","citing_pid":"doi","cited":"10.1007/978-3-642-38844-6_25","cited_pid":"doi"}
{"oci":"oci:062603906965-061701362658","citing":"16104945","citing_pid":"pmid","cited":"2682767","cited_pid":"pmid"}
{"oci":"oci:062603906965-061701362658","citing":"10.1111/j.1523-5378.2005.00327.x","citing_pid":"doi","cited":"2682767","cited_pid":"pmid"}
{"oci":"oci:0670294309-06104327031","citing":"10.3390/nano10040644","citing_pid":"doi","cited":"2682767","cited_pid":"pmid"}
{"oci":"oci:0670294309-06104327031","citing":"32235596","citing_pid":"pmid","cited":"2682767","cited_pid":"pmid"}
{"oci":"oci:061702060228-061301712529","citing":"10.1098/rspa.2006.1747","citing_pid":"doi","cited":"2682767","cited_pid":"pmid"}
{"oci":"oci:06901104174-06503692526","citing":"10.1016/j.foodpol.2021.102189","citing_pid":"doi","cited":"10.1016/j.ymeth.2014.07.008","cited_pid":"doi"}
{"oci":"oci:06901104174-06503692526","citing":"10.1016/j.foodpol.2021.102189","citing_pid":"doi","cited":"25088780","cited_pid":"pmid"}
Some files were not shown because too many files have changed in this diff.