forked from D-Net/dnet-hadoop

added dependency version in main pom.xml for orcid no doi

This commit is contained in: commit 9818e74a70

@@ -92,6 +92,17 @@
 <groupId>com.squareup.okhttp3</groupId>
 <artifactId>okhttp</artifactId>
 </dependency>

+<dependency>
+<groupId>eu.dnetlib</groupId>
+<artifactId>dnet-pace-core</artifactId>
+</dependency>
+
+<dependency>
+<groupId>eu.dnetlib.dhp</groupId>
+<artifactId>dhp-schemas</artifactId>
+<version>${project.version}</version>
+</dependency>
 </dependencies>

 </project>
@@ -1,5 +1,5 @@

-package eu.dnetlib.dhp.oa.dedup;
+package eu.dnetlib.dhp.oa.merge;

 import java.text.Normalizer;
 import java.util.*;
@@ -94,7 +94,13 @@ public class AuthorMerger {
 if (r.getPid() == null) {
 r.setPid(new ArrayList<>());
 }
-r.getPid().add(a._1());
+
+// TERRIBLE HACK but for some reason when we create and Array with Arrays.asList,
+// it creates of fixed size, and the add method raise UnsupportedOperationException at
+// java.util.AbstractList.add
+final List<StructuredProperty> tmp = new ArrayList<>(r.getPid());
+tmp.add(a._1());
+r.setPid(tmp);
 }
 }
 });
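Note: the comment introduced above points at a real pitfall: Arrays.asList returns a fixed-size list backed by its input array, so add() throws UnsupportedOperationException. A minimal, self-contained illustration of the failure and of the copy-to-ArrayList workaround applied in the hunk (plain String elements are used here instead of StructuredProperty for brevity):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FixedSizeListDemo {
    public static void main(String[] args) {
        List<String> pids = Arrays.asList("pid-1", "pid-2");
        try {
            pids.add("pid-3"); // throws: the list is backed by the original array
        } catch (UnsupportedOperationException e) {
            System.out.println("add() is not supported on the Arrays.asList view");
        }

        // the workaround used in the hunk: copy into a resizable list first
        List<String> copy = new ArrayList<>(pids);
        copy.add("pid-3");
        System.out.println(copy); // [pid-1, pid-2, pid-3]
    }
}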
@@ -5,6 +5,8 @@ import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Date;

+import org.apache.commons.lang3.StringUtils;
+
 import net.sf.saxon.expr.XPathContext;
 import net.sf.saxon.om.Sequence;
 import net.sf.saxon.trans.XPathException;
@@ -19,6 +21,8 @@ public class NormalizeDate extends AbstractExtensionFunction {

 private static final String normalizeOutFormat = "yyyy-MM-dd'T'hh:mm:ss'Z'";

+public static final String BLANK = "";
+
 @Override
 public String getName() {
 return "normalizeDate";
@@ -27,10 +31,10 @@ public class NormalizeDate extends AbstractExtensionFunction {
 @Override
 public Sequence doCall(XPathContext context, Sequence[] arguments) throws XPathException {
 if (arguments == null | arguments.length == 0) {
-return new StringValue("");
+return new StringValue(BLANK);
 }
 String s = arguments[0].head().getStringValue();
-return new StringValue(_year(s));
+return new StringValue(_normalizeDate(s));
 }

 @Override
@@ -55,8 +59,8 @@ public class NormalizeDate extends AbstractExtensionFunction {
 return SequenceType.SINGLE_STRING;
 }

-private String _year(String s) {
-final String date = s != null ? s.trim() : "";
+private String _normalizeDate(String s) {
+final String date = StringUtils.isNotBlank(s) ? s.trim() : BLANK;

 for (String format : normalizeDateFormats) {
 try {
@@ -66,6 +70,6 @@ public class NormalizeDate extends AbstractExtensionFunction {
 } catch (ParseException e) {
 }
 }
-return "";
+return BLANK;
 }
 }
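Note: the renamed _normalizeDate keeps the original strategy of trying a list of input patterns and emitting one canonical output format. A self-contained sketch of that pattern follows; the input format list is an assumption, only the output pattern normalizeOutFormat appears in this diff:

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

public class DateNormalizerSketch {
    // hypothetical input patterns; the real list lives in normalizeDateFormats
    private static final String[] INPUT_FORMATS = { "yyyy-MM-dd", "dd/MM/yyyy", "yyyy" };
    private static final String OUT_FORMAT = "yyyy-MM-dd'T'hh:mm:ss'Z'";

    public static String normalize(String s) {
        if (s == null || s.trim().isEmpty()) {
            return ""; // corresponds to the BLANK constant introduced above
        }
        final String date = s.trim();
        for (String format : INPUT_FORMATS) {
            try {
                Date d = new SimpleDateFormat(format).parse(date);
                return new SimpleDateFormat(OUT_FORMAT).format(d);
            } catch (ParseException e) {
                // ignore and try the next pattern
            }
        }
        return "";
    }
}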
@@ -0,0 +1,82 @@
+
+package eu.dnetlib.dhp.schema.dump.oaf.graph;
+
+import java.io.Serializable;
+
+/**
+* To store information about the classification for the project. The classification depends on the programme. For example
+* H2020-EU.3.4.5.3 can be classified as
+* H2020-EU.3. => Societal Challenges (level1)
+* H2020-EU.3.4. => Transport (level2)
+* H2020-EU.3.4.5. => CLEANSKY2 (level3)
+* H2020-EU.3.4.5.3. => IADP Fast Rotorcraft (level4)
+*
+* We decided to explicitly represent up to three levels in the classification.
+*
+* H2020Classification has the following parameters:
+* - private Programme programme to store the information about the programme related to this classification
+* - private String level1 to store the information about the level 1 of the classification (Priority or Pillar of the EC)
+* - private String level2 to store the information about the level2 af the classification (Objectives (?))
+* - private String level3 to store the information about the level3 of the classification
+* - private String classification to store the entire classification related to the programme
+*/
+public class H2020Classification implements Serializable {
+private Programme programme;
+
+private String level1;
+private String level2;
+private String level3;
+
+private String classification;
+
+public Programme getProgramme() {
+return programme;
+}
+
+public void setProgramme(Programme programme) {
+this.programme = programme;
+}
+
+public String getLevel1() {
+return level1;
+}
+
+public void setLevel1(String level1) {
+this.level1 = level1;
+}
+
+public String getLevel2() {
+return level2;
+}
+
+public void setLevel2(String level2) {
+this.level2 = level2;
+}
+
+public String getLevel3() {
+return level3;
+}
+
+public void setLevel3(String level3) {
+this.level3 = level3;
+}
+
+public String getClassification() {
+return classification;
+}
+
+public void setClassification(String classification) {
+this.classification = classification;
+}
+
+public static H2020Classification newInstance(String programme_code, String programme_description, String level1,
+String level2, String level3, String classification) {
+H2020Classification h2020classification = new H2020Classification();
+h2020classification.programme = Programme.newInstance(programme_code, programme_description);
+h2020classification.level1 = level1;
+h2020classification.level2 = level2;
+h2020classification.level3 = level3;
+h2020classification.classification = classification;
+return h2020classification;
+}
+}
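Note: a short usage sketch of the newInstance factory defined above, filled with the H2020-EU.3.4.5.3 example from the class javadoc (the concatenated classification string follows the ' | ' convention described in PrepareProgramme further down):

H2020Classification classification = H2020Classification
    .newInstance(
        "H2020-EU.3.4.5.3.",       // programme code
        "IADP Fast Rotorcraft",    // programme description
        "Societal Challenges",     // level1
        "Transport",               // level2
        "CLEANSKY2",               // level3
        "Societal Challenges | Transport | CLEANSKY2 | IADP Fast Rotorcraft");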
@@ -4,8 +4,6 @@ package eu.dnetlib.dhp.schema.dump.oaf.graph;
 import java.io.Serializable;
 import java.util.List;

-import eu.dnetlib.dhp.schema.dump.oaf.KeyValue;
-
 /**
 * This is the class representing the Project in the model used for the dumps of the whole graph. At the moment the dump
 * of the Projects differs from the other dumps because we do not create relations between Funders (Organization) and
@@ -33,7 +31,7 @@ import eu.dnetlib.dhp.schema.dump.oaf.KeyValue;
 * - private List<Funder> funding to store the list of funder of the project
 * - private String summary to store the summary of the project
 * - private Granted granted to store the granted amount
-* - private List<Programme> programme to store the list of programmes the project is related to
+* - private List<H2020Classification> h2020classification to store the list of H2020 classifications the project is related to
 */

 public class Project implements Serializable {
@@ -62,7 +60,7 @@ public class Project implements Serializable {

 private Granted granted;

-private List<Programme> programme;
+private List<H2020Classification> h2020Classifications;

 public String getId() {
 return id;
@@ -184,12 +182,11 @@ public class Project implements Serializable {
 this.granted = granted;
 }

-public List<Programme> getProgramme() {
-return programme;
+public List<H2020Classification> getH2020Classifications() {
+return h2020Classifications;
 }

-public void setProgramme(List<Programme> programme) {
-this.programme = programme;
+public void setH2020Classifications(List<H2020Classification> h2020Classifications) {
+this.h2020Classifications = h2020Classifications;
 }

 }
@@ -0,0 +1,88 @@
+
+package eu.dnetlib.dhp.schema.oaf;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+/**
+* To store information about the classification for the project. The classification depends on the programme. For example
+* H2020-EU.3.4.5.3 can be classified as
+* H2020-EU.3. => Societal Challenges (level1)
+* H2020-EU.3.4. => Transport (level2)
+* H2020-EU.3.4.5. => CLEANSKY2 (level3)
+* H2020-EU.3.4.5.3. => IADP Fast Rotorcraft (level4)
+*
+* We decided to explicitly represent up to three levels in the classification.
+*
+* H2020Classification has the following parameters:
+* - private Programme programme to store the information about the programme related to this classification
+* - private String level1 to store the information about the level 1 of the classification (Priority or Pillar of the EC)
+* - private String level2 to store the information about the level2 af the classification (Objectives (?))
+* - private String level3 to store the information about the level3 of the classification
+* - private String classification to store the entire classification related to the programme
+*/
+
+public class H2020Classification implements Serializable {
+private H2020Programme h2020Programme;
+private String level1;
+private String level2;
+private String level3;
+
+private String classification;
+
+public H2020Programme getH2020Programme() {
+return h2020Programme;
+}
+
+public void setH2020Programme(H2020Programme h2020Programme) {
+this.h2020Programme = h2020Programme;
+}
+
+public String getLevel1() {
+return level1;
+}
+
+public void setLevel1(String level1) {
+this.level1 = level1;
+}
+
+public String getLevel2() {
+return level2;
+}
+
+public void setLevel2(String level2) {
+this.level2 = level2;
+}
+
+public String getLevel3() {
+return level3;
+}
+
+public void setLevel3(String level3) {
+this.level3 = level3;
+}
+
+public String getClassification() {
+return classification;
+}
+
+public void setClassification(String classification) {
+this.classification = classification;
+}
+
+@Override
+public boolean equals(Object o) {
+if (this == o)
+return true;
+if (o == null || getClass() != o.getClass())
+return false;
+
+H2020Classification h2020classification = (H2020Classification) o;
+
+return Objects.equals(level1, h2020classification.level1) &&
+Objects.equals(level2, h2020classification.level2) &&
+Objects.equals(level3, h2020classification.level3) &&
+Objects.equals(classification, h2020classification.classification) &&
+h2020Programme.equals(h2020classification.h2020Programme);
+}
+}
@@ -4,7 +4,13 @@ package eu.dnetlib.dhp.schema.oaf;
 import java.io.Serializable;
 import java.util.Objects;

-public class Programme implements Serializable {
+/**
+* To store information about the ec programme for the project. It has the following parameters:
+* - private String code to store the code of the programme
+* - private String description to store the description of the programme
+*/
+
+public class H2020Programme implements Serializable {
 private String code;
 private String description;

@@ -31,8 +37,8 @@ public class Programme implements Serializable {
 if (o == null || getClass() != o.getClass())
 return false;

-Programme programme = (Programme) o;
-return Objects.equals(code, programme.code);
+H2020Programme h2020Programme = (H2020Programme) o;
+return Objects.equals(code, h2020Programme.code);
 }

 }
@@ -58,7 +58,35 @@ public class Project extends OafEntity implements Serializable {

 private Float fundedamount;

-private List<Programme> programme;
+private String h2020topiccode;
+
+private String h2020topicdescription;
+
+private List<H2020Classification> h2020classification;
+
+public String getH2020topicdescription() {
+return h2020topicdescription;
+}
+
+public void setH2020topicdescription(String h2020topicdescription) {
+this.h2020topicdescription = h2020topicdescription;
+}
+
+public String getH2020topiccode() {
+return h2020topiccode;
+}
+
+public void setH2020topiccode(String h2020topiccode) {
+this.h2020topiccode = h2020topiccode;
+}
+
+public List<H2020Classification> getH2020classification() {
+return h2020classification;
+}
+
+public void setH2020classification(List<H2020Classification> h2020classification) {
+this.h2020classification = h2020classification;
+}

 public Field<String> getWebsiteurl() {
 return websiteurl;
@@ -268,14 +296,6 @@ public class Project extends OafEntity implements Serializable {
 this.fundedamount = fundedamount;
 }

-public List<Programme> getProgramme() {
-return programme;
-}
-
-public void setProgramme(List<Programme> programme) {
-this.programme = programme;
-}
-
 @Override
 public void mergeFrom(OafEntity e) {
 super.mergeFrom(e);
@@ -331,7 +351,9 @@ public class Project extends OafEntity implements Serializable {
 ? p.getFundedamount()
 : fundedamount;

-programme = mergeLists(programme, p.getProgramme());
+// programme = mergeLists(programme, p.getProgramme());
+
+h2020classification = mergeLists(h2020classification, p.getH2020classification());

 mergeOAFDataInfo(e);
 }
@@ -7,6 +7,8 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.stream.Collectors;

+import eu.dnetlib.dhp.schema.common.LicenseComparator;
+
 public class Result extends OafEntity implements Serializable {

 private List<Measure> measures;
@@ -245,7 +247,8 @@ public class Result extends OafEntity implements Serializable {

 instance = mergeLists(instance, r.getInstance());

-if (r.getBestaccessright() != null && compareTrust(this, r) < 0)
+if (r.getBestaccessright() != null
+&& new LicenseComparator().compare(r.getBestaccessright(), bestaccessright) < 0)
 bestaccessright = r.getBestaccessright();

 if (r.getResulttype() != null && compareTrust(this, r) < 0)
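Note: the change above stops using compareTrust for bestaccessright and instead keeps the incoming value only when the comparator ranks it ahead of the current one. A hedged, generic sketch of that selection rule (the real code relies on LicenseComparator over the access-right qualifier):

import java.util.Comparator;

public class PreferredValueMerge {
    // keep the incoming value only when it is non-null and the supplied ordering
    // says it ranks strictly before the value we already hold
    public static <T> T merge(T current, T incoming, Comparator<T> order) {
        if (incoming != null && (current == null || order.compare(incoming, current) < 0)) {
            return incoming;
        }
        return current;
    }
}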
@@ -66,6 +66,19 @@
 </dependency>


+<!-- https://mvnrepository.com/artifact/org.apache.poi/poi-ooxml -->
+<dependency>
+<groupId>org.apache.poi</groupId>
+<artifactId>poi-ooxml</artifactId>
+</dependency>
+
+<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-compress -->
+<dependency>
+<groupId>org.apache.commons</groupId>
+<artifactId>commons-compress</artifactId>
+</dependency>
+
+
 </dependencies>

@@ -4,11 +4,13 @@ package eu.dnetlib.dhp.actionmanager.project;
 import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

 import java.util.HashMap;
+import java.util.Map;
 import java.util.Optional;

 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.sql.*;
 import org.slf4j.Logger;
@@ -16,11 +18,79 @@ import org.slf4j.LoggerFactory;

 import com.fasterxml.jackson.databind.ObjectMapper;

-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme;
+import eu.dnetlib.dhp.actionmanager.project.utils.CSVProgramme;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.common.HdfsSupport;
 import scala.Tuple2;

+/**
+* Among all the programmes provided in the csv file, selects those in H2020 framework that have an english title.
+*
+* The title is then handled to get the programme description at a certain level. The set of programme titles will then
+* be used to associate a classification for the programme.
+*
+* The programme code describes an hierarchy that can be exploited to provide the classification. To determine the hierarchy
+* the code can be split by '.'. If the length of the splitted code is less than or equal to 2 it can be directly used
+* as the classification: H2020-EU -> Horizon 2020 Framework Programme (It will never be repeated),
+* H2020-EU.1. -> Excellent science, H2020-EU.2. -> Industrial leadership etc.
+*
+* The codes are ordered and for all of them the concatenation of all the titles (from the element in position 1 of
+* the splitted code) handled as below is used to create the classification. For example:
+*
+* H2020-EU.1.1 -> Excellent science | European Research Council (ERC)
+* from H2020-EU.1. -> Excellence science and H2020-EU.1.1. -> European Research Council (ERC)
+*
+* H2020-EU.3.1.3.1. -> Societal challenges | Health, demographic change and well-being | Treating and managing disease | Treating disease, including developing regenerative medicine
+* from H2020-EU.3. -> Societal challenges,
+* H2020-EU.3.1. -> Health, demographic change and well-being
+* H2020-EU.3.1.3 -> Treating and managing disease
+* H2020-EU.3.1.3.1. -> Treating disease, including developing regenerative medicine
+*
+* The classification up to level three, will be split in dedicated variables, while the complete classification will be stored
+* in a variable called classification and provided as shown above.
+*
+* The programme title is not give in a standardized way:
+*
+* - Sometimes associated to the higher level in the hierarchy we can find Priority in title other times it is not the
+* case. Since it is not uniform, we removed priority from the handled titles:
+*
+* H2020-EU.1. -> PRIORITY 'Excellent science'
+* H2020-EU.2. -> PRIORITY 'Industrial leadership'
+* H2020-EU.3. -> PRIORITY 'Societal challenges
+*
+* will become
+*
+* H2020-EU.1. -> Excellent science
+* H2020-EU.2. -> Industrial leadership
+* H2020-EU.3. -> Societal challenges
+*
+* - Sometimes the title of the parent is repeated in the title for the code, but it is not always the case, so, titles
+* associated to previous levels in the hierarchy are removed from the code title.
+*
+* H2020-EU.1.2. -> EXCELLENT SCIENCE - Future and Emerging Technologies (FET)
+* H2020-EU.2.2. -> INDUSTRIAL LEADERSHIP - Access to risk finance
+* H2020-EU.3.4. -> SOCIETAL CHALLENGES - Smart, Green And Integrated Transport
+*
+* will become
+*
+* H2020-EU.1.2. -> Future and Emerging Technologies (FET)
+* H2020-EU.2.2. -> Access to risk finance
+* H2020-EU.3.4. -> Smart, Green And Integrated Transport
+*
+* This holds at all levels in the hierarchy. Hence
+*
+* H2020-EU.2.1.2. -> INDUSTRIAL LEADERSHIP - Leadership in enabling and industrial technologies – Nanotechnologies
+*
+* will become
+*
+* H2020-EU.2.1.2. -> Nanotechnologies
+*
+* - Euratom is not given in the way the other programmes are: H2020-EU. but H2020-Euratom- . So we need to write
+* specific code for it
+*
+*
+*
+*/
 public class PrepareProgramme {

 private static final Logger log = LoggerFactory.getLogger(PrepareProgramme.class);
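Note: the javadoc above describes how a classification label is assembled by walking the code hierarchy and joining the parent titles with ' | '. A toy sketch of that rule, using the H2020-EU.1.1 example quoted in the javadoc (the map would normally be filled from the programme csv):

import java.util.HashMap;
import java.util.Map;

public class ClassificationLabelDemo {
    public static void main(String[] args) {
        Map<String, String> label = new HashMap<>();
        // codes that split into at most two segments are used directly
        label.put("H2020-EU.1.", "Excellent science");
        // deeper codes append their own title to the parent label
        label.put("H2020-EU.1.1.", label.get("H2020-EU.1.") + " | " + "European Research Council (ERC)");
        System.out.println(label.get("H2020-EU.1.1."));
        // prints: Excellent science | European Research Council (ERC)
    }
}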
@@ -69,49 +139,127 @@ public class PrepareProgramme {
 private static void exec(SparkSession spark, String programmePath, String outputPath) {
 Dataset<CSVProgramme> programme = readPath(spark, programmePath, CSVProgramme.class);

-programme
+JavaRDD<CSVProgramme> h2020Programmes = programme
 .toJavaRDD()
-.filter(p -> !p.getCode().contains("FP7"))
+.filter(p -> p.getFrameworkProgramme().trim().equalsIgnoreCase("H2020"))
 .mapToPair(csvProgramme -> new Tuple2<>(csvProgramme.getCode(), csvProgramme))
 .reduceByKey((a, b) -> {
-if (StringUtils.isEmpty(a.getShortTitle())) {
-if (StringUtils.isEmpty(b.getShortTitle())) {
-if (StringUtils.isEmpty(a.getTitle())) {
-if (StringUtils.isNotEmpty(b.getTitle())) {
-a.setShortTitle(b.getTitle());
-a.setLanguage(b.getLanguage());
-}
-} else {// notIsEmpty a.getTitle
-if (StringUtils.isEmpty(b.getTitle())) {
-a.setShortTitle(a.getTitle());
-} else {
-if (b.getLanguage().equalsIgnoreCase("en")) {
-a.setShortTitle(b.getTitle());
-a.setLanguage(b.getLanguage());
-} else {
-a.setShortTitle(a.getTitle());
-}
-}
-}
-} else {// not isEmpty b.getShortTitle
-a.setShortTitle(b.getShortTitle());
-// a.setLanguage(b.getLanguage());
+if (!a.getLanguage().equals("en")) {
+if (b.getLanguage().equalsIgnoreCase("en")) {
+a.setTitle(b.getTitle());
+a.setLanguage(b.getLanguage());
 }
 }
+if (StringUtils.isEmpty(a.getShortTitle())) {
+if (!StringUtils.isEmpty(b.getShortTitle())) {
+a.setShortTitle(b.getShortTitle());
+}
+}

 return a;

 })
 .map(p -> {
 CSVProgramme csvProgramme = p._2();
-if (StringUtils.isEmpty(csvProgramme.getShortTitle())) {
-csvProgramme.setShortTitle(csvProgramme.getTitle());
+String programmeTitle = csvProgramme.getTitle().trim();
+if (programmeTitle.length() > 8 && programmeTitle.substring(0, 8).equalsIgnoreCase("PRIORITY")) {
+programmeTitle = programmeTitle.substring(9);
+if (programmeTitle.charAt(0) == '\'') {
+programmeTitle = programmeTitle.substring(1);
+}
+if (programmeTitle.charAt(programmeTitle.length() - 1) == '\'') {
+programmeTitle = programmeTitle.substring(0, programmeTitle.length() - 1);
+}
+csvProgramme.setTitle(programmeTitle);
 }
-return OBJECT_MAPPER.writeValueAsString(csvProgramme);
-})
+return csvProgramme;
+});
+
+prepareClassification(h2020Programmes);
+
+h2020Programmes
+.map(csvProgramme -> OBJECT_MAPPER.writeValueAsString(csvProgramme))
 .saveAsTextFile(outputPath);

 }

+private static void prepareClassification(JavaRDD<CSVProgramme> h2020Programmes) {
+Object[] codedescription = h2020Programmes
+.map(value -> new Tuple2<>(value.getCode(), value.getTitle()))
+.collect()
+.toArray();
+
+for (int i = 0; i < codedescription.length - 1; i++) {
+for (int j = i + 1; j < codedescription.length; j++) {
+Tuple2<String, String> t2i = (Tuple2<String, String>) codedescription[i];
+Tuple2<String, String> t2j = (Tuple2<String, String>) codedescription[j];
+if (t2i._1().compareTo(t2j._1()) > 0) {
+Tuple2<String, String> temp = t2i;
+codedescription[i] = t2j;
+codedescription[j] = temp;
+}
+}
+}
+
+Map<String, String> map = new HashMap<>();
+for (int j = 0; j < codedescription.length; j++) {
+Tuple2<String, String> entry = (Tuple2<String, String>) codedescription[j];
+String ent = entry._1();
+if (ent.contains("Euratom-")) {
+ent = ent.replace("-Euratom-", ".Euratom.");
+}
+String[] tmp = ent.split("\\.");
+if (tmp.length <= 2) {
+map.put(entry._1(), entry._2());
+
+} else {
+if (ent.endsWith(".")) {
+ent = ent.substring(0, ent.length() - 1);
+}
+String key = ent.substring(0, ent.lastIndexOf(".") + 1);
+if (key.contains("Euratom")) {
+key = key.replace(".Euratom.", "-Euratom-");
+ent = ent.replace(".Euratom.", "-Euratom-");
+if (key.endsWith("-")) {
+key = key.substring(0, key.length() - 1);
+}
+}
+String current = entry._2();
+if (!ent.contains("Euratom")) {
+
+String parent;
+String tmp_key = tmp[0] + ".";
+for (int i = 1; i < tmp.length - 1; i++) {
+tmp_key += tmp[i] + ".";
+parent = map.get(tmp_key).toLowerCase().trim();
+if (parent.contains("|")) {
+parent = parent.substring(parent.lastIndexOf("|") + 1).trim();
+}
+if (current.trim().length() > parent.length()
+&& current.toLowerCase().trim().substring(0, parent.length()).equals(parent)) {
+current = current.substring(parent.length() + 1);
+if (current.trim().charAt(0) == '-' || current.trim().charAt(0) == '–') {
+current = current.trim().substring(1).trim();
+}
+
+}
+}
+
+}
+map.put(ent + ".", map.get(key) + " | " + current);
+
+}
+
+}
+h2020Programmes.foreach(csvProgramme -> {
+if (!csvProgramme.getCode().endsWith(".") && !csvProgramme.getCode().contains("Euratom")
+&& !csvProgramme.getCode().equals("H2020-EC"))
+csvProgramme.setClassification(map.get(csvProgramme.getCode() + "."));
+else
+csvProgramme.setClassification(map.get(csvProgramme.getCode()));
+});
+}
+
 public static <R> Dataset<R> readPath(
 SparkSession spark, String inputPath, Class<R> clazz) {
 return spark
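Note: a worked example of the title clean-up performed in the .map step above, applied to one of the titles quoted in the class javadoc:

String programmeTitle = "PRIORITY 'Excellent science'".trim();
if (programmeTitle.length() > 8 && programmeTitle.substring(0, 8).equalsIgnoreCase("PRIORITY")) {
    programmeTitle = programmeTitle.substring(9);      // drop "PRIORITY "
    if (programmeTitle.charAt(0) == '\'') {
        programmeTitle = programmeTitle.substring(1);  // drop the opening quote
    }
    if (programmeTitle.charAt(programmeTitle.length() - 1) == '\'') {
        programmeTitle = programmeTitle.substring(0, programmeTitle.length() - 1); // drop the closing quote
    }
}
// programmeTitle is now "Excellent science"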
@@ -6,9 +6,7 @@ import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
 import java.util.*;

 import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.function.FlatMapFunction;
 import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.sql.Dataset;
@@ -20,12 +18,16 @@ import org.slf4j.LoggerFactory;

 import com.fasterxml.jackson.databind.ObjectMapper;

-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme;
-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProject;
+import eu.dnetlib.dhp.actionmanager.project.utils.CSVProgramme;
+import eu.dnetlib.dhp.actionmanager.project.utils.CSVProject;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.common.HdfsSupport;
 import scala.Tuple2;

+/**
+* Selects only the relevant information collected with the projects: project grant agreement, project programme code and
+* project topic code for the projects that are also collected from OpenAIRE.
+*/
 public class PrepareProjects {

 private static final Logger log = LoggerFactory.getLogger(PrepareProgramme.class);
@@ -97,10 +99,14 @@ public class PrepareProjects {
 if (csvProject.isPresent()) {

 String[] programme = csvProject.get().getProgramme().split(";");
+String topic = csvProject.get().getTopics();
+
 Arrays
 .stream(programme)
 .forEach(p -> {
 CSVProject proj = new CSVProject();
+proj.setTopics(topic);
+
 proj.setProgramme(p);
 proj.setId(csvProject.get().getId());
 csvProjectList.add(proj);
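Note: the loop above fans one collected project out into one CSVProject per programme code while carrying the topic along. A toy illustration of the split (the field values are invented for the example):

String programmeColumn = "H2020-EU.1.1.;H2020-EU.2.1.1."; // hypothetical csv value
String topics = "FETHPC-1-2014";                          // hypothetical topic code
for (String p : programmeColumn.split(";")) {
    System.out.println("CSVProject -> programme=" + p + ", topic=" + topics);
}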
@@ -3,6 +3,9 @@ package eu.dnetlib.dhp.actionmanager.project;

 import java.io.Serializable;

+/**
+* Class to store the grande agreement (code) of the collected projects
+*/
 public class ProjectSubset implements Serializable {

 private String code;
@@ -14,4 +17,5 @@ public class ProjectSubset implements Serializable {
 public void setCode(String code) {
 this.code = code;
 }
+
 }
@@ -25,6 +25,10 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.common.DbClient;

+/**
+* queries the OpenAIRE database to get the grant agreement of projects collected from corda__h2020. The code collected
+* are written on hdfs using the ProjectSubset model
+*/
 public class ReadProjectsFromDB implements Closeable {

 private final DbClient dbClient;
@@ -33,7 +37,7 @@ public class ReadProjectsFromDB implements Closeable {
 private final BufferedWriter writer;
 private final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

 private final static String query = "SELECT code " +
 "from projects where id like 'corda__h2020%' ";

 public static void main(final String[] args) throws Exception {
@@ -72,7 +76,6 @@ public class ReadProjectsFromDB implements Closeable {
 try {
 ProjectSubset p = new ProjectSubset();
 p.setCode(rs.getString("code"));
-
 return Arrays.asList(p);

 } catch (final Exception e) {
@@ -3,47 +3,54 @@ package eu.dnetlib.dhp.actionmanager.project;

 import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

-import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Objects;
 import java.util.Optional;
-import java.util.function.Consumer;

 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.SequenceFile;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
-import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.api.java.function.MapGroupsFunction;
-import org.apache.spark.rdd.SequenceFileRDDFunctions;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Encoders;
-import org.apache.spark.sql.SaveMode;
 import org.apache.spark.sql.SparkSession;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import com.fasterxml.jackson.databind.ObjectMapper;

-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme;
-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProject;
+import eu.dnetlib.dhp.actionmanager.project.utils.CSVProgramme;
+import eu.dnetlib.dhp.actionmanager.project.utils.CSVProject;
+import eu.dnetlib.dhp.actionmanager.project.utils.EXCELTopic;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.common.HdfsSupport;
 import eu.dnetlib.dhp.schema.action.AtomicAction;
 import eu.dnetlib.dhp.schema.common.ModelSupport;
-import eu.dnetlib.dhp.schema.oaf.Programme;
+import eu.dnetlib.dhp.schema.oaf.H2020Classification;
+import eu.dnetlib.dhp.schema.oaf.H2020Programme;
 import eu.dnetlib.dhp.schema.oaf.Project;
 import eu.dnetlib.dhp.utils.DHPUtils;
-import scala.Function1;
 import scala.Tuple2;
-import scala.runtime.BoxedUnit;

+/**
+* Class that makes the ActionSet. To prepare the AS two joins are needed
+*
+* 1. join betweem the collected project subset and the programme extenden with the classification on the grant agreement.
+* For each entry a
+* eu.dnetlib.dhp.Project entity is created and the information about H2020Classification is set together with the
+* h2020topiccode variable
+* 2. join between the output of the previous step and the topic information on the topic code. Each time a match is
+* found the h2020topicdescription variable is set.
+*
+* To produce one single entry for each project code a step of groupoing is needed: each project can be associated to more
+* than one programme.
+*
+*
+*/
 public class SparkAtomicActionJob {
 private static final Logger log = LoggerFactory.getLogger(SparkAtomicActionJob.class);
 private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
@@ -77,6 +84,9 @@ public class SparkAtomicActionJob {
 final String programmePath = parser.get("programmePath");
 log.info("programmePath {}: ", programmePath);

+final String topicPath = parser.get("topicPath");
+log.info("topic path {}: ", topicPath);
+
 SparkConf conf = new SparkConf();

 runWithSparkSession(
@@ -88,6 +98,7 @@ public class SparkAtomicActionJob {
 spark,
 projectPath,
 programmePath,
+topicPath,
 outputPath);
 });
 }
@@ -98,31 +109,53 @@ public class SparkAtomicActionJob {

 private static void getAtomicActions(SparkSession spark, String projectPatH,
 String programmePath,
+String topicPath,
 String outputPath) {

 Dataset<CSVProject> project = readPath(spark, projectPatH, CSVProject.class);
 Dataset<CSVProgramme> programme = readPath(spark, programmePath, CSVProgramme.class);
-project
+Dataset<EXCELTopic> topic = readPath(spark, topicPath, EXCELTopic.class);
+
+Dataset<Project> aaproject = project
 .joinWith(programme, project.col("programme").equalTo(programme.col("code")), "left")
-.map(c -> {
-CSVProject csvProject = c._1();
-Optional<CSVProgramme> csvProgramme = Optional.ofNullable(c._2());
-if (csvProgramme.isPresent()) {
-Project p = new Project();
-p
-.setId(
-createOpenaireId(
-ModelSupport.entityIdPrefix.get("project"),
-"corda__h2020", csvProject.getId()));
-Programme pm = new Programme();
-pm.setCode(csvProject.getProgramme());
-pm.setDescription(csvProgramme.get().getShortTitle());
-p.setProgramme(Arrays.asList(pm));
-return p;
-}
-
-return null;
+.map((MapFunction<Tuple2<CSVProject, CSVProgramme>, Project>) c -> {
+CSVProject csvProject = c._1();
+Optional<CSVProgramme> ocsvProgramme = Optional.ofNullable(c._2());
+
+return Optional
+.ofNullable(c._2())
+.map(csvProgramme -> {
+Project pp = new Project();
+pp
+.setId(
+createOpenaireId(
+ModelSupport.entityIdPrefix.get("project"),
+"corda__h2020", csvProject.getId()));
+pp.setH2020topiccode(csvProject.getTopics());
+H2020Programme pm = new H2020Programme();
+H2020Classification h2020classification = new H2020Classification();
+pm.setCode(csvProject.getProgramme());
+h2020classification.setClassification(ocsvProgramme.get().getClassification());
+h2020classification.setH2020Programme(pm);
+setLevelsAndProgramme(h2020classification, ocsvProgramme.get().getClassification());
+pp.setH2020classification(Arrays.asList(h2020classification));
+
+return pp;
+})
+.orElse(null);
+
+}, Encoders.bean(Project.class));
+
+aaproject
+.joinWith(topic, aaproject.col("h2020topiccode").equalTo(topic.col("code")))
+.map((MapFunction<Tuple2<Project, EXCELTopic>, Project>) p -> {
+Optional<EXCELTopic> op = Optional.ofNullable(p._2());
+Project rp = p._1();
+if (op.isPresent()) {
+rp.setH2020topicdescription(op.get().getTitle());
+}
+return rp;
 }, Encoders.bean(Project.class))
 .filter(Objects::nonNull)
 .groupByKey(
@@ -144,6 +177,18 @@ public class SparkAtomicActionJob {

 }

+private static void setLevelsAndProgramme(H2020Classification h2020Classification, String classification) {
+String[] tmp = classification.split(" \\| ");
+h2020Classification.setLevel1(tmp[0]);
+if (tmp.length > 1) {
+h2020Classification.setLevel2(tmp[1]);
+}
+if (tmp.length > 2) {
+h2020Classification.setLevel3(tmp[2]);
+}
+h2020Classification.getH2020Programme().setDescription(tmp[tmp.length - 1]);
+}
+
 public static <R> Dataset<R> readPath(
 SparkSession spark, String inputPath, Class<R> clazz) {
 return spark
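Note: a worked example of the split performed by setLevelsAndProgramme, using a classification string in the ' | ' format produced by PrepareProgramme (values taken from its javadoc):

String classification = "Societal challenges | Health, demographic change and well-being"
    + " | Treating and managing disease"
    + " | Treating disease, including developing regenerative medicine";
String[] tmp = classification.split(" \\| ");
// tmp[0]              -> level1: Societal challenges
// tmp[1]              -> level2: Health, demographic change and well-being
// tmp[2]              -> level3: Treating and managing disease
// tmp[tmp.length - 1] -> programme description: Treating disease, including developing regenerative medicine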
@@ -1,52 +0,0 @@
-
-package eu.dnetlib.dhp.actionmanager.project.csvutils;
-
-import java.io.Serializable;
-
-public class CSVProgramme implements Serializable {
-private String rcn;
-private String code;
-private String title;
-private String shortTitle;
-private String language;
-
-public String getRcn() {
-return rcn;
-}
-
-public void setRcn(String rcn) {
-this.rcn = rcn;
-}
-
-public String getCode() {
-return code;
-}
-
-public void setCode(String code) {
-this.code = code;
-}
-
-public String getTitle() {
-return title;
-}
-
-public void setTitle(String title) {
-this.title = title;
-}
-
-public String getShortTitle() {
-return shortTitle;
-}
-
-public void setShortTitle(String shortTitle) {
-this.shortTitle = shortTitle;
-}
-
-public String getLanguage() {
-return language;
-}
-
-public void setLanguage(String language) {
-this.language = language;
-}
-}
@@ -1,5 +1,5 @@

-package eu.dnetlib.dhp.actionmanager.project.csvutils;
+package eu.dnetlib.dhp.actionmanager.project.utils;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -10,6 +10,9 @@ import org.apache.commons.csv.CSVFormat;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.commons.lang.reflect.FieldUtils;

+/**
+* Reads a generic csv and maps it into classes that mirror its schema
+*/
 public class CSVParser {

 public <R> List<R> parse(String csvFile, String classForName)
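Note: the javadoc above describes a reflection-driven mapping from csv columns to bean fields; since the method body is outside this hunk, the following is only a sketch of that idea with commons-csv and FieldUtils (it assumes the column headers match the bean field names):

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.lang.reflect.FieldUtils;

public class ReflectiveCsvSketch {
    public <R> List<R> parse(String csvFile, String classForName) throws Exception {
        final Class<?> clazz = Class.forName(classForName);
        final List<R> result = new ArrayList<>();
        // withHeader() with no arguments reads the header row from the first record
        for (CSVRecord record : org.apache.commons.csv.CSVParser
            .parse(csvFile, CSVFormat.EXCEL.withDelimiter(';').withHeader())) {
            @SuppressWarnings("unchecked")
            final R bean = (R) clazz.newInstance();
            for (String header : record.toMap().keySet()) {
                // copy each column value into the field of the same name
                FieldUtils.writeField(bean, header, record.get(header), true);
            }
            result.add(bean);
        }
        return result;
    }
}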
@@ -0,0 +1,137 @@
+
+package eu.dnetlib.dhp.actionmanager.project.utils;
+
+import java.io.Serializable;
+
+/**
+* The model for the programme csv file
+*/
+public class CSVProgramme implements Serializable {
+private String parentProgramme;
+private String frameworkProgramme;
+private String startDate;
+private String endDate;
+private String objective;
+private String subjects;
+private String legalBasis;
+private String call;
+private String rcn;
+private String code;
+
+private String title;
+private String shortTitle;
+private String language;
+private String classification;
+
+public String getClassification() {
+return classification;
+}
+
+public void setClassification(String classification) {
+this.classification = classification;
+}
+
+public String getRcn() {
+return rcn;
+}
+
+public void setRcn(String rcn) {
+this.rcn = rcn;
+}
+
+public String getCode() {
+return code;
+}
+
+public void setCode(String code) {
+this.code = code;
+}
+
+public String getTitle() {
+return title;
+}
+
+public void setTitle(String title) {
+this.title = title;
+}
+
+public String getShortTitle() {
+return shortTitle;
+}
+
+public void setShortTitle(String shortTitle) {
+this.shortTitle = shortTitle;
+}
+
+public String getLanguage() {
+return language;
+}
+
+public void setLanguage(String language) {
+this.language = language;
+}
+
+public String getParentProgramme() {
+return parentProgramme;
+}
+
+public void setParentProgramme(String parentProgramme) {
+this.parentProgramme = parentProgramme;
+}
+
+public String getFrameworkProgramme() {
+return frameworkProgramme;
+}
+
+public void setFrameworkProgramme(String frameworkProgramme) {
+this.frameworkProgramme = frameworkProgramme;
+}
+
+public String getStartDate() {
+return startDate;
+}
+
+public void setStartDate(String startDate) {
+this.startDate = startDate;
+}
+
+public String getEndDate() {
+return endDate;
+}
+
+public void setEndDate(String endDate) {
+this.endDate = endDate;
+}
+
+public String getObjective() {
+return objective;
+}
+
+public void setObjective(String objective) {
+this.objective = objective;
+}
+
+public String getSubjects() {
+return subjects;
+}
+
+public void setSubjects(String subjects) {
+this.subjects = subjects;
+}
+
+public String getLegalBasis() {
+return legalBasis;
+}
+
+public void setLegalBasis(String legalBasis) {
+this.legalBasis = legalBasis;
+}
+
+public String getCall() {
+return call;
+}
+
+public void setCall(String call) {
+this.call = call;
+}
+}
@@ -1,8 +1,11 @@

-package eu.dnetlib.dhp.actionmanager.project.csvutils;
+package eu.dnetlib.dhp.actionmanager.project.utils;

 import java.io.Serializable;

+/**
+* the mmodel for the projects csv file
+*/
 public class CSVProject implements Serializable {
 private String rcn;
 private String id;
@ -0,0 +1,75 @@
|
||||||
|
|
||||||
|
package eu.dnetlib.dhp.actionmanager.project.utils;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Iterator;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.apache.commons.lang.StringUtils;
|
||||||
|
import org.apache.commons.lang.reflect.FieldUtils;
|
||||||
|
import org.apache.poi.openxml4j.exceptions.InvalidFormatException;
|
||||||
|
import org.apache.poi.openxml4j.opc.OPCPackage;
|
||||||
|
import org.apache.poi.ss.usermodel.Cell;
|
||||||
|
import org.apache.poi.ss.usermodel.DataFormatter;
|
||||||
|
import org.apache.poi.ss.usermodel.Row;
|
||||||
|
import org.apache.poi.xssf.usermodel.XSSFSheet;
|
||||||
|
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads a generic excel file and maps it into classes that mirror its schema
|
||||||
|
*/
|
||||||
|
public class EXCELParser {
|
||||||
|
|
||||||
|
public <R> List<R> parse(InputStream file, String classForName)
|
||||||
|
throws ClassNotFoundException, IOException, IllegalAccessException, InstantiationException,
|
||||||
|
InvalidFormatException {
|
||||||
|
|
||||||
|
// OPCPackage pkg = OPCPackage.open(httpConnector.getInputSourceAsStream(URL));
|
||||||
|
OPCPackage pkg = OPCPackage.open(file);
|
||||||
|
XSSFWorkbook wb = new XSSFWorkbook(pkg);
|
||||||
|
|
||||||
|
XSSFSheet sheet = wb.getSheet("cordisref-H2020topics");
|
||||||
|
|
||||||
|
List<R> ret = new ArrayList<>();
|
||||||
|
|
||||||
|
DataFormatter dataFormatter = new DataFormatter();
|
||||||
|
Iterator<Row> rowIterator = sheet.rowIterator();
|
||||||
|
List<String> headers = new ArrayList<>();
|
||||||
|
int count = 0;
|
||||||
|
while (rowIterator.hasNext()) {
|
||||||
|
Row row = rowIterator.next();
|
||||||
|
|
||||||
|
if (count == 0) {
|
||||||
|
Iterator<Cell> cellIterator = row.cellIterator();
|
||||||
|
|
||||||
|
while (cellIterator.hasNext()) {
|
||||||
|
Cell cell = cellIterator.next();
|
||||||
|
headers.add(dataFormatter.formatCellValue(cell));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Class<?> clazz = Class.forName("eu.dnetlib.dhp.actionmanager.project.utils.EXCELTopic");
|
||||||
|
final Object cc = clazz.newInstance();
|
||||||
|
|
||||||
|
for (int i = 0; i < headers.size(); i++) {
|
||||||
|
Cell cell = row.getCell(i);
|
||||||
|
String value = dataFormatter.formatCellValue(cell);
|
||||||
|
FieldUtils.writeField(cc, headers.get(i), dataFormatter.formatCellValue(cell), true);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
EXCELTopic et = (EXCELTopic) cc;
|
||||||
|
if (StringUtils.isNotBlank(et.getRcn())) {
|
||||||
|
ret.add((R) cc);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
count += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
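A minimal usage sketch for the parser above, assuming a local copy of the CORDIS topics workbook; the wrapper class name and the local path are illustrative and not part of this commit:

package eu.dnetlib.dhp.actionmanager.project.utils;

import java.io.FileInputStream;
import java.io.InputStream;
import java.util.List;

public class EXCELParserExample {

	public static void main(String[] args) throws Exception {
		// hypothetical local copy of cordisref-H2020topics.xlsx
		try (InputStream is = new FileInputStream("/tmp/cordisref-H2020topics.xlsx")) {
			EXCELParser parser = new EXCELParser();
			// the header row of the sheet drives the reflection-based field mapping
			List<EXCELTopic> topics = parser
				.parse(is, "eu.dnetlib.dhp.actionmanager.project.utils.EXCELTopic");
			topics.forEach(t -> System.out.println(t.getCode() + " -> " + t.getTitle()));
		}
	}
}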
@ -0,0 +1,127 @@

package eu.dnetlib.dhp.actionmanager.project.utils;

import java.io.Serializable;

/**
 * the model class for the topic excel file
 */
public class EXCELTopic implements Serializable {
	private String rcn;
	private String language;
	private String code;
	private String parentProgramme;
	private String frameworkProgramme;
	private String startDate;
	private String endDate;
	private String title;
	private String shortTitle;
	private String objective;
	private String subjects;
	private String legalBasis;
	private String call;

	public String getRcn() {
		return rcn;
	}

	public void setRcn(String rcn) {
		this.rcn = rcn;
	}

	public String getLanguage() {
		return language;
	}

	public void setLanguage(String language) {
		this.language = language;
	}

	public String getCode() {
		return code;
	}

	public void setCode(String code) {
		this.code = code;
	}

	public String getParentProgramme() {
		return parentProgramme;
	}

	public void setParentProgramme(String parentProgramme) {
		this.parentProgramme = parentProgramme;
	}

	public String getFrameworkProgramme() {
		return frameworkProgramme;
	}

	public void setFrameworkProgramme(String frameworkProgramme) {
		this.frameworkProgramme = frameworkProgramme;
	}

	public String getStartDate() {
		return startDate;
	}

	public void setStartDate(String startDate) {
		this.startDate = startDate;
	}

	public String getEndDate() {
		return endDate;
	}

	public void setEndDate(String endDate) {
		this.endDate = endDate;
	}

	public String getTitle() {
		return title;
	}

	public void setTitle(String title) {
		this.title = title;
	}

	public String getShortTitle() {
		return shortTitle;
	}

	public void setShortTitle(String shortTitle) {
		this.shortTitle = shortTitle;
	}

	public String getObjective() {
		return objective;
	}

	public void setObjective(String objective) {
		this.objective = objective;
	}

	public String getSubjects() {
		return subjects;
	}

	public void setSubjects(String subjects) {
		this.subjects = subjects;
	}

	public String getLegalBasis() {
		return legalBasis;
	}

	public void setLegalBasis(String legalBasis) {
		this.legalBasis = legalBasis;
	}

	public String getCall() {
		return call;
	}

	public void setCall(String call) {
		this.call = call;
	}
}
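As a rough sketch of what ends up on HDFS: ReadExcel (added below in this change set) serializes each parsed EXCELTopic as one JSON line. A hypothetical round-trip with Jackson could look like this; the wrapper class and the field values are illustrative, not taken from the real CORDIS file:

package eu.dnetlib.dhp.actionmanager.project.utils;

import com.fasterxml.jackson.databind.ObjectMapper;

public class EXCELTopicJsonExample {

	public static void main(String[] args) throws Exception {
		ObjectMapper mapper = new ObjectMapper();

		EXCELTopic topic = new EXCELTopic();
		topic.setRcn("12345"); // illustrative value
		topic.setCode("MSCA-IF-2019"); // topic code, the same key used by the project file
		topic.setTitle("Individual Fellowships");

		// one JSON line per topic, the same format ReadExcel writes to the topic dump
		String json = mapper.writeValueAsString(topic);
		System.out.println(json);

		EXCELTopic back = mapper.readValue(json, EXCELTopic.class);
		System.out.println(back.getCode());
	}
}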
@ -1,5 +1,5 @@

-package eu.dnetlib.dhp.actionmanager.project.csvutils;
+package eu.dnetlib.dhp.actionmanager.project.utils;

 import java.io.BufferedWriter;
 import java.io.Closeable;

@ -20,6 +20,9 @@ import com.fasterxml.jackson.databind.ObjectMapper;

 import eu.dnetlib.dhp.actionmanager.project.httpconnector.HttpConnector;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;

+/**
+ * Parses the csv file and writes its serialization to HDFS
+ */
 public class ReadCSV implements Closeable {
 	private static final Log log = LogFactory.getLog(ReadCSV.class);
 	private final Configuration conf;
@ -0,0 +1,98 @@

package eu.dnetlib.dhp.actionmanager.project.utils;

import java.io.*;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.actionmanager.project.httpconnector.HttpConnector;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;

/**
 * Parses the excel file and writes its serialization to HDFS
 */
public class ReadExcel implements Closeable {
	private static final Log log = LogFactory.getLog(ReadCSV.class);
	private final Configuration conf;
	private final BufferedWriter writer;
	private final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
	private InputStream excelFile;

	public static void main(final String[] args) throws Exception {
		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
			IOUtils
				.toString(
					ReadCSV.class
						.getResourceAsStream(
							"/eu/dnetlib/dhp/actionmanager/project/parameters.json")));

		parser.parseArgument(args);

		final String fileURL = parser.get("fileURL");
		final String hdfsPath = parser.get("hdfsPath");
		final String hdfsNameNode = parser.get("hdfsNameNode");
		final String classForName = parser.get("classForName");

		try (final ReadExcel readExcel = new ReadExcel(hdfsPath, hdfsNameNode, fileURL)) {

			log.info("Getting Excel file...");
			readExcel.execute(classForName);

		}
	}

	public void execute(final String classForName) throws Exception {
		EXCELParser excelParser = new EXCELParser();
		excelParser
			.parse(excelFile, classForName)
			.stream()
			.forEach(p -> write(p));

	}

	@Override
	public void close() throws IOException {
		writer.close();
	}

	public ReadExcel(
		final String hdfsPath,
		final String hdfsNameNode,
		final String fileURL)
		throws Exception {
		this.conf = new Configuration();
		this.conf.set("fs.defaultFS", hdfsNameNode);
		HttpConnector httpConnector = new HttpConnector();
		FileSystem fileSystem = FileSystem.get(this.conf);
		Path hdfsWritePath = new Path(hdfsPath);
		FSDataOutputStream fsDataOutputStream = null;
		if (fileSystem.exists(hdfsWritePath)) {
			fileSystem.delete(hdfsWritePath, false);
		}
		fsDataOutputStream = fileSystem.create(hdfsWritePath);

		this.writer = new BufferedWriter(new OutputStreamWriter(fsDataOutputStream, StandardCharsets.UTF_8));
		this.excelFile = httpConnector.getInputSourceAsStream(fileURL);
	}

	protected void write(final Object p) {
		try {
			writer.write(OBJECT_MAPPER.writeValueAsString(p));
			writer.newLine();
		} catch (final Exception e) {
			throw new RuntimeException(e);
		}
	}

}
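For reference, a local invocation of ReadExcel with the same arguments the get_topic_file Oozie action below passes might look like this. The wrapper class and the concrete name node / path values are placeholders, and this assumes the required parameters in parameters.json match the four arguments the workflow supplies:

package eu.dnetlib.dhp.actionmanager.project.utils;

public class ReadExcelLocalRun {

	public static void main(String[] args) throws Exception {
		// argument names mirror the workflow action; the values are illustrative
		ReadExcel
			.main(
				new String[] {
					"--hdfsNameNode", "hdfs://localhost:8020",
					"--fileURL", "http://cordis.europa.eu/data/reference/cordisref-H2020topics.xlsx",
					"--hdfsPath", "/tmp/workingDir/topic",
					"--classForName", "eu.dnetlib.dhp.actionmanager.project.utils.EXCELTopic"
				});
	}
}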
@ -17,6 +17,12 @@

 		"paramDescription": "the URL from where to get the programme file",
 		"paramRequired": true
 	},
+	{
+		"paramName": "tp",
+		"paramLongName": "topicPath",
+		"paramDescription": "the URL from where to get the topic file",
+		"paramRequired": true
+	},
 	{
 		"paramName": "o",
 		"paramLongName": "outputPath",
@ -31,6 +31,10 @@

 		<name>spark2SqlQueryExecutionListeners</name>
 		<value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
 	</property>
+	<property>
+		<name>oozie.launcher.mapreduce.user.classpath.first</name>
+		<value>true</value>
+	</property>
 	<property>
 		<name>sparkExecutorNumber</name>
 		<value>4</value>
@ -10,6 +10,10 @@

 		<description>the url where to get the programme file</description>
 	</property>
+	<property>
+		<name>topicFileURL</name>
+		<description>the url where to get the topic file</description>
+	</property>
 	<property>
 		<name>outputPath</name>
 		<description>path where to store the action set</description>
@ -33,11 +37,11 @@

 	<action name="get_project_file">
 		<java>
-			<main-class>eu.dnetlib.dhp.actionmanager.project.csvutils.ReadCSV</main-class>
+			<main-class>eu.dnetlib.dhp.actionmanager.project.utils.ReadCSV</main-class>
 			<arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
 			<arg>--fileURL</arg><arg>${projectFileURL}</arg>
 			<arg>--hdfsPath</arg><arg>${workingDir}/projects</arg>
-			<arg>--classForName</arg><arg>eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProject</arg>
+			<arg>--classForName</arg><arg>eu.dnetlib.dhp.actionmanager.project.utils.CSVProject</arg>
 		</java>
 		<ok to="get_programme_file"/>
 		<error to="Kill"/>
@ -45,11 +49,23 @@

 	<action name="get_programme_file">
 		<java>
-			<main-class>eu.dnetlib.dhp.actionmanager.project.csvutils.ReadCSV</main-class>
+			<main-class>eu.dnetlib.dhp.actionmanager.project.utils.ReadCSV</main-class>
 			<arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
 			<arg>--fileURL</arg><arg>${programmeFileURL}</arg>
 			<arg>--hdfsPath</arg><arg>${workingDir}/programme</arg>
-			<arg>--classForName</arg><arg>eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme</arg>
+			<arg>--classForName</arg><arg>eu.dnetlib.dhp.actionmanager.project.utils.CSVProgramme</arg>
+		</java>
+		<ok to="get_topic_file"/>
+		<error to="Kill"/>
+	</action>
+
+	<action name="get_topic_file">
+		<java>
+			<main-class>eu.dnetlib.dhp.actionmanager.project.utils.ReadExcel</main-class>
+			<arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
+			<arg>--fileURL</arg><arg>${topicFileURL}</arg>
+			<arg>--hdfsPath</arg><arg>${workingDir}/topic</arg>
+			<arg>--classForName</arg><arg>eu.dnetlib.dhp.actionmanager.project.utils.EXCELTopic</arg>
 		</java>
 		<ok to="read_projects"/>
 		<error to="Kill"/>
@ -136,6 +152,7 @@

 				</spark-opts>
 				<arg>--projectPath</arg><arg>${workingDir}/preparedProjects</arg>
 				<arg>--programmePath</arg><arg>${workingDir}/preparedProgramme</arg>
+				<arg>--topicPath</arg><arg>${workingDir}/topic</arg>
 				<arg>--outputPath</arg><arg>${outputPath}</arg>
 			</spark>
 			<ok to="End"/>
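The Spark action now also receives --topicPath, pointing at the JSON-lines dump written by ReadExcel. As a rough, hypothetical sketch of how that input could be loaded into a typed Dataset before being joined onto the prepared projects by topic code (the class and method names below are assumptions for illustration; the real Spark job's implementation is not part of this hunk):

package eu.dnetlib.dhp.actionmanager.project;

import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.actionmanager.project.utils.EXCELTopic;

public class TopicJoinSketch {

	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	// Reads the JSON-lines topic dump produced by ReadExcel into a typed Dataset,
	// ready to be matched against the project's topic code (e.g. "MSCA-IF-2019").
	public static Dataset<EXCELTopic> readTopics(SparkSession spark, String topicPath) {
		return spark
			.read()
			.textFile(topicPath)
			.map(
				(MapFunction<String, EXCELTopic>) value -> OBJECT_MAPPER.readValue(value, EXCELTopic.class),
				Encoders.bean(EXCELTopic.class));
	}
}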
@ -1,27 +1,16 @@

 package eu.dnetlib.dhp.actionmanager.project;

-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
 import java.util.List;

 import org.apache.commons.io.IOUtils;
-import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;

-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVParser;
+import eu.dnetlib.dhp.actionmanager.project.utils.CSVParser;

 public class CSVParserTest {

-	private static Path workingDir;
-
-	@BeforeAll
-	public static void beforeAll() throws IOException {
-		workingDir = Files.createTempDirectory(CSVParserTest.class.getSimpleName());
-
-	}
-
 	@Test
 	public void readProgrammeTest() throws Exception {

@ -33,9 +22,10 @@ public class CSVParserTest {

 		CSVParser csvParser = new CSVParser();

-		List<Object> pl = csvParser.parse(programmecsv, "eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme");
+		List<Object> pl = csvParser.parse(programmecsv, "eu.dnetlib.dhp.actionmanager.project.utils.CSVProgramme");

-		System.out.println(pl.size());
+		Assertions.assertEquals(24, pl.size());

 	}
 }
@ -0,0 +1,42 @@

package eu.dnetlib.dhp.actionmanager.project;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

import org.apache.poi.openxml4j.exceptions.InvalidFormatException;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

import eu.dnetlib.dhp.actionmanager.project.httpconnector.CollectorServiceException;
import eu.dnetlib.dhp.actionmanager.project.httpconnector.HttpConnector;
import eu.dnetlib.dhp.actionmanager.project.utils.EXCELParser;

public class EXCELParserTest {

	private static Path workingDir;
	private HttpConnector httpConnector = new HttpConnector();
	private static final String URL = "http://cordis.europa.eu/data/reference/cordisref-H2020topics.xlsx";

	@BeforeAll
	public static void beforeAll() throws IOException {
		workingDir = Files.createTempDirectory(CSVParserTest.class.getSimpleName());

	}

	@Test
	public void test1() throws CollectorServiceException, IOException, InvalidFormatException, ClassNotFoundException,
		IllegalAccessException, InstantiationException {

		EXCELParser excelParser = new EXCELParser();

		List<Object> pl = excelParser
			.parse(httpConnector.getInputSourceAsStream(URL), "eu.dnetlib.dhp.actionmanager.project.utils.EXCELTopic");

		Assertions.assertEquals(3837, pl.size());

	}
}
@ -21,29 +21,29 @@ import org.slf4j.LoggerFactory;

 import com.fasterxml.jackson.databind.ObjectMapper;

-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme;
+import eu.dnetlib.dhp.actionmanager.project.utils.CSVProgramme;

-public class PrepareProgrammeTest {
+public class PrepareH2020ProgrammeTest {

 	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

-	private static final ClassLoader cl = eu.dnetlib.dhp.actionmanager.project.PrepareProgrammeTest.class
+	private static final ClassLoader cl = PrepareH2020ProgrammeTest.class
 		.getClassLoader();

 	private static SparkSession spark;

 	private static Path workingDir;
 	private static final Logger log = LoggerFactory
-		.getLogger(eu.dnetlib.dhp.actionmanager.project.PrepareProgrammeTest.class);
+		.getLogger(PrepareH2020ProgrammeTest.class);

 	@BeforeAll
 	public static void beforeAll() throws IOException {
 		workingDir = Files
-			.createTempDirectory(eu.dnetlib.dhp.actionmanager.project.PrepareProgrammeTest.class.getSimpleName());
+			.createTempDirectory(PrepareH2020ProgrammeTest.class.getSimpleName());
 		log.info("using work dir {}", workingDir);

 		SparkConf conf = new SparkConf();
-		conf.setAppName(eu.dnetlib.dhp.actionmanager.project.PrepareProgrammeTest.class.getSimpleName());
+		conf.setAppName(PrepareH2020ProgrammeTest.class.getSimpleName());

 		conf.setMaster("local[*]");
 		conf.set("spark.driver.host", "localhost");

@ -54,7 +54,7 @@ public class PrepareProgrammeTest {

 		spark = SparkSession
 			.builder()
-			.appName(PrepareProgrammeTest.class.getSimpleName())
+			.appName(PrepareH2020ProgrammeTest.class.getSimpleName())
 			.config(conf)
 			.getOrCreate();
 	}
@ -88,7 +88,60 @@ public class PrepareProgrammeTest {

 		Dataset<CSVProgramme> verificationDataset = spark.createDataset(tmp.rdd(), Encoders.bean(CSVProgramme.class));

-		Assertions.assertEquals(0, verificationDataset.filter("shortTitle =''").count());
+		Assertions.assertEquals(0, verificationDataset.filter("title =''").count());
+
+		Assertions.assertEquals(0, verificationDataset.filter("classification = ''").count());
+
+		Assertions
+			.assertEquals(
+				"Societal challenges | Smart, Green And Integrated Transport | CLEANSKY2 | IADP Fast Rotorcraft",
+				verificationDataset
+					.filter("code = 'H2020-EU.3.4.5.3.'")
+					.select("classification")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+
+		Assertions
+			.assertEquals(
+				"Euratom | Indirect actions | European Fusion Development Agreement",
+				verificationDataset
+					.filter("code = 'H2020-Euratom-1.9.'")
+					.select("classification")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+
+		Assertions
+			.assertEquals(
+				"Industrial leadership | Leadership in enabling and industrial technologies | Advanced manufacturing and processing | New sustainable business models",
+				verificationDataset
+					.filter("code = 'H2020-EU.2.1.5.4.'")
+					.select("classification")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+
+		Assertions
+			.assertEquals(
+				"Excellent science | Future and Emerging Technologies (FET) | FET Open",
+				verificationDataset
+					.filter("code = 'H2020-EU.1.2.1.'")
+					.select("classification")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+
+		Assertions
+			.assertEquals(
+				"Industrial leadership | Leadership in enabling and industrial technologies | Biotechnology",
+				verificationDataset
+					.filter("code = 'H2020-EU.2.1.4.'")
+					.select("classification")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+
 	}

 }
@ -21,8 +21,7 @@ import org.slf4j.LoggerFactory;

 import com.fasterxml.jackson.databind.ObjectMapper;

-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme;
-import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProject;
+import eu.dnetlib.dhp.actionmanager.project.utils.CSVProject;

 public class PrepareProjectTest {
@ -4,12 +4,16 @@ package eu.dnetlib.dhp.actionmanager.project;

 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.util.List;

 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SparkSession;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.Assertions;

@ -73,10 +77,13 @@ public class SparkUpdateProjectTest {

 					Boolean.FALSE.toString(),
 					"-programmePath",
 					getClass()
-						.getResource("/eu/dnetlib/dhp/actionmanager/project/preparedProgramme_whole.json.gz")
+						.getResource(
+							"/eu/dnetlib/dhp/actionmanager/project/preparedProgramme_classification_whole.json.gz")
 						.getPath(),
 					"-projectPath",
 					getClass().getResource("/eu/dnetlib/dhp/actionmanager/project/prepared_projects.json").getPath(),
+					"-topicPath",
+					getClass().getResource("/eu/dnetlib/dhp/actionmanager/project/topic.json.gz").getPath(),
 					"-outputPath",
 					workingDir.toString() + "/actionSet"
 				});
@ -88,7 +95,231 @@

 			.map(value -> OBJECT_MAPPER.readValue(value._2().toString(), AtomicAction.class))
 			.map(aa -> ((Project) aa.getPayload()));

-		Assertions.assertEquals(14, tmp.count());
+		Assertions.assertEquals(15, tmp.count());
+
+		Dataset<Project> verificationDataset = spark.createDataset(tmp.rdd(), Encoders.bean(Project.class));
+		verificationDataset.createOrReplaceTempView("project");
+
+		Dataset<Row> execverification = spark
+			.sql(
+				"SELECT id, class classification, h2020topiccode, h2020topicdescription FROM project LATERAL VIEW EXPLODE(h2020classification) c as class ");
+
+		Assertions
+			.assertEquals(
+				"H2020-EU.3.4.7.",
+				execverification
+					.filter("id = '40|corda__h2020::2c7298913008865ba784e5c1350a0aa5'")
+					.select("classification.h2020Programme.code")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"SESAR JU",
+				execverification
+					.filter("id = '40|corda__h2020::2c7298913008865ba784e5c1350a0aa5'")
+					.select("classification.h2020Programme.description")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Societal challenges",
+				execverification
+					.filter("id = '40|corda__h2020::2c7298913008865ba784e5c1350a0aa5'")
+					.select("classification.level1")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Smart, Green And Integrated Transport",
+				execverification
+					.filter("id = '40|corda__h2020::2c7298913008865ba784e5c1350a0aa5'")
+					.select("classification.level2")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"SESAR JU",
+				execverification
+					.filter("id = '40|corda__h2020::2c7298913008865ba784e5c1350a0aa5'")
+					.select("classification.level3")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Societal challenges | Smart, Green And Integrated Transport | SESAR JU",
+				execverification
+					.filter("id = '40|corda__h2020::2c7298913008865ba784e5c1350a0aa5'")
+					.select("classification.classification")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"SESAR-ER4-31-2019",
+				execverification
+					.filter("id = '40|corda__h2020::2c7298913008865ba784e5c1350a0aa5'")
+					.select("h2020topiccode")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"U-space",
+				execverification
+					.filter("id = '40|corda__h2020::2c7298913008865ba784e5c1350a0aa5'")
+					.select("h2020topicdescription")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+
+		Assertions
+			.assertEquals(
+				"H2020-EU.1.3.2.",
+				execverification
+					.filter("id = '40|corda__h2020::1a1f235fdd06ef14790baec159aa1202'")
+					.select("classification.h2020Programme.code")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Nurturing excellence by means of cross-border and cross-sector mobility",
+				execverification
+					.filter("id = '40|corda__h2020::1a1f235fdd06ef14790baec159aa1202'")
+					.select("classification.h2020Programme.description")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Excellent science",
+				execverification
+					.filter("id = '40|corda__h2020::1a1f235fdd06ef14790baec159aa1202'")
+					.select("classification.level1")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Marie Skłodowska-Curie Actions",
+				execverification
+					.filter("id = '40|corda__h2020::1a1f235fdd06ef14790baec159aa1202'")
+					.select("classification.level2")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Nurturing excellence by means of cross-border and cross-sector mobility",
+				execverification
+					.filter("id = '40|corda__h2020::1a1f235fdd06ef14790baec159aa1202'")
+					.select("classification.level3")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Excellent science | Marie Skłodowska-Curie Actions | Nurturing excellence by means of cross-border and cross-sector mobility",
+				execverification
+					.filter("id = '40|corda__h2020::1a1f235fdd06ef14790baec159aa1202'")
+					.select("classification.classification")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"MSCA-IF-2019",
+				execverification
+					.filter("id = '40|corda__h2020::1a1f235fdd06ef14790baec159aa1202'")
+					.select("h2020topiccode")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Individual Fellowships",
+				execverification
+					.filter("id = '40|corda__h2020::1a1f235fdd06ef14790baec159aa1202'")
+					.select("h2020topicdescription")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+
+		Assertions
+			.assertTrue(
+				execverification
+					.filter("id = '40|corda__h2020::a657c271769fec90b60c1f2dbc25f4d5'")
+					.select("classification.h2020Programme.code")
+					.collectAsList()
+					.get(0)
+					.getString(0)
+					.equals("H2020-EU.2.1.4.") ||
+					execverification
+						.filter("id = '40|corda__h2020::a657c271769fec90b60c1f2dbc25f4d5'")
+						.select("classification.h2020Programme.code")
+						.collectAsList()
+						.get(1)
+						.getString(0)
+						.equals("H2020-EU.2.1.4."));
+		Assertions
+			.assertTrue(
+				execverification
+					.filter("id = '40|corda__h2020::a657c271769fec90b60c1f2dbc25f4d5'")
+					.select("classification.h2020Programme.code")
+					.collectAsList()
+					.get(0)
+					.getString(0)
+					.equals("H2020-EU.3.2.6.") ||
+					execverification
+						.filter("id = '40|corda__h2020::a657c271769fec90b60c1f2dbc25f4d5'")
+						.select("classification.h2020Programme.code")
+						.collectAsList()
+						.get(1)
+						.getString(0)
+						.equals("H2020-EU.3.2.6."));
+		Assertions
+			.assertEquals(
+				"Biotechnology",
+				execverification
+					.filter(
+						"id = '40|corda__h2020::a657c271769fec90b60c1f2dbc25f4d5' and classification.h2020Programme.code = 'H2020-EU.2.1.4.'")
+					.select("classification.h2020Programme.description")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Bio-based Industries Joint Technology Initiative (BBI-JTI)",
+				execverification
+					.filter(
+						"id = '40|corda__h2020::a657c271769fec90b60c1f2dbc25f4d5' and classification.h2020Programme.code = 'H2020-EU.3.2.6.'")
+					.select("classification.h2020Programme.description")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"BBI-2019-SO3-D4",
+				execverification
+					.filter("id = '40|corda__h2020::a657c271769fec90b60c1f2dbc25f4d5'")
+					.select("h2020topiccode")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+		Assertions
+			.assertEquals(
+				"Demonstrate bio-based pesticides and/or biostimulant agents for sustainable increase in agricultural productivity",
+				execverification
+					.filter("id = '40|corda__h2020::a657c271769fec90b60c1f2dbc25f4d5'")
+					.select("h2020topicdescription")
+					.collectAsList()
+					.get(0)
+					.getString(0));
+
 	}
 }
@ -13,7 +13,7 @@ public class HttpConnectorTest {

 	private static final Log log = LogFactory.getLog(HttpConnectorTest.class);
 	private static HttpConnector connector;

-	private static final String URL = "http://cordis.europa.eu/data/reference/cordisref-H2020programmes.csv";
+	private static final String URL = "http://cordis.europa.eu/data/reference/cordisref-H2020topics.xlsx";
 	private static final String URL_MISCONFIGURED_SERVER = "https://www.alexandria.unisg.ch/cgi/oai2?verb=Identify";
 	private static final String URL_GOODSNI_SERVER = "https://air.unimi.it/oai/openaire?verb=Identify";

Binary file not shown.

@ -3,7 +3,7 @@
{"rcn":"229281","id":"896300","acronym":"STRETCH","status":"SIGNED","programme":"H2020-EU.1.3.2.","topics":"MSCA-IF-2019","frameworkProgramme":"H2020","title":"Smart Textiles for RETrofitting and Monitoring of Cultural Heritage Buildings","startDate":"2020-09-01","endDate":"2022-08-31","projectUrl":"","objective":"This project aims to develop novel techniques using smart multifunctional materials for the combined seismic-plus-energy retrofitting, and Structural Health Monitoring (SHM) of the European cultural heritage buildings (CHB). The need for upgrading the existing old and CHB is becoming increasingly important for the EU countries, due to: (1) their poor structural performance during recent earthquakes (e.g. Italy, Greece) or other natural hazards (e.g. extreme weather conditions) that have resulted in significant economic losses, and loss of human lives; and (2) their low energy performance which increases significantly their energy consumption (buildings are responsible for 40% of EU energy consumption). Moreover, the SHM of the existing buildings is crucial for assessing continuously their structural integrity and thus to provide information for planning cost effective and sustainable maintenance decisions. Since replacing the old buildings with new is not financially feasible, and even it is not allowed for CHB, their lifetime extension requires considering simultaneously both structural and energy retrofitting. It is noted that the annual cost of repair and maintenance of existing European building stock is estimated to be about 50% of the total construction budget, currently standing at more than €300 billion. To achieve cost effectiveness, STRETCH explores a novel approach, which integrates technical textile reinforcement with thermal insulation systems and strain sensors to provide simultaneous structural-plus-energy retrofitting combined with SHM, tailored for masonry cultural heritage building envelopes. The effectiveness of the proposed retrofitting system will be validated experimentally and analytically. Moreover, draft guidelines and recommendations for determining future research on the use of smart composite materials for the concurrent retrofitting (structural-plus-energy) and SHM of the existing cultural heritage buildings envelopes will be proposed.","totalCost":"183473,28","ecMaxContribution":"183473,28","call":"H2020-MSCA-IF-2019","fundingScheme":"MSCA-IF-EF-ST","coordinator":"JRC -JOINT RESEARCH CENTRE- EUROPEAN COMMISSION","coordinatorCountry":"BE","participants":"","participantCountries":"","subjects":""}
{"rcn":"229265","id":"892890","acronym":"RhythmicPrediction","status":"SIGNED","programme":"H2020-EU.1.3.2.","topics":"MSCA-IF-2019","frameworkProgramme":"H2020","title":"Rhythmic prediction in speech perception: are our brain waves in sync with our native language?","startDate":"2021-01-01","endDate":"2022-12-31","projectUrl":"","objective":"Speech has rhythmic properties that widely differ across languages. When we listen to foreign languages, we may perceive them to be more musical, or rather more rap-like than our own. Even if we are unaware of it, the rhythm and melody of language, i.e. prosody, reflects its linguistic structure. On the one hand, prosody emphasizes content words and new information with stress and accents. On the other hand, it is aligned to phrase edges, marking them with boundary tones. Prosody hence helps the listener to focus on important words and to chunk sentences into phrases, and phrases into words. In fact, prosody is even used predictively, for instance to time the onset of the next word, the next piece of new information, or the total remaining length of the utterance, so the listener can seamlessly start their own speaking turn. \nSo, the listener, or rather their brain, is actively predicting when important speech events will happen, using prosody. How prosodic rhythms are exploited to predict speech timing, however, is unclear. No link between prosody and neural predictive processing has yet been empirically made. One hypothesis is that rhythm, such as the alternation of stressed and unstressed syllables, helps listeners time their attention. Similar behavior is best captured by the notion of an internal oscillator which can be set straight by attentional spikes. While neuroscientific evidence for the relation of neural oscillators to speech processing is starting to emerge, no link to the use of prosody nor predictive listening exists, yet. Furthermore, it is still unknown how native language knowledge affects cortical oscillations, and how oscillations are affected by cross-linguistic differences in rhythmic structure. The current project combines the standing knowledge of prosodic typology with the recent advances in neuroscience on cortical oscillations, to investigate the role of internal oscillators on native prosody perception, and active speech prediction.","totalCost":"191149,44","ecMaxContribution":"191149,44","call":"H2020-MSCA-IF-2019","fundingScheme":"MSCA-IF-EF-ST","coordinator":"UNIVERSITE DE GENEVE","coordinatorCountry":"CH","participants":"","participantCountries":"","subjects":""}
{"rcn":"229235","id":"886828","acronym":"ASAP","status":"SIGNED","programme":"H2020-EU.1.3.2.","topics":"MSCA-IF-2019","frameworkProgramme":"H2020","title":"Advanced Solutions for Asphalt Pavements","startDate":"2021-09-01","endDate":"2023-08-31","projectUrl":"","objective":"The Advanced Solutions for Asphalt Pavements (ASAP) project involves the development of a unique road paving technology which will use a bio-bitumen rejuvenator to rejuvenate aged asphalt bitumen. This technology will help to extend the lifespan of asphalt pavements (roads) and will reduce the environmental and economic impact of roads and road maintenance processes. Recycling and self-healing processes will replace fossil fuel dependent technology. Self-healing will involve rejuvenating aged asphalt bitumen using a bio-rejuvenator developed using microalgae oils (rejuvenating bio-oil). Microalgae has been selected because of its fast growth, versatility and ability to survive within hostile environments, such as wastewater. \n\nASAP will utilise microalgae, cultivated within the wastewater treatment process, as a source of the rejuvenating bio-oil. The solvent (Soxhlet) processes will be used to extract the oil from the microalgae. To ensure the efficiency of the oil extraction process, an ultrasonication process will be used to pre-treat the microalgae. The suitability of rejuvenating bio-oil as a replacement for the bitumen rejuvenator (fossil fuel based) will be ascertained via a series of standard bituminous and accelerated tests. A rejuvenator-binder diffusion numerical model will be developed, based on the Delft Lattice concrete diffusion model, to determine the conditions required for rejuvenation to occur and to ascertain the healing rate of the asphalt binder. These parameters will facilitate the selection and optimisation of the asphalt self-healing systems (specifically the amount of bio-oil rejuvenator and time required) to achieve full rejuvenation. \n\nThis novel approach will benchmark the effectiveness of this intervention against existing asphalt design and maintenance processes and assess feasibility. The ASAP project presents an opportunity to revolutionise road design and maintenance processes and reduce its environmental and financial costs.","totalCost":"187572,48","ecMaxContribution":"187572,48","call":"H2020-MSCA-IF-2019","fundingScheme":"MSCA-IF-EF-ST","coordinator":"NEDERLANDSE ORGANISATIE VOOR TOEGEPAST NATUURWETENSCHAPPELIJK ONDERZOEK TNO","coordinatorCountry":"NL","participants":"","participantCountries":"","subjects":""}
{"rcn":null,"id":"886776","acronym":null,"status":null,"programme":"H2020-EU.2.1.4.","topics":null,"frameworkProgramme":"H2020","title":"BIO-Based pESTicides production for sustainable agriculture management plan","startDate":"2020-05-01","endDate":"2023-04-30","projectUrl":"","objective":"The BIOBESTicide project will validate and demonstrate the production of an effective and cost-efficient biopesticide. The demonstration will be based on an innovative bio-based value chain starting from the valorisation of sustainable biomasses, i.e. beet pulp and sugar molasses and will exploit the properties of the oomycete Pythium oligandrum strain I-5180 to increase natural plant defenses, to produce an highly effective and eco-friendly biopesticide solution for vine plants protection. \nBIOVITIS, the project coordinator, has developed, at laboratory level (TRL4), an effective method to biocontrol one of the major causes of worldwide vineyards destruction, the Grapevine Trunk Diseases (GTDs). The protection system is based on the oomycete Pythium oligandrum strain I-5180 that, at applied at optimal time and concentration, colonises the root of vines and stimulates the natural plant defences against GTDs, providing a protection that ranges between 40% and 60%. \nBIOBESTicide project will respond to the increasing demands for innovative solutions for crop protection agents, transferring the technology to a DEMO Plant able to produce more than 10 T of a high-quality oomycete-based biopesticide product per year (TRL7). \nThe BIOBESTicide project will validate the efficiency of the formulated product on vineyards of different geographical areas.\nTo assure the safety of products under both health and environmental points of view, a full and complete approval dossier for Pythium oligandrum strain I-5180 will be submitted in all the European countries. \nA Life Cycle Sustainability Assessment (LCSA) will be conducted to assess the environmental, economic and social impacts of the developed products.\nThe adoption of the effective and cost-efficient biopesticide will have significant impacts with a potential ROI of 30 % in just 5 years and a total EBITDA of more than € 6,400,000.","totalCost":"4402772,5","ecMaxContribution":"3069653","call":"H2020-BBI-JTI-2019","fundingScheme":"BBI-IA-DEMO","coordinator":"BIOVITIS","coordinatorCountry":"FR","participants":"MERCIER FRERES SARL;FUNDACION TECNALIA RESEARCH & INNOVATION;LAMBERTI SPA;EURION CONSULTING;CIAOTECH Srl;STOWARZYSZENIE ZACHODNIOPOMORSKI KLASTER CHEMICZNY ZIELONA CHEMIA;NORDZUCKER AG;INSTITUT NATIONAL DE RECHERCHE POUR L'AGRICULTURE, L'ALIMENTATION ET L'ENVIRONNEMENT;INSTITUT FRANCAIS DE LA VIGNE ET DU VIN","participantCountries":"FR;ES;IT;PL;DE","subjects":""}
{"rcn":null,"id":"886776","acronym":null,"status":null,"programme":"H2020-EU.2.1.4.","topics":"BBI-2019-SO3-D4","frameworkProgramme":"H2020","title":"BIO-Based pESTicides production for sustainable agriculture management plan","startDate":"2020-05-01","endDate":"2023-04-30","projectUrl":"","objective":"The BIOBESTicide project will validate and demonstrate the production of an effective and cost-efficient biopesticide. The demonstration will be based on an innovative bio-based value chain starting from the valorisation of sustainable biomasses, i.e. beet pulp and sugar molasses and will exploit the properties of the oomycete Pythium oligandrum strain I-5180 to increase natural plant defenses, to produce an highly effective and eco-friendly biopesticide solution for vine plants protection. \nBIOVITIS, the project coordinator, has developed, at laboratory level (TRL4), an effective method to biocontrol one of the major causes of worldwide vineyards destruction, the Grapevine Trunk Diseases (GTDs). The protection system is based on the oomycete Pythium oligandrum strain I-5180 that, at applied at optimal time and concentration, colonises the root of vines and stimulates the natural plant defences against GTDs, providing a protection that ranges between 40% and 60%. \nBIOBESTicide project will respond to the increasing demands for innovative solutions for crop protection agents, transferring the technology to a DEMO Plant able to produce more than 10 T of a high-quality oomycete-based biopesticide product per year (TRL7). \nThe BIOBESTicide project will validate the efficiency of the formulated product on vineyards of different geographical areas.\nTo assure the safety of products under both health and environmental points of view, a full and complete approval dossier for Pythium oligandrum strain I-5180 will be submitted in all the European countries. \nA Life Cycle Sustainability Assessment (LCSA) will be conducted to assess the environmental, economic and social impacts of the developed products.\nThe adoption of the effective and cost-efficient biopesticide will have significant impacts with a potential ROI of 30 % in just 5 years and a total EBITDA of more than € 6,400,000.","totalCost":"4402772,5","ecMaxContribution":"3069653","call":"H2020-BBI-JTI-2019","fundingScheme":"BBI-IA-DEMO","coordinator":"BIOVITIS","coordinatorCountry":"FR","participants":"MERCIER FRERES SARL;FUNDACION TECNALIA RESEARCH & INNOVATION;LAMBERTI SPA;EURION CONSULTING;CIAOTECH Srl;STOWARZYSZENIE ZACHODNIOPOMORSKI KLASTER CHEMICZNY ZIELONA CHEMIA;NORDZUCKER AG;INSTITUT NATIONAL DE RECHERCHE POUR L'AGRICULTURE, L'ALIMENTATION ET L'ENVIRONNEMENT;INSTITUT FRANCAIS DE LA VIGNE ET DU VIN","participantCountries":"FR;ES;IT;PL;DE","subjects":""}
|
||||||
{"rcn":null,"id":"886776","acronym":null,"status":null,"programme":"H2020-EU.3.2.6.","topics":"BBI-2019-SO3-D4","frameworkProgramme":"H2020","title":"BIO-Based pESTicides production for sustainable agriculture management plan","startDate":"2020-05-01","endDate":"2023-04-30","projectUrl":"","objective":"The BIOBESTicide project will validate and demonstrate the production of an effective and cost-efficient biopesticide. The demonstration will be based on an innovative bio-based value chain starting from the valorisation of sustainable biomasses, i.e. beet pulp and sugar molasses and will exploit the properties of the oomycete Pythium oligandrum strain I-5180 to increase natural plant defenses, to produce an highly effective and eco-friendly biopesticide solution for vine plants protection. \nBIOVITIS, the project coordinator, has developed, at laboratory level (TRL4), an effective method to biocontrol one of the major causes of worldwide vineyards destruction, the Grapevine Trunk Diseases (GTDs). The protection system is based on the oomycete Pythium oligandrum strain I-5180 that, at applied at optimal time and concentration, colonises the root of vines and stimulates the natural plant defences against GTDs, providing a protection that ranges between 40% and 60%. \nBIOBESTicide project will respond to the increasing demands for innovative solutions for crop protection agents, transferring the technology to a DEMO Plant able to produce more than 10 T of a high-quality oomycete-based biopesticide product per year (TRL7). \nThe BIOBESTicide project will validate the efficiency of the formulated product on vineyards of different geographical areas.\nTo assure the safety of products under both health and environmental points of view, a full and complete approval dossier for Pythium oligandrum strain I-5180 will be submitted in all the European countries. \nA Life Cycle Sustainability Assessment (LCSA) will be conducted to assess the environmental, economic and social impacts of the developed products.\nThe adoption of the effective and cost-efficient biopesticide will have significant impacts with a potential ROI of 30 % in just 5 years and a total EBITDA of more than € 6,400,000.","totalCost":"4402772,5","ecMaxContribution":"3069653","call":"H2020-BBI-JTI-2019","fundingScheme":"BBI-IA-DEMO","coordinator":"BIOVITIS","coordinatorCountry":"FR","participants":"MERCIER FRERES SARL;FUNDACION TECNALIA RESEARCH & INNOVATION;LAMBERTI SPA;EURION CONSULTING;CIAOTECH Srl;STOWARZYSZENIE ZACHODNIOPOMORSKI KLASTER CHEMICZNY ZIELONA CHEMIA;NORDZUCKER AG;INSTITUT NATIONAL DE RECHERCHE POUR L'AGRICULTURE, L'ALIMENTATION ET L'ENVIRONNEMENT;INSTITUT FRANCAIS DE LA VIGNE ET DU VIN","participantCountries":"FR;ES;IT;PL;DE","subjects":""}
{"rcn":"229276","id":"895426","acronym":"DisMoBoH","status":"SIGNED","programme":"H2020-EU.1.3.2.","topics":"MSCA-IF-2019","frameworkProgramme":"H2020","title":"Dissecting the molecular building principles of locally formed transcriptional hubs","startDate":"2021-09-01","endDate":"2023-08-31","projectUrl":"","objective":"Numerous DNA variants have already been identified that modulate inter-individual molecular traits – most prominently gene expression. However, since finding mechanistic interpretations relating genotype to phenotype has proven challenging, the focus has shifted to higher-order regulatory features, i.e. chromatin accessibility, transcription factor (TF) binding and 3D chromatin interactions. This revealed at least two enhancer types: “lead” enhancers in which the presence of genetic variants modulates the activity of entire chromatin domains, and “dependent” ones in which variants induce subtle changes, affecting DNA accessibility, but not transcription. Although cell type-specific TFs are likely important, it remains unclear which sequence features are required to establish such enhancer hierarchies, and under which circumstances genetic variation results in altered enhancer-promoter contacts and differential gene expression. Here, we propose to investigate the molecular mechanisms that link DNA variation to TF binding, chromatin topology, and gene expression response. We will leverage data on enhancer hierarchy and sequence-specific TF binding to identify the sequence signatures that define “lead” enhancers. The results will guide the design of a synthetic locus that serves as an in vivo platform to systematically vary the building blocks of local transcriptional units: i) DNA sequence – including variations in TF binding site affinity and syntax, ii) molecular interactions between TFs, and iii) chromatin conformation. To validate our findings, we will perform optical reconstruction of chromatin architecture for a select number of DNA variants. By simultaneously perturbing co-dependent features, this proposal will provide novel mechanistic insights into the formation of local transcriptional hubs.","totalCost":"191149,44","ecMaxContribution":"191149,44","call":"H2020-MSCA-IF-2019","fundingScheme":"MSCA-IF-EF-RI","coordinator":"ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE","coordinatorCountry":"CH","participants":"","participantCountries":"","subjects":""}
{"rcn":"229288","id":"898218","acronym":"devUTRs","status":"SIGNED","programme":"H2020-EU.1.3.2.","topics":"MSCA-IF-2019","frameworkProgramme":"H2020","title":"Uncovering the roles of 5′UTRs in translational control during early zebrafish development","startDate":"2021-09-01","endDate":"2023-08-31","projectUrl":"","objective":"Following fertilisation, metazoan embryos are transcriptionally silent, and embryogenesis is controlled by maternally deposited factors. Developmental progression requires the synthesis of new mRNAs and proteins in a coordinated fashion. Many posttranscriptional mechanisms regulate the fate of maternal mRNAs, but it is less understood how translational control shapes early embryogenesis. In eukaryotes, translation starts at the mRNA 5′ end, consisting of the 5′ cap and 5′ untranslated region (UTR). Protein synthesis is primarily regulated at the translation initiation step by elements within the 5′UTR. However, the role of 5′UTRs in regulating the dynamics of mRNA translation during vertebrate embryogenesis remains unexplored. For example, all vertebrate ribosomal protein (RP) mRNAs harbor a conserved terminal oligopyrimidine tract (TOP) in their 5′UTR. RP levels must be tightly controlled to ensure proper organismal development, but if and how the TOP motif mediates RP mRNA translational regulation during embryogenesis is unclear. Overall, we lack a systematic understanding of the regulatory information contained in 5′UTRs. In this work, I aim to uncover the 5′UTR in vivo rules for mRNA translational regulation during zebrafish embryogenesis. I propose to apply imaging and biochemical approaches to characterise the role of the TOP motif in RP mRNA translational regulation during embryogenesis and identify the trans-acting factor(s) that bind(s) to it (Aim 1). To systematically assess the contribution of 5′UTRs to mRNA translational regulation during zebrafish embryogenesis, I will couple a massively parallel reporter assay of 5′UTRs to polysome profiling (Aim 2). By integrating the translational behaviour of 5′UTR reporters throughout embryogenesis with sequence-based regression models, I anticipate to uncover novel cis-regulatory elements in 5′UTRs with developmental roles.","totalCost":"191149,44","ecMaxContribution":"191149,44","call":"H2020-MSCA-IF-2019","fundingScheme":"MSCA-IF-EF-ST","coordinator":"UNIVERSITAT BASEL","coordinatorCountry":"CH","participants":"","participantCountries":"","subjects":""}
@ -13,4 +13,5 @@
{"rcn":"229239","id":"887259","acronym":"ALEHOOP","status":"SIGNED","programme":"H2020-EU.2.1.4.","topics":"BBI-2019-SO3-D3","frameworkProgramme":"H2020","title":"Biorefineries for the valorisation of macroalgal residual biomass and legume processing by-products to obtain new protein value chains for high-value food and feed applications","startDate":"2020-06-01","endDate":"2024-05-31","projectUrl":"","objective":"ALEHOOP provides the demonstration at pilot scale of both sustainable macroalgae and legume-based biorefineries for the recovery of low-cost dietary proteins from alga-based and plant residual biomass and their validation to meet market requirements of consumers and industry in the food and feed sectors. In these sectors, consumers are demanding affordable functional natural proteins from alternative sources and industry is demanding low-cost bio-based protein formulations with better performance and higher sustainability. \nCurrent protein demand for the 7.3 billion inhabitants of the world is approximately 202 Mt. Due to the rise in meat consumption more proteins are therefore required for animal feeding. To satisfy the current protein demand, Europe imports over 30 Mt of soy from the Americas each year mainly for animal feeding, entailing 95% dependency of EU on imported soy. Current sources of proteins are becoming unsustainable from an economic and environmental perspective for Europe resulting in concerns for sustainability and food security and leading to search for new alternative proteins. \nALEHOOP addresses the obtaining of proteins from green macroalgal blooms, brown seaweed by-products from algae processors and legume processing by-products (peas, lupines, beans and lentils) as alternative protein sources for animal feeding (case of green seaweed) and food applications (case of brown seaweed and legume by-products), since they are low cost and under-exploited biomass that do not compete with traditional food crops for space and resources. This will reduce EU´s dependency on protein imports and contribute to our raw material security. The new proteins will be validated in foods for elderly, sporty and overweight people, vegetarians and healthy consumers as well as for animal feed creating cross-sectorial interconnection between these value chains and supporting the projected business plan.","totalCost":"6718370","ecMaxContribution":"5140274,41","call":"H2020-BBI-JTI-2019","fundingScheme":"BBI-IA-DEMO","coordinator":"CONTACTICA S.L.","coordinatorCountry":"ES","participants":"CENTIV GMBH;ALGINOR ASA;FUNDACION TECNALIA RESEARCH & INNOVATION;INDUKERN,S.A.;ASOCIACION NACIONAL DE FABRICANTES DE CONSERVAS DE PESCADOS Y MARISCOS-CENTRO TECNICO NACIONAL DE CONSERVACION DE PRODUCTOS DE LA PESCA;BIOZOON GMBH;EIGEN VERMOGEN VAN HET INSTITUUT VOOR LANDBOUW- EN VISSERIJONDERZOEK;BIOSURYA SL;VYZKUMNY USTAV VETERINARNIHO LEKARSTVI;NUTRITION SCIENCES;TECHNOLOGICAL UNIVERSITY DUBLIN;GARLAN, S.COOP.;ISANATUR SPAIN SL;UNIVERSIDAD DE VIGO;UNIVERSIDAD DE CADIZ","participantCountries":"DE;NO;ES;BE;CZ;IE","subjects":""}
{"rcn":"229239","id":"887259","acronym":"ALEHOOP","status":"SIGNED","programme":"H2020-EU.3.2.6.","topics":"BBI-2019-SO3-D3","frameworkProgramme":"H2020","title":"Biorefineries for the valorisation of macroalgal residual biomass and legume processing by-products to obtain new protein value chains for high-value food and feed applications","startDate":"2020-06-01","endDate":"2024-05-31","projectUrl":"","objective":"ALEHOOP provides the demonstration at pilot scale of both sustainable macroalgae and legume-based biorefineries for the recovery of low-cost dietary proteins from alga-based and plant residual biomass and their validation to meet market requirements of consumers and industry in the food and feed sectors. In these sectors, consumers are demanding affordable functional natural proteins from alternative sources and industry is demanding low-cost bio-based protein formulations with better performance and higher sustainability. \nCurrent protein demand for the 7.3 billion inhabitants of the world is approximately 202 Mt. Due to the rise in meat consumption more proteins are therefore required for animal feeding. To satisfy the current protein demand, Europe imports over 30 Mt of soy from the Americas each year mainly for animal feeding, entailing 95% dependency of EU on imported soy. Current sources of proteins are becoming unsustainable from an economic and environmental perspective for Europe resulting in concerns for sustainability and food security and leading to search for new alternative proteins. \nALEHOOP addresses the obtaining of proteins from green macroalgal blooms, brown seaweed by-products from algae processors and legume processing by-products (peas, lupines, beans and lentils) as alternative protein sources for animal feeding (case of green seaweed) and food applications (case of brown seaweed and legume by-products), since they are low cost and under-exploited biomass that do not compete with traditional food crops for space and resources. This will reduce EU´s dependency on protein imports and contribute to our raw material security. The new proteins will be validated in foods for elderly, sporty and overweight people, vegetarians and healthy consumers as well as for animal feed creating cross-sectorial interconnection between these value chains and supporting the projected business plan.","totalCost":"6718370","ecMaxContribution":"5140274,41","call":"H2020-BBI-JTI-2019","fundingScheme":"BBI-IA-DEMO","coordinator":"CONTACTICA S.L.","coordinatorCountry":"ES","participants":"CENTIV GMBH;ALGINOR ASA;FUNDACION TECNALIA RESEARCH & INNOVATION;INDUKERN,S.A.;ASOCIACION NACIONAL DE FABRICANTES DE CONSERVAS DE PESCADOS Y MARISCOS-CENTRO TECNICO NACIONAL DE CONSERVACION DE PRODUCTOS DE LA PESCA;BIOZOON GMBH;EIGEN VERMOGEN VAN HET INSTITUUT VOOR LANDBOUW- EN VISSERIJONDERZOEK;BIOSURYA SL;VYZKUMNY USTAV VETERINARNIHO LEKARSTVI;NUTRITION SCIENCES;TECHNOLOGICAL UNIVERSITY DUBLIN;GARLAN, S.COOP.;ISANATUR SPAIN SL;UNIVERSIDAD DE VIGO;UNIVERSIDAD DE CADIZ","participantCountries":"DE;NO;ES;BE;CZ;IE","subjects":""}
{"rcn":"229258","id":"892834","acronym":"DENVPOC","status":"SIGNED","programme":"H2020-EU.1.3.2.","topics":"MSCA-IF-2019","frameworkProgramme":"H2020","title":"qPCR Microfluidics point-of-care platform for dengue diagnosis","startDate":"2020-05-18","endDate":"2022-05-17","projectUrl":"","objective":"As a result of Global climate change and fast urbanization, global outbreaks of Dengue (DENV)/ Zika(ZIKV)/Chikungunya(CHIKV) virus have the potential to occur. The most common pathway of these infections in humans is through the female Aedes mosquito vector. DENV is an exanthematous febrile disease with varied clinical manifestations and progressions . Due to similarities in symptoms between DENV and ZIKV and CHIKV, it is difficult to make a differential diagnosis, impeding appropriate, timely medical intervention. Furthermore, cross-reactivity with ZIKV, which was recently related to microcephaly, is a serious issue. In 2016, in Brazil alone, there were 4180 microcephaly cases reported instead of 163 cases, more in line with yearly expected projections , , Thus, the sooner an accurate diagnostic which differentiates DENV from the other manifestations is critical; most especially at the early stages of the infection, to have a reliable diagnosis in pregnant women. In 2016, the OMS emergency committee declared that the outbreaks and the potentially resultant neurological disorders in Brazil were an important international state of emergency in public health, as a result of the associated secondary effects; these diseases became a Global concern. This project allows developing a highly and fast Multiplex qPCR POC platform by using FASTGENE technology with a minimal amount of patient serotype. It would reduce the time of analysis (30 to 90’ for a standard) and costs. Additionally, the sample preprocessing and thermalization will shorten real-time PCR amplification time and will be integrated within the microfluidic systems. This platform can result in a commercialized product whereupon a main market target would be pregnant women and people living or traveling through/from outbreak risk areas.","totalCost":"196707,84","ecMaxContribution":"196707,84","call":"H2020-MSCA-IF-2019","fundingScheme":"MSCA-IF-EF-SE","coordinator":"BFORCURE","coordinatorCountry":"FR","participants":"","participantCountries":"","subjects":""}
{"rcn":"229280","id":"895716","acronym":"DoMiCoP","status":"SIGNED","programme":"H2020-EU.1.3.2.","topics":"MSCA-IF-2019","frameworkProgramme":"H2020","title":"The Diffusion of Migration Control Practice. Actors, Processes and Effects.","startDate":"2021-03-01","endDate":"2023-02-28","projectUrl":"","objective":"DoMiCoP develops new understandings and perspectives to study migration control in practice in the European Union by asking one main question: how and why do communities of practice develop and diffuse the knowledge required to put migration control into action? Unlike the nexus between expert knowledge, epistemic communities and policy formulation, the nexus between everyday knowledge, communities of practice and policy implementation has not yet received systematic scholarly attention. My project bridges that gap by focusing on intermediate arenas in which communities of practice take shape most notably the meetings and trainings that gather state and non-state actors involved in putting asylum, detention and removal into practice. By building on field-based methodologies (interviews and participant observations), DoMiCoP sheds ethnographic light on the role that ‘learning from abroad’ plays in the implementation of migration control in the EU. My project’s aim is threefold: 1) Identifying arenas at intermediate levels in which communities of practice take shape; 2) Analysing the communities of practice by focusing on the configurations of actors and organizations involved, the motivations underlying their involvement, the process of knowledge development in interaction, the conflicts and negotiations; 3) Revealing the role of non-state organizations (private for profit and not-for-profit). From a theoretical point of view, this project goes beyond the classical view of the implementation as a test to assess the effectiveness of policy transfers towards an analysis of policy transfer at that level of policy-making. From an empirical point of view, the project expands knowledge about less-studied venues of policy-making and provides original thick descriptions. From a methodological point of view, the project engages with qualitative methods for the study of policy diffusion and aims at responding to their main challenges through participant observation.","totalCost":"163673,28","ecMaxContribution":"163673,28","call":"H2020-MSCA-IF-2019","fundingScheme":"MSCA-IF-EF-ST","coordinator":"EUROPEAN UNIVERSITY INSTITUTE","coordinatorCountry":"IT","participants":"","participantCountries":"","subjects":""}
{"rcn":"230066","id":"883730","acronym":"SOLSPACE","status":"SIGNED","programme":"H2020-EU.1.1.","topics":"ERC-2019-ADG","frameworkProgramme":"H2020","title":"Enhancing Global Clean Energy Services Using Orbiting Solar Reflectors", "startDate":"2021-03-01","endDate":"2025-11-30","projectUrl":"","objective":"fake", "totalCost":"2496392","ecMaxContribution":"2496392","call":"ERC-2019-ADG","fundingScheme":"ERC-ADG","coordinator":"UNIVERSITY OF GLASGOW","coordinatorCountry":"UK","participants":"","participantCountries":"","subjects":""}
Binary file not shown.
Binary file not shown.
@ -0,0 +1,25 @@

package eu.dnetlib.dhp.broker.model;

import java.io.Serializable;

import eu.dnetlib.broker.api.ShortEventMessage;

public class ShortEventMessageWithGroupId extends ShortEventMessage implements Serializable {

    /**
     *
     */
    private static final long serialVersionUID = 4704889388757626630L;

    private String group;

    public String getGroup() {
        return group;
    }

    public void setGroup(final String group) {
        this.group = group;
    }

}
@ -3,12 +3,18 @@ package eu.dnetlib.dhp.broker.oa;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

+import java.io.IOException;
import java.util.Optional;
+import java.util.Properties;

import org.apache.commons.io.IOUtils;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
import org.apache.spark.SparkConf;
-import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.TypedColumn;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -29,7 +35,7 @@ public class GenerateStatsJob {
            IOUtils
                .toString(
                    GenerateStatsJob.class
-                        .getResourceAsStream("/eu/dnetlib/dhp/broker/oa/common_params.json")));
+                        .getResourceAsStream("/eu/dnetlib/dhp/broker/oa/stats_params.json")));
        parser.parseArgument(args);

        final Boolean isSparkSessionManaged = Optional

@ -43,21 +49,51 @@ public class GenerateStatsJob {
        final String eventsPath = parser.get("workingPath") + "/events";
        log.info("eventsPath: {}", eventsPath);

-        final String statsPath = parser.get("workingPath") + "/stats";
-        log.info("stats: {}", statsPath);
+        final String dbUrl = parser.get("dbUrl");
+        log.info("dbUrl: {}", dbUrl);
+
+        final String dbUser = parser.get("dbUser");
+        log.info("dbUser: {}", dbUser);
+
+        final String dbPassword = parser.get("dbPassword");
+        log.info("dbPassword: {}", "***");
+
+        final String brokerApiBaseUrl = parser.get("brokerApiBaseUrl");
+        log.info("brokerApiBaseUrl: {}", brokerApiBaseUrl);

        final TypedColumn<Event, DatasourceStats> aggr = new StatsAggregator().toColumn();

+        final Properties connectionProperties = new Properties();
+        connectionProperties.put("user", dbUser);
+        connectionProperties.put("password", dbPassword);
+
        runWithSparkSession(conf, isSparkSessionManaged, spark -> {

-            final Dataset<DatasourceStats> stats = ClusterUtils
+            ClusterUtils
                .readPath(spark, eventsPath, Event.class)
-                .groupByKey(e -> e.getMap().getTargetDatasourceId(), Encoders.STRING())
+                .groupByKey(e -> e.getTopic() + "@@@" + e.getMap().getTargetDatasourceId(), Encoders.STRING())
                .agg(aggr)
-                .map(t -> t._2, Encoders.bean(DatasourceStats.class));
-
-            ClusterUtils.save(stats, statsPath, DatasourceStats.class, null);
+                .map(t -> t._2, Encoders.bean(DatasourceStats.class))
+                .write()
+                .mode(SaveMode.Overwrite)
+                .jdbc(dbUrl, "oa_datasource_stats_temp", connectionProperties);
+
+            log.info("*** updateStats");
+            updateStats(brokerApiBaseUrl);
+            log.info("*** ALL done.");
        });
    }

+    private static String updateStats(final String brokerApiBaseUrl) throws IOException {
+        final String url = brokerApiBaseUrl + "/api/openaireBroker/stats/update";
+        final HttpGet req = new HttpGet(url);
+
+        try (final CloseableHttpClient client = HttpClients.createDefault()) {
+            try (final CloseableHttpResponse response = client.execute(req)) {
+                return IOUtils.toString(response.getEntity().getContent());
+            }
+        }
+    }
+
}
@ -0,0 +1,110 @@

package eu.dnetlib.dhp.broker.oa;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.gson.Gson;

import eu.dnetlib.broker.objects.OaBrokerEventPayload;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.broker.model.Event;
import eu.dnetlib.dhp.broker.model.ShortEventMessageWithGroupId;
import eu.dnetlib.dhp.broker.oa.util.ClusterUtils;

public class PartitionEventsByDsIdJob {

    private static final Logger log = LoggerFactory.getLogger(PartitionEventsByDsIdJob.class);
    private static final String OPENDOAR_NSPREFIX = "opendoar____::";

    public static void main(final String[] args) throws Exception {

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
            IOUtils
                .toString(
                    PartitionEventsByDsIdJob.class
                        .getResourceAsStream("/eu/dnetlib/dhp/broker/oa/common_params.json")));
        parser.parseArgument(args);

        final Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);
        log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

        final SparkConf conf = new SparkConf();

        final String eventsPath = parser.get("workingPath") + "/events";
        log.info("eventsPath: {}", eventsPath);

        final String partitionPath = parser.get("workingPath") + "/eventsByOpendoarId";
        log.info("partitionPath: {}", partitionPath);

        runWithSparkSession(conf, isSparkSessionManaged, spark -> {

            ClusterUtils
                .readPath(spark, eventsPath, Event.class)
                .filter(e -> StringUtils.isNotBlank(e.getMap().getTargetDatasourceId()))
                .filter(e -> e.getMap().getTargetDatasourceId().contains(OPENDOAR_NSPREFIX))
                .limit(10000)
                .map(e -> messageFromNotification(e), Encoders.bean(ShortEventMessageWithGroupId.class))
                .coalesce(1)
                .write()
                .partitionBy("group")
                .mode(SaveMode.Overwrite)
                .json(partitionPath);

        });
        renameSubDirs(partitionPath);

    }

    private static void renameSubDirs(final String path) throws IOException {
        final FileSystem fs = FileSystem.get(new Configuration());

        log.info("** Renaming subdirs of " + path);
        for (final FileStatus fileStatus : fs.listStatus(new Path(path))) {
            if (fileStatus.isDirectory()) {
                final Path oldPath = fileStatus.getPath();
                final String oldName = oldPath.getName();
                if (oldName.contains("=")) {
                    final Path newPath = new Path(path + "/" + StringUtils.substringAfter(oldName, "="));
                    log.info(" * " + oldPath.getName() + " -> " + newPath.getName());
                    fs.rename(oldPath, newPath);
                }
            }
        }
    }

    private static ShortEventMessageWithGroupId messageFromNotification(final Event e) {
        final Gson gson = new Gson();

        final OaBrokerEventPayload payload = gson.fromJson(e.getPayload(), OaBrokerEventPayload.class);

        final ShortEventMessageWithGroupId res = new ShortEventMessageWithGroupId();

        res.setOriginalId(payload.getResult().getOriginalId());
        res.setTitle(payload.getResult().getTitles().stream().filter(StringUtils::isNotBlank).findFirst().orElse(null));
        res.setTopic(e.getTopic());
        res.setTrust(payload.getTrust());
        res.generateMessageFromObject(payload.getHighlight());
        res.setGroup(StringUtils.substringAfter(e.getMap().getTargetDatasourceId(), OPENDOAR_NSPREFIX));

        return res;
    }

}
@ -2,8 +2,6 @@
package eu.dnetlib.dhp.broker.oa.util.aggregators.stats;

import java.io.Serializable;
-import java.util.HashMap;
-import java.util.Map;

public class DatasourceStats implements Serializable {

@ -15,7 +13,8 @@ public class DatasourceStats implements Serializable {
    private String id;
    private String name;
    private String type;
-    private Map<String, Long> topics = new HashMap<>();
+    private String topic;
+    private long size = 0l;

    public String getId() {
        return id;

@ -41,21 +40,24 @@ public class DatasourceStats implements Serializable {
        this.type = type;
    }

-    public Map<String, Long> getTopics() {
-        return topics;
+    public String getTopic() {
+        return topic;
    }

-    public void setTopics(final Map<String, Long> topics) {
-        this.topics = topics;
+    public void setTopic(final String topic) {
+        this.topic = topic;
    }

-    public void incrementTopic(final String topic, final long inc) {
-        if (topics.containsKey(topic)) {
-            topics.put(topic, topics.get(topic) + inc);
-        } else {
-            topics.put(topic, inc);
-        }
+    public long getSize() {
+        return size;
+    }
+
+    public void setSize(final long size) {
+        this.size = size;
+    }
+
+    public void incrementSize(final long inc) {
+        this.size = this.size + inc;
    }

}
@ -25,7 +25,8 @@ public class StatsAggregator extends Aggregator<Event, DatasourceStats, Datasour
        stats.setId(e.getMap().getTargetDatasourceId());
        stats.setName(e.getMap().getTargetDatasourceName());
        stats.setType(e.getMap().getTargetDatasourceType());
-        stats.incrementTopic(e.getTopic(), 1l);
+        stats.setTopic(e.getTopic());
+        stats.incrementSize(1l);
        return stats;
    }

@ -35,8 +36,9 @@ public class StatsAggregator extends Aggregator<Event, DatasourceStats, Datasour
            stats0.setId(stats1.getId());
            stats0.setName(stats1.getName());
            stats0.setType(stats1.getType());
+            stats0.setTopic(stats1.getTopic());
        }
-        stats1.getTopics().entrySet().forEach(e -> stats0.incrementTopic(e.getKey(), e.getValue()));
+        stats0.incrementSize(stats1.getSize());
        return stats0;
    }
@ -44,6 +44,18 @@
            <name>brokerApiBaseUrl</name>
            <description>the url of the broker service api</description>
        </property>
+        <property>
+            <name>brokerDbUrl</name>
+            <description>the url of the broker database</description>
+        </property>
+        <property>
+            <name>brokerDbUser</name>
+            <description>the user of the broker database</description>
+        </property>
+        <property>
+            <name>brokerDbPassword</name>
+            <description>the password of the broker database</description>
+        </property>
        <property>
            <name>sparkDriverMemory</name>
            <description>memory for driver process</description>

@ -509,8 +521,11 @@
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.shuffle.partitions=3840
            </spark-opts>
-            <arg>--graphPath</arg><arg>${graphInputPath}</arg>
            <arg>--workingPath</arg><arg>${workingPath}</arg>
+            <arg>--dbUrl</arg><arg>${brokerDbUrl}</arg>
+            <arg>--dbUser</arg><arg>${brokerDbUser}</arg>
+            <arg>--dbPassword</arg><arg>${brokerDbPassword}</arg>
+            <arg>--brokerApiBaseUrl</arg><arg>${brokerApiBaseUrl}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>
@ -0,0 +1,18 @@
<configuration>
    <property>
        <name>jobTracker</name>
        <value>yarnRM</value>
    </property>
    <property>
        <name>nameNode</name>
        <value>hdfs://nameservice1</value>
    </property>
    <property>
        <name>oozie.use.system.libpath</name>
        <value>true</value>
    </property>
    <property>
        <name>oozie.action.sharelib.for.spark</name>
        <value>spark2</value>
    </property>
</configuration>
@ -0,0 +1,137 @@
<workflow-app name="create broker events - partial" xmlns="uri:oozie:workflow:0.5">

    <parameters>
        <property>
            <name>graphInputPath</name>
            <description>the path where the graph is stored</description>
        </property>
        <property>
            <name>workingPath</name>
            <description>the path where the generated data will be stored</description>
        </property>
        <property>
            <name>datasourceIdWhitelist</name>
            <value>-</value>
            <description>a white list (comma separated, - for empty list) of datasource ids</description>
        </property>
        <property>
            <name>datasourceTypeWhitelist</name>
            <value>-</value>
            <description>a white list (comma separated, - for empty list) of datasource types</description>
        </property>
        <property>
            <name>datasourceIdBlacklist</name>
            <value>-</value>
            <description>a black list (comma separated, - for empty list) of datasource ids</description>
        </property>
        <property>
            <name>esEventIndexName</name>
            <description>the elasticsearch index name for events</description>
        </property>
        <property>
            <name>esNotificationsIndexName</name>
            <description>the elasticsearch index name for notifications</description>
        </property>
        <property>
            <name>esIndexHost</name>
            <description>the elasticsearch host</description>
        </property>
        <property>
            <name>maxIndexedEventsForDsAndTopic</name>
            <description>the max number of events for each couple (ds/topic)</description>
        </property>
        <property>
            <name>brokerApiBaseUrl</name>
            <description>the url of the broker service api</description>
        </property>
        <property>
            <name>sparkDriverMemory</name>
            <description>memory for driver process</description>
        </property>
        <property>
            <name>sparkExecutorMemory</name>
            <description>memory for individual executor</description>
        </property>
        <property>
            <name>sparkExecutorCores</name>
            <description>number of cores used by single executor</description>
        </property>
        <property>
            <name>oozieActionShareLibForSpark2</name>
            <description>oozie action sharelib for spark 2.*</description>
        </property>
        <property>
            <name>spark2ExtraListeners</name>
            <value>com.cloudera.spark.lineage.NavigatorAppListener</value>
            <description>spark 2.* extra listeners classname</description>
        </property>
        <property>
            <name>spark2SqlQueryExecutionListeners</name>
            <value>com.cloudera.spark.lineage.NavigatorQueryListener</value>
            <description>spark 2.* sql query execution listeners classname</description>
        </property>
        <property>
            <name>spark2YarnHistoryServerAddress</name>
            <description>spark 2.* yarn history server address</description>
        </property>
        <property>
            <name>spark2EventLogDir</name>
            <description>spark 2.* event log dir location</description>
        </property>
    </parameters>

    <global>
        <job-tracker>${jobTracker}</job-tracker>
        <name-node>${nameNode}</name-node>
        <configuration>
            <property>
                <name>mapreduce.job.queuename</name>
                <value>${queueName}</value>
            </property>
            <property>
                <name>oozie.launcher.mapred.job.queue.name</name>
                <value>${oozieLauncherQueueName}</value>
            </property>
            <property>
                <name>oozie.action.sharelib.for.spark</name>
                <value>${oozieActionShareLibForSpark2}</value>
            </property>
        </configuration>
    </global>

    <start to="index_notifications"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>

    <action name="index_notifications">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <master>yarn</master>
            <mode>cluster</mode>
            <name>IndexNotificationsOnESJob</name>
            <class>eu.dnetlib.dhp.broker.oa.IndexNotificationsJob</class>
            <jar>dhp-broker-events-${projectVersion}.jar</jar>
            <spark-opts>
                --executor-memory=${sparkExecutorMemory}
                --driver-memory=${sparkDriverMemory}
                --conf spark.dynamicAllocation.maxExecutors="8"
                --conf spark.extraListeners=${spark2ExtraListeners}
                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                --conf spark.sql.shuffle.partitions=3840
            </spark-opts>
            <arg>--workingPath</arg><arg>${workingPath}</arg>
            <arg>--index</arg><arg>${esNotificationsIndexName}</arg>
            <arg>--esHost</arg><arg>${esIndexHost}</arg>
            <arg>--brokerApiBaseUrl</arg><arg>${brokerApiBaseUrl}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>
    </action>

    <end name="End"/>

</workflow-app>
@@ -44,6 +44,18 @@
         <name>brokerApiBaseUrl</name>
         <description>the url of the broker service api</description>
     </property>
+    <property>
+        <name>brokerDbUrl</name>
+        <description>the url of the broker database</description>
+    </property>
+    <property>
+        <name>brokerDbUser</name>
+        <description>the user of the broker database</description>
+    </property>
+    <property>
+        <name>brokerDbPassword</name>
+        <description>the password of the broker database</description>
+    </property>
     <property>
         <name>sparkDriverMemory</name>
         <description>memory for driver process</description>
@@ -99,38 +111,35 @@
         </configuration>
     </global>

-    <start to="index_notifications"/>
+    <start to="partition"/>

     <kill name="Kill">
         <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
     </kill>

-    <action name="index_notifications">
+    <action name="partition">
         <spark xmlns="uri:oozie:spark-action:0.2">
             <master>yarn</master>
             <mode>cluster</mode>
-            <name>IndexNotificationsOnESJob</name>
-            <class>eu.dnetlib.dhp.broker.oa.IndexNotificationsJob</class>
+            <name>PartitionEventsByDsIdJob</name>
+            <class>eu.dnetlib.dhp.broker.oa.PartitionEventsByDsIdJob</class>
             <jar>dhp-broker-events-${projectVersion}.jar</jar>
             <spark-opts>
+                --executor-cores=${sparkExecutorCores}
                 --executor-memory=${sparkExecutorMemory}
                 --driver-memory=${sparkDriverMemory}
-                --conf spark.dynamicAllocation.maxExecutors="8"
                 --conf spark.extraListeners=${spark2ExtraListeners}
                 --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
                 --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
                 --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
                 --conf spark.sql.shuffle.partitions=3840
             </spark-opts>
+            <arg>--graphPath</arg><arg>${graphInputPath}</arg>
             <arg>--workingPath</arg><arg>${workingPath}</arg>
-            <arg>--index</arg><arg>${esNotificationsIndexName}</arg>
-            <arg>--esHost</arg><arg>${esIndexHost}</arg>
-            <arg>--brokerApiBaseUrl</arg><arg>${brokerApiBaseUrl}</arg>
         </spark>
         <ok to="End"/>
         <error to="Kill"/>
     </action>


     <end name="End"/>

@@ -0,0 +1,32 @@
+[
+    {
+        "paramName": "wp",
+        "paramLongName": "workingPath",
+        "paramDescription": "the working path",
+        "paramRequired": true
+    },
+    {
+        "paramName": "dburl",
+        "paramLongName": "dbUrl",
+        "paramDescription": "the broker database url",
+        "paramRequired": true
+    },
+    {
+        "paramName": "u",
+        "paramLongName": "dbUser",
+        "paramDescription": "the broker database user",
+        "paramRequired": true
+    },
+    {
+        "paramName": "p",
+        "paramLongName": "dbPassword",
+        "paramDescription": "the broker database password",
+        "paramRequired": true
+    },
+    {
+        "paramName": "broker",
+        "paramLongName": "brokerApiBaseUrl",
+        "paramDescription": "the url of the broker service api",
+        "paramRequired": true
+    }
+]
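Parameter files like the one above are read at job start-up through ArgumentApplicationParser; a minimal sketch of the consuming side follows (the class name and resource path are illustrative, only the paramLongName values come from the file):

    // Hypothetical consumer of the parameter file above; only the parameter names are real.
    import org.apache.commons.io.IOUtils;

    import eu.dnetlib.dhp.application.ArgumentApplicationParser;

    public class NotificationArgsExample {
        public static void main(String[] args) throws Exception {
            final ArgumentApplicationParser parser = new ArgumentApplicationParser(
                IOUtils
                    .toString(
                        NotificationArgsExample.class
                            .getResourceAsStream("/eu/dnetlib/dhp/broker/oa/example_params.json"))); // assumed path
            parser.parseArgument(args);

            // long names map 1:1 to the "paramLongName" entries declared above
            final String workingPath = parser.get("workingPath");
            final String dbUrl = parser.get("dbUrl");
            final String dbUser = parser.get("dbUser");
            final String dbPassword = parser.get("dbPassword");
            final String brokerApiBaseUrl = parser.get("brokerApiBaseUrl");

            System.out.println(workingPath + " " + dbUrl + " " + dbUser + " " + brokerApiBaseUrl + " " + (dbPassword != null));
        }
    }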
@@ -1,7 +1,6 @@

 package eu.dnetlib.dhp.oa.dedup;

-import java.io.Serializable;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
@@ -18,6 +17,7 @@ import com.fasterxml.jackson.databind.DeserializationFeature;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.collect.Lists;

+import eu.dnetlib.dhp.oa.merge.AuthorMerger;
 import eu.dnetlib.dhp.schema.common.ModelSupport;
 import eu.dnetlib.dhp.schema.oaf.*;
 import scala.Tuple2;
@@ -14,6 +14,7 @@ import org.codehaus.jackson.map.ObjectMapper;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;

+import eu.dnetlib.dhp.oa.merge.AuthorMerger;
 import eu.dnetlib.dhp.schema.oaf.*;
 import eu.dnetlib.pace.util.MapDocumentUtil;
 import scala.Tuple2;
@@ -65,7 +66,7 @@ public class EntityMergerTest implements Serializable {
        assertEquals(dedupId, pub_merged.getId());

        assertEquals(pub_merged.getJournal(), pub_top.getJournal());
-       assertEquals(pub_merged.getBestaccessright(), pub_top.getBestaccessright());
+       assertEquals(pub_merged.getBestaccessright().getClassid(), "OPEN");
        assertEquals(pub_merged.getResulttype(), pub_top.getResulttype());
        assertEquals(pub_merged.getLanguage(), pub_merged.getLanguage());
        assertEquals(pub_merged.getPublisher(), pub_top.getPublisher());
@@ -194,10 +194,10 @@ public class SparkDedupTest implements Serializable {
            .textFile(testOutputBasePath + "/" + testActionSetId + "/otherresearchproduct_simrel")
            .count();

-       assertEquals(3432, orgs_simrel);
-       assertEquals(7152, pubs_simrel);
+       assertEquals(3082, orgs_simrel);
+       assertEquals(7036, pubs_simrel);
        assertEquals(344, sw_simrel);
-       assertEquals(458, ds_simrel);
+       assertEquals(442, ds_simrel);
        assertEquals(6750, orp_simrel);
    }

@@ -343,8 +343,8 @@ public class SparkDedupTest implements Serializable {
            .load(testOutputBasePath + "/" + testActionSetId + "/otherresearchproduct_mergerel")
            .count();

-       assertEquals(1276, orgs_mergerel);
-       assertEquals(1442, pubs_mergerel);
+       assertEquals(1272, orgs_mergerel);
+       assertEquals(1438, pubs_mergerel);
        assertEquals(288, sw_mergerel);
        assertEquals(472, ds_mergerel);
        assertEquals(718, orp_mergerel);
@@ -390,10 +390,10 @@
                testOutputBasePath + "/" + testActionSetId + "/otherresearchproduct_deduprecord")
            .count();

-       assertEquals(82, orgs_deduprecord);
-       assertEquals(66, pubs_deduprecord);
+       assertEquals(85, orgs_deduprecord);
+       assertEquals(65, pubs_deduprecord);
        assertEquals(51, sw_deduprecord);
-       assertEquals(96, ds_deduprecord);
+       assertEquals(97, ds_deduprecord);
        assertEquals(89, orp_deduprecord);
    }

@@ -473,12 +473,12 @@
            .distinct()
            .count();

-       assertEquals(897, publications);
-       assertEquals(835, organizations);
+       assertEquals(896, publications);
+       assertEquals(838, organizations);
        assertEquals(100, projects);
        assertEquals(100, datasource);
        assertEquals(200, softwares);
-       assertEquals(388, dataset);
+       assertEquals(389, dataset);
        assertEquals(517, otherresearchproduct);

        long deletedOrgs = jsc
@@ -533,7 +533,7 @@

        long relations = jsc.textFile(testDedupGraphBasePath + "/relation").count();

-       assertEquals(4866, relations);
+       assertEquals(4858, relations);

        // check deletedbyinference
        final Dataset<Relation> mergeRels = spark
@@ -168,10 +168,10 @@ public class SparkStatsTest implements Serializable {
            .textFile(testOutputBasePath + "/" + testActionSetId + "/otherresearchproduct_blockstats")
            .count();

-       assertEquals(121, orgs_blocks);
-       assertEquals(110, pubs_blocks);
-       assertEquals(21, sw_blocks);
-       assertEquals(67, ds_blocks);
-       assertEquals(55, orp_blocks);
+       assertEquals(549, orgs_blocks);
+       assertEquals(299, pubs_blocks);
+       assertEquals(122, sw_blocks);
+       assertEquals(186, ds_blocks);
+       assertEquals(170, orp_blocks);
    }
}

File diff suppressed because one or more lines are too long
@@ -341,13 +341,7 @@ object DoiBoostMappingUtil {

  def generateIdentifier (oaf: Result, doi: String): String = {
    val id = DHPUtils.md5 (doi.toLowerCase)
-    return s"50|${
-      doiBoostNSPREFIX
-    }${
-      SEPARATOR
-    }${
-      id
-    }"
+    s"50|${doiBoostNSPREFIX}${SEPARATOR}${id}"
  }

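The simplification does not change the identifier scheme. A small sketch of the resulting id shape follows; the DHPUtils import path is assumed, and the literal prefix mirrors the "doiboost____" namespace referenced in the Crossref2Oaf comment below:

    // Sketch only: builds the same "50|doiboost____::md5(doi)" shape produced by generateIdentifier.
    import eu.dnetlib.dhp.utils.DHPUtils; // assumed location of the md5 helper used above

    public class DoiBoostIdExample {
        public static String doiBoostId(final String doi) {
            // "doiboost____" stands in for doiBoostNSPREFIX, "::" for SEPARATOR
            return "50|" + "doiboost____" + "::" + DHPUtils.md5(doi.toLowerCase());
        }

        public static void main(String[] args) {
            System.out.println(doiBoostId("10.1000/xyz123"));
        }
    }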
@@ -1,13 +1,15 @@
 package eu.dnetlib.doiboost

 import eu.dnetlib.dhp.application.ArgumentApplicationParser
-import eu.dnetlib.dhp.schema.oaf.{Publication, Relation, Dataset => OafDataset, Organization}
+import eu.dnetlib.dhp.oa.merge.AuthorMerger
+import eu.dnetlib.dhp.schema.oaf.{Organization, Publication, Relation, Dataset => OafDataset}
 import eu.dnetlib.doiboost.mag.ConversionUtil
 import org.apache.commons.io.IOUtils
 import org.apache.spark.SparkConf
 import org.apache.spark.sql.functions.col
 import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
 import org.slf4j.{Logger, LoggerFactory}

 import scala.collection.JavaConverters._

 object SparkGenerateDoiBoost {
@@ -49,6 +51,7 @@ object SparkGenerateDoiBoost {
      val otherPub = item._2._2
      if (otherPub != null) {
        crossrefPub.mergeFrom(otherPub)
+        crossrefPub.setAuthor(AuthorMerger.mergeAuthor(crossrefPub.getAuthor, otherPub.getAuthor))
      }
    }
    crossrefPub
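The same enrichment is available to Java callers of AuthorMerger (for example the dedup code earlier in this commit); a minimal sketch, assuming two already-populated Publication records:

    // Sketch: merge the author lists of two matching publications, as the Scala call above does.
    import eu.dnetlib.dhp.oa.merge.AuthorMerger;
    import eu.dnetlib.dhp.schema.oaf.Publication;

    public class AuthorEnrichmentExample {
        public static Publication enrich(final Publication crossrefPub, final Publication otherPub) {
            if (otherPub != null) {
                crossrefPub.mergeFrom(otherPub);
                crossrefPub.setAuthor(AuthorMerger.mergeAuthor(crossrefPub.getAuthor(), otherPub.getAuthor()));
            }
            return crossrefPub;
        }
    }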
@@ -14,6 +14,8 @@ import scala.collection.JavaConverters._
 import scala.collection.mutable
 import scala.util.matching.Regex

+case class CrossrefDT(doi: String, json:String) {}
+
 case class mappingAffiliation(name: String) {}

 case class mappingAuthor(given: Option[String], family: String, ORCID: Option[String], affiliation: Option[mappingAffiliation]) {}
@@ -93,7 +95,7 @@ case object Crossref2Oaf {

    result.setOriginalId(tmp.filter(id => id != null).asJava)

-    //Set identifier as {50|60} | doiboost____::md5(DOI)
+    //Set identifier as 50 | doiboost____::md5(DOI)
    result.setId(generateIdentifier(result, doi))

    // Add DataInfo
@@ -267,7 +269,7 @@

    val r = new Relation
    r.setSource(sourceId)
-    r.setTarget(s"$nsPrefix::$targetId")
+    r.setTarget(s"40|$nsPrefix::$targetId")
    r.setRelType("resultProject")
    r.setRelClass("isProducedBy")
    r.setSubRelType("outcome")
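The corrected relation now carries the "40|" project-entity prefix on its target. A Java sketch of the same construction, with sourceId, nsPrefix and targetId as placeholders:

    // Sketch of the result-to-project relation built above, including the "40|" prefix on the target id.
    import eu.dnetlib.dhp.schema.oaf.Relation;

    public class FunderRelationExample {
        public static Relation resultProjectRelation(final String sourceId, final String nsPrefix, final String targetId) {
            final Relation r = new Relation();
            r.setSource(sourceId);
            r.setTarget("40|" + nsPrefix + "::" + targetId);
            r.setRelType("resultProject");
            r.setRelClass("isProducedBy");
            r.setSubRelType("outcome");
            return r;
        }
    }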
@@ -0,0 +1,93 @@
+package eu.dnetlib.doiboost.crossref
+
+import eu.dnetlib.dhp.application.ArgumentApplicationParser
+import org.apache.commons.io.IOUtils
+import org.apache.spark.SparkConf
+import org.apache.spark.sql.expressions.Aggregator
+import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
+import org.json4s
+import org.json4s.DefaultFormats
+import org.json4s.jackson.JsonMethods.parse
+import org.slf4j.{Logger, LoggerFactory}
+
+object CrossrefDataset {
+
+
+  def extractTimestamp(input:String): Long = {
+
+    implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
+    lazy val json: json4s.JValue = parse(input)
+
+    (json\"indexed"\"timestamp").extractOrElse[Long](0)
+
+  }
+
+
+  def main(args: Array[String]): Unit = {
+
+
+    val logger: Logger = LoggerFactory.getLogger(SparkMapDumpIntoOAF.getClass)
+    val conf: SparkConf = new SparkConf()
+    val parser = new ArgumentApplicationParser(IOUtils.toString(CrossrefDataset.getClass.getResourceAsStream("/eu/dnetlib/dhp/doiboost/crossref_to_dataset_params.json")))
+    parser.parseArgument(args)
+    val spark: SparkSession =
+      SparkSession
+        .builder()
+        .config(conf)
+        .appName(SparkMapDumpIntoOAF.getClass.getSimpleName)
+        .master(parser.get("master")).getOrCreate()
+    import spark.implicits._
+
+
+    val crossrefAggregator = new Aggregator[CrossrefDT, CrossrefDT, CrossrefDT] with Serializable {
+
+      override def zero: CrossrefDT = null
+
+      override def reduce(b: CrossrefDT, a: CrossrefDT): CrossrefDT = {
+        if (b == null)
+          return a
+        if (a == null)
+          return b
+
+        val tb = extractTimestamp(b.json)
+        val ta = extractTimestamp(a.json)
+        if(ta >tb) {
+          return a
+        }
+        b
+      }
+
+      override def merge(a: CrossrefDT, b: CrossrefDT): CrossrefDT = {
+        if (b == null)
+          return a
+        if (a == null)
+          return b
+
+        val tb = extractTimestamp(b.json)
+        val ta = extractTimestamp(a.json)
+        if(ta >tb) {
+          return a
+        }
+        b
+      }
+
+      override def bufferEncoder: Encoder[CrossrefDT] = implicitly[Encoder[CrossrefDT]]
+
+      override def outputEncoder: Encoder[CrossrefDT] = implicitly[Encoder[CrossrefDT]]
+
+      override def finish(reduction: CrossrefDT): CrossrefDT = reduction
+    }
+
+    val sourcePath:String = parser.get("sourcePath")
+    val targetPath:String = parser.get("targetPath")
+
+    val ds:Dataset[CrossrefDT] = spark.read.load(sourcePath).as[CrossrefDT]
+
+    ds.groupByKey(_.doi)
+      .agg(crossrefAggregator.toColumn)
+      .map(s=>s._2)
+      .write.mode(SaveMode.Overwrite).save(targetPath)
+
+  }
+
+}
@@ -46,11 +46,11 @@
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <main-class>eu.dnetlib.doiboost.crossref.CrossrefImporter</main-class>
-            <arg>-t</arg><arg>${workingPath}/input/crossref/index_dump</arg>
+            <arg>-t</arg><arg>${workingPath}/input/crossref/index_dump_1</arg>
            <arg>-n</arg><arg>${nameNode}</arg>
            <arg>-ts</arg><arg>${timestamp}</arg>
        </java>
-        <ok to="ExtractCrossrefToOAF"/>
+        <ok to="End"/>
        <error to="Kill"/>
    </action>

@@ -68,7 +68,7 @@
                --driver-memory=${sparkDriverMemory}
                ${sparkExtraOPT}
            </spark-opts>
-            <arg>--sourcePath</arg><arg>${workingPath}/input/crossref/index_dump,${workingPath}/crossref/index_dump</arg>
+            <arg>--sourcePath</arg><arg>${workingPath}/input/crossref/index_dump,${workingPath}/input/crossref/index_dump_1,${workingPath}/crossref/index_dump</arg>
            <arg>--targetPath</arg><arg>${workingPath}/input/crossref</arg>
            <arg>--master</arg><arg>yarn-cluster</arg>
        </spark>
@@ -76,5 +76,28 @@
        <error to="Kill"/>
    </action>

+    <action name="GenerateDataset">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn-cluster</master>
+            <mode>cluster</mode>
+            <name>ExtractCrossrefToOAF</name>
+            <class>eu.dnetlib.doiboost.crossref.CrossrefDataset</class>
+            <jar>dhp-doiboost-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                ${sparkExtraOPT}
+            </spark-opts>
+            <arg>--sourcePath</arg><arg>/data/doiboost/crossref/cr_dataset</arg>
+            <arg>--targetPath</arg><arg>/data/doiboost/crossref/crossrefDataset</arg>
+            <arg>--master</arg><arg>yarn-cluster</arg>
+        </spark>
+        <ok to="End"/>
+        <error to="Kill"/>
+    </action>
+
    <end name="End"/>
</workflow-app>
@@ -0,0 +1,6 @@
+[
+    {"paramName":"s", "paramLongName":"sourcePath", "paramDescription": "the path of the sequencial file to read", "paramRequired": true},
+    {"paramName":"t", "paramLongName":"targetPath", "paramDescription": "the working dir path", "paramRequired": true},
+    {"paramName":"m", "paramLongName":"master", "paramDescription": "the master name", "paramRequired": true}
+
+]
@@ -89,7 +89,7 @@
            </spark-opts>
            <arg>--dbPublicationPath</arg><arg>${workingDirPath}/doiBoostPublicationFiltered</arg>
            <arg>--dbDatasetPath</arg><arg>${workingDirPath}/crossrefDataset</arg>
-            <arg>--crossRefRelation</arg><arg>/data/doiboost/input/crossref/relations</arg>
+            <arg>--crossRefRelation</arg><arg>${workingDirPath}/crossrefRelation</arg>
            <arg>--dbaffiliationRelationPath</arg><arg>${workingDirPath}/doiBoostPublicationAffiliation</arg>
            <arg>-do</arg><arg>${workingDirPath}/doiBoostOrganization</arg>
            <arg>--targetPath</arg><arg>${workingDirPath}/actionDataSet</arg>
@@ -0,0 +1,54 @@
+package eu.dnetlib.dhp.doiboost
+
+import eu.dnetlib.dhp.schema.oaf.Publication
+import org.apache.spark.SparkContext
+import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession}
+import org.codehaus.jackson.map.{ObjectMapper, SerializationConfig}
+import org.json4s
+import org.json4s.DefaultFormats
+import org.json4s.jackson.JsonMethods._
+
+import scala.collection.JavaConverters._
+class QueryTest {
+
+  def extract_payload(input:String) :String = {
+
+    implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
+    lazy val json: json4s.JValue = parse(input)
+
+    compact(render((json \ "payload")))
+
+  }
+
+  def hasInstanceWithUrl(p:Publication):Boolean = {
+    val c = p.getInstance.asScala.map(i => i.getUrl!= null && !i.getUrl.isEmpty).size
+    !(!p.getInstance.isEmpty && c == p.getInstance().size)
+  }
+
+  def hasNullAccessRights(p:Publication):Boolean = {
+    val c = p.getInstance.asScala.map(i => i.getAccessright!= null && i.getAccessright.getClassname.nonEmpty).size
+    !p.getInstance.isEmpty && c == p.getInstance().size()
+  }
+
+  def myQuery(spark:SparkSession, sc:SparkContext): Unit = {
+    implicit val mapEncoderPub: Encoder[Publication] = Encoders.kryo[Publication]
+
+    val mapper = new ObjectMapper()
+    mapper.getSerializationConfig.enable(SerializationConfig.Feature.INDENT_OUTPUT)
+
+    val ds:Dataset[Publication] = spark.read.load("/tmp/p").as[Publication]
+
+    ds.filter(p =>p.getBestaccessright!= null && p.getBestaccessright.getClassname.nonEmpty).count()
+
+  }
+
+}
@@ -19,8 +19,6 @@ class CrossrefMappingTest {



-
-
  @Test
  def testFunderRelationshipsMapping(): Unit = {
    val template = Source.fromInputStream(getClass.getResourceAsStream("article_funder_template.json")).mkString
@@ -83,7 +83,6 @@
            <artifactId>dhp-schemas</artifactId>
            <version>${project.version}</version>
        </dependency>
-
        <dependency>
            <groupId>com.jayway.jsonpath</groupId>
            <artifactId>json-path</artifactId>
@@ -90,169 +90,15 @@ public class CleanGraphSparkJob {
        final CleaningRuleMap mapping = CleaningRuleMap.create(vocs);

        readTableFromPath(spark, inputPath, clazz)
-            .map((MapFunction<T, T>) value -> fixVocabularyNames(value), Encoders.bean(clazz))
+            .map((MapFunction<T, T>) value -> CleaningFunctions.fixVocabularyNames(value), Encoders.bean(clazz))
            .map((MapFunction<T, T>) value -> OafCleaner.apply(value, mapping), Encoders.bean(clazz))
-            .map((MapFunction<T, T>) value -> fixDefaults(value), Encoders.bean(clazz))
+            .map((MapFunction<T, T>) value -> CleaningFunctions.fixDefaults(value), Encoders.bean(clazz))
            .write()
            .mode(SaveMode.Overwrite)
            .option("compression", "gzip")
            .json(outputPath);
    }

-    protected static <T extends Oaf> T fixVocabularyNames(T value) {
-        if (value instanceof Datasource) {
-            // nothing to clean here
-        } else if (value instanceof Project) {
-            // nothing to clean here
-        } else if (value instanceof Organization) {
-            Organization o = (Organization) value;
-            if (Objects.nonNull(o.getCountry())) {
-                fixVocabName(o.getCountry(), ModelConstants.DNET_COUNTRY_TYPE);
-            }
-        } else if (value instanceof Relation) {
-            // nothing to clean here
-        } else if (value instanceof Result) {
-
-            Result r = (Result) value;
-
-            fixVocabName(r.getLanguage(), ModelConstants.DNET_LANGUAGES);
-            fixVocabName(r.getResourcetype(), ModelConstants.DNET_DATA_CITE_RESOURCE);
-            fixVocabName(r.getBestaccessright(), ModelConstants.DNET_ACCESS_MODES);
-
-            if (Objects.nonNull(r.getSubject())) {
-                r.getSubject().forEach(s -> fixVocabName(s.getQualifier(), ModelConstants.DNET_SUBJECT_TYPOLOGIES));
-            }
-            if (Objects.nonNull(r.getInstance())) {
-                for (Instance i : r.getInstance()) {
-                    fixVocabName(i.getAccessright(), ModelConstants.DNET_ACCESS_MODES);
-                    fixVocabName(i.getRefereed(), ModelConstants.DNET_REVIEW_LEVELS);
-                }
-            }
-            if (Objects.nonNull(r.getAuthor())) {
-                r.getAuthor().forEach(a -> {
-                    if (Objects.nonNull(a.getPid())) {
-                        a.getPid().forEach(p -> {
-                            fixVocabName(p.getQualifier(), ModelConstants.DNET_PID_TYPES);
-                        });
-                    }
-                });
-            }
-            if (value instanceof Publication) {
-
-            } else if (value instanceof eu.dnetlib.dhp.schema.oaf.Dataset) {
-
-            } else if (value instanceof OtherResearchProduct) {
-
-            } else if (value instanceof Software) {
-
-            }
-        }
-
-        return value;
-    }
-
-    private static void fixVocabName(Qualifier q, String vocabularyName) {
-        if (Objects.nonNull(q) && StringUtils.isBlank(q.getSchemeid())) {
-            q.setSchemeid(vocabularyName);
-            q.setSchemename(vocabularyName);
-        }
-    }
-
-    protected static <T extends Oaf> T fixDefaults(T value) {
-        if (value instanceof Datasource) {
-            // nothing to clean here
-        } else if (value instanceof Project) {
-            // nothing to clean here
-        } else if (value instanceof Organization) {
-            Organization o = (Organization) value;
-            if (Objects.isNull(o.getCountry()) || StringUtils.isBlank(o.getCountry().getClassid())) {
-                o.setCountry(qualifier("UNKNOWN", "Unknown", ModelConstants.DNET_COUNTRY_TYPE));
-            }
-        } else if (value instanceof Relation) {
-            // nothing to clean here
-        } else if (value instanceof Result) {
-
-            Result r = (Result) value;
-            if (Objects.nonNull(r.getPublisher()) && StringUtils.isBlank(r.getPublisher().getValue())) {
-                r.setPublisher(null);
-            }
-            if (Objects.isNull(r.getLanguage()) || StringUtils.isBlank(r.getLanguage().getClassid())) {
-                r
-                    .setLanguage(
-                        qualifier("und", "Undetermined", ModelConstants.DNET_LANGUAGES));
-            }
-            if (Objects.nonNull(r.getSubject())) {
-                r
-                    .setSubject(
-                        r
-                            .getSubject()
-                            .stream()
-                            .filter(Objects::nonNull)
-                            .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
-                            .filter(sp -> Objects.nonNull(sp.getQualifier()))
-                            .filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
-                            .collect(Collectors.toList()));
-            }
-            if (Objects.isNull(r.getResourcetype()) || StringUtils.isBlank(r.getResourcetype().getClassid())) {
-                r
-                    .setResourcetype(
-                        qualifier("UNKNOWN", "Unknown", ModelConstants.DNET_DATA_CITE_RESOURCE));
-            }
-            if (Objects.nonNull(r.getInstance())) {
-                for (Instance i : r.getInstance()) {
-                    if (Objects.isNull(i.getAccessright()) || StringUtils.isBlank(i.getAccessright().getClassid())) {
-                        i.setAccessright(qualifier("UNKNOWN", "not available", ModelConstants.DNET_ACCESS_MODES));
-                    }
-                    if (Objects.isNull(i.getHostedby()) || StringUtils.isBlank(i.getHostedby().getKey())) {
-                        i.setHostedby(ModelConstants.UNKNOWN_REPOSITORY);
-                    }
-                    if (Objects.isNull(i.getRefereed())) {
-                        i.setRefereed(qualifier("0000", "Unknown", ModelConstants.DNET_REVIEW_LEVELS));
-                    }
-                }
-            }
-            if (Objects.isNull(r.getBestaccessright()) || StringUtils.isBlank(r.getBestaccessright().getClassid())) {
-                Qualifier bestaccessrights = AbstractMdRecordToOafMapper.createBestAccessRights(r.getInstance());
-                if (Objects.isNull(bestaccessrights)) {
-                    r
-                        .setBestaccessright(
-                            qualifier("UNKNOWN", "not available", ModelConstants.DNET_ACCESS_MODES));
-                } else {
-                    r.setBestaccessright(bestaccessrights);
-                }
-            }
-            if (Objects.nonNull(r.getAuthor())) {
-                boolean nullRank = r
-                    .getAuthor()
-                    .stream()
-                    .anyMatch(a -> Objects.isNull(a.getRank()));
-                if (nullRank) {
-                    int i = 1;
-                    for (Author author : r.getAuthor()) {
-                        author.setRank(i++);
-                    }
-                }
-            }
-            if (value instanceof Publication) {
-
-            } else if (value instanceof eu.dnetlib.dhp.schema.oaf.Dataset) {
-
-            } else if (value instanceof OtherResearchProduct) {
-
-            } else if (value instanceof Software) {
-
-            }
-        }
-
-        return value;
-    }
-
-    private static Qualifier qualifier(String classid, String classname, String scheme) {
-        return OafMapperUtils
-            .qualifier(
-                classid, classname, scheme, scheme);
-    }
-
    private static <T extends Oaf> Dataset<T> readTableFromPath(
        SparkSession spark, String inputEntityPath, Class<T> clazz) {

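With the helpers extracted into CleaningFunctions (new file below), the cleaning steps can also be exercised on a single record outside Spark, which is handy in tests; a sketch, kept in the same package because fixDefaults retains protected visibility:

    // Sketch (same package as CleaningFunctions because of fixDefaults' visibility);
    // Publication stands in for any Result subtype.
    package eu.dnetlib.dhp.oa.graph.clean;

    import eu.dnetlib.dhp.schema.oaf.Publication;

    public class CleaningFunctionsExample {
        public static Publication clean(final Publication p) {
            // normalise blank vocabulary scheme ids/names, then apply the default values
            final Publication withVocabs = CleaningFunctions.fixVocabularyNames(p);
            return CleaningFunctions.fixDefaults(withVocabs);
        }
    }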
@@ -0,0 +1,204 @@
+
+package eu.dnetlib.dhp.oa.graph.clean;
+
+import java.util.LinkedHashMap;
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.StringUtils;
+
+import com.clearspring.analytics.util.Lists;
+
+import eu.dnetlib.dhp.oa.graph.raw.AbstractMdRecordToOafMapper;
+import eu.dnetlib.dhp.oa.graph.raw.common.OafMapperUtils;
+import eu.dnetlib.dhp.schema.common.ModelConstants;
+import eu.dnetlib.dhp.schema.oaf.*;
+
+public class CleaningFunctions {
+
+    public static final String ORCID_PREFIX_REGEX = "^http(s?):\\/\\/orcid\\.org\\/";
+
+    public static <T extends Oaf> T fixVocabularyNames(T value) {
+        if (value instanceof Datasource) {
+            // nothing to clean here
+        } else if (value instanceof Project) {
+            // nothing to clean here
+        } else if (value instanceof Organization) {
+            Organization o = (Organization) value;
+            if (Objects.nonNull(o.getCountry())) {
+                fixVocabName(o.getCountry(), ModelConstants.DNET_COUNTRY_TYPE);
+            }
+        } else if (value instanceof Relation) {
+            // nothing to clean here
+        } else if (value instanceof Result) {
+
+            Result r = (Result) value;
+
+            fixVocabName(r.getLanguage(), ModelConstants.DNET_LANGUAGES);
+            fixVocabName(r.getResourcetype(), ModelConstants.DNET_DATA_CITE_RESOURCE);
+            fixVocabName(r.getBestaccessright(), ModelConstants.DNET_ACCESS_MODES);
+
+            if (Objects.nonNull(r.getSubject())) {
+                r.getSubject().forEach(s -> fixVocabName(s.getQualifier(), ModelConstants.DNET_SUBJECT_TYPOLOGIES));
+            }
+            if (Objects.nonNull(r.getInstance())) {
+                for (Instance i : r.getInstance()) {
+                    fixVocabName(i.getAccessright(), ModelConstants.DNET_ACCESS_MODES);
+                    fixVocabName(i.getRefereed(), ModelConstants.DNET_REVIEW_LEVELS);
+                }
+            }
+            if (Objects.nonNull(r.getAuthor())) {
+                r.getAuthor().forEach(a -> {
+                    if (Objects.nonNull(a.getPid())) {
+                        a.getPid().forEach(p -> {
+                            fixVocabName(p.getQualifier(), ModelConstants.DNET_PID_TYPES);
+                        });
+                    }
+                });
+            }
+            if (value instanceof Publication) {
+
+            } else if (value instanceof eu.dnetlib.dhp.schema.oaf.Dataset) {
+
+            } else if (value instanceof OtherResearchProduct) {
+
+            } else if (value instanceof Software) {
+
+            }
+        }
+
+        return value;
+    }
+
+    protected static <T extends Oaf> T fixDefaults(T value) {
+        if (value instanceof Datasource) {
+            // nothing to clean here
+        } else if (value instanceof Project) {
+            // nothing to clean here
+        } else if (value instanceof Organization) {
+            Organization o = (Organization) value;
+            if (Objects.isNull(o.getCountry()) || StringUtils.isBlank(o.getCountry().getClassid())) {
+                o.setCountry(qualifier("UNKNOWN", "Unknown", ModelConstants.DNET_COUNTRY_TYPE));
+            }
+        } else if (value instanceof Relation) {
+            // nothing to clean here
+        } else if (value instanceof Result) {
+
+            Result r = (Result) value;
+            if (Objects.nonNull(r.getPublisher()) && StringUtils.isBlank(r.getPublisher().getValue())) {
+                r.setPublisher(null);
+            }
+            if (Objects.isNull(r.getLanguage()) || StringUtils.isBlank(r.getLanguage().getClassid())) {
+                r
+                    .setLanguage(
+                        qualifier("und", "Undetermined", ModelConstants.DNET_LANGUAGES));
+            }
+            if (Objects.nonNull(r.getSubject())) {
+                r
+                    .setSubject(
+                        r
+                            .getSubject()
+                            .stream()
+                            .filter(Objects::nonNull)
+                            .filter(sp -> StringUtils.isNotBlank(sp.getValue()))
+                            .filter(sp -> Objects.nonNull(sp.getQualifier()))
+                            .filter(sp -> StringUtils.isNotBlank(sp.getQualifier().getClassid()))
+                            .collect(Collectors.toList()));
+            }
+            if (Objects.isNull(r.getResourcetype()) || StringUtils.isBlank(r.getResourcetype().getClassid())) {
+                r
+                    .setResourcetype(
+                        qualifier("UNKNOWN", "Unknown", ModelConstants.DNET_DATA_CITE_RESOURCE));
+            }
+            if (Objects.nonNull(r.getInstance())) {
+                for (Instance i : r.getInstance()) {
+                    if (Objects.isNull(i.getAccessright()) || StringUtils.isBlank(i.getAccessright().getClassid())) {
+                        i.setAccessright(qualifier("UNKNOWN", "not available", ModelConstants.DNET_ACCESS_MODES));
+                    }
+                    if (Objects.isNull(i.getHostedby()) || StringUtils.isBlank(i.getHostedby().getKey())) {
+                        i.setHostedby(ModelConstants.UNKNOWN_REPOSITORY);
+                    }
+                    if (Objects.isNull(i.getRefereed())) {
+                        i.setRefereed(qualifier("0000", "Unknown", ModelConstants.DNET_REVIEW_LEVELS));
+                    }
+                }
+            }
+            if (Objects.isNull(r.getBestaccessright()) || StringUtils.isBlank(r.getBestaccessright().getClassid())) {
+                Qualifier bestaccessrights = AbstractMdRecordToOafMapper.createBestAccessRights(r.getInstance());
+                if (Objects.isNull(bestaccessrights)) {
+                    r
+                        .setBestaccessright(
+                            qualifier("UNKNOWN", "not available", ModelConstants.DNET_ACCESS_MODES));
+                } else {
+                    r.setBestaccessright(bestaccessrights);
+                }
+            }
+            if (Objects.nonNull(r.getAuthor())) {
+                boolean nullRank = r
+                    .getAuthor()
+                    .stream()
+                    .anyMatch(a -> Objects.isNull(a.getRank()));
+                if (nullRank) {
+                    int i = 1;
+                    for (Author author : r.getAuthor()) {
+                        author.setRank(i++);
+                    }
+                }
+                for (Author a : r.getAuthor()) {
+                    if (Objects.isNull(a.getPid())) {
+                        a.setPid(Lists.newArrayList());
+                    } else {
+                        a
+                            .setPid(
+                                a
+                                    .getPid()
+                                    .stream()
+                                    .filter(p -> Objects.nonNull(p.getQualifier()))
+                                    .filter(p -> StringUtils.isNotBlank(p.getValue()))
+                                    .map(p -> {
+                                        p.setValue(p.getValue().trim().replaceAll(ORCID_PREFIX_REGEX, ""));
+                                        return p;
+                                    })
+                                    .collect(
+                                        Collectors
+                                            .toMap(
+                                                StructuredProperty::getValue, Function.identity(), (p1, p2) -> p1,
+                                                LinkedHashMap::new))
+                                    .values()
+                                    .stream()
+                                    .collect(Collectors.toList()));
+                    }
+                }
+
+            }
+            if (value instanceof Publication) {
+
+            } else if (value instanceof eu.dnetlib.dhp.schema.oaf.Dataset) {
+
+            } else if (value instanceof OtherResearchProduct) {
+
+            } else if (value instanceof Software) {
+
+            }
+        }
+
+        return value;
+    }
+
+    // HELPERS
+
+    private static void fixVocabName(Qualifier q, String vocabularyName) {
+        if (Objects.nonNull(q) && StringUtils.isBlank(q.getSchemeid())) {
+            q.setSchemeid(vocabularyName);
+            q.setSchemename(vocabularyName);
+        }
+    }
+
+    private static Qualifier qualifier(String classid, String classname, String scheme) {
+        return OafMapperUtils
+            .qualifier(
+                classid, classname, scheme, scheme);
+    }
+
+}
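The ORCID normalisation applied to author pids amounts to stripping the resolver prefix; a standalone sketch of the regex behaviour, with an illustrative sample value:

    // Demonstrates ORCID_PREFIX_REGEX in isolation.
    public class OrcidPrefixExample {
        public static void main(String[] args) {
            final String regex = "^http(s?):\\/\\/orcid\\.org\\/";
            final String raw = "https://orcid.org/0000-0002-1825-0097"; // sample value
            final String cleaned = raw.trim().replaceAll(regex, "");
            System.out.println(cleaned); // prints 0000-0002-1825-0097
        }
    }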
@@ -11,6 +11,7 @@ import java.util.Set;
 import java.util.stream.Collectors;

 import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Encoders;
 import org.apache.spark.sql.SaveMode;
@@ -57,7 +58,7 @@ public class DumpProducts implements Serializable {

        Utils
            .readPath(spark, inputPath, inputClazz)
-            .map(value -> execMap(value, communityMap, graph), Encoders.bean(outputClazz))
+            .map((MapFunction<I, O>) value -> execMap(value, communityMap, graph), Encoders.bean(outputClazz))
            .filter(Objects::nonNull)
            .write()
            .mode(SaveMode.Overwrite)
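The explicit MapFunction casts introduced here (and in DumpGraphEntities below) disambiguate Dataset.map, which is overloaded for both scala.Function1 and the Java MapFunction interface; a self-contained sketch of the pattern:

    // Why the cast: map(lambda, Encoder) is ambiguous without it, since Dataset.map also
    // accepts a scala.Function1. The cast selects map(MapFunction<T, U>, Encoder<U>).
    import org.apache.spark.api.java.function.MapFunction;
    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Encoders;
    import org.apache.spark.sql.SparkSession;

    public class MapFunctionCastExample {
        public static void main(String[] args) {
            final SparkSession spark = SparkSession.builder().master("local[*]").appName("map-cast").getOrCreate();

            final Dataset<String> input = spark
                .createDataset(java.util.Arrays.asList("a", "bb", "ccc"), Encoders.STRING());

            final Dataset<Integer> lengths = input
                .map((MapFunction<String, Integer>) String::length, Encoders.INT());

            lengths.show();
            spark.stop();
        }
    }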
@@ -9,6 +9,7 @@ import java.util.*;
 import java.util.stream.Collectors;

 import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.sql.Encoders;
 import org.apache.spark.sql.SaveMode;
 import org.apache.spark.sql.SparkSession;
@@ -88,7 +89,9 @@ public class DumpGraphEntities implements Serializable {
        Class<E> inputClazz) {
        Utils
            .readPath(spark, inputPath, inputClazz)
-            .map(d -> mapDatasource((eu.dnetlib.dhp.schema.oaf.Datasource) d), Encoders.bean(Datasource.class))
+            .map(
+                (MapFunction<E, Datasource>) d -> mapDatasource((eu.dnetlib.dhp.schema.oaf.Datasource) d),
+                Encoders.bean(Datasource.class))
            .filter(Objects::nonNull)
            .write()
            .mode(SaveMode.Overwrite)
@@ -100,7 +103,9 @@
        Class<E> inputClazz) {
        Utils
            .readPath(spark, inputPath, inputClazz)
-            .map(p -> mapProject((eu.dnetlib.dhp.schema.oaf.Project) p), Encoders.bean(Project.class))
+            .map(
+                (MapFunction<E, Project>) p -> mapProject((eu.dnetlib.dhp.schema.oaf.Project) p),
+                Encoders.bean(Project.class))
            .write()
            .mode(SaveMode.Overwrite)
            .option("compression", "gzip")
@@ -374,13 +379,17 @@
        }

        project
-            .setProgramme(
+            .setH2020Classifications(
                Optional
-                    .ofNullable(p.getProgramme())
+                    .ofNullable(p.getH2020classification())
                    .map(
-                        programme -> programme
+                        classification -> classification
                            .stream()
-                            .map(pg -> Programme.newInstance(pg.getCode(), pg.getDescription()))
+                            .map(
+                                c -> H2020Classification
+                                    .newInstance(
+                                        c.getH2020Programme().getCode(), c.getH2020Programme().getDescription(),
+                                        c.getLevel1(), c.getLevel2(), c.getLevel3(), c.getClassification()))
                            .collect(Collectors.toList()))
                    .orElse(new ArrayList<>()));

@@ -442,7 +451,9 @@
        Class<E> inputClazz) {
        Utils
            .readPath(spark, inputPath, inputClazz)
-            .map(o -> mapOrganization((eu.dnetlib.dhp.schema.oaf.Organization) o), Encoders.bean(Organization.class))
+            .map(
+                (MapFunction<E, Organization>) o -> mapOrganization((eu.dnetlib.dhp.schema.oaf.Organization) o),
+                Encoders.bean(Organization.class))
            .write()
            .mode(SaveMode.Overwrite)
            .option("compression", "gzip")
@@ -0,0 +1,97 @@
+
+package eu.dnetlib.dhp.oa.graph.merge;
+
+import java.util.Comparator;
+
+import eu.dnetlib.dhp.schema.oaf.Qualifier;
+
+public class DatasourceCompatibilityComparator implements Comparator<Qualifier> {
+    @Override
+    public int compare(Qualifier left, Qualifier right) {
+
+        String lClass = left.getClassid();
+        String rClass = right.getClassid();
+
+        if (lClass.equals(rClass))
+            return 0;
+
+        if (lClass.equals("openaire-cris_1.1"))
+            return -1;
+        if (rClass.equals("openaire-cris_1.1"))
+            return 1;
+
+        if (lClass.equals("openaire4.0"))
+            return -1;
+        if (rClass.equals("openaire4.0"))
+            return 1;
+
+        if (lClass.equals("driver-openaire2.0"))
+            return -1;
+        if (rClass.equals("driver-openaire2.0"))
+            return 1;
+
+        if (lClass.equals("driver"))
+            return -1;
+        if (rClass.equals("driver"))
+            return 1;
+
+        if (lClass.equals("openaire2.0"))
+            return -1;
+        if (rClass.equals("openaire2.0"))
+            return 1;
+
+        if (lClass.equals("openaire3.0"))
+            return -1;
+        if (rClass.equals("openaire3.0"))
+            return 1;
+
+        if (lClass.equals("openaire2.0_data"))
+            return -1;
+        if (rClass.equals("openaire2.0_data"))
+            return 1;
+
+        if (lClass.equals("native"))
+            return -1;
+        if (rClass.equals("native"))
+            return 1;
+
+        if (lClass.equals("hostedBy"))
+            return -1;
+        if (rClass.equals("hostedBy"))
+            return 1;
+
+        if (lClass.equals("notCompatible"))
+            return -1;
+        if (rClass.equals("notCompatible"))
+            return 1;
+
+        if (lClass.equals("UNKNOWN"))
+            return -1;
+        if (rClass.equals("UNKNOWN"))
+            return 1;
+
+        // Else (but unlikely), lexicographical ordering will do.
+        return lClass.compareTo(rClass);
+    }
+
+    /*
+     * CASE WHEN (array_agg(DISTINCT COALESCE (a.compatibility_override, a.compatibility):: TEXT) @> ARRAY
+     * ['openaire-cris_1.1']) THEN 'openaire-cris_1.1@@@dnet:datasourceCompatibilityLevel' WHEN (array_agg(DISTINCT
+     * COALESCE (a.compatibility_override, a.compatibility):: TEXT) @> ARRAY ['openaire4.0']) THEN
+     * 'openaire4.0@@@dnet:datasourceCompatibilityLevel' WHEN (array_agg(DISTINCT COALESCE (a.compatibility_override,
+     * a.compatibility):: TEXT) @> ARRAY ['driver', 'openaire2.0']) THEN
+     * 'driver-openaire2.0@@@dnet:datasourceCompatibilityLevel' WHEN (array_agg(DISTINCT COALESCE
+     * (a.compatibility_override, a.compatibility) :: TEXT) @> ARRAY ['driver']) THEN
+     * 'driver@@@dnet:datasourceCompatibilityLevel' WHEN (array_agg(DISTINCT COALESCE (a.compatibility_override,
+     * a.compatibility) :: TEXT) @> ARRAY ['openaire2.0']) THEN 'openaire2.0@@@dnet:datasourceCompatibilityLevel' WHEN
+     * (array_agg(DISTINCT COALESCE (a.compatibility_override, a.compatibility) :: TEXT) @> ARRAY ['openaire3.0']) THEN
+     * 'openaire3.0@@@dnet:datasourceCompatibilityLevel' WHEN (array_agg(DISTINCT COALESCE (a.compatibility_override,
+     * a.compatibility) :: TEXT) @> ARRAY ['openaire2.0_data']) THEN
+     * 'openaire2.0_data@@@dnet:datasourceCompatibilityLevel' WHEN (array_agg(DISTINCT COALESCE
+     * (a.compatibility_override, a.compatibility) :: TEXT) @> ARRAY ['native']) THEN
+     * 'native@@@dnet:datasourceCompatibilityLevel' WHEN (array_agg(DISTINCT COALESCE (a.compatibility_override,
+     * a.compatibility) :: TEXT) @> ARRAY ['hostedBy']) THEN 'hostedBy@@@dnet:datasourceCompatibilityLevel' WHEN
+     * (array_agg(DISTINCT COALESCE (a.compatibility_override, a.compatibility) :: TEXT) @> ARRAY ['notCompatible'])
+     * THEN 'notCompatible@@@dnet:datasourceCompatibilityLevel' ELSE 'UNKNOWN@@@dnet:datasourceCompatibilityLevel' END
+     */
+}
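A minimal sketch of how the comparator is meant to be used (mirroring MergeGraphSparkJob below): the "smallest" qualifier according to the comparator is the highest compatibility level, so Collections.min picks the winner.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    import eu.dnetlib.dhp.oa.graph.merge.DatasourceCompatibilityComparator;
    import eu.dnetlib.dhp.schema.oaf.Qualifier;

    public class CompatibilityPickExample {
        private static Qualifier compatibility(final String classid) {
            final Qualifier q = new Qualifier();
            q.setClassid(classid);
            return q;
        }

        public static void main(String[] args) {
            final List<Qualifier> candidates = Arrays
                .asList(compatibility("openaire2.0"), compatibility("driver"));

            final Qualifier best = Collections.min(candidates, new DatasourceCompatibilityComparator());
            System.out.println(best.getClassid()); // driver, which the comparator ranks above openaire2.0
        }
    }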
@@ -3,8 +3,9 @@ package eu.dnetlib.dhp.oa.graph.merge;

 import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

-import java.util.Objects;
-import java.util.Optional;
+import java.util.*;
+
+import javax.xml.crypto.Data;

 import org.apache.commons.io.IOUtils;
 import org.apache.spark.SparkConf;
@@ -14,6 +15,7 @@ import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Encoders;
 import org.apache.spark.sql.SaveMode;
 import org.apache.spark.sql.SparkSession;
+import org.jetbrains.annotations.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -39,6 +41,14 @@ public class MergeGraphSparkJob {

    private static final String PRIORITY_DEFAULT = "BETA"; // BETA | PROD

+    private static final Datasource DATASOURCE = new Datasource();
+
+    static {
+        Qualifier compatibility = new Qualifier();
+        compatibility.setClassid("UNKNOWN");
+        DATASOURCE.setOpenairecompatibility(compatibility);
+    }
+
    public static void main(String[] args) throws Exception {

        String jsonConfiguration = IOUtils
@@ -104,6 +114,10 @@
            .map((MapFunction<Tuple2<Tuple2<String, P>, Tuple2<String, B>>, P>) value -> {
                Optional<P> p = Optional.ofNullable(value._1()).map(Tuple2::_2);
                Optional<B> b = Optional.ofNullable(value._2()).map(Tuple2::_2);
+
+                if (p.orElse((P) b.orElse((B) DATASOURCE)) instanceof Datasource) {
+                    return mergeDatasource(p, b);
+                }
                switch (priority) {
                    default:
                    case "BETA":
@@ -119,6 +133,36 @@
            .json(outputPath);
    }

+    /**
+     * Datasources involved in the merge operation do not obey the infra precedence policy, but rely on a custom
+     * behaviour that, given two datasources from BETA and PROD, returns the one from PROD with the highest
+     * compatibility among the two.
+     *
+     * @param p datasource from PROD
+     * @param b datasource from BETA
+     * @param <P> Datasource class type from PROD
+     * @param <B> Datasource class type from BETA
+     * @return the datasource from PROD with the highest compatibility level.
+     */
+    protected static <P extends Oaf, B extends Oaf> P mergeDatasource(Optional<P> p, Optional<B> b) {
+        if (p.isPresent() & !b.isPresent()) {
+            return p.get();
+        }
+        if (b.isPresent() & !p.isPresent()) {
+            return (P) b.get();
+        }
+        if (!b.isPresent() & !p.isPresent()) {
+            return null; // unlikely, at least one should be produced by the join operation
+        }
+
+        Datasource dp = (Datasource) p.get();
+        Datasource db = (Datasource) b.get();
+
+        List<Qualifier> list = Arrays.asList(dp.getOpenairecompatibility(), db.getOpenairecompatibility());
+        dp.setOpenairecompatibility(Collections.min(list, new DatasourceCompatibilityComparator()));
+        return (P) dp;
+    }
+
    private static <P extends Oaf, B extends Oaf> P mergeWithPriorityToPROD(Optional<P> p, Optional<B> b) {
        if (b.isPresent() & !p.isPresent()) {
            return (P) b.get();
@ -38,13 +38,11 @@ import java.io.IOException;
|
||||||
import java.sql.Array;
|
import java.sql.Array;
|
||||||
import java.sql.ResultSet;
|
import java.sql.ResultSet;
|
||||||
import java.sql.SQLException;
|
import java.sql.SQLException;
|
||||||
import java.util.ArrayList;
|
import java.util.*;
|
||||||
import java.util.Arrays;
|
|
||||||
import java.util.Date;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.function.Consumer;
|
import java.util.function.Consumer;
|
||||||
import java.util.function.Function;
|
import java.util.function.Function;
|
||||||
import java.util.function.Predicate;
|
import java.util.function.Predicate;
|
||||||
|
import java.util.stream.Collectors;
|
||||||
|
|
||||||
import org.apache.commons.io.IOUtils;
|
import org.apache.commons.io.IOUtils;
|
||||||
import org.apache.commons.lang3.StringUtils;
|
import org.apache.commons.lang3.StringUtils;
|
||||||
|
@ -197,7 +195,14 @@ public class MigrateDbEntitiesApplication extends AbstractMigrationApplication i
|
||||||
final Datasource ds = new Datasource();
|
final Datasource ds = new Datasource();
|
||||||
|
|
||||||
ds.setId(createOpenaireId(10, rs.getString("datasourceid"), true));
|
ds.setId(createOpenaireId(10, rs.getString("datasourceid"), true));
|
||||||
ds.setOriginalId(Arrays.asList((String[]) rs.getArray("identities").getArray()));
|
ds
|
||||||
|
.setOriginalId(
|
||||||
|
Arrays
|
||||||
|
.asList(
|
||||||
|
(String[]) rs.getArray("identities").getArray())
|
||||||
|
.stream()
|
||||||
|
.filter(StringUtils::isNotBlank)
|
||||||
|
.collect(Collectors.toList()));
|
||||||
ds
|
ds
|
||||||
.setCollectedfrom(
|
.setCollectedfrom(
|
||||||
listKeyValues(
|
listKeyValues(
|
||||||
|
@@ -243,7 +248,13 @@ public class MigrateDbEntitiesApplication extends AbstractMigrationApplication i
 			ds.setCertificates(field(rs.getString("certificates"), info));
 			ds.setPolicies(new ArrayList<>()); // The sql query returns an empty array
 			ds
-				.setJournal(prepareJournal(rs.getString("officialname"), rs.getString("journal"), info)); // Journal
+				.setJournal(
+					journal(
+						rs.getString("officialname"),
+						rs.getString("issnPrinted"),
+						rs.getString("issnOnline"),
+						rs.getString("issnLinking"),
+						info)); // Journal
 			ds.setDataInfo(info);
 			ds.setLastupdatetimestamp(lastUpdateTimestamp);
 
@@ -567,21 +578,15 @@ public class MigrateDbEntitiesApplication extends AbstractMigrationApplication i
 		return res;
 	}
 
-	private Journal prepareJournal(final String name, final String sj, final DataInfo info) {
-		if (StringUtils.isNotBlank(sj)) {
-			final String[] arr = sj.split("@@@");
-			if (arr.length == 3) {
-				final String issn = StringUtils.isNotBlank(arr[0]) ? arr[0].trim() : null;
-				final String eissn = StringUtils.isNotBlank(arr[1]) ? arr[1].trim() : null;
-				final String lissn = StringUtils.isNotBlank(arr[2]) ? arr[2].trim() : null;
-				if (issn != null || eissn != null || lissn != null) {
-					return journal(name, issn, eissn, eissn, null, null, null, null, null, null, null, info);
-				}
-			}
-		}
-		return null;
-	}
+	private Journal prepareJournal(final ResultSet rs, final DataInfo info) throws SQLException {
+		if (Objects.isNull(rs)) {
+			return null;
+		} else {
+			return journal(
+				rs.getString("officialname"), rs.getString("issnPrinted"), rs.getString("issnOnline"),
+				rs.getString("issnLinking"), info);
+		}
+	}
 
 	@Override
@@ -153,6 +153,27 @@ public class OafMapperUtils {
 		return p;
 	}
 
+	public static Journal journal(
+		final String name,
+		final String issnPrinted,
+		final String issnOnline,
+		final String issnLinking,
+		final DataInfo dataInfo) {
+		return journal(
+			name,
+			issnPrinted,
+			issnOnline,
+			issnLinking,
+			null,
+			null,
+			null,
+			null,
+			null,
+			null,
+			null,
+			dataInfo);
+	}
+
 	public static Journal journal(
 		final String name,
 		final String issnPrinted,
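Note: the added five-argument journal(...) overload simply forwards to the full-signature variant, passing null for the remaining fields. The standalone sketch below only illustrates this telescoping-overload idea; the parameter names of the long form (ep, iss, sp, vol, edition, conference place/date) are assumptions made for illustration, and plain Strings stand in for the real Journal/DataInfo types.

    // Sketch only: the short form delegates to the full form with nulls for the unused fields.
    public class JournalOverloadSketch {

        static String journal(String name, String issnPrinted, String issnOnline, String issnLinking, String dataInfo) {
            return journal(name, issnPrinted, issnOnline, issnLinking,
                null, null, null, null, null, null, null, dataInfo);
        }

        // Assumed full signature, used only to show how the nulls line up.
        static String journal(String name, String issnPrinted, String issnOnline, String issnLinking,
            String ep, String iss, String sp, String vol, String edition, String conferencePlace,
            String conferenceDate, String dataInfo) {
            return name + " [" + issnPrinted + "/" + issnOnline + "/" + issnLinking + "]";
        }

        public static void main(String[] args) {
            // hypothetical values, mirroring the issnPrinted/issnOnline/issnLinking columns added to the SQL query
            System.out.println(journal("Infectious Diseases", "2374-4235", "2374-4243", null, "info"));
        }
    }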
@@ -1,4 +1,5 @@
 package eu.dnetlib.dhp.sx.ebi
+import eu.dnetlib.dhp.oa.merge.AuthorMerger
 import eu.dnetlib.dhp.schema.oaf.{Publication, Relation, Dataset => OafDataset}
 import eu.dnetlib.dhp.schema.scholexplorer.{DLIDataset, DLIPublication, DLIUnknown}
 import org.apache.spark.sql.{Encoder, Encoders}
@@ -14,6 +15,7 @@ object EBIAggregator {
 
     override def reduce(b: OafDataset, a: (String, OafDataset)): OafDataset = {
       b.mergeFrom(a._2)
+      b.setAuthor(AuthorMerger.mergeAuthor(a._2.getAuthor, b.getAuthor))
       if (b.getId == null)
         b.setId(a._2.getId)
       b
@@ -22,6 +24,7 @@ object EBIAggregator {
 
     override def merge(wx: OafDataset, wy: OafDataset): OafDataset = {
       wx.mergeFrom(wy)
+      wx.setAuthor(AuthorMerger.mergeAuthor(wy.getAuthor, wx.getAuthor))
       if(wx.getId == null && wy.getId.nonEmpty)
         wx.setId(wy.getId)
       wx
@@ -35,8 +38,6 @@ object EBIAggregator {
     Encoders.kryo(classOf[OafDataset])
   }
 
-
-
   def getDLIUnknownAggregator(): Aggregator[(String, DLIUnknown), DLIUnknown, DLIUnknown] = new Aggregator[(String, DLIUnknown), DLIUnknown, DLIUnknown]{
 
     override def zero: DLIUnknown = new DLIUnknown()
@@ -69,6 +70,7 @@ object EBIAggregator {
 
     override def reduce(b: DLIDataset, a: (String, DLIDataset)): DLIDataset = {
       b.mergeFrom(a._2)
+      b.setAuthor(AuthorMerger.mergeAuthor(a._2.getAuthor, b.getAuthor))
       if (b.getId == null)
         b.setId(a._2.getId)
       b
@@ -76,6 +78,7 @@ object EBIAggregator {
 
     override def merge(wx: DLIDataset, wy: DLIDataset): DLIDataset = {
       wx.mergeFrom(wy)
+      wx.setAuthor(AuthorMerger.mergeAuthor(wy.getAuthor, wx.getAuthor))
       if(wx.getId == null && wy.getId.nonEmpty)
         wx.setId(wy.getId)
       wx
@@ -96,6 +99,8 @@ object EBIAggregator {
 
     override def reduce(b: DLIPublication, a: (String, DLIPublication)): DLIPublication = {
       b.mergeFrom(a._2)
+      b.setAuthor(AuthorMerger.mergeAuthor(a._2.getAuthor, b.getAuthor))
+
       if (b.getId == null)
         b.setId(a._2.getId)
       b
@@ -104,6 +109,7 @@ object EBIAggregator {
 
     override def merge(wx: DLIPublication, wy: DLIPublication): DLIPublication = {
       wx.mergeFrom(wy)
+      wx.setAuthor(AuthorMerger.mergeAuthor(wy.getAuthor, wx.getAuthor))
       if(wx.getId == null && wy.getId.nonEmpty)
         wx.setId(wy.getId)
       wx
@@ -124,6 +130,7 @@ object EBIAggregator {
 
     override def reduce(b: Publication, a: (String, Publication)): Publication = {
       b.mergeFrom(a._2)
+      b.setAuthor(AuthorMerger.mergeAuthor(a._2.getAuthor, b.getAuthor))
       if (b.getId == null)
         b.setId(a._2.getId)
       b
@@ -132,6 +139,7 @@ object EBIAggregator {
 
     override def merge(wx: Publication, wy: Publication): Publication = {
       wx.mergeFrom(wy)
+      wx.setAuthor(AuthorMerger.mergeAuthor(wy.getAuthor, wx.getAuthor))
       if(wx.getId == null && wy.getId.nonEmpty)
         wx.setId(wy.getId)
       wx
@@ -145,7 +153,6 @@ object EBIAggregator {
     Encoders.kryo(classOf[Publication])
   }
 
-
   def getRelationAggregator(): Aggregator[(String, Relation), Relation, Relation] = new Aggregator[(String, Relation), Relation, Relation]{
 
     override def zero: Relation = new Relation()
@@ -166,10 +173,4 @@ object EBIAggregator {
     override def outputEncoder: Encoder[Relation] =
       Encoders.kryo(classOf[Relation])
   }
-
-
-
-
-
-
 }
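Note: each reduce/merge in EBIAggregator now re-applies AuthorMerger.mergeAuthor right after mergeFrom, so the accumulated record does not lose the richer author list of the record being folded in. The standalone sketch below only illustrates that idea; mergeAuthors is a hypothetical stand-in for the real AuthorMerger, which would also enrich individual entries rather than just pick a list.

    import java.util.*;

    // Sketch only: a hypothetical author-merging step applied after two records are combined,
    // mirroring the b.setAuthor(AuthorMerger.mergeAuthor(a.getAuthor, b.getAuthor)) calls above.
    public class AuthorMergeSketch {

        // Prefer the non-empty list; a real merger would also enrich entries (e.g. with ORCIDs).
        static List<String> mergeAuthors(List<String> incoming, List<String> accumulated) {
            if (accumulated == null || accumulated.isEmpty()) {
                return incoming == null ? Collections.emptyList() : incoming;
            }
            return accumulated;
        }

        public static void main(String[] args) {
            List<String> accumulated = Collections.emptyList();                  // record built so far
            List<String> incoming = Arrays.asList("Cao, Qing", "Hao, Zhenchun"); // record being folded in
            System.out.println(mergeAuthors(incoming, accumulated));             // [Cao, Qing, Hao, Zhenchun]
        }
    }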
@@ -1,10 +1,10 @@
 DROP VIEW IF EXISTS ${hiveDbName}.result;
 
-CREATE VIEW IF NOT EXISTS result as
-select id, dateofcollection, title, publisher, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, embargoenddate, resourcetype, context, externalreference, instance from ${hiveDbName}.publication p
+CREATE VIEW IF NOT EXISTS ${hiveDbName}.result as
+select id, originalid, dateofcollection, title, publisher, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, relevantdate, embargoenddate, resourcetype, context, externalreference, instance from ${hiveDbName}.publication p
 union all
-select id, dateofcollection, title, publisher, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, embargoenddate, resourcetype, context, externalreference, instance from ${hiveDbName}.dataset d
+select id, originalid, dateofcollection, title, publisher, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, relevantdate, embargoenddate, resourcetype, context, externalreference, instance from ${hiveDbName}.dataset d
 union all
-select id, dateofcollection, title, publisher, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, embargoenddate, resourcetype, context, externalreference, instance from ${hiveDbName}.software s
+select id, originalid, dateofcollection, title, publisher, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, relevantdate, embargoenddate, resourcetype, context, externalreference, instance from ${hiveDbName}.software s
 union all
-select id, dateofcollection, title, publisher, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, embargoenddate, resourcetype, context, externalreference, instance from ${hiveDbName}.otherresearchproduct o;
+select id, originalid, dateofcollection, title, publisher, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, relevantdate, embargoenddate, resourcetype, context, externalreference, instance from ${hiveDbName}.otherresearchproduct o;
@@ -3,7 +3,7 @@ SELECT
 	d.id || array_agg(distinct di.pid) AS identities,
 	d.officialname AS officialname,
 	d.englishname AS englishname,
 	d.contactemail AS contactemail,
 	CASE
 		WHEN (array_agg(DISTINCT COALESCE (a.compatibility_override, a.compatibility):: TEXT) @> ARRAY ['openaire-cris_1.1'])
 			THEN
@@ -84,8 +84,10 @@ SELECT
 	dc.id AS collectedfromid,
 	dc.officialname AS collectedfromname,
 	d.typology||'@@@dnet:datasource_typologies' AS datasourcetype,
 	'sysimport:crosswalk:entityregistry@@@dnet:provenance_actions' AS provenanceaction,
-	d.issn || ' @@@ ' || d.eissn || ' @@@ ' || d.lissn AS journal
+	d.issn AS issnPrinted,
+	d.eissn AS issnOnline,
+	d.lissn AS issnLinking
 
 FROM dsm_datasources d
@@ -62,7 +62,7 @@ public class CleaningFunctionTest {
 		assertTrue(p_in instanceof Result);
 		assertTrue(p_in instanceof Publication);
 
-		Publication p_out = OafCleaner.apply(CleanGraphSparkJob.fixVocabularyNames(p_in), mapping);
+		Publication p_out = OafCleaner.apply(CleaningFunctions.fixVocabularyNames(p_in), mapping);
 
 		assertNotNull(p_out);
 
@@ -88,7 +88,7 @@ public class CleaningFunctionTest {
 			.map(p -> p.getQualifier())
 			.allMatch(q -> pidTerms.contains(q.getClassid())));
 
-		Publication p_defaults = CleanGraphSparkJob.fixDefaults(p_out);
+		Publication p_defaults = CleaningFunctions.fixDefaults(p_out);
 		assertEquals("CLOSED", p_defaults.getBestaccessright().getClassid());
 		assertNull(p_out.getPublisher());
 
@@ -0,0 +1,84 @@
+
+package eu.dnetlib.dhp.oa.graph.merge;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.io.IOException;
+import java.util.Optional;
+
+import org.apache.commons.io.IOUtils;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import eu.dnetlib.dhp.schema.oaf.Datasource;
+
+public class MergeGraphSparkJobTest {
+
+	private ObjectMapper mapper;
+
+	@BeforeEach
+	public void setUp() {
+		mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+	}
+
+	@Test
+	public void testMergeDatasources() throws IOException {
+		assertEquals(
+			"openaire-cris_1.1",
+			MergeGraphSparkJob
+				.mergeDatasource(
+					d("datasource_cris.json"),
+					d("datasource_UNKNOWN.json"))
+				.getOpenairecompatibility()
+				.getClassid());
+		assertEquals(
+			"openaire-cris_1.1",
+			MergeGraphSparkJob
+				.mergeDatasource(
+					d("datasource_UNKNOWN.json"),
+					d("datasource_cris.json"))
+				.getOpenairecompatibility()
+				.getClassid());
+		assertEquals(
+			"driver-openaire2.0",
+			MergeGraphSparkJob
+				.mergeDatasource(
+					d("datasource_native.json"),
+					d("datasource_driver-openaire2.0.json"))
+				.getOpenairecompatibility()
+				.getClassid());
+		assertEquals(
+			"driver-openaire2.0",
+			MergeGraphSparkJob
+				.mergeDatasource(
+					d("datasource_driver-openaire2.0.json"),
+					d("datasource_native.json"))
+				.getOpenairecompatibility()
+				.getClassid());
+		assertEquals(
+			"openaire4.0",
+			MergeGraphSparkJob
+				.mergeDatasource(
+					d("datasource_notCompatible.json"),
+					d("datasource_openaire4.0.json"))
+				.getOpenairecompatibility()
+				.getClassid());
+		assertEquals(
+			"notCompatible",
+			MergeGraphSparkJob
+				.mergeDatasource(
+					d("datasource_notCompatible.json"),
+					d("datasource_UNKNOWN.json"))
+				.getOpenairecompatibility()
+				.getClassid());
+	}
+
+	private Optional<Datasource> d(String file) throws IOException {
+		String json = IOUtils.toString(getClass().getResourceAsStream(file));
+		return Optional.of(mapper.readValue(json, Datasource.class));
+	}
+
+}
@@ -304,9 +304,40 @@ public class MappersTest {
 		assertValidId(d.getCollectedfrom().get(0).getKey());
 		assertTrue(StringUtils.isNotBlank(d.getTitle().get(0).getValue()));
 		assertEquals(1, d.getAuthor().size());
-		assertEquals(0, d.getSubject().size());
+		assertEquals(1, d.getSubject().size());
 		assertEquals(1, d.getInstance().size());
 		assertEquals(1, d.getPid().size());
+		assertNotNull(d.getInstance().get(0).getUrl());
+	}
+
+	@Test
+	void testClaimFromCrossref() throws IOException {
+		final String xml = IOUtils.toString(getClass().getResourceAsStream("oaf_claim_crossref.xml"));
+		final List<Oaf> list = new OafToOafMapper(vocs, false).processMdRecord(xml);
+
+		System.out.println("***************");
+		System.out.println(new ObjectMapper().writeValueAsString(list));
+		System.out.println("***************");
+
+		final Publication p = (Publication) list.get(0);
+		assertValidId(p.getId());
+		assertValidId(p.getCollectedfrom().get(0).getKey());
+		System.out.println(p.getTitle().get(0).getValue());
+		assertTrue(StringUtils.isNotBlank(p.getTitle().get(0).getValue()));
+	}
+
+	@Test
+	void testODFRecord() throws IOException {
+		final String xml = IOUtils.toString(getClass().getResourceAsStream("odf_record.xml"));
+		List<Oaf> list = new OdfToOafMapper(vocs, false).processMdRecord(xml);
+		System.out.println("***************");
+		System.out.println(new ObjectMapper().writeValueAsString(list));
+		System.out.println("***************");
+		final Dataset p = (Dataset) list.get(0);
+		assertValidId(p.getId());
+		assertValidId(p.getCollectedfrom().get(0).getKey());
+		System.out.println(p.getTitle().get(0).getValue());
+		assertTrue(StringUtils.isNotBlank(p.getTitle().get(0).getValue()));
 	}
 
 	private void assertValidId(final String id) {
@@ -73,12 +73,16 @@ public class MigrateDbEntitiesApplicationTest {
 		final Datasource ds = (Datasource) list.get(0);
 		assertValidId(ds.getId());
 		assertValidId(ds.getCollectedfrom().get(0).getKey());
-		assertEquals(ds.getOfficialname().getValue(), getValueAsString("officialname", fields));
-		assertEquals(ds.getEnglishname().getValue(), getValueAsString("englishname", fields));
-		assertEquals(ds.getContactemail().getValue(), getValueAsString("contactemail", fields));
-		assertEquals(ds.getWebsiteurl().getValue(), getValueAsString("websiteurl", fields));
-		assertEquals(ds.getNamespaceprefix().getValue(), getValueAsString("namespaceprefix", fields));
-		assertEquals(ds.getCollectedfrom().get(0).getValue(), getValueAsString("collectedfromname", fields));
+		assertEquals(getValueAsString("officialname", fields), ds.getOfficialname().getValue());
+		assertEquals(getValueAsString("englishname", fields), ds.getEnglishname().getValue());
+		assertEquals(getValueAsString("contactemail", fields), ds.getContactemail().getValue());
+		assertEquals(getValueAsString("websiteurl", fields), ds.getWebsiteurl().getValue());
+		assertEquals(getValueAsString("namespaceprefix", fields), ds.getNamespaceprefix().getValue());
+		assertEquals(getValueAsString("collectedfromname", fields), ds.getCollectedfrom().get(0).getValue());
+		assertEquals(getValueAsString("officialname", fields), ds.getJournal().getName());
+		assertEquals(getValueAsString("issnPrinted", fields), ds.getJournal().getIssnPrinted());
+		assertEquals(getValueAsString("issnOnline", fields), ds.getJournal().getIssnOnline());
+		assertEquals(getValueAsString("issnLinking", fields), ds.getJournal().getIssnLinking());
 	}
 
 	@Test
@@ -92,9 +96,11 @@ public class MigrateDbEntitiesApplicationTest {
 		final Project p = (Project) list.get(0);
 		assertValidId(p.getId());
 		assertValidId(p.getCollectedfrom().get(0).getKey());
-		assertEquals(p.getAcronym().getValue(), getValueAsString("acronym", fields));
-		assertEquals(p.getTitle().getValue(), getValueAsString("title", fields));
-		assertEquals(p.getCollectedfrom().get(0).getValue(), getValueAsString("collectedfromname", fields));
+		assertEquals(getValueAsString("acronym", fields), p.getAcronym().getValue());
+		assertEquals(getValueAsString("title", fields), p.getTitle().getValue());
+		assertEquals(getValueAsString("collectedfromname", fields), p.getCollectedfrom().get(0).getValue());
+		assertEquals(getValueAsFloat("fundedamount", fields), p.getFundedamount());
+		assertEquals(getValueAsFloat("totalcost", fields), p.getTotalcost());
 	}
 
 	@Test
@@ -110,14 +116,14 @@ public class MigrateDbEntitiesApplicationTest {
 		final Organization o = (Organization) list.get(0);
 		assertValidId(o.getId());
 		assertValidId(o.getCollectedfrom().get(0).getKey());
-		assertEquals(o.getLegalshortname().getValue(), getValueAsString("legalshortname", fields));
-		assertEquals(o.getLegalname().getValue(), getValueAsString("legalname", fields));
-		assertEquals(o.getWebsiteurl().getValue(), getValueAsString("websiteurl", fields));
-		assertEquals(o.getCountry().getClassid(), getValueAsString("country", fields).split("@@@")[0]);
-		assertEquals(o.getCountry().getClassname(), getValueAsString("country", fields).split("@@@")[0]);
-		assertEquals(o.getCountry().getSchemeid(), getValueAsString("country", fields).split("@@@")[1]);
-		assertEquals(o.getCountry().getSchemename(), getValueAsString("country", fields).split("@@@")[1]);
-		assertEquals(o.getCollectedfrom().get(0).getValue(), getValueAsString("collectedfromname", fields));
+		assertEquals(getValueAsString("legalshortname", fields), o.getLegalshortname().getValue());
+		assertEquals(getValueAsString("legalname", fields), o.getLegalname().getValue());
+		assertEquals(getValueAsString("websiteurl", fields), o.getWebsiteurl().getValue());
+		assertEquals(getValueAsString("country", fields).split("@@@")[0], o.getCountry().getClassid());
+		assertEquals(getValueAsString("country", fields).split("@@@")[0], o.getCountry().getClassname());
+		assertEquals(getValueAsString("country", fields).split("@@@")[1], o.getCountry().getSchemeid());
+		assertEquals(getValueAsString("country", fields).split("@@@")[1], o.getCountry().getSchemename());
+		assertEquals(getValueAsString("collectedfromname", fields), o.getCollectedfrom().get(0).getValue());
 	}
 
 	@Test
@@ -322,12 +328,20 @@ public class MigrateDbEntitiesApplicationTest {
 	}
 
 	private String getValueAsString(final String name, final List<TypedField> fields) {
+		return getValueAs(name, fields);
+	}
+
+	private Float getValueAsFloat(final String name, final List<TypedField> fields) {
+		return new Float(getValueAs(name, fields).toString());
+	}
+
+	private <T> T getValueAs(final String name, final List<TypedField> fields) {
 		return fields
 			.stream()
 			.filter(f -> f.getField().equals(name))
 			.map(TypedField::getValue)
 			.filter(Objects::nonNull)
-			.map(o -> o.toString())
+			.map(o -> (T) o)
 			.findFirst()
 			.get();
 	}
@@ -0,0 +1,54 @@
+package eu.dnetlib.dhp.sx.graph
+
+import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}
+import eu.dnetlib.dhp.schema.scholexplorer.DLIPublication
+import eu.dnetlib.dhp.sx.ebi.EBIAggregator
+import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession}
+import org.junit.jupiter.api.Assertions._
+import org.junit.jupiter.api.Test
+
+import scala.io.Source
+
+class SparkScholexplorerAggregationTest {
+
+  @Test
+  def testFunderRelationshipsMapping(): Unit = {
+    val publications = Source.fromInputStream(getClass.getResourceAsStream("publication.json")).mkString
+
+    var s: List[DLIPublication] = List[DLIPublication]()
+
+    val m: ObjectMapper = new ObjectMapper()
+
+    m.enable(SerializationFeature.INDENT_OUTPUT)
+
+    for (line <- publications.lines) {
+      s = m.readValue(line, classOf[DLIPublication]) :: s
+    }
+
+    implicit val pubEncoder: Encoder[DLIPublication] = Encoders.kryo[DLIPublication]
+    val spark: SparkSession = SparkSession.builder().appName("Test").master("local[*]").getOrCreate()
+
+    val ds: Dataset[DLIPublication] = spark.createDataset(spark.sparkContext.parallelize(s)).as[DLIPublication]
+
+    val unique = ds.map(d => (d.getId, d))(Encoders.tuple(Encoders.STRING, pubEncoder))
+      .groupByKey(_._1)(Encoders.STRING)
+      .agg(EBIAggregator.getDLIPublicationAggregator().toColumn)
+      .map(p => p._2)
+
+    val uniquePubs: DLIPublication = unique.first()
+
+    s.foreach(pp => assertFalse(pp.getAuthor.isEmpty))
+
+    assertNotNull(uniquePubs.getAuthor)
+    assertFalse(uniquePubs.getAuthor.isEmpty)
+
+  }
+
+}
@@ -2,4 +2,5 @@
 package eu.dnetlib.dhp.sx.graph;
+
 public class SparkScholexplorerGraphImporterTest {
 
 }
@@ -27,6 +27,28 @@
         "schemename": "dnet:pid_types"
       },
       "value": "0000-0001-9613-6639"
+    },
+    {
+      "dataInfo": {
+        "deletedbyinference": false,
+        "inferenceprovenance": "",
+        "inferred": false,
+        "invisible": false,
+        "provenanceaction": {
+          "classid": "sysimport:crosswalk:datasetarchive",
+          "classname": "sysimport:crosswalk:datasetarchive",
+          "schemeid": "dnet:provenanceActions",
+          "schemename": "dnet:provenanceActions"
+        },
+        "trust": "0.9"
+      },
+      "qualifier": {
+        "classid": "ORCID12",
+        "classname": "ORCID12",
+        "schemeid": "dnet:pid_types",
+        "schemename": "dnet:pid_types"
+      },
+      "value": "https://orcid.org/0000-0001-9613-6639"
     }
   ],
   "rank": 1,
@@ -91,8 +113,7 @@
   ],
   "fullname": "Barry, Peter S.",
   "name": "Peter S.",
-  "pid": [
-  ],
+  "pid": null,
   "rank": 3,
   "surname": "Barry"
 },
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "UNKNOWN" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "openaire-cris_1.1" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "driver-openaire2.0" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "hostedBy" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "native" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "notCompatible" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "openaire2.0" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "openaire2.0_data" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "openaire3.0" }}
@@ -0,0 +1 @@
+{ "id": "10|274269ac6f3b::2a2e2793b500f3f7b47ef24b1a9277b7", "openairecompatibility": { "classid": "openaire4.0" }}
@@ -228,8 +228,18 @@
     "value": "sysimport:crosswalk:entityregistry@@@dnet:provenance_actions"
   },
   {
-    "field": "journal",
+    "field": "issnPrinted",
     "type": "string",
-    "value": "2579-5449 @@@ 2597-6540 @@@ "
+    "value": "2579-5449"
+  },
+  {
+    "field": "issnOnline",
+    "type": "string",
+    "value": "2579-5448"
+  },
+  {
+    "field": "issnLinking",
+    "type": "string",
+    "value": "2579-5447"
   }
 ]
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<record xmlns:dc="http://purl.org/dc/elements/1.1/"
+    xmlns:dr="http://www.driver-repository.eu/namespace/dr"
+    xmlns:dri="http://www.driver-repository.eu/namespace/dri"
+    xmlns:oaf="http://namespace.openaire.eu/oaf"
+    xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+    <header xmlns="http://namespace.openaire.eu/">
+        <dri:objIdentifier>userclaim___::7f0f7807f17db50e5c2b5c452ccaf06d</dri:objIdentifier>
+        <dri:recordIdentifier>userclaim___::7f0f7807f17db50e5c2b5c452ccaf06d</dri:recordIdentifier>
+        <dri:dateOfCollection>2020-08-06T07:04:09.62Z</dri:dateOfCollection>
+        <dri:mdFormat/>
+        <dri:mdFormatInterpretation/>
+        <dri:repositoryId/>
+        <dr:objectIdentifier/>
+        <dr:dateOfCollection/>
+        <dr:dateOfTransformation>2020-08-06T07:20:57.911Z</dr:dateOfTransformation>
+        <oaf:datasourceprefix>openaire____</oaf:datasourceprefix>
+    </header>
+    <metadata xmlns="http://namespace.openaire.eu/">
+        <dc:title>A case report of serious haemolysis in a glucose-6-phosphate dehydrogenase-deficient COVID-19 patient receiving hydroxychloroquine</dc:title>
+        <dc:creator>Maillart, E.</dc:creator>
+        <dc:creator>Leemans, S.</dc:creator>
+        <dc:creator>Van Noten, H.</dc:creator>
+        <dc:creator>Vandergraesen, T.</dc:creator>
+        <dc:creator>Mahadeb, B.</dc:creator>
+        <dc:creator>Salaouatchi, M. T.</dc:creator>
+        <dc:creator>De Bels, D.</dc:creator>
+        <dc:creator>Clevenbergh, P.</dc:creator>
+        <dc:date/>
+        <dc:identifier>http://dx.doi.org/10.1080/23744235.2020.1774644</dc:identifier>
+        <dc:language/>
+        <dc:publisher>Informa UK Limited</dc:publisher>
+        <dc:source>Crossref</dc:source>
+        <dc:source>Infectious Diseases</dc:source>
+        <dc:subject>Microbiology (medical)</dc:subject>
+        <dc:subject>General Immunology and Microbiology</dc:subject>
+        <dc:subject>Infectious Diseases</dc:subject>
+        <dc:subject>General Medicine</dc:subject>
+        <dc:type>journal-article</dc:type>
+        <dr:CobjCategory type="publication">0001</dr:CobjCategory>
+        <oaf:dateAccepted>2020-06-04</oaf:dateAccepted>
+        <oaf:projectid/>
+        <oaf:accessrights>UNKNOWN</oaf:accessrights>
+        <oaf:hostedBy
+            id="openaire____::1256f046-bf1f-4afc-8b47-d0b147148b18" name="Unknown Repository"/>
+        <oaf:collectedFrom id="openaire____::crossref" name="Crossref"/>
+        <oaf:identifier identifierType="doi">10.1080/23744235.2020.1774644</oaf:identifier>
+        <oaf:journal eissn="2374-4243" ep="3" iss="" issn="2374-4235" sp="1" vol="">Infectious Diseases</oaf:journal>
+    </metadata>
+    <about xmlns:oai="http://www.openarchives.org/OAI/2.0/">
+        <provenance xmlns="http://www.openarchives.org/OAI/2.0/provenance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/provenance http://www.openarchives.org/OAI/2.0/provenance.xsd">
+            <originDescription altered="true" harvestDate="2020-08-06T07:04:09.62Z">
+                <baseURL>file%3A%2F%2F%2Fsrv%2Fclaims%2Frecords%2Fpublication%2Fcrossref</baseURL>
+                <identifier/>
+                <datestamp/>
+                <metadataNamespace/>
+            </originDescription>
+        </provenance>
+        <oaf:datainfo>
+            <oaf:inferred>false</oaf:inferred>
+            <oaf:deletedbyinference>false</oaf:deletedbyinference>
+            <oaf:trust>0.9</oaf:trust>
+            <oaf:inferenceprovenance/>
+            <oaf:provenanceaction classid="user:claim" classname="user:claim"
+                schemeid="dnet:provenanceActions" schemename="dnet:provenanceActions"/>
+        </oaf:datainfo>
+    </about>
+</record>
@@ -1,77 +1,75 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <record xmlns:dr="http://www.driver-repository.eu/namespace/dr"
-    xmlns:oaf="http://namespace.openaire.eu/oaf" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+    xmlns:oaf="http://namespace.openaire.eu/oaf"
+    xmlns:oai="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
     <oai:header xmlns="http://namespace.openaire.eu/"
         xmlns:dc="http://purl.org/dc/elements/1.1/"
-        xmlns:dri="http://www.driver-repository.eu/namespace/dri"
-        xmlns:oai="http://www.openarchives.org/OAI/2.0/" xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance">
-        <dri:objIdentifier>r3f5b9831893::cca7367159bc3ff90cd2f75bf9dc21c4</dri:objIdentifier>
-        <dri:recordIdentifier>oai:nakala.fr:hdl_11280_847e01df</dri:recordIdentifier>
-        <dri:dateOfCollection>2020-08-01T00:16:24.742Z</dri:dateOfCollection>
+        xmlns:dri="http://www.driver-repository.eu/namespace/dri" xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance">
+        <dri:objIdentifier>r3f5b9831893::01a497c6c6b44289c52dcdf22b6c0fc0</dri:objIdentifier>
+        <dri:recordIdentifier>oai:nakala.fr:hdl_11280_50f302c6</dri:recordIdentifier>
+        <dri:dateOfCollection>2020-10-03T06:06:52.228Z</dri:dateOfCollection>
         <oaf:datasourceprefix>r3f5b9831893</oaf:datasourceprefix>
-        <identifier xmlns="http://www.openarchives.org/OAI/2.0/">oai:nakala.fr:hdl_11280_847e01df</identifier>
-        <datestamp xmlns="http://www.openarchives.org/OAI/2.0/">2020-06-08T01:01:38Z</datestamp>
-        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">hdl_11280_2b09fc10</setSpec>
-        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">hdl_11280_c1bc48d0</setSpec>
-        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">hdl_11280_57c8db3a</setSpec>
-        <dr:dateOfTransformation>2020-08-01T00:31:35.625Z</dr:dateOfTransformation>
+        <identifier xmlns="http://www.openarchives.org/OAI/2.0/">oai:nakala.fr:hdl_11280_50f302c6</identifier>
+        <datestamp xmlns="http://www.openarchives.org/OAI/2.0/">2020-09-19T23:56:08Z</datestamp>
+        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">hdl_11280_96355742</setSpec>
+        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">hdl_11280_26914437</setSpec>
+        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">hdl_11280_86561837</setSpec>
+        <dr:dateOfTransformation>2020-10-19T15:39:52.151Z</dr:dateOfTransformation>
     </oai:header>
     <metadata>
         <datacite:resource xmlns="http://www.openarchives.org/OAI/2.0/"
             xmlns:datacite="http://datacite.org/schema/kernel-4"
             xmlns:dc="http://purl.org/dc/elements/1.1/"
             xmlns:dri="http://www.driver-repository.eu/namespace/dri"
-            xmlns:oai="http://www.openarchives.org/OAI/2.0/"
             xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance" xsi:schemaLocation="http://datacite.org/schema/kernel-4 https://schema.datacite.org/meta/kernel-4/metadata.xsd">
-            <datacite:alternateIdentifier identifierType="URL" xmlns:datacite="http://datacite.org/schema/kernel-4/">277</datacite:alternateIdentifier>
-            <datacite:identifier identifierType="Handle" xmlns:datacite="http://datacite.org/schema/kernel-4/">http://hdl.handle.net/11280/847e01df</datacite:identifier>
-            <alternateIdentifiers>
-                <alternateIdentifier alternateIdentifierType="URL">http://hdl.handle.net/http://hdl.handle.net/11280/847e01df</alternateIdentifier>
-            </alternateIdentifiers>
-            <datacite:alternateIdentifier identifierType="URL" xmlns:datacite="http://datacite.org/schema/kernel-4/">http://nakala.fr/data/11280/847e01df</datacite:alternateIdentifier>
-            <datacite:creators xmlns:datacite="http://datacite.org/schema/kernel-4/">
+            <alternateIdentifier alternateIdentifierType="URL" xmlns="http://datacite.org/schema/kernel-4">http://nakala.fr/data/11280/50f302c6</alternateIdentifier>
+            <identifier identifierType="Handle" xmlns="http://datacite.org/schema/kernel-4">11280/50f302c6</identifier>
+            <datacite:creators>
                <datacite:creator>
-                    <datacite:creatorName>DHAAP</datacite:creatorName>
+                    <datacite:creatorName>Desbrosse, Xavier</datacite:creatorName>
                </datacite:creator>
            </datacite:creators>
-            <datacite:titles xmlns:datacite="http://datacite.org/schema/kernel-4/">
-                <datacite:title>CVP_Notice277-1 place du Docteur Antoine Béclère _PHO02.jpg</datacite:title>
+            <datacite:titles>
+                <datacite:title>Les rues Stalingrad en France (1945-2013)</datacite:title>
            </datacite:titles>
-            <datacite:descriptions xmlns:datacite="http://datacite.org/schema/kernel-4/">
-                <datacite:description descriptionType="Abstract">Hôpital Saint-Antoine. Fragment de dalle funéraire trouvée en décembre 1932. Paris (XIIème arr.). Photographie d'Albert Citerne (1876-1970). Plaque de verre, 1932. Département Histoire de l'Architecture et Archéologie de Paris.</datacite:description>
-                <datacite:description descriptionType="Abstract">Nfa_1146</datacite:description>
-                <datacite:description descriptionType="Abstract">Hôpital Saint-Antoine. Fragment de dalle funéraire trouvée en décembre 1932. Paris (XIIème arr.). Photographie d'Albert Citerne (1876-1970). Plaque de verre, 1932. Département Histoire de l'Architecture et Archéologie de Paris.</datacite:description>
+            <datacite:subjects>
+                <datacite:subject>Rues – Noms -- France</datacite:subject>
+            </datacite:subjects>
+            <datacite:descriptions>
+                <datacite:description descriptionType="Abstract">Cette carte appartient à la collection « Guerre froide vue d’en bas » élaborée dans le cadre de l’enquête 2009-2013 du réseau des correspondants départementaux de l’IHTP « La Guerre froide vue d’en bas : 1947-1967 », enquête conduite sous la direction de Philippe Buton Professeur d’Histoire contemporaine à l’Université de Reims, d’Olivier Büttner Ingénieur de Recherche IHTP-CNRS et de Michel Hastings, Professeur de Science politique à l’Institut d’Etudes Politiques de Lille.</datacite:description>
            </datacite:descriptions>
-            <datacite:publisher xmlns:datacite="http://datacite.org/schema/kernel-4/">Nakala by Huma-Num</datacite:publisher>
-            <datacite:contributors xmlns:datacite="http://datacite.org/schema/kernel-4/">
+            <datacite:publisher>IHTP-CNRS</datacite:publisher>
+            <datacite:contributors>
                <datacite:contributor contributorType="Other">
-                    <datacite:contributorName>DHAAP, Pôle Archéologique</datacite:contributorName>
+                    <datacite:contributorName>(CNRS), Institut d'Histoire du Temps Présent (IHTP) - Centre National de la Recherche Scientifique </datacite:contributorName>
                </datacite:contributor>
            </datacite:contributors>
-            <datacite:dates xmlns:datacite="http://datacite.org/schema/kernel-4/">
-                <datacite:date dateType="Created">1932</datacite:date>
+            <datacite:dates>
+                <datacite:date dateType="Created">2013</datacite:date>
            </datacite:dates>
-            <datacite:resourceType resourceTypeGeneral="Image" xmlns:datacite="http://datacite.org/schema/kernel-4/">StillImage</datacite:resourceType>
-            <datacite:rightsList xmlns:datacite="http://datacite.org/schema/kernel-4/">
-                <datacite:rights rightsURI="info:eu-repo/semantics/openAccess"/>
-            </datacite:rightsList>
+            <datacite:resourceType resourceTypeGeneral="Image">Carte</datacite:resourceType>
+            <datacite:geoLocations>
+                <datacite:geoLocation>
+                    <datacite:geoLocationPlace>France</datacite:geoLocationPlace>
+                </datacite:geoLocation>
+            </datacite:geoLocations>
        </datacite:resource>
-        <oaf:identifier identifierType="handle">http://hdl.handle.net/11280/847e01df</oaf:identifier>
+        <oaf:identifier identifierType="handle">11280/50f302c6</oaf:identifier>
+        <oaf:concept id="dariah"/>
        <dr:CobjCategory type="dataset">0025</dr:CobjCategory>
        <oaf:dateAccepted/>
-        <oaf:accessrights>OPEN</oaf:accessrights>
+        <oaf:accessrights>UNKNOWN</oaf:accessrights>
        <oaf:language>und</oaf:language>
        <oaf:hostedBy id="re3data_____::r3d100012102" name="NAKALA"/>
        <oaf:collectedFrom id="re3data_____::r3d100012102" name="NAKALA"/>
    </metadata>
    <about xmlns:dc="http://purl.org/dc/elements/1.1/"
-        xmlns:dri="http://www.driver-repository.eu/namespace/dri"
-        xmlns:oai="http://www.openarchives.org/OAI/2.0/" xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance">
+        xmlns:dri="http://www.driver-repository.eu/namespace/dri" xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance">
        <provenance xmlns="http://www.openarchives.org/OAI/2.0/provenance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/provenance http://www.openarchives.org/OAI/2.0/provenance.xsd">
-            <originDescription altered="true" harvestDate="2020-08-01T00:16:24.742Z">
-                <baseURL>https%3A%2F%2Fwww.nakala.fr%2Foai_oa%2F11280%2F8892ab4b</baseURL>
-                <identifier>oai:nakala.fr:hdl_11280_847e01df</identifier>
-                <datestamp>2020-06-08T01:01:38Z</datestamp>
+            <originDescription altered="true" harvestDate="2020-10-03T06:06:52.228Z">
+                <baseURL>https%3A%2F%2Fwww.nakala.fr%2Foai_oa%2F11280%2F92c4d30b</baseURL>
+                <identifier>oai:nakala.fr:hdl_11280_50f302c6</identifier>
+                <datestamp>2020-09-19T23:56:08Z</datestamp>
                <metadataNamespace/>
            </originDescription>
        </provenance>
@@ -0,0 +1,102 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<record xmlns:dr="http://www.driver-repository.eu/namespace/dr"
+    xmlns:oaf="http://namespace.openaire.eu/oaf"
+    xmlns:oai="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+    <oai:header xmlns="http://namespace.openaire.eu/"
+        xmlns:dc="http://purl.org/dc/elements/1.1/"
+        xmlns:dri="http://www.driver-repository.eu/namespace/dri" xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance">
+        <dri:objIdentifier>r3a507cdacc5::03b31980d9bb3c4609e6005c4a3baba6</dri:objIdentifier>
+        <dri:recordIdentifier>oai:lindat.mff.cuni.cz:11372/LRT-1844</dri:recordIdentifier>
+        <dri:dateOfCollection>2020-09-04T14:36:48.411Z</dri:dateOfCollection>
+        <oaf:datasourceprefix>r3a507cdacc5</oaf:datasourceprefix>
+        <identifier xmlns="http://www.openarchives.org/OAI/2.0/">oai:lindat.mff.cuni.cz:11372/LRT-1844</identifier>
+        <datestamp xmlns="http://www.openarchives.org/OAI/2.0/">2016-12-07T11:10:30Z</datestamp>
+        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">hdl_11858_00-097C-0000-0007-710A-A</setSpec>
+        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">hdl_11858_00-097C-0000-0007-710B-8</setSpec>
+        <setSpec xmlns="http://www.openarchives.org/OAI/2.0/">openaire_data</setSpec>
+        <dr:dateOfTransformation>2020-09-04T14:39:16.458Z</dr:dateOfTransformation>
+    </oai:header>
+    <metadata>
+        <resource xmlns="http://datacite.org/schema/kernel-4"
+            xmlns:dc="http://purl.org/dc/elements/1.1/"
+            xmlns:dri="http://www.driver-repository.eu/namespace/dri" xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance">
+            <identifier identifierType="Handle">11372/LRT-1844</identifier>
+            <alternateIdentifiers>
+                <alternateIdentifier alternateIdentifierType="URL">http://hdl.handle.net/11372/LRT-1844</alternateIdentifier>
+            </alternateIdentifiers>
+            <creators>
+                <creator>
+                    <creatorName>Hercig, Tomáš</creatorName>
+                </creator>
+                <creator>
+                    <creatorName>Brychcín, Tomáš</creatorName>
+                </creator>
+                <creator>
+                    <creatorName>Svoboda, Lukáš</creatorName>
+                </creator>
+                <creator>
+                    <creatorName>Konkol, Michal</creatorName>
+                </creator>
+                <creator>
+                    <creatorName>Steinberger, Josef</creatorName>
+                </creator>
+            </creators>
+            <titles>
+                <title>Restaurant Reviews CZ ABSA corpus v2</title>
+            </titles>
+            <publisher>University of West Bohemia, Department of Computer Science and Engineering</publisher>
+            <publicationYear>2016</publicationYear>
+            <contributors>
+                <contributor contributorType="Funder">
+                    <contributorName>European Commission</contributorName>
+                    <nameIdentifier nameIdentifierScheme="info">info:eu-repo/grantAgreement/EC/FP7/630786</nameIdentifier>
+                </contributor>
+            </contributors>
+            <dates>
+                <date dateType="Issued">2016</date>
+                <date dateType="Accepted">2016-12-07T11:10:30Z</date>
+                <date dateType="Available">2016-12-07T11:10:30Z</date>
+            </dates>
+            <resourceType resourceTypeGeneral="Dataset">corpus</resourceType>
+            <rightsList>
+                <rights rightsURI="info:eu-repo/semantics/openAccess"/>
+                <rights rightsURI="http://creativecommons.org/licenses/by-nc-sa/4.0/"/>
+            </rightsList>
+            <descriptions>
+                <description descriptionType="Abstract">Restaurant Reviews CZ ABSA - 2.15k reviews with their related target and category
+
+                    The work done is described in the paper: https://doi.org/10.13053/CyS-20-3-2469</description>
+            </descriptions>
+        </resource>
+        <oaf:identifier identifierType="handle">11372/LRT-1844</oaf:identifier>
+        <oaf:embargoenddate>2016-12-07</oaf:embargoenddate>
+        <dr:CobjCategory type="dataset">0021</dr:CobjCategory>
+        <oaf:dateAccepted>2016-01-01</oaf:dateAccepted>
+        <oaf:accessrights>OPEN</oaf:accessrights>
+        <oaf:license>http://creativecommons.org/licenses/by-nc-sa/4.0/</oaf:license>
+        <oaf:language>und</oaf:language>
+        <oaf:projectid>corda_______::630786</oaf:projectid>
+        <oaf:hostedBy id="re3data_____::r3d100010386" name="LINDAT/CLARIN repository"/>
+        <oaf:collectedFrom id="re3data_____::r3d100010386" name="LINDAT/CLARIN repository"/>
+    </metadata>
+    <about xmlns:dc="http://purl.org/dc/elements/1.1/"
+        xmlns:dri="http://www.driver-repository.eu/namespace/dri" xmlns:prov="http://www.openarchives.org/OAI/2.0/provenance">
+        <provenance xmlns="http://www.openarchives.org/OAI/2.0/provenance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/provenance http://www.openarchives.org/OAI/2.0/provenance.xsd">
+            <originDescription altered="true" harvestDate="2020-09-04T14:36:48.411Z">
+                <baseURL>https%3A%2F%2Flindat.mff.cuni.cz%2Frepository%2Foai%2Fopenaire_data</baseURL>
+                <identifier>oai:lindat.mff.cuni.cz:11372/LRT-1844</identifier>
+                <datestamp>2016-12-07T11:10:30Z</datestamp>
+                <metadataNamespace/>
+            </originDescription>
+        </provenance>
+        <oaf:datainfo>
+            <oaf:inferred>false</oaf:inferred>
+            <oaf:deletedbyinference>false</oaf:deletedbyinference>
+            <oaf:trust>0.9</oaf:trust>
+            <oaf:inferenceprovenance/>
+            <oaf:provenanceaction classid="sysimport:crosswalk:datasetarchive"
+                classname="sysimport:crosswalk:datasetarchive"
+                schemeid="dnet:provenanceActions" schemename="dnet:provenanceActions"/>
+        </oaf:datainfo>
+    </about>
+</record>
@@ -142,12 +142,12 @@
   {
     "field": "totalcost",
     "type": "double",
-    "value": null
+    "value": 157846
  },
  {
    "field": "fundedamount",
    "type": "double",
-    "value": null
+    "value": 157846
  },
  {
    "field": "collectedfromid",
@@ -0,0 +1,10 @@
{"collectedfrom":[{"key":"dli_________::datacite","value":"Datasets in Datacite","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["10.3390/w11050916"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2018-10-28T00:39:04.337Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao, Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan, Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson, Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu, Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao, Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-01","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":"In terms of climate change and precipitation, there is large interest in how large-scale climatic features affect regional rainfall amount and rainfall occurrence. Large-scale climate elements need to be downscaled to the regional level for hydrologic applications. Here, a new Nonhomogeneous Hidden Markov Model (NHMM) called the Bayesian-NHMM is presented for downscaling and predicting of multisite daily rainfall during rainy season over the Huaihe River Basin (HRB). The Bayesian-NHMM provides a Bayesian method for parameters estimation. The model avoids the risk to have no solutions for parameter estimation, which often occurs in the traditional NHMM that uses point estimates of parameters. The Bayesian-NHMM accurately captures seasonality and interannual variability of rainfall amount and wet days during the rainy season. The model establishes a link between large-scale meteorological characteristics and local precipitation patterns. 
It also provides a more stable and efficient method to estimate parameters...","dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[{"license":null,"accessright":null,"instancetype":null,"hostedby":{"key":"openaire____::1256f046-bf1f-4afc-8b47-d0b147148b18","value":"Unknown Repository","dataInfo":null},"url":["10.3390/w11050916"],"distributionlocation":null,"collectedfrom":null,"dateofacceptance":null,"processingchargeamount":null,"processingchargecurrency":null,"refereed":null}],"journal":null,"originalObjIdentifier":"datacite____::100bb045f34ea2da81433d0b9ae3afa1","dlicollectedfrom":[{"id":"dli_________::datacite","name":"Datasets in Datacite","completionStatus":"complete","collectionMode":null}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::datacite","value":"Datasets in Datacite","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["10.3390/w11050916"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2018-10-28T00:39:04.337Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao, Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan, Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson, Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu, Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao, Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-01","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":"In terms of climate change and precipitation, there is large interest in how large-scale climatic features affect regional rainfall amount and rainfall occurrence. Large-scale climate elements need to be downscaled to the regional level for hydrologic applications. Here, a new Nonhomogeneous Hidden Markov Model (NHMM) called the Bayesian-NHMM is presented for downscaling and predicting of multisite daily rainfall during rainy season over the Huaihe River Basin (HRB). The Bayesian-NHMM provides a Bayesian method for parameters estimation. The model avoids the risk to have no solutions for parameter estimation, which often occurs in the traditional NHMM that uses point estimates of parameters. The Bayesian-NHMM accurately captures seasonality and interannual variability of rainfall amount and wet days during the rainy season. The model establishes a link between large-scale meteorological characteristics and local precipitation patterns. It also provides a more stable and efficient method to estimate parameters in the model. 
These results suggest that prediction of daily precipitation could be improved by the suggested new Bayesian-NHMM method, which can be helpful for water resources management and research on climate change.","dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[{"license":null,"accessright":null,"instancetype":null,"hostedby":{"key":"openaire____::1256f046-bf1f-4afc-8b47-d0b147148b18","value":"Unknown Repository","dataInfo":null},"url":["10.3390/w11050916"],"distributionlocation":null,"collectedfrom":null,"dateofacceptance":null,"processingchargeamount":null,"processingchargecurrency":null,"refereed":null}],"journal":null,"originalObjIdentifier":"datacite____::100bb045f34ea2da81433d0b9ae3afa1","dlicollectedfrom":[{"id":"dli_________::datacite","name":"Datasets in Datacite","completionStatus":"complete","collectionMode":null}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::datacite","value":"Datasets in Datacite","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["10.3390/w11050916"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2018-10-28T00:39:04.337Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao, Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan, Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson, Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu, Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao, Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-01","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":"In terms of climate change and precipitation, there is large interest in how large-scale climatic features affect regional rainfall amount and rainfall occurrence. Large-scale climate elements need to be downscaled to the regional level for hydrologic applications. Here, a new Nonhomogeneous Hidden Markov Model (NHMM) called the Bayesian-NHMM is presented for downscaling and predicting of multisite daily rainfall during rainy season over the Huaihe River Basin (HRB). The Bayesian-NHMM provides a Bayesian method for parameters estimation. The model avoids the risk to have no solutions for parameter estimation, which often occurs in the traditional NHMM that uses point estimates of parameters. The Bayesian-NHMM accurately captures seasonality and interannual variability of rainfall amount and wet days during the rainy season. The model establishes a link between large-scale meteorological characteristics and local precipitation patterns. It also provides a more stable and efficient method to estimate parameters in the model. 
These results suggest that prediction of daily precipitation could be improved by the suggested new Bayesian-NHMM method, which can be helpful for water resources management and research on climate change.","dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[{"license":null,"accessright":null,"instancetype":null,"hostedby":{"key":"openaire____::1256f046-bf1f-4afc-8b47-d0b147148b18","value":"Unknown Repository","dataInfo":null},"url":["10.3390/w11050916"],"distributionlocation":null,"collectedfrom":null,"dateofacceptance":null,"processingchargeamount":null,"processingchargecurrency":null,"refereed":null}],"journal":null,"originalObjIdentifier":"datacite____::100bb045f34ea2da81433d0b9ae3afa1","dlicollectedfrom":[{"id":"dli_________::datacite","name":"Datasets in Datacite","completionStatus":"complete","collectionMode":null}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::datacite","value":"Datasets in Datacite","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["10.3390/w11050916"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2018-10-28T00:39:04.337Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao, Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan, Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson, Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu, Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao, Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-01","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":"In terms of climate change and precipitation, there is large interest in how large-scale climatic features affect regional rainfall amount and rainfall occurrence. Large-scale climate elements need to be downscaled to the regional level for hydrologic applications. Here, a new Nonhomogeneous Hidden Markov Model (NHMM) called the Bayesian-NHMM is presented for downscaling and predicting of multisite daily rainfall during rainy season over the Huaihe River Basin (HRB). The Bayesian-NHMM provides a Bayesian method for parameters estimation. The model avoids the risk to have no solutions for parameter estimation, which often occurs in the traditional NHMM that uses point estimates of parameters. The Bayesian-NHMM accurately captures seasonality and interannual variability of rainfall amount and wet days during the rainy season. The model establishes a link between large-scale meteorological characteristics and local precipitation patterns. It also provides a more stable and efficient method to estimate parameters in the model. 
These results suggest that prediction of daily precipitation could be improved by the suggested new Bayesian-NHMM method, which can be helpful for water resources management and research on climate change.","dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[{"license":null,"accessright":null,"instancetype":null,"hostedby":{"key":"openaire____::1256f046-bf1f-4afc-8b47-d0b147148b18","value":"Unknown Repository","dataInfo":null},"url":["10.3390/w11050916"],"distributionlocation":null,"collectedfrom":null,"dateofacceptance":null,"processingchargeamount":null,"processingchargecurrency":null,"refereed":null}],"journal":null,"originalObjIdentifier":"datacite____::100bb045f34ea2da81433d0b9ae3afa1","dlicollectedfrom":[{"id":"dli_________::datacite","name":"Datasets in Datacite","completionStatus":"complete","collectionMode":null}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::datacite","value":"Datasets in Datacite","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["10.3390/w11050916"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2018-10-28T00:39:04.337Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao, Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan, Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson, Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu, Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao, Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao, Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-01","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":"In terms of climate change and precipitation, there is large interest in how large-scale climatic features affect regional rainfall amount and rainfall occurrence. Large-scale climate elements need to be downscaled to the regional level for hydrologic applications. Here, a new Nonhomogeneous Hidden Markov Model (NHMM) called the Bayesian-NHMM is presented for downscaling and predicting of multisite daily rainfall during rainy season over the Huaihe River Basin (HRB). The Bayesian-NHMM provides a Bayesian method for parameters estimation. The model avoids the risk to have no solutions for parameter estimation, which often occurs in the traditional NHMM that uses point estimates of parameters. The Bayesian-NHMM accurately captures seasonality and interannual variability of rainfall amount and wet days during the rainy season. The model establishes a link between large-scale meteorological characteristics and local precipitation patterns. It also provides a more stable and efficient method to estimate parameters in the model. 
These results suggest that prediction of daily precipitation could be improved by the suggested new Bayesian-NHMM method, which can be helpful for water resources management and research on climate change.","dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[{"license":null,"accessright":null,"instancetype":null,"hostedby":{"key":"openaire____::1256f046-bf1f-4afc-8b47-d0b147148b18","value":"Unknown Repository","dataInfo":null},"url":["10.3390/w11050916"],"distributionlocation":null,"collectedfrom":null,"dateofacceptance":null,"processingchargeamount":null,"processingchargecurrency":null,"refereed":null}],"journal":null,"originalObjIdentifier":"datacite____::100bb045f34ea2da81433d0b9ae3afa1","dlicollectedfrom":[{"id":"dli_________::datacite","name":"Datasets in Datacite","completionStatus":"complete","collectionMode":null}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::crossref","value":"Crossref","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["1307198540d2264d839dfd8c9a19f4a7"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2020-10-04T14:16:06.105Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-02T07:15:22Z","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":null,"dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[],"journal":null,"originalObjIdentifier":"dli_resolver::1307198540d2264d839dfd8c9a19f4a7","dlicollectedfrom":[{"id":"dli_________::crossref","name":"Crossref","completionStatus":"complete","collectionMode":"resolved"}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::crossref","value":"Crossref","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["1307198540d2264d839dfd8c9a19f4a7"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2020-09-27T11:39:38.835Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-02T07:15:22Z","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":null,"dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[],"journal":null,"originalObjIdentifier":"dli_resolver::1307198540d2264d839dfd8c9a19f4a7","dlicollectedfrom":[{"id":"dli_________::crossref","name":"Crossref","completionStatus":"complete","collectionMode":"resolved"}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::crossref","value":"Crossref","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["1307198540d2264d839dfd8c9a19f4a7"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2020-08-30T11:48:49.809Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-02T07:15:22Z","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":null,"dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[],"journal":null,"originalObjIdentifier":"dli_resolver::1307198540d2264d839dfd8c9a19f4a7","dlicollectedfrom":[{"id":"dli_________::crossref","name":"Crossref","completionStatus":"complete","collectionMode":"resolved"}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::crossref","value":"Crossref","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["1307198540d2264d839dfd8c9a19f4a7"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2020-08-14T14:25:55.176Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-02T07:15:22Z","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":null,"dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[],"journal":null,"originalObjIdentifier":"dli_resolver::1307198540d2264d839dfd8c9a19f4a7","dlicollectedfrom":[{"id":"dli_________::crossref","name":"Crossref","completionStatus":"complete","collectionMode":"resolved"}],"completionStatus":"complete"}
{"collectedfrom":[{"key":"dli_________::crossref","value":"Crossref","dataInfo":null}],"dataInfo":{"invisible":false,"inferred":null,"deletedbyinference":false,"trust":"0.9","inferenceprovenance":null,"provenanceaction":null},"lastupdatetimestamp":null,"id":"50|1307198540d2264d839dfd8c9a19f4a7","originalId":["1307198540d2264d839dfd8c9a19f4a7"],"pid":[{"value":"10.3390/w11050916","qualifier":{"classid":"doi","classname":"doi","schemeid":"dnet:pid_types","schemename":"dnet:pid_types"},"dataInfo":null}],"dateofcollection":"2020-08-09T11:35:23.526Z","dateoftransformation":null,"extraInfo":null,"oaiprovenance":null,"measures":null,"author":[{"fullname":"Cao Qing","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Zhenchun","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Yuan Feifei","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Berndtsson Ronny","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Xu Shijie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Gao Huibin","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null},{"fullname":"Hao Jie","name":null,"surname":null,"rank":null,"pid":null,"affiliation":null}],"resulttype":{"classid":"publication","classname":"publication","schemeid":"publication","schemename":"publication"},"language":null,"country":null,"subject":[],"title":[{"value":"On the Predictability of Daily Rainfall during Rainy Season over the Huaihe River Basin","qualifier":{"classid":"main title","classname":null,"schemeid":"dnet:dataCite_title","schemename":"dnet:dataCite_title"},"dataInfo":null}],"relevantdate":[{"value":"2019-05-02T07:15:22Z","qualifier":{"classid":"date","classname":"date","schemeid":"dnet::date","schemename":"dnet::date"},"dataInfo":null}],"description":[{"value":null,"dataInfo":null}],"dateofacceptance":null,"publisher":{"value":"MDPI AG","dataInfo":null},"embargoenddate":null,"source":null,"fulltext":null,"format":null,"contributor":null,"resourcetype":null,"coverage":null,"bestaccessright":null,"context":null,"externalReference":null,"instance":[],"journal":null,"originalObjIdentifier":"dli_resolver::1307198540d2264d839dfd8c9a19f4a7","dlicollectedfrom":[{"id":"dli_________::crossref","name":"Crossref","completionStatus":"complete","collectionMode":"resolved"}],"completionStatus":"complete"}
@@ -62,6 +62,10 @@
        <artifactId>dhp-schemas</artifactId>
        <version>${project.version}</version>
    </dependency>
+   <dependency>
+       <groupId>org.apache.httpcomponents</groupId>
+       <artifactId>httpmime</artifactId>
+   </dependency>

    <dependency>
        <groupId>org.elasticsearch</groupId>
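A note on the new org.apache.httpcomponents:httpmime dependency: this commit also introduces a eu.dnetlib.dhp.export.zenodo package (see the MakeTar class further down), so the most likely reason for the dependency is building multipart/form-data requests when pushing the exported archives to an external service; that motivation is an assumption, not something stated in the diff. A minimal sketch of what httpmime provides, with a placeholder endpoint and file path:

// Illustrative only: multipart request building, which is what httpmime adds.
// The endpoint URL and file path are placeholders, not values from this commit.
import java.io.File
import org.apache.http.client.methods.HttpPost
import org.apache.http.entity.mime.MultipartEntityBuilder
import org.apache.http.impl.client.HttpClients

object MultipartUploadSketch {
  def main(args: Array[String]): Unit = {
    val post = new HttpPost("https://example.org/upload")            // placeholder endpoint
    post.setEntity(
      MultipartEntityBuilder
        .create()
        .addBinaryBody("file", new File("/tmp/publication.tar"))     // placeholder archive
        .build())
    val client = HttpClients.createDefault()
    try println(client.execute(post).getStatusLine)
    finally client.close()
  }
}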
@@ -47,6 +47,7 @@ object DLIToOAF {
    "References" -> ("isRelatedTo", "relationship"),
    "IsRelatedTo" -> ("isRelatedTo", "relationship"),
    "IsSupplementedBy" -> ("isSupplementedBy", "supplement"),
+   "Documents"-> ("isRelatedTo", "relationship"),
    "Cites" -> ("cites", "citation"),
    "Unknown" -> ("isRelatedTo", "relationship"),
    "IsSourceOf" -> ("isRelatedTo", "relationship"),
@@ -83,7 +84,7 @@ object DLIToOAF {

  val rel_inverse: Map[String, String] = Map(
    "isRelatedTo" -> "isRelatedTo",
-   "IsSupplementedBy" -> "isSupplementTo",
+   "isSupplementedBy" -> "isSupplementTo",
    "cites" -> "IsCitedBy",
    "IsCitedBy" -> "cites",
    "reviews" -> "IsReviewedBy"
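The one-character key change above ("IsSupplementedBy" to "isSupplementedBy") matters because rel_inverse is later looked up with the relClass values produced by relationTypeMapping, which start with a lower-case letter; with the old key that lookup would fail at runtime with a NoSuchElementException. A small check one could keep next to the two maps (illustrative only, assuming it runs where both maps are visible, e.g. inside object DLIToOAF or a test in the same package):

// Illustrative check, not part of the patch: every relClass emitted by relationTypeMapping
// must have an entry in rel_inverse, otherwise rel_inverse(r.getRelClass) throws
// NoSuchElementException when the inverse relations are generated at the end of the export job.
val missingInverses = relationTypeMapping.values.map(_._1).toSet.filterNot(rel_inverse.contains)
require(missingInverses.isEmpty, s"relClass values without an inverse: $missingInverses")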
@@ -273,29 +274,18 @@ object DLIToOAF {
  }


-//  def convertDLIRelation(r: DLIRelation): Relation = {
-//
-//    val result = new Relation
-//    if (!relationTypeMapping.contains(r.getRelType))
-//      return null
-//
-//    if (r.getProperties == null || r.getProperties.size() == 0 || (r.getProperties.size() == 1 && r.getProperties.get(0) == null))
-//      return null
-//    val t = relationTypeMapping.get(r.getRelType)
-//
-//    result.setRelType("resultResult")
-//    result.setRelClass(t.get._1)
-//    result.setSubRelType(t.get._2)
-//    result.setCollectedfrom(r.getProperties.asScala.map(c => collectedFromMap.getOrElse(c.getKey, null)).filter(p => p != null).asJava)
-//    result.setSource(generateId(r.getSource))
-//    result.setTarget(generateId(r.getTarget))
-//
-//    if (result.getSource.equals(result.getTarget))
-//      return null
-//    result.setDataInfo(generateDataInfo())
-//
-//    result
-//  }
+  def convertDLIRelation(r: Relation): Relation = {
+
+    val rt = r.getRelType
+    if (!relationTypeMapping.contains(rt))
+      return null
+    r.setRelType("resultResult")
+    r.setRelClass(relationTypeMapping(rt)._1)
+    r.setSubRelType(relationTypeMapping(rt)._2)
+    r.setSource(generateId(r.getSource))
+    r.setTarget(generateId(r.getTarget))
+    r
+  }


  def convertDLIDatasetTOOAF(d: DLIDataset): Dataset = {
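To make the rewritten converter concrete, this is how a single relation is expected to flow through it, based only on the mappings visible above; the identifiers are placeholders, the Relation import is the one normally used in this codebase, and generateId is the helper the method already calls:

// Illustrative only; identifiers are placeholders, not real OpenAIRE ids.
import eu.dnetlib.dhp.schema.oaf.Relation

val r = new Relation
r.setRelType("Cites")
r.setSource("50|sourcePlaceholder")
r.setTarget("50|targetPlaceholder")

val converted = DLIToOAF.convertDLIRelation(r)
// The method mutates and returns the same instance:
//   converted.getRelType    == "resultResult"
//   converted.getRelClass   == "cites"     (relationTypeMapping("Cites")._1)
//   converted.getSubRelType == "citation"  (relationTypeMapping("Cites")._2)
//   source and target are rewritten through generateId(...)
// Relation types missing from relationTypeMapping make the method return null,
// which the caller is expected to filter out (.filter(r => r != null)).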
@@ -15,11 +15,13 @@ import org.apache.spark.{SparkConf, SparkContext}
import org.codehaus.jackson.map.ObjectMapper

import scala.collection.mutable.ArrayBuffer
+import scala.collection.JavaConverters._

object SparkExportContentForOpenAire {



  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
    val parser = new ArgumentApplicationParser(IOUtils.toString(SparkExportContentForOpenAire.getClass.getResourceAsStream("input_export_content_parameters.json")))
@@ -32,51 +34,54 @@ object SparkExportContentForOpenAire {
      .master(parser.get("master")).getOrCreate()


-    val sc:SparkContext = spark.sparkContext

    val workingPath = parser.get("workingDirPath")

+    implicit val dliPubEncoder: Encoder[DLIPublication] = Encoders.kryo(classOf[DLIPublication])
+    implicit val dliDatEncoder: Encoder[DLIDataset] = Encoders.kryo(classOf[DLIDataset])
    implicit val pubEncoder: Encoder[Publication] = Encoders.bean(classOf[Publication])
    implicit val datEncoder: Encoder[OafDataset] = Encoders.bean(classOf[OafDataset])
    implicit val relEncoder: Encoder[Relation] = Encoders.bean(classOf[Relation])

    import spark.implicits._

+    val dsRel = spark.read.load(s"$workingPath/relation_b").as[Relation]
+    dsRel.filter(r => r.getDataInfo==null || r.getDataInfo.getDeletedbyinference ==false)
+      .map(DLIToOAF.convertDLIRelation)
+      .filter(r => r!= null)
+      .write.mode(SaveMode.Overwrite).save(s"$workingPath/export/relationDS")

-    val relRDD:RDD[Relation] = sc.textFile(s"$workingPath/relation_j")
-      .map(s => new ObjectMapper().readValue(s, classOf[Relation]))
-      .filter(p => p.getDataInfo.getDeletedbyinference == false)
-    spark.createDataset(relRDD).write.mode(SaveMode.Overwrite).save(s"$workingPath/relationDS")

-    val datRDD:RDD[OafDataset] = sc.textFile(s"$workingPath/dataset")
-      .map(s => new ObjectMapper().readValue(s, classOf[DLIDataset]))
+    val dsPubs = spark.read.load(s"$workingPath/publication").as[DLIPublication]
+    dsPubs
+      .filter(p=>p.getDataInfo.getDeletedbyinference == false)
+      .map(DLIToOAF.convertDLIPublicationToOAF)
+      .filter(p=>p!= null)
+      .write.mode(SaveMode.Overwrite).save(s"$workingPath/export/publicationDS")


+    val dsDataset = spark.read.load(s"$workingPath/dataset").as[DLIDataset]
+    dsDataset
      .filter(p => p.getDataInfo.getDeletedbyinference == false)
      .map(DLIToOAF.convertDLIDatasetTOOAF).filter(p=>p!= null)
-    spark.createDataset(datRDD).write.mode(SaveMode.Overwrite).save(s"$workingPath/datasetDS")
+      .write.mode(SaveMode.Overwrite).save(s"$workingPath/export/datasetDS")


-    val pubRDD:RDD[Publication] = sc.textFile(s"$workingPath/publication")
-      .map(s => new ObjectMapper().readValue(s, classOf[DLIPublication]))
-      .filter(p => p.getDataInfo.getDeletedbyinference == false)
-      .map(DLIToOAF.convertDLIPublicationToOAF).filter(p=>p!= null)
-    spark.createDataset(pubRDD).write.mode(SaveMode.Overwrite).save(s"$workingPath/publicationDS")



-    val pubs:Dataset[Publication] = spark.read.load(s"$workingPath/publicationDS").as[Publication]
-    val dats :Dataset[OafDataset] = spark.read.load(s"$workingPath/datasetDS").as[OafDataset]
-    val relDS1 :Dataset[Relation] = spark.read.load(s"$workingPath/relationDS").as[Relation]
+    val pubs:Dataset[Publication] = spark.read.load(s"$workingPath/export/publicationDS").as[Publication]
+    val dats :Dataset[OafDataset] = spark.read.load(s"$workingPath/export/datasetDS").as[OafDataset]
+    val relDS1 :Dataset[Relation] = spark.read.load(s"$workingPath/export/relationDS").as[Relation]


    val pub_id = pubs.select("id").distinct()
    val dat_id = dats.select("id").distinct()


-    pub_id.joinWith(relDS1, pub_id("id").equalTo(relDS1("source"))).map(k => k._2).write.mode(SaveMode.Overwrite).save(s"$workingPath/relationDS_f1")
+    pub_id.joinWith(relDS1, pub_id("id").equalTo(relDS1("source"))).map(k => k._2).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/relationDS_f1")

-    val relDS2= spark.read.load(s"$workingPath/relationDS_f1").as[Relation]
+    val relDS2= spark.read.load(s"$workingPath/export/relationDS_f1").as[Relation]

-    relDS2.joinWith(dat_id, relDS2("target").equalTo(dats("id"))).map(k => k._1).write.mode(SaveMode.Overwrite).save(s"$workingPath/relationDS_filtered")
+    relDS2.joinWith(dat_id, relDS2("target").equalTo(dats("id"))).map(k => k._1).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/relationDS_filtered")


    val r_source = relDS2.select(relDS2("source")).distinct()
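A quick way to sanity-check the intermediate outputs written above is to count what landed under $workingPath/export; this is illustrative only and assumes the same spark session, workingPath and implicit encoders that are in scope in main():

// Illustrative only, not part of the patch: count the records written by the steps above.
val nRel = spark.read.load(s"$workingPath/export/relationDS").as[Relation].count()
val nPub = spark.read.load(s"$workingPath/export/publicationDS").as[Publication].count()
val nDat = spark.read.load(s"$workingPath/export/datasetDS").as[OafDataset].count()
println(s"exported: relations=$nRel publications=$nPub datasets=$nDat")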
@@ -87,22 +92,20 @@ object SparkExportContentForOpenAire {

    pubs.joinWith(r_source, pubs("id").equalTo(r_source("source")), "inner").map(k => k._1)
      .withColumn("row",row_number.over(w2)).where($"row" === 1).drop("row")
-      .write.mode(SaveMode.Overwrite).save(s"$workingPath/publicationDS_filtered")
+      .write.mode(SaveMode.Overwrite).save(s"$workingPath/export/publicationDS_filtered")

    dats.joinWith(r_target, dats("id").equalTo(r_target("target")), "inner").map(k => k._1)
      .withColumn("row",row_number.over(w2)).where($"row" === 1).drop("row")
-      .write.mode(SaveMode.Overwrite).save(s"$workingPath/datasetAS")
+      .write.mode(SaveMode.Overwrite).save(s"$workingPath/export/datasetAS")

-    spark.createDataset(sc.textFile(s"$workingPath/dataset")
-      .map(s => new ObjectMapper().readValue(s, classOf[DLIDataset]))
-      .map(DLIToOAF.convertDLIDatasetToExternalReference)
-      .filter(p => p != null)).as[DLIExternalReference].write.mode(SaveMode.Overwrite).save(s"$workingPath/externalReference")

-    val pf = spark.read.load(s"$workingPath/publicationDS_filtered").select("id")
-    val relDS3 = spark.read.load(s"$workingPath/relationDS").as[Relation]
+    dsDataset.map(DLIToOAF.convertDLIDatasetToExternalReference).filter(p => p != null).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/externalReference")
+    val pf = spark.read.load(s"$workingPath/export/publicationDS_filtered").select("id")
+    val relDS3 = spark.read.load(s"$workingPath/export/relationDS").as[Relation]
    val relationTo = pf.joinWith(relDS3, pf("id").equalTo(relDS3("source")),"inner").map(t =>t._2)

-    val extRef = spark.read.load(s"$workingPath/externalReference").as[DLIExternalReference]
+    val extRef = spark.read.load(s"$workingPath/export/externalReference").as[DLIExternalReference]

    spark.createDataset(relationTo.joinWith(extRef, relationTo("target").equalTo(extRef("id")), "inner").map(d => {
      val r = d._1
@@ -112,11 +115,11 @@ object SparkExportContentForOpenAire {
      var dli_ext = ArrayBuffer[DLIExternalReference]()
      f._2.foreach(d => if (dli_ext.size < 100) dli_ext += d )
      (f._1, dli_ext)
-    })).write.mode(SaveMode.Overwrite).save(s"$workingPath/externalReference_grouped")
+    })).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/externalReference_grouped")

-    val pubf :Dataset[Publication] = spark.read.load(s"$workingPath/publicationDS_filtered").as[Publication]
+    val pubf :Dataset[Publication] = spark.read.load(s"$workingPath/export/publicationDS_filtered").as[Publication]

-    val groupedERf:Dataset[(String, List[DLIExternalReference])]= spark.read.load(s"$workingPath/externalReference_grouped").as[(String, List[DLIExternalReference])]
+    val groupedERf:Dataset[(String, List[DLIExternalReference])]= spark.read.load(s"$workingPath/export/externalReference_grouped").as[(String, List[DLIExternalReference])]

    groupedERf.joinWith(pubf,pubf("id").equalTo(groupedERf("_1"))).map(t =>
    {
@@ -128,29 +131,28 @@ object SparkExportContentForOpenAire {
        } else
          publication
      }
-    ).write.mode(SaveMode.Overwrite).save(s"$workingPath/publicationAS")
+    ).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/publicationAS")


-    spark.createDataset(sc.textFile(s"$workingPath/dataset")
-      .map(s => new ObjectMapper().readValue(s, classOf[DLIDataset]))
+    dsDataset
      .map(DLIToOAF.convertClinicalTrial)
-      .filter(p => p != null))
-      .write.mode(SaveMode.Overwrite).save(s"$workingPath/clinicalTrials")
+      .filter(p => p != null)
+      .write.mode(SaveMode.Overwrite).save(s"$workingPath/export/clinicalTrials")

-    val ct:Dataset[(String,String)] = spark.read.load(s"$workingPath/clinicalTrials").as[(String,String)]
+    val ct:Dataset[(String,String)] = spark.read.load(s"$workingPath/export/clinicalTrials").as[(String,String)]

-    val relDS= spark.read.load(s"$workingPath/relationDS_f1").as[Relation]
+    val relDS= spark.read.load(s"$workingPath/export/relationDS_f1").as[Relation]

    relDS.joinWith(ct, relDS("target").equalTo(ct("_1")), "inner")
      .map(k =>{
        val currentRel = k._1
        currentRel.setTarget(k._2._2)
        currentRel
-      }).write.mode(SaveMode.Overwrite).save(s"$workingPath/clinicalTrialsRels")
+      }).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/clinicalTrialsRels")


-    val clRels:Dataset[Relation] = spark.read.load(s"$workingPath/clinicalTrialsRels").as[Relation]
-    val rels:Dataset[Relation] = spark.read.load(s"$workingPath/relationDS_filtered").as[Relation]
+    val clRels:Dataset[Relation] = spark.read.load(s"$workingPath/export/clinicalTrialsRels").as[Relation]
+    val rels:Dataset[Relation] = spark.read.load(s"$workingPath/export/relationDS_filtered").as[Relation]

    rels.union(clRels).flatMap(r => {
      val inverseRel = new Relation
@@ -162,18 +164,18 @@ object SparkExportContentForOpenAire {
      inverseRel.setSubRelType(r.getSubRelType)
      inverseRel.setRelClass(DLIToOAF.rel_inverse(r.getRelClass))
      List(r, inverseRel)
-    }).write.mode(SaveMode.Overwrite).save(s"$workingPath/relationAS")
+    }).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/relationAS")



-    spark.read.load(s"$workingPath/publicationAS").as[Publication].map(DLIToOAF.fixInstance).write.mode(SaveMode.Overwrite).save(s"$workingPath/publicationAS_fixed")
-    spark.read.load(s"$workingPath/datasetAS").as[OafDataset].map(DLIToOAF.fixInstanceDataset).write.mode(SaveMode.Overwrite).save(s"$workingPath/datasetAS_fixed")
+    spark.read.load(s"$workingPath/export/publicationAS").as[Publication].map(DLIToOAF.fixInstance).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/publicationAS_fixed")
+    spark.read.load(s"$workingPath/export/datasetAS").as[OafDataset].map(DLIToOAF.fixInstanceDataset).write.mode(SaveMode.Overwrite).save(s"$workingPath/export/datasetAS_fixed")

-    val fRels:Dataset[(String,String)] = spark.read.load(s"$workingPath/relationAS").as[Relation].map(DLIToOAF.toActionSet)
-    val fpubs:Dataset[(String,String)] = spark.read.load(s"$workingPath/publicationAS_fixed").as[Publication].map(DLIToOAF.toActionSet)
-    val fdats:Dataset[(String,String)] = spark.read.load(s"$workingPath/datasetAS_fixed").as[OafDataset].map(DLIToOAF.toActionSet)
+    val fRels:Dataset[(String,String)] = spark.read.load(s"$workingPath/export/relationAS").as[Relation].map(DLIToOAF.toActionSet)
+    val fpubs:Dataset[(String,String)] = spark.read.load(s"$workingPath/export/publicationAS_fixed").as[Publication].map(DLIToOAF.toActionSet)
+    val fdats:Dataset[(String,String)] = spark.read.load(s"$workingPath/export/datasetAS_fixed").as[OafDataset].map(DLIToOAF.toActionSet)

-    fRels.union(fpubs).union(fdats).rdd.map(s => (new Text(s._1), new Text(s._2))).saveAsHadoopFile(s"$workingPath/rawset", classOf[Text], classOf[Text], classOf[SequenceFileOutputFormat[Text,Text]], classOf[GzipCodec])
+    fRels.union(fpubs).union(fdats).rdd.map(s => (new Text(s._1), new Text(s._2))).saveAsHadoopFile(s"$workingPath/export/rawset", classOf[Text], classOf[Text], classOf[SequenceFileOutputFormat[Text,Text]], classOf[GzipCodec])
  }

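If the final raw action set needs to be inspected, the sequence file written above can be read back with the same key/value types; illustrative only, reusing the spark session, workingPath and the Text import already present in the job:

// Illustrative only, not part of the patch: print a few payloads from the generated raw action set.
// spark.sparkContext is used directly because the sc val was removed by this change;
// mapping to String before take() avoids collecting non-serializable Text objects.
spark.sparkContext
  .sequenceFile(s"$workingPath/export/rawset", classOf[Text], classOf[Text])
  .map(_._2.toString)
  .take(5)
  .foreach(println)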
@@ -0,0 +1,111 @@

package eu.dnetlib.dhp.export.zenodo;

import java.io.*;

import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;

public class MakeTar implements Serializable {

    private static final Logger log = LoggerFactory.getLogger(MakeTar.class);

    public static void main(String[] args) throws Exception {
        String jsonConfiguration = IOUtils
            .toString(
                MakeTar.class
                    .getResourceAsStream(
                        "/eu/dnetlib/dhp/export/input_maketar_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
        parser.parseArgument(args);

        final String outputPath = parser.get("targetPath");
        log.info("hdfsPath: {}", outputPath);

        final String hdfsNameNode = parser.get("nameNode");
        log.info("nameNode: {}", hdfsNameNode);

        final String inputPath = parser.get("sourcePath");
        log.info("input path : {}", inputPath);

        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", hdfsNameNode);

        FileSystem fileSystem = FileSystem.get(conf);

        makeTArArchive(fileSystem, inputPath, outputPath);

    }

    public static void makeTArArchive(FileSystem fileSystem, String inputPath, String outputPath) throws IOException {

        RemoteIterator<LocatedFileStatus> dir_iterator = fileSystem.listLocatedStatus(new Path(inputPath));

        while (dir_iterator.hasNext()) {
            LocatedFileStatus fileStatus = dir_iterator.next();

            Path p = fileStatus.getPath();
            String p_string = p.toString();
            String entity = p_string.substring(p_string.lastIndexOf("/") + 1);

            write(fileSystem, p_string, outputPath + "/" + entity + ".tar", entity);
        }

    }

    private static void write(FileSystem fileSystem, String inputPath, String outputPath, String dir_name)
        throws IOException {

        Path hdfsWritePath = new Path(outputPath);
        FSDataOutputStream fsDataOutputStream = null;
        if (fileSystem.exists(hdfsWritePath)) {
            fileSystem.delete(hdfsWritePath, true);

        }
        fsDataOutputStream = fileSystem.create(hdfsWritePath);

        TarArchiveOutputStream ar = new TarArchiveOutputStream(fsDataOutputStream.getWrappedStream());

        RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
            .listFiles(
                new Path(inputPath), true);

        while (fileStatusListIterator.hasNext()) {
            LocatedFileStatus fileStatus = fileStatusListIterator.next();

            Path p = fileStatus.getPath();
            String p_string = p.toString();
            if (!p_string.endsWith("_SUCCESS")) {
                String name = p_string.substring(p_string.lastIndexOf("/") + 1);
                TarArchiveEntry entry = new TarArchiveEntry(dir_name + "/" + name + ".json.gz");
                entry.setSize(fileStatus.getLen());
                ar.putArchiveEntry(entry);

                InputStream is = fileSystem.open(fileStatus.getPath());

                BufferedInputStream bis = new BufferedInputStream(is);

                int count;
                byte data[] = new byte[1024];
                while ((count = bis.read(data, 0, data.length)) != -1) {
                    ar.write(data, 0, count);
                }
                bis.close();
                ar.closeArchiveEntry();

            }

        }

        ar.close();
    }

}
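One possible way to exercise the new MakeTar logic without a cluster is to point it at the local filesystem; the sketch below assumes that setup, and the two paths are placeholders:

// Illustrative local smoke test, not part of the patch. Paths are placeholders.
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem

import eu.dnetlib.dhp.export.zenodo.MakeTar

object MakeTarLocalCheck {
  def main(args: Array[String]): Unit = {
    // FileSystem.getLocal returns a FileSystem backed by the local disk, so no namenode is needed.
    val fs = FileSystem.getLocal(new Configuration())
    // Tars every first-level directory under the source into <target>/<name>.tar,
    // exactly as makeTArArchive would do on HDFS.
    MakeTar.makeTArArchive(fs, "/tmp/openaire_export", "/tmp/openaire_tars")
  }
}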