package eu.dnetlib.dhp.actionmanager.project;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Objects;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.MapGroupsFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.actionmanager.project.utils.CSVProgramme;
import eu.dnetlib.dhp.actionmanager.project.utils.CSVProject;
import eu.dnetlib.dhp.actionmanager.project.utils.EXCELTopic;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.schema.action.AtomicAction;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.H2020Classification;
import eu.dnetlib.dhp.schema.oaf.H2020Programme;
import eu.dnetlib.dhp.schema.oaf.Project;
import eu.dnetlib.dhp.utils.DHPUtils;

import scala.Tuple2;
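
/**
 * Creates the H2020 project action set: projects are joined with their programme and topic,
 * enriched into {@link Project} entities carrying an {@link H2020Classification}, merged by id
 * and serialized as {@link AtomicAction}s in a Hadoop sequence file.
 */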
public class SparkAtomicActionJob {

	private static final Logger log = LoggerFactory.getLogger(SparkAtomicActionJob.class);
	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
	private static final HashMap<String, String> programmeMap = new HashMap<>();

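	/**
	 * Parses the job arguments, logs the resolved paths and runs the action set creation within a
	 * (possibly externally managed) Spark session.
	 */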
	public static void main(String[] args) throws Exception {

		String jsonConfiguration = IOUtils
			.toString(
				SparkAtomicActionJob.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/actionmanager/project/action_set_parameters.json"));

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);

		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);

		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		String projectPath = parser.get("projectPath");
		log.info("projectPath: {}", projectPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		final String programmePath = parser.get("programmePath");
		log.info("programmePath: {}", programmePath);

		final String topicPath = parser.get("topicPath");
		log.info("topicPath: {}", topicPath);

		SparkConf conf = new SparkConf();

		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				removeOutputDir(spark, outputPath);
				getAtomicActions(
					spark,
					projectPath,
					programmePath,
					topicPath,
					outputPath);
			});
	}

	private static void removeOutputDir(SparkSession spark, String path) {
		HdfsSupport.remove(path, spark.sparkContext().hadoopConfiguration());
	}

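	/**
	 * Joins the project dump with the programme and topic dumps, maps every match to a {@link Project}
	 * carrying its {@link H2020Classification}, merges projects sharing the same id, and saves the
	 * result as {@link AtomicAction}s in a Hadoop sequence file.
	 */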
	private static void getAtomicActions(SparkSession spark, String projectPath,
		String programmePath,
		String topicPath,
		String outputPath) {

		Dataset<CSVProject> project = readPath(spark, projectPath, CSVProject.class);
		Dataset<CSVProgramme> programme = readPath(spark, programmePath, CSVProgramme.class);
		Dataset<EXCELTopic> topic = readPath(spark, topicPath, EXCELTopic.class);

		project
			// left joins: keep every project, even without a matching programme or topic row
			.joinWith(programme, project.col("programme").equalTo(programme.col("code")), "left")
			.joinWith(topic, project.col("topics").equalTo(topic.col("code")), "left")
			.map((MapFunction<Tuple2<Tuple2<CSVProject, CSVProgramme>, EXCELTopic>, Project>) c -> {
				Tuple2<CSVProject, CSVProgramme> projectprogramme = c._1();
				CSVProject csvProject = projectprogramme._1();

				String topicdescription = Optional
					.ofNullable(c._2())
					.map(EXCELTopic::getTitle)
					.orElse(null);

				// a Project is produced only when the programme lookup succeeded;
				// unmatched projects become null and are filtered out below
				return Optional
					.ofNullable(projectprogramme._2())
					.map(csvProgramme -> {
						Project pp = new Project();
						pp
							.setId(
								createOpenaireId(
									ModelSupport.entityIdPrefix.get("project"),
									"corda__h2020", csvProject.getId()));
						pp.setH2020topiccode(csvProject.getTopics());
						H2020Programme pm = new H2020Programme();
						H2020Classification h2020classification = new H2020Classification();
						pm.setCode(csvProject.getProgramme());
						// prefer the short title as programme description, falling back to the full title
						if (StringUtils.isNotEmpty(csvProgramme.getShortTitle())) {
							pm.setDescription(csvProgramme.getShortTitle());
						} else {
							pm.setDescription(csvProgramme.getTitle());
						}
						h2020classification.setClassification(csvProgramme.getClassification());
						setLevels(h2020classification, csvProgramme.getClassification());
						h2020classification.setH2020Programme(pm);
						pp.setH2020classification(Arrays.asList(h2020classification));
						if (topicdescription != null) {
							pp.setH2020topicdescription(topicdescription);
						}
						return pp;
					})
					.orElse(null);
			}, Encoders.bean(Project.class))
			.filter(Objects::nonNull)
			// a project may match more than one programme/topic row: merge duplicates by id
			.groupByKey(
				(MapFunction<Project, String>) Project::getId,
				Encoders.STRING())
			.mapGroups((MapGroupsFunction<String, Project, Project>) (s, it) -> {
				Project first = it.next();
				it.forEachRemaining(first::mergeFrom);
				return first;
			}, Encoders.bean(Project.class))
			.toJavaRDD()
			.map(p -> new AtomicAction<>(Project.class, p))
			// pair each action with its class name and store the JSON payload in a sequence file
			.mapToPair(
				aa -> new Tuple2<>(new Text(aa.getClazz().getCanonicalName()),
					new Text(OBJECT_MAPPER.writeValueAsString(aa))))
			.saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class);
	}

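	/**
	 * Splits a " | "-separated classification string into up to three hierarchy levels of the
	 * {@link H2020Classification}.
	 */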
	private static void setLevels(H2020Classification h2020Classification, String classification) {
		String[] tmp = classification.split(" \\| ");
		h2020Classification.setLevel1(tmp[0]);
		if (tmp.length > 1) {
			h2020Classification.setLevel2(tmp[1]);
		}
		if (tmp.length > 2) {
			h2020Classification.setLevel3(tmp[2]);
		}
	}

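	/**
	 * Reads a newline-delimited JSON file into a typed {@link Dataset}.
	 */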
	public static <R> Dataset<R> readPath(
		SparkSession spark, String inputPath, Class<R> clazz) {
		return spark
			.read()
			.textFile(inputPath)
			.map((MapFunction<String, R>) value -> OBJECT_MAPPER.readValue(value, clazz), Encoders.bean(clazz));
	}

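	/**
	 * Builds an OpenAIRE identifier of the form {@code prefix|nsPrefix::md5(id)}.
	 */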
	public static String createOpenaireId(
		final String prefix, final String nsPrefix, final String id) {

		return String.format("%s|%s::%s", prefix, nsPrefix, DHPUtils.md5(id));
	}
}