package eu.dnetlib.dhp.actionmanager.project;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import java.util.*;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme;
import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProject;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import scala.Tuple2;
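/**
 * Spark job that prepares the CSV-derived project records: it reads {@link CSVProject}
 * records from projectPath, splits the semicolon-separated programme field so that every
 * project/programme pair becomes a separate record, and writes the result as JSON text
 * to outputPath.
 *
 * The expected parameters (isSparkSessionManaged, projectPath, outputPath) are the ones
 * read via parser.get() below; their command-line switches are defined in
 * prepare_project_parameters.json, which is not shown here.
 */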
public class PrepareProjects {
	private static final Logger log = LoggerFactory.getLogger(PrepareProjects.class);
	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
	private static final HashMap<String, CSVProgramme> programmeMap = new HashMap<>();
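	/**
	 * Driver entry point: loads the parameter specification shipped with the jar,
	 * parses the command-line arguments and runs the preparation step inside a
	 * (possibly externally managed) Spark session.
	 */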
	public static void main(String[] args) throws Exception {

		// read the JSON parameter specification bundled with the jar and parse the CLI arguments
		String jsonConfiguration = IOUtils
			.toString(
				PrepareProjects.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/actionmanager/project/prepare_project_parameters.json"));

		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);

		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);

		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String projectPath = parser.get("projectPath");
		log.info("projectPath: {}", projectPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		SparkConf conf = new SparkConf();

		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				removeOutputDir(spark, outputPath);
				exec(spark, projectPath, outputPath);
			});
	}
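	/**
	 * Removes the output path on HDFS before writing, so that re-runs start from a clean directory.
	 */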
private static void removeOutputDir(SparkSession spark, String path) {
HdfsSupport.remove(path, spark.sparkContext().hadoopConfiguration());
}
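	/**
	 * Fans out the project records: the programme field of a CSVProject may contain several
	 * programme codes separated by ';'. Each code is emitted as its own (id, programme) record
	 * and the result is stored as JSON text at the output path.
	 *
	 * For example (the codes are purely illustrative), a record with id "p1" and programme
	 * "CODE-A;CODE-B" would produce two records: (p1, CODE-A) and (p1, CODE-B).
	 */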
	private static void exec(SparkSession spark, String projectPath, String outputPath) {
		Dataset<CSVProject> project = readPath(spark, projectPath, CSVProject.class);

		project
			.toJavaRDD()
			.flatMap(p -> {
				List<CSVProject> csvProjectList = new ArrayList<>();

				// one CSVProject per programme code, keeping the original project id
				String[] programme = p.getProgramme().split(";");
				Arrays
					.stream(programme)
					.forEach(value -> {
						CSVProject csvProject = new CSVProject();
						csvProject.setProgramme(value);
						csvProject.setId(p.getId());
						csvProjectList.add(csvProject);
					});

				return csvProjectList.iterator();
			})
			.map(p -> OBJECT_MAPPER.writeValueAsString(p))
			.saveAsTextFile(outputPath);
	}
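	/**
	 * Reads a text file of JSON lines from the given path and deserializes each line
	 * into an instance of the given bean class, returning a typed Dataset.
	 */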
public static <R> Dataset<R> readPath(
SparkSession spark, String inputPath, Class<R> clazz) {
return spark
.read()
.textFile(inputPath)
.map((MapFunction<String, R>) value -> OBJECT_MAPPER.readValue(value, clazz), Encoders.bean(clazz));
}
}