package eu.dnetlib.dhp.actionmanager.project;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Objects;
import java.util.Optional;

import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProgramme;
import eu.dnetlib.dhp.actionmanager.project.csvutils.CSVProject;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.Programme;
import eu.dnetlib.dhp.schema.oaf.Project;
import eu.dnetlib.dhp.utils.DHPUtils;

import scala.Tuple2;

public class SparkAtomicActionJob {

    private static final Logger log = LoggerFactory.getLogger(SparkAtomicActionJob.class);
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
    // note: declared but not used by this job
    private static final HashMap<String, String> programmeMap = new HashMap<>();

    public static void main(String[] args) throws Exception {

        String jsonConfiguration = IOUtils
            .toString(
                SparkAtomicActionJob.class
                    .getResourceAsStream(
                        "/eu/dnetlib/dhp/actionmanager/project/action_set_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);

        parser.parseArgument(args);

        Boolean isSparkSessionManaged = Optional
            .ofNullable(parser.get("isSparkSessionManaged"))
            .map(Boolean::valueOf)
            .orElse(Boolean.TRUE);

        log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

        String projectPath = parser.get("projectPath");
        log.info("projectPath: {}", projectPath);

        final String outputPath = parser.get("outputPath");
        log.info("outputPath: {}", outputPath);

        final String programmePath = parser.get("programmePath");
        log.info("programmePath: {}", programmePath);

        SparkConf conf = new SparkConf();

        runWithSparkSession(
            conf,
            isSparkSessionManaged,
            spark -> {
                removeOutputDir(spark, outputPath);
                getAtomicActions(
                    spark,
                    projectPath,
                    programmePath,
                    outputPath);
            });
    }

    private static void removeOutputDir(SparkSession spark, String path) {
        HdfsSupport.remove(path, spark.sparkContext().hadoopConfiguration());
    }

    private static void getAtomicActions(SparkSession spark, String projectPath,
        String programmePath,
        String outputPath) {
        Dataset<CSVProject> project = readPath(spark, projectPath, CSVProject.class);
        Dataset<CSVProgramme> programme = readPath(spark, programmePath, CSVProgramme.class);

        // left join: keep every project, attaching the programme whose code matches
        // the project's programme identifier, then map each matched pair to an OAF Project
        project
            .joinWith(programme, project.col("programme").equalTo(programme.col("code")), "left")
            .map((MapFunction<Tuple2<CSVProject, CSVProgramme>, Project>) c -> {
                CSVProject csvProject = c._1();
                Optional<CSVProgramme> csvProgramme = Optional.ofNullable(c._2());
                if (csvProgramme.isPresent()) {
                    Project p = new Project();
                    p
                        .setId(
                            createOpenaireId(
                                ModelSupport.entityIdPrefix.get("project"),
                                "corda__h2020",
                                csvProject.getId()));
                    Programme pm = new Programme();
                    pm.setCode(csvProject.getProgramme());
                    pm.setDescription(csvProgramme.get().getShortTitle());
                    p.setProgramme(Arrays.asList(pm));
                    return p;
                }
                // projects without a matching programme are dropped by the filter below
                return null;
            }, Encoders.bean(Project.class))
            .filter((FilterFunction<Project>) Objects::nonNull)
            // .map(p -> new AtomicAction<>(Project.class, p), Encoders.bean(AtomicAction.class))
            .write()
            .option("compression", "gzip")
            .mode(SaveMode.Overwrite)
            .json(outputPath);
    }

    public static <R> Dataset<R> readPath(
        SparkSession spark, String inputPath, Class<R> clazz) {
        // each input line is a JSON-serialized instance of clazz
        return spark
            .read()
            .textFile(inputPath)
            .map((MapFunction<String, R>) value -> OBJECT_MAPPER.readValue(value, clazz), Encoders.bean(clazz));
    }

    public static String createOpenaireId(
        final String prefix,
        final String nsPrefix,
        final String id) {

        return String.format("%s|%s::%s", prefix, nsPrefix, DHPUtils.md5(id));
    }

}
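
// A minimal launch sketch for reference. The parameter names match those parsed in
// main() (isSparkSessionManaged, projectPath, programmePath, outputPath); the jar
// name, master, and input locations below are assumptions, not part of this module:
//
//   spark-submit \
//     --class eu.dnetlib.dhp.actionmanager.project.SparkAtomicActionJob \
//     --master yarn \
//     dhp-aggregation.jar \
//     --isSparkSessionManaged false \
//     --projectPath /tmp/h2020/projects \
//     --programmePath /tmp/h2020/programmes \
//     --outputPath /tmp/h2020/actionset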