generate action sets and save them in the HDFS path for the action sets

Miriam Baglioni 2020-05-21 16:30:39 +02:00
parent 055eec5a77
commit 4589c428b1
2 changed files with 44 additions and 28 deletions
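
In outline, the change stops writing the joined project/programme records as a gzipped JSON dataset and instead wraps each Project in an AtomicAction, serializes it to JSON keyed by the payload class name, and saves the resulting pairs under the HDFS output path. Below is a minimal, self-contained sketch of that write pattern, not the project's code: the class name ActionSetWriteSketch, the stand-in String payloads, and the use of SequenceFileOutputFormat are assumptions (the diff itself still passes null for the output format class); only the mapToPair/saveAsHadoopFile shape is taken from the commit.

import java.util.Arrays;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;

import com.fasterxml.jackson.databind.ObjectMapper;

import scala.Tuple2;

public class ActionSetWriteSketch {

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    public static void main(String[] args) throws Exception {
        String outputPath = args[0]; // hypothetical HDFS (or local) path for the action set

        SparkSession spark = SparkSession
            .builder()
            .appName("action-set-sketch")
            .master("local[*]") // local master so the sketch runs standalone; drop when submitting to a cluster
            .getOrCreate();
        JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());

        // Stand-in payloads; the real job serializes AtomicAction<Project> beans built from the CSV join.
        sc.parallelize(Arrays.asList("project-1", "project-2"))
            // key = payload class name, value = JSON-serialized record, mirroring the diff's mapToPair
            .mapToPair(payload -> new Tuple2<Text, Text>(
                new Text(String.class.getCanonicalName()),
                new Text(OBJECT_MAPPER.writeValueAsString(payload))))
            // the commit passes null here; an explicit output format such as SequenceFileOutputFormat
            // is assumed so that the write actually succeeds
            .saveAsHadoopFile(outputPath, Text.class, Text.class, SequenceFileOutputFormat.class);

        spark.stop();
    }
}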


@@ -3,11 +3,16 @@ package eu.dnetlib.dhp.actionmanager.project;
 import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Optional;
+import java.util.function.Consumer;
+import eu.dnetlib.dhp.schema.action.AtomicAction;
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.sql.Dataset;
@@ -28,6 +33,15 @@ import eu.dnetlib.dhp.schema.oaf.Programme;
 import eu.dnetlib.dhp.schema.oaf.Project;
 import eu.dnetlib.dhp.utils.DHPUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.spark.rdd.SequenceFileRDDFunctions;
+import org.apache.hadoop.io.Text;
+import scala.Function1;
+import scala.Tuple2;
+import scala.runtime.BoxedUnit;
 public class SparkAtomicActionJob {
     private static final Logger log = LoggerFactory.getLogger(SparkAtomicActionJob.class);
     private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
@@ -61,6 +75,8 @@ public class SparkAtomicActionJob {
         final String programmePath = parser.get("programmePath");
         log.info("programmePath {}: ", programmePath);
+        final String nameNode = parser.get("hdfsNameNode");
         SparkConf conf = new SparkConf();
         runWithSparkSession(
@@ -72,7 +88,8 @@ public class SparkAtomicActionJob {
                 spark,
                 projectPath,
                 programmePath,
-                outputPath);
+                outputPath,
+                nameNode);
             });
     }
@@ -82,38 +99,38 @@
     private static void getAtomicActions(SparkSession spark, String projectPatH,
         String programmePath,
-        String outputPath) {
+        String outputPath,
+        String nameNode) throws Exception{
         Dataset<CSVProject> project = readPath(spark, projectPatH, CSVProject.class);
         Dataset<CSVProgramme> programme = readPath(spark, programmePath, CSVProgramme.class);
         project
             .joinWith(programme, project.col("programme").equalTo(programme.col("code")), "left")
             .map(c -> {
                 CSVProject csvProject = c._1();
                 Optional<CSVProgramme> csvProgramme = Optional.ofNullable(c._2());
                 if (csvProgramme.isPresent()) {
                     Project p = new Project();
                     p
                         .setId(
                             createOpenaireId(
                                 ModelSupport.entityIdPrefix.get("project"),
                                 "corda__h2020", csvProject.getId()));
                     Programme pm = new Programme();
                     pm.setCode(csvProject.getProgramme());
                     pm.setDescription(csvProgramme.get().getShortTitle());
                     p.setProgramme(Arrays.asList(pm));
-                    return p;
+                    return new AtomicAction<>(Project.class, p);
                 }
-                return null;
-            }, Encoders.bean(Project.class))
-            .filter(p -> !(p == null))
-            // .map(p -> new AtomicAction<>(Project.class, p), Encoders.bean(AtomicAction.class))
-            .write()
-            .option("compression", "gzip")
-            .mode(SaveMode.Overwrite)
-            .json(outputPath);
+                return null;
+            }, Encoders.bean(AtomicAction.class))
+            .filter(aa -> !(aa == null))
+            .toJavaRDD()
+            .mapToPair(aa -> new Tuple2<>(aa.getClazz().getCanonicalName(), OBJECT_MAPPER.writeValueAsString(aa)))
+            .saveAsHadoopFile(outputPath, Text.class, Text.class, null);
     }
     public static <R> Dataset<R> readPath(
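
The hunk is cut off at the readPath helper, whose body lies outside the diff context. Judging from its call sites above (readPath(spark, projectPatH, CSVProject.class) returning a typed Dataset), a plausible sketch is the following; it assumes the input path holds one JSON document per line and reuses MapFunction, Encoders and the class-level OBJECT_MAPPER that already appear in the diff, so it is a guess at the shape rather than the code the commit actually contains.

    // plausible reconstruction: read a text file of JSON lines and map each line onto the requested bean class
    public static <R> Dataset<R> readPath(SparkSession spark, String inputPath, Class<R> clazz) {
        return spark
            .read()
            .textFile(inputPath)
            .map((MapFunction<String, R>) value -> OBJECT_MAPPER.readValue(value, clazz), Encoders.bean(clazz));
    }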


@@ -24,7 +24,6 @@
     <fs>
         <delete path='${outputPath}'/>
         <mkdir path='${outputPath}'/>
-        <delete path="/tmp/h2020programme"/>
     </fs>
     <ok to="get_project_file"/>
     <error to="Kill"/>