dnet-hadoop/dhp-workflows/dhp-graph-mapper/src/main/java/eu/dnetlib/dhp/graph/SparkGraphImporterJob.java

package eu.dnetlib.dhp.graph;

import eu.dnetlib.dhp.schema.oaf.Organization;
import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
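
/**
 * Spark job that reads an entity dump from a Hadoop sequence file, deserializes the
 * "body" payloads with ProtoConverter and materializes the Organization entities as a
 * typed Dataset. Work in progress: master, input path and argument parsing are still hardcoded.
 */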
public class SparkGraphImporterJob {

    public static void main(String[] args) throws Exception {
        // TODO add argument parser
        // final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkGraphImporterJob.class.getResourceAsStream("/eu/dnetlib/dhp/graph/graph_importer_parameters.json")));
        // parser.parseArgument(args);

        final SparkSession spark = SparkSession
                .builder()
                .appName("ImportGraph")
                // TODO replace with: master(parser.get("master"))
                .master("local[16]")
                .getOrCreate();
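
        // Wrap the underlying SparkContext to use the Java RDD API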
        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
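
        // NOTE: hardcoded local test path; should come from the argument parser once it is wired in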
        final String path = "file:///Users/miconis/Downloads/part-m-02236";
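
        // Load the input sequence file and convert each (Text, Text) record into a (String, String) tuple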
        final JavaRDD<Tuple2<String, String>> inputRDD = sc.sequenceFile(path, Text.class, Text.class)
                .map(item -> new Tuple2<>(item._1.toString(), item._2.toString()));
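
        // Debug: print the payload of the first record whose key contains "20|" (presumably the organization id prefix)
        // and whose third '@'-separated token is "body"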
        final String body = inputRDD
                .filter(s -> s._1().contains("20|") && s._1().split("@")[2].equalsIgnoreCase("body"))
                .map(Tuple2::_2)
                .first();
        System.out.println(body);
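
        // Keep only the "body" records, deserialize them with ProtoConverter and retain the Organization entities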
        final JavaRDD<Organization> organization = inputRDD
                .filter(s -> s._1().split("@")[2].equalsIgnoreCase("body"))
                .map(Tuple2::_2)
                .map(ProtoConverter::convert)
                .filter(s -> s instanceof Organization)
                .map(s -> (Organization) s);
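
        // Expose the RDD as a typed Dataset, using a bean encoder for the Organization schema class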
        final Encoder<Organization> encoder = Encoders.bean(Organization.class);
        final Dataset<Organization> mdstore = spark.createDataset(organization.rdd(), encoder);

        System.out.println(mdstore.count());

        // .filter(s -> s instanceof Publication)
        // .count();
    }
}