package eu.dnetlib.dhp.graph;

import eu.dnetlib.dhp.schema.oaf.Organization;
import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
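
/**
 * Demo job that reads a D-Net SequenceFile dump, decodes the record bodies and
 * loads them as typed graph entities. Currently hardcoded for local testing.
 */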
public class SparkGraphImporterJob {

    public static void main(String[] args) throws Exception {

        //TODO add argument parser
        // final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkGraphImporterJob.class.getResourceAsStream("/eu/dnetlib/dhp/graph/graph_importer_parameters.json")));
        // parser.parseArgument(args);
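
        // Spark session configured for local execution; the hardcoded master is a
        // placeholder until the argument parser above is wired in.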
        final SparkSession spark = SparkSession
                .builder()
                .appName("ImportGraph")
                //TODO replace with: master(parser.get("master"))
                .master("local[16]")
                .getOrCreate();

        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
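
        // Local test input: a single SequenceFile partition exported from the
        // aggregator (developer-specific path, not meant for production).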
        final String path = "file:///Users/miconis/Downloads/part-m-02236";
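
        // Read the Hadoop SequenceFile of <Text, Text> pairs and convert each
        // record to a plain (key, value) tuple of Strings.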
        final JavaRDD<Tuple2<String, String>> inputRDD = sc.sequenceFile(path, Text.class, Text.class)
                .map(item -> new Tuple2<>(item._1.toString(), item._2.toString()));
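
        // Debug check: print the payload of the first organization record. This
        // assumes row keys of the form "<entityType>|<id>@<...>@<columnName>", where
        // the "20|" prefix marks organizations and the third segment is the column.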
        final String body = inputRDD
                .filter(s -> s._1().contains("20|") && s._1().split("@")[2].equalsIgnoreCase("body"))
                .map(Tuple2::_2)
                .first();
        System.out.println(body);
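
        // Keep only the "body" column, decode each record body with
        // ProtoConverter and retain the Organization entities.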
        final JavaRDD<Organization> organization = inputRDD
                .filter(s -> s._1().split("@")[2].equalsIgnoreCase("body"))
                .map(Tuple2::_2)
                .map(ProtoConverter::convert)
                .filter(s -> s instanceof Organization)
                .map(s -> (Organization) s);
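
        // Wrap the RDD in a typed Dataset using a JavaBean encoder, so the graph
        // entities can be handled through the Spark SQL API.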
        final Encoder<Organization> encoder = Encoders.bean(Organization.class);
        final Dataset<Organization> mdstore = spark.createDataset(organization.rdd(), encoder);
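
        // Debug check: materializes the dataset and prints the number of
        // organizations found in the input.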
        System.out.println(mdstore.count());
    }
}