forked from D-Net/dnet-hadoop
Read the graph dump as text files, encoded as newline-delimited JSON records, as indicated in the wiki.
parent 60aedb1110
commit 7b6f0c8756
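The input format named in the commit message, newline-delimited JSON, stores exactly one complete JSON record per line of the text file, which is what lets sc.textFile feed lines straight into Jackson in the diff below. Two hypothetical lines of such a dump (the field names are illustrative, not the real graph model):

{"id":"rec-1","name":"first record"}
{"id":"rec-2","name":"second record"}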
@@ -3,7 +3,7 @@ package eu.dnetlib.dhp.graph;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.io.Text;
+import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.sql.Encoders;
 import org.apache.spark.sql.SaveMode;
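The import swap tracks the format change: org.apache.hadoop.io.Text is only needed for sequence files, which are no longer read, while SparkConf is introduced for the session setup further down. A minimal contrast of the two read calls, assuming a JavaSparkContext named sc and a String path:

// Old: sequence files yield (key, value) pairs of Hadoop Text,
// so the JSON payload had to be extracted with s._2().toString()
JavaPairRDD<Text, Text> pairs = sc.sequenceFile(path, Text.class, Text.class);

// New: text files yield one String per line, ready for JSON parsing
JavaRDD<String> lines = sc.textFile(path);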
@@ -13,7 +13,9 @@ public class SparkGraphImporterJob {
 
     public static void main(String[] args) throws Exception {
 
-        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkGraphImporterJob.class.getResourceAsStream("/eu/dnetlib/dhp/graph/input_graph_parameters.json")));
+        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
+                IOUtils.toString(SparkGraphImporterJob.class.getResourceAsStream(
+                        "/eu/dnetlib/dhp/graph/input_graph_parameters.json")));
         parser.parseArgument(args);
 
         try(SparkSession spark = getSparkSession(parser)) {
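The only change in this hunk is cosmetic: the single long constructor call is wrapped over three lines. For reference, the usage pattern of ArgumentApplicationParser, inferred solely from the calls visible in this file (its wider API is not shown here):

// jsonDescriptor is the parameters resource loaded above via IOUtils.toString(...)
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonDescriptor);
parser.parseArgument(args);                  // bind the CLI arguments
final String master = parser.get("master"); // read a named parameter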
@@ -26,23 +28,24 @@ public class SparkGraphImporterJob {
             spark.sql(String.format("CREATE DATABASE IF NOT EXISTS %s", hiveDbName));
 
             // Read the input file and convert it into RDD of serializable object
-            GraphMappingUtils.types.forEach((name, clazz) -> {
-                spark.createDataset(sc.sequenceFile(inputPath + "/" + name, Text.class, Text.class)
-                    .map(s -> new ObjectMapper().readValue(s._2().toString(), clazz))
+            GraphMappingUtils.types.forEach((name, clazz) -> spark.createDataset(sc.textFile(inputPath + "/" + name)
+                    .map(s -> new ObjectMapper().readValue(s, clazz))
                     .rdd(), Encoders.bean(clazz))
                     .write()
                     .mode(SaveMode.Overwrite)
-                    .saveAsTable(hiveDbName + "." + name);
-            });
+                    .saveAsTable(hiveDbName + "." + name));
         }
     }
 
     private static SparkSession getSparkSession(ArgumentApplicationParser parser) {
+        SparkConf conf = new SparkConf();
+        conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
+
         return SparkSession
                 .builder()
                 .appName(SparkGraphImporterJob.class.getSimpleName())
                 .master(parser.get("master"))
-                .config("hive.metastore.uris", parser.get("hive_metastore_uris"))
+                .config(conf)
                 .enableHiveSupport()
                 .getOrCreate();
     }
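Putting the two changed pieces together, below is a self-contained sketch of the resulting import pattern: read newline-delimited JSON with textFile, parse each line with Jackson, wrap the RDD as a typed Dataset via Encoders.bean, and save it as a Hive table. The class name, bean, path, and database name are illustrative stand-ins, not the project's real ones; a local master and a commented-out metastore URI stand in for the parsed arguments.

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

public class NdjsonImportSketch {

    // Plain bean: Encoders.bean derives the table schema from getters/setters.
    public static class InputRecord {
        private String id;
        private String name;
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
    }

    public static void main(String[] args) {
        SparkConf conf = new SparkConf();
        // With a real cluster, point Spark at the metastore as the commit does:
        // conf.set("hive.metastore.uris", "thrift://metastore-host:9083");

        try (SparkSession spark = SparkSession
                .builder()
                .appName(NdjsonImportSketch.class.getSimpleName())
                .master("local[*]")          // stand-in for parser.get("master")
                .config(conf)
                .enableHiveSupport()         // requires a Spark build with Hive support
                .getOrCreate()) {

            JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
            spark.sql("CREATE DATABASE IF NOT EXISTS example_db");

            // textFile delivers one String per NDJSON line; Jackson turns it into a bean.
            spark.createDataset(
                    sc.textFile("/tmp/graph_dump/input_record")
                            .map(s -> new ObjectMapper().readValue(s, InputRecord.class))
                            .rdd(),
                    Encoders.bean(InputRecord.class))
                    .write()
                    .mode(SaveMode.Overwrite)
                    .saveAsTable("example_db.input_record");
        }
    }
}

One design note: the commit constructs a new ObjectMapper for every record inside map; a mapPartitions variant that reuses a single mapper per partition would cut allocation, at the cost of a slightly noisier lambda.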