
unit test for SparkGraphImporterJob

Claudio Atzori 2020-03-26 18:26:40 +01:00
parent abcd3f5bf5
commit 43cbcda7ef
7 changed files with 72 additions and 50 deletions

View File: pom.xml

@@ -19,6 +19,11 @@
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.11</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>eu.dnetlib.dhp</groupId>

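The new test-scoped spark-hive_2.11 dependency is what lets the unit test below call SparkSession.builder().enableHiveSupport(): without the Hive classes on the classpath that builder method fails fast. A minimal sketch of the failure mode (the local master and the try/catch are illustrative, not part of this commit):

import org.apache.spark.sql.SparkSession;

public class HiveSupportCheck {
    public static void main(String[] args) {
        try {
            // Succeeds only when spark-hive_2.11 is on the classpath (here: test scope).
            SparkSession spark = SparkSession.builder()
                    .appName("hive-support-check")
                    .master("local[*]")
                    .enableHiveSupport()
                    .getOrCreate();
            spark.stop();
        } catch (IllegalArgumentException e) {
            // Without spark-hive, enableHiveSupport() throws: "Unable to instantiate
            // SparkSession with Hive support because Hive classes are not found."
            System.err.println(e.getMessage());
        }
    }
}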
View File: SparkGraphImporterJob.java

@@ -18,15 +18,26 @@ public class SparkGraphImporterJob {
"/eu/dnetlib/dhp/graph/input_graph_parameters.json")));
parser.parseArgument(args);
new SparkGraphImporterJob().run(parser);
}
private void run(ArgumentApplicationParser parser) {
try(SparkSession spark = getSparkSession(parser)) {
final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
final String inputPath = parser.get("sourcePath");
final String hiveDbName = parser.get("hive_db_name");
runWith(spark, inputPath, hiveDbName);
}
}
// public for testing
public void runWith(SparkSession spark, String inputPath, String hiveDbName) {
spark.sql(String.format("DROP DATABASE IF EXISTS %s CASCADE", hiveDbName));
spark.sql(String.format("CREATE DATABASE IF NOT EXISTS %s", hiveDbName));
final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
// Read the input file and convert it into RDD of serializable object
GraphMappingUtils.types.forEach((name, clazz) -> spark.createDataset(sc.textFile(inputPath + "/" + name)
.map(s -> new ObjectMapper().readValue(s, clazz))
@@ -35,12 +46,10 @@ public class SparkGraphImporterJob {
                .mode(SaveMode.Overwrite)
                .saveAsTable(hiveDbName + "." + name));
    }
    }

    private static SparkSession getSparkSession(ArgumentApplicationParser parser) {
        SparkConf conf = new SparkConf();
        conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
        return SparkSession
                .builder()
                .appName(SparkGraphImporterJob.class.getSimpleName())

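The hunk above is truncated inside getSparkSession(...). A plausible completion, assuming the builder mirrors the one in the test below and that the master URL comes from the -mt/master argument (both assumptions; the tail is not shown in this diff):

    private static SparkSession getSparkSession(ArgumentApplicationParser parser) {
        SparkConf conf = new SparkConf();
        conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
        return SparkSession
                .builder()
                .appName(SparkGraphImporterJob.class.getSimpleName())
                .master(parser.get("master"))   // assumed: the value passed via -mt
                .config(conf)
                .enableHiveSupport()
                .getOrCreate();
    }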
View File: workflow.xml (Oozie)

@@ -59,10 +59,10 @@
                --conf spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener"
                --conf spark.sql.warehouse.dir="/user/hive/warehouse"
            </spark-opts>
            <arg>-mt</arg> <arg>yarn-cluster</arg>
            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
            <arg>--hive_db_name</arg><arg>${hive_db_name}</arg>
            <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
            <arg>-mt</arg> <arg>yarn</arg>
            <arg>-s</arg><arg>${sourcePath}</arg>
            <arg>-db</arg><arg>${hive_db_name}</arg>
            <arg>-h</arg><arg>${hive_metastore_uris}</arg>
        </spark>
        <ok to="PostProcessing"/>
        <error to="Kill"/>

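The workflow switches from long option names to the short forms (-mt, -s, -db, -h), which ArgumentApplicationParser resolves against the JSON spec loaded in the job's main(...). A hedged sketch of an equivalent invocation (the resource path and lookup keys come from the diffs above; the argument values are made up):

    // Hypothetical invocation exercising the new short flags.
    public static void main(String[] args) throws Exception {
        ArgumentApplicationParser parser = new ArgumentApplicationParser(
                IOUtils.toString(SparkGraphImporterJob.class.getResourceAsStream(
                        "/eu/dnetlib/dhp/graph/input_graph_parameters.json")));
        parser.parseArgument(new String[] {
                "-mt", "yarn",                        // master
                "-s", "/tmp/graph_raw",               // sourcePath
                "-db", "openaire",                    // hive_db_name
                "-h", "thrift://metastore-host:9083"  // hive_metastore_uris
        });
        System.out.println(parser.get("sourcePath")); // lookups still use the long names
    }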
View File: SparkGraphImporterJobTest.java

@@ -1,52 +1,54 @@
package eu.dnetlib.dhp.graph;

import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Encoders;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import scala.Tuple2;

import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;

public class SparkGraphImporterJobTest {

    private static final long MAX = 1000L;
    private final static String TEST_DB_NAME = "test";

    @Disabled("must be parametrized to run locally")
    public void testImport(@TempDir Path outPath) throws Exception {
        SparkGraphImporterJob.main(new String[] {
                "-mt", "local[*]",
                "-s", getClass().getResource("/eu/dnetlib/dhp/graph/sample").getPath(),
                "-h", "",
                "-db", "test"
        });

    @Test
    public void testImport(@TempDir Path outPath) {
        try (SparkSession spark = testSparkSession(outPath.toString())) {
        countEntities(outPath.toString()).forEach(t -> {
            System.out.println(t);
            Assertions.assertEquals(MAX, t._2().longValue(), String.format("mapped %s must be %s", t._1(), MAX));

            new SparkGraphImporterJob().runWith(
                    spark,
                    getClass().getResource("/eu/dnetlib/dhp/graph/sample").getPath(),
                    TEST_DB_NAME);

            GraphMappingUtils.types.forEach((name, clazz) -> {
                final long count = spark.read().table(TEST_DB_NAME + "." + name).count();
                if (name.equals("relation")) {
                    Assertions.assertEquals(100, count, String.format("%s should be 100", name));
                } else {
                    Assertions.assertEquals(10, count, String.format("%s should be 10", name));
                }
            });
        }
    }

    public static List<Tuple2<String, Long>> countEntities(final String inputPath) {
    private SparkSession testSparkSession(final String inputPath) {
        SparkConf conf = new SparkConf();
        final SparkSession spark = SparkSession
        conf.set("spark.driver.host", "localhost");
        conf.set("hive.metastore.local", "true");
        conf.set("hive.metastore.warehouse.dir", inputPath + "/warehouse");
        conf.set("spark.sql.warehouse.dir", inputPath);
        conf.set("javax.jdo.option.ConnectionURL", String.format("jdbc:derby:;databaseName=%s/junit_metastore_db;create=true", inputPath));
        conf.set("spark.ui.enabled", "false");
        return SparkSession
                .builder()
                .appName(SparkGraphImporterJobTest.class.getSimpleName())
                .master("local[*]")
                .config(conf)
                .enableHiveSupport()
                .getOrCreate();
        //final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
    }

        return GraphMappingUtils.types.entrySet()
                .stream()
                .map(entry -> {
                    final Long count = spark.read().load(inputPath + "/" + entry.getKey()).as(Encoders.bean(entry.getValue())).count();
                    return new Tuple2<String, Long>(entry.getKey(), count);
                })
                .collect(Collectors.toList());
    }
}
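GraphMappingUtils.types, which drives both the import loop and the per-table assertions above, is not part of this diff. A hypothetical sketch of its shape, inferred only from the forEach((name, clazz) -> ...) usage; the entity names and model classes are assumptions, except relation, which the test singles out:

import java.util.HashMap;
import java.util.Map;

public class GraphMappingUtils {
    // Hypothetical: entity name -> model class map used by job and test.
    public static final Map<String, Class> types = new HashMap<>();
    static {
        types.put("publication", Publication.class); // assumed entity set
        types.put("dataset", Dataset.class);
        types.put("relation", Relation.class);       // the test expects 100 of these
        // ... remaining graph entity types
    }
}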

View File: pom.xml

@@ -143,6 +143,12 @@
            <version>${dhp.spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.11</artifactId>
            <version>${dhp.spark.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>