forked from D-Net/dnet-hadoop
Merge branch 'master' of code-repo.d4science.org:D-Net/dnet-hadoop
commit 7a2a466161
@@ -15,13 +15,8 @@ public class SparkGraphImporterJob {
         final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkGraphImporterJob.class.getResourceAsStream("/eu/dnetlib/dhp/graph/input_graph_parameters.json")));
         parser.parseArgument(args);

-        final SparkSession spark = SparkSession
-                .builder()
-                .appName(SparkGraphImporterJob.class.getSimpleName())
-                .master(parser.get("master"))
-                .config("hive.metastore.uris", parser.get("hive_metastore_uris"))
-                .enableHiveSupport()
-                .getOrCreate();
+        try(SparkSession spark = getSparkSession(parser)) {

         final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
         final String inputPath = parser.get("sourcePath");
@@ -39,6 +34,16 @@ public class SparkGraphImporterJob {
                         .mode(SaveMode.Overwrite)
                         .saveAsTable(hiveDbName + "." + name);
                 });
+        }
     }

+    private static SparkSession getSparkSession(ArgumentApplicationParser parser) {
+        return SparkSession
+                .builder()
+                .appName(SparkGraphImporterJob.class.getSimpleName())
+                .master(parser.get("master"))
+                .config("hive.metastore.uris", parser.get("hive_metastore_uris"))
+                .enableHiveSupport()
+                .getOrCreate();
+    }
 }
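Taken together, the two hunks above replace the inline builder chain with a try-with-resources block around a new getSparkSession(parser) helper. A minimal sketch of the resulting method shape, assuming this is the job's main and eliding the imports and the per-entity mapping body not shown in this diff:

    public static void main(String[] args) throws Exception {
        // Parameter definitions are loaded from the bundled JSON descriptor.
        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
                IOUtils.toString(SparkGraphImporterJob.class
                        .getResourceAsStream("/eu/dnetlib/dhp/graph/input_graph_parameters.json")));
        parser.parseArgument(args);

        // SparkSession implements java.io.Closeable (close() delegates to stop()),
        // so try-with-resources guarantees the session is shut down even if the
        // import fails halfway through.
        try (SparkSession spark = getSparkSession(parser)) {
            final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
            final String inputPath = parser.get("sourcePath");
            // ... read each entity from inputPath and saveAsTable(...) as in the hunk above ...
        }
    }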
@@ -0,0 +1,8 @@
+CREATE view result as
+    select id, dateofcollection, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, embargoenddate, resourcetype, context, instance from ${hive_db_name}.publication p
+    union all
+    select id, dateofcollection, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, embargoenddate, resourcetype, context, instance from ${hive_db_name}.dataset d
+    union all
+    select id, dateofcollection, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, embargoenddate, resourcetype, context, instance from ${hive_db_name}.software s
+    union all
+    select id, dateofcollection, bestaccessright, datainfo, collectedfrom, pid, author, resulttype, language, country, subject, description, dateofacceptance, embargoenddate, resourcetype, context, instance from ${hive_db_name}.otherresearchproduct o;
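This new script (apparently the postprocessing.sql referenced by the workflow below) exposes the four result subtypes behind a single view over their shared columns. A hypothetical usage sketch in Java, assuming a Hive-enabled SparkSession and using "openaire" as a stand-in for ${hive_db_name}:

        // Query the unified view instead of the four subtype tables; any column
        // selected by the view definition is available here.
        spark.sql("SELECT id, resulttype, dateofcollection FROM openaire.result")
                .show(10);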
@@ -37,12 +37,29 @@
             <name>MapGraphIntoDataFrame</name>
             <class>eu.dnetlib.dhp.graph.SparkGraphImporterJob</class>
             <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
-            <spark-opts>--executor-memory ${sparkExecutorMemory} --executor-cores ${sparkExecutorCores} --driver-memory=${sparkDriverMemory} --conf spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener" --conf spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener" --conf spark.sql.warehouse.dir="/user/hive/warehouse"</spark-opts>
+            <spark-opts>
+                --executor-memory ${sparkExecutorMemory}
+                --executor-cores ${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener"
+                --conf spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener"
+                --conf spark.sql.warehouse.dir="/user/hive/warehouse"</spark-opts>
             <arg>-mt</arg> <arg>yarn-cluster</arg>
             <arg>--sourcePath</arg><arg>${sourcePath}</arg>
             <arg>--hive_db_name</arg><arg>${hive_db_name}</arg>
             <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
         </spark>
+        <ok to="PostProcessing"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="PostProcessing">
+        <hive xmlns="uri:oozie:hive-action:0.2">
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <script>/eu/dnetlib/dhp/graph/hive/postprocessing.sql</script>
+            <param>hive_db_name=${hive_db_name}</param>
+        </hive>
         <ok to="End"/>
         <error to="Kill"/>
     </action>
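In the graph-mapper workflow the single-line <spark-opts> is reflowed for readability (the options themselves are unchanged), and the Spark action now transitions to a new PostProcessing hive action that runs the view-creation script with ${hive_db_name} substituted. For orientation, a hypothetical sketch of how the <arg> list reaches the job, assuming the JSON descriptor maps the short flag -mt to the long name master (the descriptor itself is not shown in this diff), with stand-in values throughout:

        // Oozie passes the <arg> elements as the job's argv.
        String[] argv = {
                "-mt", "yarn-cluster",
                "--sourcePath", "/tmp/graph",                    // hypothetical path
                "--hive_db_name", "openaire",                    // hypothetical name
                "--hive_metastore_uris", "thrift://host:9083"};  // hypothetical URI
        parser.parseArgument(argv);
        String master = parser.get("master"); // "yarn-cluster", consumed by getSparkSession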
@@ -50,9 +50,9 @@
             <class>eu.dnetlib.dhp.graph.SparkXmlRecordBuilderJob</class>
             <jar>dhp-graph-provision-${projectVersion}.jar</jar>
             <spark-opts>
-                --executor-memory ${sparkExecutorMemory}
-                --executor-cores ${sparkExecutorCores}
-                --driver-memory=${sparkDriverMemory}
+                --executor-cores ${sparkExecutorCoresForJoining}
+                --executor-memory ${sparkExecutorMemoryForJoining}
+                --driver-memory=${sparkDriverMemoryForJoining}
                 --conf spark.dynamicAllocation.maxExecutors=${sparkExecutorCoresForJoining}
                 --conf spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener"
                 --conf spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener"
@@ -79,8 +79,9 @@
             <class>eu.dnetlib.dhp.graph.SparkXmlIndexingJob</class>
             <jar>dhp-graph-provision-${projectVersion}.jar</jar>
             <spark-opts>
-                --executor-memory ${sparkExecutorMemory}
-                --driver-memory=${sparkDriverMemory}
+                --executor-cores ${sparkExecutorCoresForIndexing}
+                --executor-memory ${sparkExecutorMemoryForIndexing}
+                --driver-memory=${sparkDriverMemoryForIndexing}
                 --conf spark.dynamicAllocation.maxExecutors=${sparkExecutorCoresForIndexing}
                 --conf spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener"
                 --conf spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener"
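Both graph-provision actions drop the shared ${sparkExecutorMemory}/${sparkDriverMemory} settings in favour of per-phase parameters, so the joining and indexing stages can be tuned independently. Hypothetical job.properties entries for the new parameter names (the names come from the diff; the values are illustrative only):

    sparkExecutorCoresForJoining=4
    sparkExecutorMemoryForJoining=8G
    sparkDriverMemoryForJoining=4G
    sparkExecutorCoresForIndexing=2
    sparkExecutorMemoryForIndexing=4G
    sparkDriverMemoryForIndexing=2G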