Use SparkSQL in place of Hive for executing step16-createIndicatorsTables.sql of stats update wf
parent 9e8fc6aa88
commit 078df0b4d1
dhp-stats-update/pom.xml:

@@ -8,6 +8,11 @@
 	<modelVersion>4.0.0</modelVersion>
 	<artifactId>dhp-stats-update</artifactId>
 	<dependencies>
+		<dependency>
+			<groupId>eu.dnetlib.dhp</groupId>
+			<artifactId>dhp-common</artifactId>
+			<version>${project.version}</version>
+		</dependency>
 		<dependency>
 			<groupId>org.apache.spark</groupId>
 			<artifactId>spark-core_${scala.binary.version}</artifactId>
[diff of one additional file suppressed by the viewer: too large to display]
eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml:

@@ -64,6 +64,26 @@
 		<name>hadoop_user_name</name>
 		<description>user name of the wf owner</description>
 	</property>
+
+	<property>
+		<name>sparkSqlWarehouseDir</name>
+	</property>
+	<!-- General oozie workflow properties -->
+	<property>
+		<name>sparkClusterOpts</name>
+		<value>--conf spark.network.timeout=600 --conf spark.extraListeners= --conf spark.sql.queryExecutionListeners= --conf spark.yarn.historyServer.address=http://iis-cdh5-test-m3.ocean.icm.edu.pl:18088 --conf spark.eventLog.dir=hdfs://nameservice1/user/spark/applicationHistory</value>
+		<description>spark cluster-wide options</description>
+	</property>
+	<property>
+		<name>sparkResourceOpts</name>
+		<value>--executor-memory=6G --conf spark.executor.memoryOverhead=4G --executor-cores=6 --driver-memory=8G --driver-cores=4</value>
+		<description>spark resource options</description>
+	</property>
+	<property>
+		<name>sparkApplicationOpts</name>
+		<value>--conf spark.sql.shuffle.partitions=3840</value>
+		<description>spark application options</description>
+	</property>
 </parameters>
 
 <global>
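For context (not part of the commit): the three option groups defined above (sparkClusterOpts, sparkResourceOpts, sparkApplicationOpts) are concatenated into the <spark-opts> of the Step16 action further down, so they arrive as ordinary --conf / resource flags of the submitted job. A minimal, illustrative Java check that a driver launched this way sees those values; the class name is hypothetical:

// Illustrative only: a driver submitted with the assembled <spark-opts>
// can read the resulting settings back from its runtime configuration.
import org.apache.spark.sql.SparkSession;

public class SparkOptsCheck {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().getOrCreate();
        // Set via sparkApplicationOpts (--conf spark.sql.shuffle.partitions=3840).
        System.out.println("spark.sql.shuffle.partitions = "
                + spark.conf().get("spark.sql.shuffle.partitions", "<default>"));
        // Set via sparkClusterOpts (--conf spark.network.timeout=600).
        System.out.println("spark.network.timeout = "
                + spark.conf().get("spark.network.timeout", "<default>"));
        spark.stop();
    }
}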
@@ -82,6 +102,10 @@
 		<name>hive.mapjoin.followby.gby.localtask.max.memory.usage</name>
 		<value>0.80</value>
 	</property>
+	<property>
+		<name>oozie.action.sharelib.for.spark</name>
+		<value>${oozieActionShareLibForSpark2}</value>
+	</property>
 	<property>
 		<name>mapred.job.queue.name</name>
 		<value>analytics</value>
@@ -322,12 +346,23 @@
 	</action>
 
 	<action name="Step16-createIndicatorsTables">
-		<hive2 xmlns="uri:oozie:hive2-action:0.1">
-			<jdbc-url>${hive_jdbc_url}</jdbc-url>
-			<script>scripts/step16-createIndicatorsTables.sql</script>
-			<param>stats_db_name=${stats_db_name}</param>
-			<param>external_stats_db_name=${external_stats_db_name}</param>
-		</hive2>
+		<spark xmlns="uri:oozie:spark-action:0.2">
+			<master>yarn</master>
+			<mode>cluster</mode>
+			<name>Step16-createIndicatorsTables</name>
+			<class>eu.dnetlib.dhp.oozie.RunSQLSparkJob</class>
+			<jar>dhp-stats-update-${projectVersion}.jar</jar>
+			<spark-opts>
+				--conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}
+				${sparkClusterOpts}
+				${sparkResourceOpts}
+				${sparkApplicationOpts}
+			</spark-opts>
+			<arg>--hiveMetastoreUris</arg><arg>${hive_metastore_uris}</arg>
+			<arg>--sql</arg><arg>eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql</arg>
+			<arg>--stats_db_name</arg><arg>${stats_db_name}</arg>
+			<arg>--external_stats_db_name</arg><arg>${external_stats_db_name}</arg>
+		</spark>
 		<ok to="Step16_1-definitions"/>
 		<error to="Kill"/>
 	</action>
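The diff does not include eu.dnetlib.dhp.oozie.RunSQLSparkJob itself; it presumably comes from the dhp-common dependency added in the pom above. As a rough, hypothetical sketch of the pattern the action wires up (load a parameterized SQL script from the classpath, substitute the ${...} params passed as <arg>s, execute the statements through a Hive-enabled SparkSession) — class and helper names here are illustrative, not the real implementation:

// Hypothetical sketch only: the real eu.dnetlib.dhp.oozie.RunSQLSparkJob is
// not part of this diff and will differ in argument parsing and error handling.
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.spark.sql.SparkSession;

public class RunSqlSketch {

    public static void main(String[] args) throws IOException {
        // Wired by the action above:
        // --hiveMetastoreUris <uris> --sql <classpath resource> --stats_db_name <db> ...
        String metastoreUris = valueOf(args, "--hiveMetastoreUris");
        String sqlResource = valueOf(args, "--sql");

        SparkSession spark = SparkSession
                .builder()
                .config("hive.metastore.uris", metastoreUris)
                .enableHiveSupport() // so CREATE TABLE / CREATE VIEW hit the Hive metastore
                .getOrCreate();

        // The script is packaged into the jar, so it is read from the classpath.
        String sql;
        try (InputStream in = RunSqlSketch.class.getClassLoader().getResourceAsStream(sqlResource)) {
            if (in == null) {
                throw new IOException("resource not found: " + sqlResource);
            }
            sql = new String(in.readAllBytes(), StandardCharsets.UTF_8);
        }

        // Substitute Hive-style ${param} placeholders from the flag/value pairs;
        // flags that match no placeholder (e.g. --sql itself) are simply inert.
        for (int i = 0; i + 1 < args.length; i += 2) {
            sql = sql.replace("${" + args[i].substring(2) + "}", args[i + 1]);
        }

        // Naive statement split on ';' -- good enough for a sketch, not arbitrary SQL.
        for (String stmt : sql.split(";")) {
            if (!stmt.trim().isEmpty()) {
                spark.sql(stmt);
            }
        }
        spark.stop();
    }

    private static String valueOf(String[] args, String flag) {
        for (int i = 0; i + 1 < args.length; i++) {
            if (args[i].equals(flag)) {
                return args[i + 1];
            }
        }
        throw new IllegalArgumentException("missing argument: " + flag);
    }
}

Functionally this mirrors the replaced hive2 action (same script, same stats_db_name / external_stats_db_name params), except the statements now execute on Spark rather than through the HiveServer2 JDBC endpoint, with the metastore reached via hive.metastore.uris.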
@@ -387,18 +422,18 @@
 		<error to="Kill"/>
 	</action>
 
-	<!-- <action name="step20-createMonitorDB-post">-->
-	<!-- <shell xmlns="uri:oozie:shell-action:0.1">-->
-	<!-- <job-tracker>${jobTracker}</job-tracker>-->
-	<!-- <name-node>${nameNode}</name-node>-->
-	<!-- <exec>monitor-post.sh</exec>-->
-	<!-- <argument>${monitor_db_name}</argument>-->
-	<!-- <argument>${monitor_db_shadow_name}</argument>-->
-	<!-- <file>monitor-post.sh</file>-->
-	<!-- </shell>-->
-	<!-- <ok to="step21-createObservatoryDB-pre"/>-->
-	<!-- <error to="Kill"/>-->
-	<!-- </action>-->
+	<!-- <action name="step20-createMonitorDB-post">-->
+	<!-- <shell xmlns="uri:oozie:shell-action:0.1">-->
+	<!-- <job-tracker>${jobTracker}</job-tracker>-->
+	<!-- <name-node>${nameNode}</name-node>-->
+	<!-- <exec>monitor-post.sh</exec>-->
+	<!-- <argument>${monitor_db_name}</argument>-->
+	<!-- <argument>${monitor_db_shadow_name}</argument>-->
+	<!-- <file>monitor-post.sh</file>-->
+	<!-- </shell>-->
+	<!-- <ok to="step21-createObservatoryDB-pre"/>-->
+	<!-- <error to="Kill"/>-->
+	<!-- </action>-->
 
 	<action name="step21-createObservatoryDB-pre">
 		<shell xmlns="uri:oozie:shell-action:0.1">
@@ -443,8 +478,8 @@
 		<job-tracker>${jobTracker}</job-tracker>
 		<name-node>${nameNode}</name-node>
 		<exec>copyDataToImpalaCluster.sh</exec>
-		<!-- <env-var>HADOOP_USER_NAME=${wf:user()}</env-var>-->
-		<!-- <argument>${external_stats_db_name}</argument>-->
+		<!-- <env-var>HADOOP_USER_NAME=${wf:user()}</env-var>-->
+		<!-- <argument>${external_stats_db_name}</argument>-->
 		<argument>${stats_db_name}</argument>
 		<argument>${monitor_db_name}</argument>
 		<argument>${observatory_db_name}</argument>