Historical Snapshots Workflow

Create the historical snapshots DB with the following parameters:

hist_db_name=openaire_beta_historical_snapshots_xxx
hist_db_name_prev=openaire_beta_historical_snapshots_xxx (previous run of the workflow)
stats_db_name=openaire_beta_stats_xxx
stats_irish_db_name=openaire_beta_stats_monitor_ie_xxx
monitor_db_name=openaire_beta_stats_monitor_xxx
monitor_db_prod_name=openaire_beta_stats_monitor
monitor_irish_db_name=openaire_beta_stats_monitor_ie_xxx
monitor_irish_db_prod_name=openaire_beta_stats_monitor_ie
hist_db_prod_name=openaire_beta_historical_snapshots
hist_db_shadow_name=openaire_beta_historical_snapshots_shadow
hist_date=122023
hive_timeout=150000
hadoop_user_name=xxx
resumeFrom=CreateDB
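
A minimal submission sketch, assuming the parameters above are saved in a job.properties file; the Oozie server URL here is a placeholder:

    oozie job -oozie http://oozie-host:11000/oozie -config job.properties -run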
Commit 75bfde043c (parent ffdd03d2f4)
Author: dimitrispie
Date: 2024-01-04 15:11:04 +02:00
9 changed files with 627 additions and 0 deletions

@@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>dhp-workflows</artifactId>
<groupId>eu.dnetlib.dhp</groupId>
<version>1.2.4-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>dhp-stats-hist-snaps</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>pl.project13.maven</groupId>
<artifactId>git-commit-id-plugin</artifactId>
<version>2.1.11</version>
<configuration>
<failOnNoGitDirectory>false</failOnNoGitDirectory>
</configuration>
</plugin>
</plugins>
</build>
</project>

@@ -0,0 +1,30 @@
<configuration>
<property>
<name>jobTracker</name>
<value>${jobTracker}</value>
</property>
<property>
<name>nameNode</name>
<value>${nameNode}</value>
</property>
<property>
<name>oozie.use.system.libpath</name>
<value>true</value>
</property>
<property>
<name>oozie.action.sharelib.for.spark</name>
<value>spark2</value>
</property>
<property>
<name>hive_metastore_uris</name>
<value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
</property>
<property>
<name>hive_jdbc_url</name>
<value>jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000/;UseNativeQuery=1;?spark.executor.memory=22166291558;spark.yarn.executor.memoryOverhead=3225;spark.driver.memory=15596411699;spark.yarn.driver.memoryOverhead=1228</value>
</property>
<property>
<name>oozie.wf.workflow.notification.url</name>
<value>{serviceUrl}/v1/oozieNotification/jobUpdate?jobId=$jobId%26status=$status</value>
</property>
</configuration>

@@ -0,0 +1,71 @@
export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs
export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami)
if ! [ -L $link_folder ]
then
rm -Rf "$link_folder"
ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder}
fi
export HADOOP_USER_NAME=$2
function copydb() {
db=$1
FILE="hive_wf_tmp_$RANDOM"
hdfs dfs -mkdir hdfs://impala-cluster-mn1.openaire.eu:8020/tmp/$FILE/
# change ownership to impala
# hdfs dfs -conf /etc/impala_cluster/hdfs-site.xml -chmod -R 777 /tmp/$FILE/${db}.db
hdfs dfs -conf /etc/impala_cluster/hdfs-site.xml -chmod -R 777 /tmp/$FILE/
# copy the databases from ocean to impala
echo "copying $db"
hadoop distcp -Dmapreduce.map.memory.mb=6144 -pb hdfs://nameservice1/user/hive/warehouse/${db}.db hdfs://impala-cluster-mn1.openaire.eu:8020/tmp/$FILE/
hdfs dfs -conf /etc/impala_cluster/hdfs-site.xml -chmod -R 777 /tmp/$FILE/${db}.db
# drop any existing tables from the target db
for i in `impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} --delimited -q "show tables"`;
do
impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} -q "drop table $i;";
done
# drop any existing views from the target db
for i in `impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} --delimited -q "show tables"`;
do
impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} -q "drop view $i;";
done
# delete the database
impala-shell -i impala-cluster-dn1.openaire.eu -q "drop database if exists ${db} cascade";
# create the databases
impala-shell -i impala-cluster-dn1.openaire.eu -q "create database ${db}";
impala-shell -q "INVALIDATE METADATA"
echo "creating schema for ${db}"
for (( k = 0; k < 5; k ++ )); do
for i in `impala-shell -d ${db} --delimited -q "show tables"`;
do
impala-shell -d ${db} --delimited -q "show create table $i";
done | sed 's/"$/;/' | sed 's/^"//' | sed 's/[[:space:]]\date[[:space:]]/`date`/g' | impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -c -f -
done
# load the data from /tmp in the respective tables
echo "copying data in tables and computing stats"
for i in `impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} --delimited -q "show tables"`;
do
impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} -q "load data inpath '/tmp/$FILE/${db}.db/$i' into table $i";
impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} -q "compute stats $i";
done
# deleting the remaining directory from hdfs
hdfs dfs -conf /etc/impala_cluster/hdfs-site.xml -rm -R /tmp/$FILE/${db}.db
}
MONITOR_DB=$1
#HADOOP_USER_NAME=$2
copydb $MONITOR_DB
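
A usage sketch for the script above (both values are placeholders): the database to copy is $1 and the HDFS user name is $2.

    bash copyDataToImpalaCluster.sh openaire_beta_historical_snapshots_xxx my_hadoop_user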

@@ -0,0 +1,43 @@
export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs
export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami)
if ! [ -L $link_folder ]
then
rm -Rf "$link_folder"
ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder}
fi
SOURCE=$1
PRODUCTION=$2
SHADOW=$3
MONITOR=$4
MONITOR_PROD=$5
MONITOR_IRISH_PROD=$6
MONITOR_IRISH=$7
echo ${SOURCE}
echo ${PRODUCTION}
#echo "Updating ${PRODUCTION} monitor database old cluster"
#impala-shell -q "create database if not exists ${PRODUCTION}"
#impala-shell -d ${PRODUCTION} -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}./" | sed "s/$/;/" | impala-shell -c -f -
#impala-shell -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -c -f -
echo "Updating ${PRODUCTION} historical snapshots database"
impala-shell -i impala-cluster-dn1.openaire.eu -q "create database if not exists ${PRODUCTION}"
impala-shell -i impala-cluster-dn1.openaire.eu -d ${PRODUCTION} -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}./" | sed "s/$/;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f -
impala-shell -i impala-cluster-dn1.openaire.eu -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f -
echo "Production monitor db ready!"
impala-shell -i impala-cluster-dn1.openaire.eu -q "drop view ${MONITOR_PROD}.historical_snapshots"
impala-shell -i impala-cluster-dn1.openaire.eu -q "drop view ${MONITOR_PROD}.historical_snapshots_fos"
impala-shell -i impala-cluster-dn1.openaire.eu -q "create view ${MONITOR_PROD}.historical_snapshots as select * from ${SOURCE}.historical_snapshots"
impala-shell -i impala-cluster-dn1.openaire.eu -q "create view ${MONITOR_PROD}.historical_snapshots_fos as select * from ${SOURCE}.historical_snapshots_fos"
impala-shell -i impala-cluster-dn1.openaire.eu -q "drop view ${MONITOR_IRISH_PROD}.historical_snapshots_irish"
impala-shell -i impala-cluster-dn1.openaire.eu -q "drop view ${MONITOR_IRISH_PROD}.historical_snapshots_irish_fos"
impala-shell -i impala-cluster-dn1.openaire.eu -q "create view ${MONITOR_IRISH_PROD}.historical_snapshots_irish as select * from ${SOURCE}.historical_snapshots_irish"
impala-shell -i impala-cluster-dn1.openaire.eu -q "create view ${MONITOR_IRISH_PROD}.historical_snapshots_irish_fos as select * from ${SOURCE}.historical_snapshots_irish"
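
A usage sketch for the script above, using the database names from the parameters at the top of this commit; the positional order matches the SOURCE through MONITOR_IRISH assignments:

    bash finalizeImpalaCluster.sh \
        openaire_beta_historical_snapshots_xxx \
        openaire_beta_historical_snapshots \
        openaire_beta_historical_snapshots_shadow \
        openaire_beta_stats_monitor_xxx \
        openaire_beta_stats_monitor \
        openaire_beta_stats_monitor_ie \
        openaire_beta_stats_monitor_ie_xxx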

@@ -0,0 +1,27 @@
export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs
export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami)
if ! [ -L $link_folder ]
then
rm -Rf "$link_folder"
ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder}
fi
export SOURCE=$1
export TARGET=$2
export SHADOW=$3
export SCRIPT_PATH=$4
export HIVE_OPTS="-hiveconf mapred.job.queue.name=analytics -hiveconf hive.spark.client.connect.timeout=120000ms -hiveconf hive.spark.client.server.connect.timeout=300000ms -hiveconf spark.executor.memory=19166291558 -hiveconf spark.yarn.executor.memoryOverhead=3225 -hiveconf spark.driver.memory=11596411699 -hiveconf spark.yarn.driver.memoryOverhead=1228"
export HADOOP_USER_NAME="oozie"
echo "Getting file from " $4
hdfs dfs -copyToLocal $4
#update Monitor DB IRISH
#cat CreateDB.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2/g1" | sed "s/GRAPHDB/$3/g1" > foo
cat buildIrishMonitorDB.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2/g1" > foo
hive $HIVE_OPTS -f foo
echo "Hive shell finished"

@@ -0,0 +1,82 @@
INSERT INTO ${hist_db_name}.historical_snapshots_fos_tmp
SELECT * FROM ${hist_db_name_prev}.historical_snapshots_fos;
INSERT INTO ${hist_db_name}.historical_snapshots_fos_tmp
select
cast(${hist_date} as STRING),
count(distinct r.id),
r.type,
rf.lvl1,
rf.lvl2,
pf.publicly_funded,
r.access_mode,
r.gold,
r.green,
coalesce(gl.green_with_license,0),
h.is_hybrid,
b.is_bronze_oa,
d.in_diamond_journal,
t.is_transformative,
pr.refereed
from ${stats_db_name}.result r
left outer join ${stats_db_name}.result_fos rf on rf.id=r.id
left outer join ${stats_db_name}.indi_pub_publicly_funded pf on pf.id=r.id
left outer join ${stats_db_name}.indi_pub_green_with_license gl on gl.id=r.id
left outer join ${stats_db_name}.indi_pub_bronze_oa b on b.id=r.id
left outer join ${stats_db_name}.indi_pub_diamond d on d.id=r.id
left outer join ${stats_db_name}.indi_pub_in_transformative t on t.id=r.id
left outer join ${stats_db_name}.indi_pub_hybrid h on h.id=r.id
left outer join ${stats_db_name}.result_refereed pr on pr.id=r.id
group by r.green, r.gold, r.access_mode, r.type, rf.lvl1,rf.lvl2, pf.publicly_funded,r.green, gl.green_with_license,b.is_bronze_oa,d.in_diamond_journal,t.is_transformative,h.is_hybrid,pr.refereed;
drop table if exists ${hist_db_name}.historical_snapshots_fos purge;
CREATE TABLE ${hist_db_name}.historical_snapshots_fos STORED AS PARQUET AS
SELECT * FROM ${hist_db_name}.historical_snapshots_fos_tmp;
drop table if exists ${monitor_db_name}.historical_snapshots_fos purge;
create table ${monitor_db_name}.historical_snapshots_fos stored as parquet
as select * from ${hist_db_name}.historical_snapshots_fos;
drop table ${hist_db_name}.historical_snapshots_fos_tmp purge;
INSERT INTO ${hist_db_name}.historical_snapshots_tmp
SELECT * FROM ${hist_db_name_prev}.historical_snapshots;
INSERT INTO ${hist_db_name}.historical_snapshots_tmp
select
cast(${hist_date} as STRING),
count(distinct r.id),
r.type,
pf.publicly_funded,
r.access_mode,
r.gold,
r.green,
coalesce(gl.green_with_license,0),
h.is_hybrid,
b.is_bronze_oa,
d.in_diamond_journal,
t.is_transformative,
pr.refereed
from ${stats_db_name}.result r
left outer join ${stats_db_name}.indi_pub_publicly_funded pf on pf.id=r.id
left outer join ${stats_db_name}.indi_pub_green_with_license gl on gl.id=r.id
left outer join ${stats_db_name}.indi_pub_bronze_oa b on b.id=r.id
left outer join ${stats_db_name}.indi_pub_diamond d on d.id=r.id
left outer join ${stats_db_name}.indi_pub_in_transformative t on t.id=r.id
left outer join ${stats_db_name}.indi_pub_hybrid h on h.id=r.id
left outer join ${stats_db_name}.result_refereed pr on pr.id=r.id
group by r.green, r.gold, r.access_mode, r.type, pf.publicly_funded,r.green, gl.green_with_license,b.is_bronze_oa,d.in_diamond_journal,t.is_transformative,h.is_hybrid,pr.refereed;
drop table if exists ${hist_db_name}.historical_snapshots purge;
CREATE TABLE ${hist_db_name}.historical_snapshots STORED AS PARQUET AS
SELECT * FROM ${hist_db_name}.historical_snapshots_tmp;
drop table if exists ${monitor_db_name}.historical_snapshots purge;
create table ${monitor_db_name}.historical_snapshots stored as parquet
as select * from ${hist_db_name}.historical_snapshots;
drop table ${hist_db_name}.historical_snapshots_tmp purge;
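
The ${...} placeholders above are substituted by the workflow's hive2 action via its <param> entries; an equivalent manual run can be sketched with beeline using the values from the commit message (the base JDBC URL is taken from the cluster configuration earlier in this commit):

    beeline -u "jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000/" \
        --hivevar hist_db_name=openaire_beta_historical_snapshots_xxx \
        --hivevar hist_db_name_prev=openaire_beta_historical_snapshots_xxx \
        --hivevar stats_db_name=openaire_beta_stats_xxx \
        --hivevar monitor_db_name=openaire_beta_stats_monitor_xxx \
        --hivevar hist_date=122023 \
        -f scripts/BuildHistSnapsAll.sql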

@@ -0,0 +1,91 @@
INSERT INTO ${hist_db_name}.historical_snapshots_fos_irish_tmp
SELECT * FROM ${hist_db_name_prev}.historical_snapshots_irish_fos;
INSERT INTO ${hist_db_name}.historical_snapshots_fos_irish_tmp
select
cast(${hist_date} as STRING),
count(distinct r.id),
r.type,
rf.lvl1,
rf.lvl2,
pf.publicly_funded,
r.access_mode,
r.gold,
r.green,
coalesce(gl.green_with_license,0),
h.is_hybrid,
b.is_bronze_oa,
d.in_diamond_journal,
t.is_transformative,
pr.refereed
from ${stats_irish_db_name}.result r
left outer join ${stats_irish_db_name}.result_fos rf on rf.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_publicly_funded pf on pf.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_green_with_license gl on gl.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_bronze_oa b on b.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_diamond d on d.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_in_transformative t on t.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_hybrid h on h.id=r.id
left outer join ${stats_irish_db_name}.result_refereed pr on pr.id=r.id
group by r.green, r.gold, r.access_mode, r.type, rf.lvl1,rf.lvl2, pf.publicly_funded,r.green, gl.green_with_license,b.is_bronze_oa,d.in_diamond_journal,t.is_transformative,h.is_hybrid,pr.refereed;
drop table if exists ${hist_db_name}.historical_snapshots_irish_fos purge;
CREATE TABLE ${hist_db_name}.historical_snapshots_irish_fos STORED AS PARQUET AS
SELECT * FROM ${hist_db_name}.historical_snapshots_fos_irish_tmp;
drop table if exists ${monitor_irish_db_name}.historical_snapshots_irish_fos purge;
create table ${monitor_irish_db_name}.historical_snapshots_irish_fos stored as parquet
as select * from ${hist_db_name}.historical_snapshots_irish_fos;
drop table ${hist_db_name}.historical_snapshots_fos_irish_tmp purge;
INSERT INTO ${hist_db_name}.historical_snapshots_irish_tmp
SELECT * FROM ${hist_db_name_prev}.historical_snapshots_irish;
INSERT INTO ${hist_db_name}.historical_snapshots_irish_tmp
select
cast(${hist_date} as STRING),
count(distinct r.id),
r.type,
pf.publicly_funded,
r.access_mode,
r.gold,
r.green,
coalesce(gl.green_with_license,0),
h.is_hybrid,
b.is_bronze_oa,
d.in_diamond_journal,
t.is_transformative,
pr.refereed
from ${stats_irish_db_name}.result r
left outer join ${stats_irish_db_name}.indi_pub_publicly_funded pf on pf.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_green_with_license gl on gl.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_bronze_oa b on b.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_diamond d on d.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_in_transformative t on t.id=r.id
left outer join ${stats_irish_db_name}.indi_pub_hybrid h on h.id=r.id
left outer join ${stats_irish_db_name}.result_refereed pr on pr.id=r.id
group by r.green, r.gold, r.access_mode, r.type, pf.publicly_funded,r.green, gl.green_with_license,b.is_bronze_oa,d.in_diamond_journal,t.is_transformative,h.is_hybrid,pr.refereed;
drop table if exists ${hist_db_name}.historical_snapshots_irish purge;
CREATE TABLE ${hist_db_name}.historical_snapshots_irish STORED AS PARQUET AS
SELECT * FROM ${hist_db_name}.historical_snapshots_irish_tmp;
drop table if exists ${monitor_irish_db_name}.historical_snapshots_irish purge;
create table ${monitor_irish_db_name}.historical_snapshots_irish stored as parquet
as select * from ${hist_db_name}.historical_snapshots_irish;
drop table ${hist_db_name}.historical_snapshots_irish_tmp purge;

@@ -0,0 +1,92 @@
--------------------------------------------------------------
--------------------------------------------------------------
-- Historical Snapshots database creation
--------------------------------------------------------------
--------------------------------------------------------------
DROP database IF EXISTS ${hist_db_name} CASCADE;
CREATE database ${hist_db_name};
drop table if exists ${hist_db_name}.historical_snapshots_fos_tmp purge;
CREATE TABLE ${hist_db_name}.historical_snapshots_fos_tmp
(
hist_date STRING,
total INT,
type STRING,
lvl1 STRING,
lvl2 STRING,
publicly_funded INT,
accessrights STRING,
gold INT,
green INT,
green_with_license INT,
hybrid INT,
bronze INT,
diamond INT,
transformative INT,
peer_reviewed STRING
)
CLUSTERED BY (hist_date) INTO 100 buckets stored as orc tblproperties ('transactional' = 'true');
drop table if exists ${hist_db_name}.historical_snapshots_fos_irish_tmp purge;
CREATE TABLE ${hist_db_name}.historical_snapshots_fos_irish_tmp
(
hist_date STRING,
total INT,
type STRING,
lvl1 STRING,
lvl2 STRING,
publicly_funded INT,
accessrights STRING,
gold INT,
green INT,
green_with_license INT,
hybrid INT,
bronze INT,
diamond INT,
transformative INT,
peer_reviewed STRING
)
CLUSTERED BY (hist_date) INTO 100 buckets stored as orc tblproperties ('transactional' = 'true');
drop table if exists ${hist_db_name}.historical_snapshots_tmp purge;
CREATE TABLE ${hist_db_name}.historical_snapshots_tmp
(
hist_date STRING,
total INT,
type STRING,
publicly_funded INT,
accessrights STRING,
gold INT,
green INT,
green_with_license INT,
hybrid INT,
bronze INT,
diamond INT,
transformative INT,
peer_reviewed STRING
)
CLUSTERED BY (hist_date) INTO 100 buckets stored as orc tblproperties ('transactional' = 'true');
drop table if exists ${hist_db_name}.historical_snapshots_irish_tmp purge;
CREATE TABLE ${hist_db_name}.historical_snapshots_irish_tmp
(
hist_date STRING,
total INT,
type STRING,
publicly_funded INT,
accessrights STRING,
gold INT,
green INT,
green_with_license INT,
hybrid INT,
bronze INT,
diamond INT,
transformative INT,
peer_reviewed STRING
)
CLUSTERED BY (hist_date) INTO 100 buckets stored as orc tblproperties ('transactional' = 'true');
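
A quick sanity-check sketch for after a full run, against the Impala cluster used elsewhere in this commit (the database name is the placeholder from the commit message); it counts the aggregate rows contributed by each snapshot date:

    impala-shell -i impala-cluster-dn1.openaire.eu -q \
        "select hist_date, count(*) as n_rows from openaire_beta_historical_snapshots_xxx.historical_snapshots group by hist_date order by hist_date;"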

@@ -0,0 +1,159 @@
<workflow-app name="Stats Hist Snapshots" xmlns="uri:oozie:workflow:0.5">
<parameters>
<property>
<name>hist_db_name</name>
<description>the target hist database name</description>
</property>
<property>
<name>hist_db_name_prev</name>
<description>the hist database name of the previous month</description>
</property>
<property>
<name>stats_db_name</name>
<description>the stats db name</description>
</property>
<property>
<name>stats_irish_db_name</name>
<description>the stats irish db name</description>
</property>
<property>
<name>monitor_db_name</name>
<description>the monitor db name</description>
</property>
<property>
<name>monitor_db_prod_name</name>
<description>the production monitor db</description>
</property>
<property>
<name>monitor_irish_db_name</name>
<description>the irish monitor db name</description>
</property>
<property>
<name>monitor_irish_db_prod_name</name>
<description>the production irish monitor db</description>
</property>
<property>
<name>hist_db_prod_name</name>
<description>the production db</description>
</property>
<property>
<name>hist_db_shadow_name</name>
<description>the production shadow db</description>
</property>
<property>
<name>hist_date</name>
<description>the snapshot date</description>
</property>
<property>
<name>hive_metastore_uris</name>
<description>hive server metastore URIs</description>
</property>
<property>
<name>hive_jdbc_url</name>
<description>hive server jdbc url</description>
</property>
<property>
<name>hive_timeout</name>
<description>the time period, in seconds, after which Hive fails a transaction if a Hive client has not sent a heartbeat. The default value is 300 seconds.</description>
</property>
<property>
<name>hadoop_user_name</name>
<description>user name of the wf owner</description>
</property>
</parameters>
<global>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>hive.metastore.uris</name>
<value>${hive_metastore_uris}</value>
</property>
<property>
<name>hive.txn.timeout</name>
<value>${hive_timeout}</value>
</property>
<property>
<name>mapred.job.queue.name</name>
<value>analytics</value>
</property>
</configuration>
</global>
<start to="resume_from"/>
<decision name="resume_from">
<switch>
<case to="CreateDB">${wf:conf('resumeFrom') eq 'CreateDB'}</case>
<case to="BuildHistSnaps">${wf:conf('resumeFrom') eq 'BuildHistSnaps'}</case>
<case to="BuildHistSnapsIrish">${wf:conf('resumeFrom') eq 'BuildHistSnapsIrish'}</case>
<case to="Step2-copyDataToImpalaCluster">${wf:conf('resumeFrom') eq 'Step2-copyDataToImpalaCluster'}</case>
<case to="Step3-finalizeImpalaCluster">${wf:conf('resumeFrom') eq 'Step3-finalizeImpalaCluster'}</case>
<default to="BuildHistSnaps"/>
</switch>
</decision>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="CreateDB">
<hive2 xmlns="uri:oozie:hive2-action:0.1">
<jdbc-url>${hive_jdbc_url}</jdbc-url>
<script>scripts/CreateDB.sql</script>
<param>hist_db_name=${hist_db_name}</param>
</hive2>
<ok to="BuildHistSnaps"/>
<error to="Kill"/>
</action>
<action name="BuildHistSnaps">
<hive2 xmlns="uri:oozie:hive2-action:0.1">
<jdbc-url>${hive_jdbc_url}</jdbc-url>
<script>scripts/BuildHistSnapsAll.sql</script>
<param>hist_db_name=${hist_db_name}</param>
<param>hist_db_name_prev=${hist_db_name_prev}</param>
<param>stats_db_name=${stats_db_name}</param>
<param>monitor_db_name=${monitor_db_name}</param>
<param>hist_date=${hist_date}</param>
</hive2>
<ok to="BuildHistSnapsIrish"/>
<error to="Kill"/>
</action>
<action name="BuildHistSnapsIrish">
<hive2 xmlns="uri:oozie:hive2-action:0.1">
<jdbc-url>${hive_jdbc_url}</jdbc-url>
<script>scripts/BuildHistSnapsIrish.sql</script>
<param>hist_db_name=${hist_db_name}</param>
<param>hist_db_name_prev=${hist_db_name_prev}</param>
<param>stats_irish_db_name=${stats_irish_db_name}</param>
<param>monitor_irish_db_name=${monitor_irish_db_name}</param>
<param>hist_date=${hist_date}</param>
</hive2>
<ok to="Step2-copyDataToImpalaCluster"/>
<error to="Kill"/>
</action>
<action name="Step2-copyDataToImpalaCluster">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>copyDataToImpalaCluster.sh</exec>
<argument>${hist_db_name}</argument>
<argument>${hadoop_user_name}</argument>
<file>copyDataToImpalaCluster.sh</file>
</shell>
<ok to="Step3-finalizeImpalaCluster"/>
<error to="Kill"/>
</action>
<action name="Step3-finalizeImpalaCluster">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>finalizeImpalaCluster.sh</exec>
<argument>${hist_db_name}</argument>
<argument>${hist_db_prod_name}</argument>
<argument>${hist_db_shadow_name}</argument>
<argument>${monitor_db_name}</argument>
<argument>${monitor_db_prod_name}</argument>
<argument>${monitor_irish_db_prod_name}</argument>
<argument>${monitor_irish_db_name}</argument>
<file>finalizeImpalaCluster.sh</file>
</shell>
<ok to="End"/>
<error to="Kill"/>
</action>
<end name="End"/>
</workflow-app>
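
Because of the resume_from decision node, a failed run can be restarted mid-pipeline by overriding resumeFrom at submission time; a sketch, with a placeholder Oozie server URL:

    oozie job -oozie http://oozie-host:11000/oozie -config job.properties \
        -D resumeFrom=Step2-copyDataToImpalaCluster -run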