Use the ACTIVE HDFS NODE for the Impala cluster in the "copyDataToImpalaCluster.sh" script.

Lampros Smyrnaios 2024-03-26 12:34:11 +02:00
parent 4c40c96e30
commit 92cc27e7eb
1 changed file with 13 additions and 2 deletions

@@ -6,16 +6,27 @@ then
 	ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder}
 fi
+IMPALA_HDFS_NODE=''
+if hdfs dfs -test -e hdfs://impala-cluster-mn1.openaire.eu >/dev/null 2>&1; then
+	IMPALA_HDFS_NODE='hdfs://impala-cluster-mn1.openaire.eu:8020'
+elif hdfs dfs -test -e hdfs://impala-cluster-mn2.openaire.eu >/dev/null 2>&1; then
+	IMPALA_HDFS_NODE='hdfs://impala-cluster-mn2.openaire.eu:8020'
+else
+	echo -e "\n\nPROBLEM WHEN SETTING THE HDFS-NODE FOR IMPALA CLUSTER!\n\n"
+	exit 1
+fi
+echo "Active IMPALA HDFS Node: ${IMPALA_HDFS_NODE}"
 export HADOOP_USER_NAME=$6
 export PROD_USAGE_STATS_DB="openaire_prod_usage_stats"
 function copydb() {
 	db=$1
 	FILE=("hive_wf_tmp_"$RANDOM)
-	hdfs dfs -mkdir hdfs://impala-cluster-mn1.openaire.eu:8020/tmp/$FILE/
+	hdfs dfs -mkdir ${IMPALA_HDFS_NODE}/tmp/$FILE/
 	# copy the databases from ocean to impala
 	echo "copying $db"
-	hadoop distcp -Dmapreduce.map.memory.mb=6144 -pb hdfs://nameservice1/user/hive/warehouse/${db}.db hdfs://impala-cluster-mn1.openaire.eu:8020/tmp/$FILE/
+	hadoop distcp -Dmapreduce.map.memory.mb=6144 -pb hdfs://nameservice1/user/hive/warehouse/${db}.db ${IMPALA_HDFS_NODE}/tmp/$FILE/
 	# change ownership to impala
 	hdfs dfs -conf /etc/impala_cluster/hdfs-site.xml -chmod -R 777 /tmp/$FILE/${db}.db
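
The detection block added above can also be exercised on its own before a run. The snippet below is a minimal standalone sketch of the same probing idea, not part of the commit: it reuses the two namenode URIs from the diff, while the loop and the standalone-script framing are illustrative. It relies on the fact that, in an HDFS HA pair, only the active NameNode serves client requests, so "hdfs dfs -test -e" succeeds only against the active node.

#!/usr/bin/env bash
# Standalone sketch (illustrative, not from the commit): probe each candidate
# namenode and keep the first one that answers. In an HDFS HA pair the standby
# NameNode rejects client operations, so only the active node passes the test.
IMPALA_HDFS_NODE=''
for candidate in 'hdfs://impala-cluster-mn1.openaire.eu:8020' 'hdfs://impala-cluster-mn2.openaire.eu:8020'; do
	if hdfs dfs -test -e "${candidate}/" >/dev/null 2>&1; then
		IMPALA_HDFS_NODE="${candidate}"
		break
	fi
done
if [ -z "${IMPALA_HDFS_NODE}" ]; then
	echo -e "\n\nPROBLEM WHEN SETTING THE HDFS-NODE FOR IMPALA CLUSTER!\n\n" >&2
	exit 1
fi
echo "Active IMPALA HDFS Node: ${IMPALA_HDFS_NODE}"

Probing at runtime instead of hard-coding impala-cluster-mn1 is what lets the script keep working after a NameNode failover, which is the point of this change.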