diff --git a/dhp-workflows/dhp-aggregation/src/main/resources/eu/dnetlib/dhp/datacite/hostedBy_map.json b/dhp-workflows/dhp-aggregation/src/main/resources/eu/dnetlib/dhp/datacite/hostedBy_map.json index d07cc33cb..bc1af3417 100644 --- a/dhp-workflows/dhp-aggregation/src/main/resources/eu/dnetlib/dhp/datacite/hostedBy_map.json +++ b/dhp-workflows/dhp-aggregation/src/main/resources/eu/dnetlib/dhp/datacite/hostedBy_map.json @@ -1048,5 +1048,10 @@ "openaire_id": "re3data_____::r3d100010399", "datacite_name": "ZEW Forschungsdatenzentrum", "official_name": "ZEW Forschungsdatenzentrum" + }, + "HBP.NEUROINF": { + "openaire_id": "fairsharing_::2975", + "datacite_name": "EBRAINS", + "official_name": "EBRAINS" } } \ No newline at end of file diff --git a/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/openorgs/oozie_app/config-default.xml b/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/openorgs/oozie_app/config-default.xml index 2e0ed9aee..6d375f03f 100644 --- a/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/openorgs/oozie_app/config-default.xml +++ b/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/openorgs/oozie_app/config-default.xml @@ -15,4 +15,12 @@ oozie.action.sharelib.for.spark spark2 + + hiveMetastoreUris + thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083 + + + pivotHistoryDatabase + + \ No newline at end of file diff --git a/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/openorgs/oozie_app/workflow.xml b/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/openorgs/oozie_app/workflow.xml index 6947019e8..7c633facc 100644 --- a/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/openorgs/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/openorgs/oozie_app/workflow.xml @@ -198,6 +198,8 @@ --isLookUpUrl${isLookUpUrl} --actionSetId${actionSetId} --cutConnectedComponent${cutConnectedComponent} + --hiveMetastoreUris${hiveMetastoreUris} + --pivotHistoryDatabase${pivotHistoryDatabase} diff --git a/dhp-workflows/dhp-doiboost/src/main/resources/eu/dnetlib/dhp/doiboost/crossref/irish_funder.json b/dhp-workflows/dhp-doiboost/src/main/resources/eu/dnetlib/dhp/doiboost/crossref/irish_funder.json index 15eb1b711..598fe2ba5 100644 --- a/dhp-workflows/dhp-doiboost/src/main/resources/eu/dnetlib/dhp/doiboost/crossref/irish_funder.json +++ b/dhp-workflows/dhp-doiboost/src/main/resources/eu/dnetlib/dhp/doiboost/crossref/irish_funder.json @@ -73,12 +73,6 @@ "name": "Irish Nephrology Society", "synonym": [] }, - { - "id": "100011062", - "uri": "http://dx.doi.org/10.13039/100011062", - "name": "Asian Spinal Cord Network", - "synonym": [] - }, { "id": "100011096", "uri": "http://dx.doi.org/10.13039/100011096", @@ -223,12 +217,6 @@ "name": "Global Brain Health Institute", "synonym": [] }, - { - "id": "100015776", - "uri": "http://dx.doi.org/10.13039/100015776", - "name": "Health and Social Care Board", - "synonym": [] - }, { "id": "100015992", "uri": "http://dx.doi.org/10.13039/100015992", @@ -403,18 +391,6 @@ "name": "Irish Hospice Foundation", "synonym": [] }, - { - "id": "501100001596", - "uri": "http://dx.doi.org/10.13039/501100001596", - "name": "Irish Research Council for Science, Engineering and Technology", - "synonym": [] - }, - { - "id": "501100001597", - "uri": "http://dx.doi.org/10.13039/501100001597", - "name": "Irish Research Council for the Humanities and 
Social Sciences", - "synonym": [] - }, { "id": "501100001598", "uri": "http://dx.doi.org/10.13039/501100001598", @@ -515,7 +491,7 @@ "id": "501100002081", "uri": "http://dx.doi.org/10.13039/501100002081", "name": "Irish Research Council", - "synonym": [] + "synonym": ["501100001596", "501100001597"] }, { "id": "501100002736", diff --git a/dhp-workflows/dhp-doiboost/src/main/scala/eu/dnetlib/doiboost/crossref/Crossref2Oaf.scala b/dhp-workflows/dhp-doiboost/src/main/scala/eu/dnetlib/doiboost/crossref/Crossref2Oaf.scala index 64090733d..7053213da 100644 --- a/dhp-workflows/dhp-doiboost/src/main/scala/eu/dnetlib/doiboost/crossref/Crossref2Oaf.scala +++ b/dhp-workflows/dhp-doiboost/src/main/scala/eu/dnetlib/doiboost/crossref/Crossref2Oaf.scala @@ -587,7 +587,15 @@ case object Crossref2Oaf { "10.13039/501100000266" | "10.13039/501100006041" | "10.13039/501100000265" | "10.13039/501100000270" | "10.13039/501100013589" | "10.13039/501100000271" => generateSimpleRelationFromAward(funder, "ukri________", a => a) - + //HFRI + case "10.13039/501100013209" => + generateSimpleRelationFromAward(funder, "hfri________", a => a) + val targetId = getProjectId("hfri________", "1e5e62235d094afd01cd56e65112fc63") + queue += generateRelation(sourceId, targetId, ModelConstants.IS_PRODUCED_BY) + queue += generateRelation(targetId, sourceId, ModelConstants.PRODUCES) + //ERASMUS+ + case "10.13039/501100010790" => + generateSimpleRelationFromAward(funder, "erasmusplus_", a => a) case _ => logger.debug("no match for " + funder.DOI.get) } diff --git a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml index 9eab960f0..1a66e2797 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml @@ -185,6 +185,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -212,6 +213,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -239,6 +241,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -266,6 +269,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf 
spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -293,6 +297,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -320,6 +325,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -347,6 +353,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -386,6 +393,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -414,6 +422,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -442,6 +451,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -470,6 +480,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -498,6 +509,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf 
spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -526,6 +538,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -554,6 +567,7 @@ --executor-cores=${sparkExecutorCoresForJoining} --executor-memory=${sparkExecutorMemoryForJoining} --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=${sparkExecutorMemoryForJoining} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} diff --git a/dhp-workflows/dhp-stats-hist-snaps/pom.xml b/dhp-workflows/dhp-stats-hist-snaps/pom.xml new file mode 100644 index 000000000..b31d909f9 --- /dev/null +++ b/dhp-workflows/dhp-stats-hist-snaps/pom.xml @@ -0,0 +1,32 @@ + + + + dhp-workflows + eu.dnetlib.dhp + 1.2.5-SNAPSHOT + + 4.0.0 + dhp-stats-hist-snaps + + + org.apache.spark + spark-core_2.11 + + + org.apache.spark + spark-sql_2.11 + + + + + + pl.project13.maven + git-commit-id-plugin + 2.1.11 + + false + + + + + diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/config-default.xml b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/config-default.xml new file mode 100644 index 000000000..b2a1322e6 --- /dev/null +++ b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/config-default.xml @@ -0,0 +1,30 @@ + + + jobTracker + ${jobTracker} + + + nameNode + ${nameNode} + + + oozie.use.system.libpath + true + + + oozie.action.sharelib.for.spark + spark2 + + + hive_metastore_uris + thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083 + + + hive_jdbc_url + jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000/;UseNativeQuery=1;?spark.executor.memory=22166291558;spark.yarn.executor.memoryOverhead=3225;spark.driver.memory=15596411699;spark.yarn.driver.memoryOverhead=1228 + + + oozie.wf.workflow.notification.url + {serviceUrl}/v1/oozieNotification/jobUpdate?jobId=$jobId%26status=$status + + \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/copyDataToImpalaCluster.sh b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/copyDataToImpalaCluster.sh new file mode 100644 index 000000000..3d9986b64 --- /dev/null +++ b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/copyDataToImpalaCluster.sh @@ -0,0 +1,223 @@ +export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs +export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami) +if ! [ -L $link_folder ] +then + rm -Rf "$link_folder" + ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder} +fi + +export HADOOP_USER_NAME=$2 + + +# Set the active HDFS node of OCEAN and IMPALA cluster. 
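+# The Ocean cluster publishes a single HA virtual name ("nameservice1") which always
+# resolves to its active namenode. The Impala cluster has no such alias, so the loop
+# below probes each master node with "hdfs dfs -test -e <uri>/tmp", which returns 0
+# only when the namenode behind that URI is active and serving the path.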
+OCEAN_HDFS_NODE='hdfs://nameservice1'
+echo -e "\nOCEAN HDFS virtual-name which resolves automatically to the active-node: ${OCEAN_HDFS_NODE}"
+
+IMPALA_HDFS_NODE=''
+COUNTER=0
+while [ $COUNTER -lt 3 ]; do
+  if hdfs dfs -test -e hdfs://impala-cluster-mn1.openaire.eu/tmp >/dev/null 2>&1; then
+    IMPALA_HDFS_NODE='hdfs://impala-cluster-mn1.openaire.eu:8020'
+    break
+  elif hdfs dfs -test -e hdfs://impala-cluster-mn2.openaire.eu/tmp >/dev/null 2>&1; then
+    IMPALA_HDFS_NODE='hdfs://impala-cluster-mn2.openaire.eu:8020'
+    break
+  else
+    IMPALA_HDFS_NODE=''
+    sleep 1
+  fi
+  ((COUNTER++))
+done
+if [ -z "$IMPALA_HDFS_NODE" ]; then
+  echo -e "\n\nERROR: PROBLEM WHEN SETTING THE HDFS-NODE FOR IMPALA CLUSTER! | AFTER ${COUNTER} RETRIES.\n\n"
+  exit 1
+fi
+echo -e "Active IMPALA HDFS Node: ${IMPALA_HDFS_NODE} , after ${COUNTER} retries.\n\n"
+
+IMPALA_HOSTNAME='impala-cluster-dn1.openaire.eu'
+IMPALA_CONFIG_FILE='/etc/impala_cluster/hdfs-site.xml'
+
+IMPALA_HDFS_DB_BASE_PATH="${IMPALA_HDFS_NODE}/user/hive/warehouse"
+
+
+# Set sed arguments.
+LOCATION_HDFS_NODE_SED_ARG="s|${OCEAN_HDFS_NODE}|${IMPALA_HDFS_NODE}|g" # This must be used with "sed -e", in order to use the "|" delimiter (the "/" delimiter conflicts with the URIs).
+
+# Set the SED command arguments for column-names with reserved words:
+DATE_SED_ARG_1='s/[[:space:]]\date[[:space:]]/\`date\`/g'
+DATE_SED_ARG_2='s/\.date,/\.\`date\`,/g' # the "date" may be part of a larger field name like "datestamp" or "date_aggregated", so we need to be careful with what we are replacing.
+DATE_SED_ARG_3='s/\.date[[:space:]]/\.\`date\` /g'
+
+HASH_SED_ARG_1='s/[[:space:]]\hash[[:space:]]/\`hash\`/g'
+HASH_SED_ARG_2='s/\.hash,/\.\`hash\`,/g'
+HASH_SED_ARG_3='s/\.hash[[:space:]]/\.\`hash\` /g'
+
+LOCATION_SED_ARG_1='s/[[:space:]]\location[[:space:]]/\`location\`/g'
+LOCATION_SED_ARG_2='s/\.location,/\.\`location\`,/g'
+LOCATION_SED_ARG_3='s/\.location[[:space:]]/\.\`location\` /g'
+
+
+function copydb() {
+  db=$1
+  echo -e "\nStart processing db: '${db}'..\n"
+
+  # Delete the old DB from Impala cluster (if exists).
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "drop database if exists ${db} cascade" |& tee error.log # impala-shell prints all logs to stderr, so we need to capture them and put them in a file, in order to perform "grep" on them later
+  log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"`
+  if [ -n "$log_errors" ]; then
+    echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN DROPPING THE OLD DATABASE! EXITING...\n\n"
+    rm -f error.log
+    return 1
+  fi
+
+  # Make Impala aware of the deletion of the old DB immediately.
+  sleep 1
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+
+  echo -e "\n\nCopying files of '${db}', from Ocean to Impala cluster..\n"
+  # Using max-bandwidth of: 50 * 100 Mb/s = 5 Gb/s
+  # Using max memory of: 50 * 6144 = 300 Gb
+  # Using 1MB as a buffer-size.
+  # The " -Ddistcp.dynamic.recordsPerChunk=50" arg is not available in our version of hadoop
+  # The "ug" args cannot be used as we get a "User does not belong to hive" error.
+  # The "p" argument cannot be used, as it blocks the files from being used, giving a "sticky bit"-error, even after applying chmod and chown on the files.
+  hadoop distcp -Dmapreduce.map.memory.mb=6144 -m 70 -bandwidth 150 \
+                -numListstatusThreads 40 \
+                -copybuffersize 1048576 \
+                -strategy dynamic \
+                -pb \
+                ${OCEAN_HDFS_NODE}/user/hive/warehouse/${db}.db ${IMPALA_HDFS_DB_BASE_PATH}
+
+  # Check the exit status of the "hadoop distcp" command.
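+  # Note: "$?" holds the exit status of the most recent command only, so it must be
+  # inspected on the very next line, before any other command overwrites it.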
+  if [ $? -eq 0 ]; then
+    echo -e "\nSuccessfully copied the files of '${db}'.\n"
+  else
+    echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}' WITH 'hadoop distcp'!\n\n"
+    rm -f error.log
+    return 2
+  fi
+
+  # In case we ever use this script for a writable DB (using inserts/updates), we should perform the following costly operation as well..
+  #hdfs dfs -conf ${IMPALA_CONFIG_FILE} -chmod -R 777 ${TEMP_SUBDIR_FULLPATH}/${db}.db
+
+  echo -e "\nCreating schema for db: '${db}'\n"
+
+  # create the new database (with the same name)
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create database ${db}"
+
+  # Make Impala aware of the creation of the new DB immediately.
+  sleep 1
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+  sleep 1
+  # Because "Hive" and "Impala" do not have compatible schemas, we cannot use the "show create table" output from hive to create the exact same table in impala.
+  # So, we have to find at least one parquet file (check that it's there) from each table in the ocean cluster, for impala to extract the table-schema itself from that file.
+
+  all_create_view_statements=()
+
+  entities_on_ocean=`hive -e "show tables in ${db};" | sed 's/WARN:.*//g'` # Get the tables and views without any potential "WARN" logs.
+  for i in ${entities_on_ocean[@]}; do # Use un-quoted values, as the elements are single words.
+    # Check if this is a view by showing the create-statement, which should print "create view" for a view and "create table" for a table. Unfortunately, there is no "show views" command.
+    create_entity_statement=`hive -e "show create table ${db}.${i};"` # It needs to happen in two stages, otherwise the "grep" is not able to match the multi-line statement.
+
+    create_view_statement_test=`echo -e "$create_entity_statement" | grep 'CREATE VIEW'`
+    if [ -n "$create_view_statement_test" ]; then
+      echo -e "\n'${i}' is a view, so we will save its 'create view' statement and execute it on Impala, after all tables have been created.\n"
+      create_view_statement=`echo -e "$create_entity_statement" | sed 's/WARN:.*//g' | sed 's/\`//g' \
+          | sed 's/"$/;/' | sed 's/^"//' | sed 's/\\"\\"/\"/g' | sed -e "${LOCATION_HDFS_NODE_SED_ARG}" | sed "${DATE_SED_ARG_1}" | sed "${HASH_SED_ARG_1}" | sed "${LOCATION_SED_ARG_1}" \
+          | sed "${DATE_SED_ARG_2}" | sed "${HASH_SED_ARG_2}" | sed "${LOCATION_SED_ARG_2}" \
+          | sed "${DATE_SED_ARG_3}" | sed "${HASH_SED_ARG_3}" | sed "${LOCATION_SED_ARG_3}"`
+      all_create_view_statements+=("$create_view_statement")
+    else
+      echo -e "\n'${i}' is a table, so we will check for its parquet files and create the table on Impala cluster.\n"
+      CURRENT_PRQ_FILE=`hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/${i}/" | grep -v 'Found' | grep -v '_impala_insert_staging' | head -1`
+      if [ -z "$CURRENT_PRQ_FILE" ]; then # If there is no parquet file inside.
+        echo -e "\nERROR: THE TABLE \"${i}\" HAD NO FILES TO GET THE SCHEMA FROM! IT'S EMPTY!\n\n"
+      else
+        impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.${i} like parquet '${CURRENT_PRQ_FILE}' stored as parquet;" |& tee error.log
+        log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"`
+        if [ -n "$log_errors" ]; then
+          echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN CREATING TABLE '${i}'!\n\n"
+        fi
+      fi
+    fi
+  done
+
+  echo -e "\nAll tables have been created, going to create the views..\n"
+
+  # Time to loop through the views and create them.
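+  # Views may reference other views, and Hive lists them in no particular order, so the
+  # loop below creates them in rounds ("levels"): each round executes the statements whose
+  # dependencies already exist and retries the rest, until none are left or no progress is made.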
+  # At this point all table-schemas should have been created.
+
+  previous_num_of_views_to_retry=${#all_create_view_statements[@]}
+  if [[ $previous_num_of_views_to_retry -gt 0 ]]; then
+    echo -e "\nAll_create_view_statements:\n\n${all_create_view_statements[@]}\n" # DEBUG
+    # Make Impala aware of the new tables, so it knows them when creating the views.
+    sleep 1
+    impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+    sleep 1
+  else
+    echo -e "\nDB '${db}' does not contain any views.\n"
+  fi
+
+  level_counter=0
+  while [[ ${#all_create_view_statements[@]} -gt 0 ]]; do
+    ((level_counter++))
+    # The only accepted reason for a view not to be created is that it depends on another view which has not been created yet.
+    # In this case, we should retry creating this particular view again.
+    should_retry_create_view_statements=()
+
+    for create_view_statement in "${all_create_view_statements[@]}"; do # Here we use double quotes, as the elements are phrases instead of single words.
+      impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "${create_view_statement}" |& tee error.log # impala-shell prints all logs to stderr, so we need to capture them and put them in a file, in order to perform "grep" on them later
+      specific_errors=`cat error.log | grep -E "FAILED: ParseException line 1:13 missing TABLE at 'view'|ERROR: AnalysisException: Could not resolve table reference:"`
+      if [ -n "$specific_errors" ]; then
+        echo -e "\nspecific_errors: ${specific_errors}\n"
+        echo -e "\nView '$(cat error.log | grep "CREATE VIEW " | sed 's/CREATE VIEW //g' | sed 's/ as select .*//g')' failed to be created, possibly because it depends on another view.\n"
+        should_retry_create_view_statements+=("$create_view_statement")
+      else
+        sleep 1 # Wait a bit for Impala to register that the view was created, before possibly referencing it by another view.
+      fi
+    done
+
+    new_num_of_views_to_retry=${#should_retry_create_view_statements[@]}
+    if [[ $new_num_of_views_to_retry -eq $previous_num_of_views_to_retry ]]; then
+      echo -e "\n\nERROR: THE NUMBER OF VIEWS TO RETRY HAS NOT BEEN REDUCED! THE SCRIPT IS LIKELY GOING INTO AN INFINITE LOOP! EXITING..\n\n"
+      return 3
+    elif [[ $new_num_of_views_to_retry -gt 0 ]]; then
+      echo -e "\nTo be retried \"create_view_statements\":\n\n${should_retry_create_view_statements[@]}\n"
+      previous_num_of_views_to_retry=$new_num_of_views_to_retry
+    else
+      echo -e "\nFinished creating views, for db: '${db}', in level-${level_counter}.\n"
+    fi
+    all_create_view_statements=("${should_retry_create_view_statements[@]}") # This is needed in any case, to either move forward with the remaining views or stop at 0 remaining views.
+  done
+
+  sleep 1
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+  sleep 1
+
+  echo -e "\nComputing stats for tables..\n"
+  entities_on_impala=`impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"`
+  for i in ${entities_on_impala[@]}; do # Use un-quoted values, as the elements are single words.
+    # Taking the create table statement from the Ocean cluster, just to check if it's a view, as the output is easier to parse than using impala-shell on the Impala cluster.
+    create_view_statement=`hive -e "show create table ${db}.${i};" | grep "CREATE VIEW"` # This grep works here, as we do not want to match multiple lines.
+    if [ -z "$create_view_statement" ]; then # If it's a table, then go load the data to it.
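+      # "compute stats" collects table and column statistics which the Impala planner
+      # uses for join ordering and resource estimation.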
+ impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${i}"; + fi + done + + if [ "${entities_on_impala[@]}" == "${entities_on_ocean[@]}" ]; then + echo -e "\nAll entities have been copied to Impala cluster.\n" + else + echo -e "\n\nERROR: 1 OR MORE ENTITIES OF DB '${db}' FAILED TO BE COPIED TO IMPALA CLUSTER!\n\n" + rm -f error.log + return 4 + fi + + rm -f error.log + echo -e "\n\nFinished processing db: ${db}\n\n" +} + + +MONITOR_DB=$1 +#HADOOP_USER_NAME=$2 +copydb $MONITOR_DB + diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/finalizeImpalaCluster.sh b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/finalizeImpalaCluster.sh new file mode 100644 index 000000000..d780f103b --- /dev/null +++ b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/finalizeImpalaCluster.sh @@ -0,0 +1,41 @@ +export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs +export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami) +if ! [ -L $link_folder ] +then + rm -Rf "$link_folder" + ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder} +fi + +SOURCE=$1 +PRODUCTION=$2 +SHADOW=$3 +MONITOR_PROD=$4 +MONITOR_IRISH_PROD=$5 + + +echo ${SOURCE} +echo ${PRODUCTION} + +#echo "Updating ${PRODUCTION} monitor database old cluster" +#impala-shell -q "create database if not exists ${PRODUCTION}" +#impala-shell -d ${PRODUCTION} -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}./" | sed "s/$/;/" | impala-shell -c -f - +#impala-shell -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -c -f - + +echo "Updating ${PRODUCTION} historical snapshots database" +impala-shell -i impala-cluster-dn1.openaire.eu -q "create database if not exists ${PRODUCTION}" +impala-shell -i impala-cluster-dn1.openaire.eu -d ${PRODUCTION} -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}./" | sed "s/$/;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f - +impala-shell -i impala-cluster-dn1.openaire.eu -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f - +echo "Production monitor db ready!" 
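+# Repoint the production views at the freshly copied snapshot tables: each view is
+# dropped and recreated, so that readers always resolve to the new data.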
+
+impala-shell -i impala-cluster-dn1.openaire.eu -q "drop view ${MONITOR_PROD}.historical_snapshots"
+impala-shell -i impala-cluster-dn1.openaire.eu -q "drop view ${MONITOR_PROD}.historical_snapshots_fos"
+
+impala-shell -i impala-cluster-dn1.openaire.eu -q "create view ${MONITOR_PROD}.historical_snapshots as select * from ${SOURCE}.historical_snapshots"
+impala-shell -i impala-cluster-dn1.openaire.eu -q "create view ${MONITOR_PROD}.historical_snapshots_fos as select * from ${SOURCE}.historical_snapshots_fos"
+
+impala-shell -i impala-cluster-dn1.openaire.eu -q "drop view ${MONITOR_IRISH_PROD}.historical_snapshots_irish"
+impala-shell -i impala-cluster-dn1.openaire.eu -q "drop view ${MONITOR_IRISH_PROD}.historical_snapshots_irish_fos"
+
+
+impala-shell -i impala-cluster-dn1.openaire.eu -q "create view ${MONITOR_IRISH_PROD}.historical_snapshots_irish as select * from ${SOURCE}.historical_snapshots_irish"
+impala-shell -i impala-cluster-dn1.openaire.eu -q "create view ${MONITOR_IRISH_PROD}.historical_snapshots_irish_fos as select * from ${SOURCE}.historical_snapshots_irish_fos"
diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/hist_snaps.sh b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/hist_snaps.sh
new file mode 100644
index 000000000..bcaa7984c
--- /dev/null
+++ b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/hist_snaps.sh
@@ -0,0 +1,27 @@
+export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs
+export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami)
+if ! [ -L $link_folder ]
+then
+    rm -Rf "$link_folder"
+    ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder}
+fi
+
+export SOURCE=$1
+export TARGET=$2
+export SHADOW=$3
+export SCRIPT_PATH=$4
+
+
+export HIVE_OPTS="-hiveconf mapred.job.queue.name=analytics -hiveconf hive.spark.client.connect.timeout=120000ms -hiveconf hive.spark.client.server.connect.timeout=300000ms -hiveconf spark.executor.memory=19166291558 -hiveconf spark.yarn.executor.memoryOverhead=3225 -hiveconf spark.driver.memory=11596411699 -hiveconf spark.yarn.driver.memoryOverhead=1228"
+export HADOOP_USER_NAME="oozie"
+
+echo "Getting file from " $4
+hdfs dfs -copyToLocal $4
+
+#update Monitor DB IRISH
+#cat CreateDB.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2/g1" | sed "s/GRAPHDB/$3/g1" > foo
+cat buildIrishMonitorDB.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2/g1" > foo
+hive $HIVE_OPTS -f foo
+
+echo "Hive shell finished"
+
diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/BuildHistSnapsAll.sql b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/BuildHistSnapsAll.sql
new file mode 100644
index 000000000..93d804820
--- /dev/null
+++ b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/BuildHistSnapsAll.sql
@@ -0,0 +1,82 @@
+INSERT INTO ${hist_db_name}.historical_snapshots_fos_tmp
+SELECT * FROM ${hist_db_name_prev}.historical_snapshots_fos;
+
+INSERT INTO ${hist_db_name}.historical_snapshots_fos_tmp
+select
+    cast(${hist_date} as STRING),
+    count(distinct r.id),
+    r.type,
+    rf.lvl1,
+    rf.lvl2,
+    pf.publicly_funded,
+    r.access_mode,
+    r.gold,
+    r.green,
+    coalesce(gl.green_with_license,0),
+    h.is_hybrid,
+    b.is_bronze_oa,
+    d.in_diamond_journal,
+    t.is_transformative,
+    pr.refereed
+from ${stats_db_name}.result r
+    left outer join ${stats_db_name}.result_fos rf on rf.id=r.id
+    left outer join ${stats_db_name}.indi_pub_publicly_funded pf on pf.id=r.id
+    left outer join ${stats_db_name}.indi_pub_green_with_license gl on gl.id=r.id
+    left outer join ${stats_db_name}.indi_pub_bronze_oa b on b.id=r.id
+    left outer join ${stats_db_name}.indi_pub_diamond d on d.id=r.id
+    left outer join ${stats_db_name}.indi_pub_in_transformative t on t.id=r.id
+    left outer join ${stats_db_name}.indi_pub_hybrid h on h.id=r.id
+    left outer join ${stats_db_name}.result_refereed pr on pr.id=r.id
+group by r.green, r.gold, r.access_mode, r.type, rf.lvl1, rf.lvl2, pf.publicly_funded, gl.green_with_license, b.is_bronze_oa, d.in_diamond_journal, t.is_transformative, h.is_hybrid, pr.refereed;
+
+drop table if exists ${hist_db_name}.historical_snapshots_fos purge;
+
+CREATE TABLE ${hist_db_name}.historical_snapshots_fos STORED AS PARQUET AS
+SELECT * FROM ${hist_db_name}.historical_snapshots_fos_tmp;
+
+drop table if exists ${monitor_db_name}.historical_snapshots_fos purge;
+
+create table ${monitor_db_name}.historical_snapshots_fos stored as parquet
+as select * from ${hist_db_name}.historical_snapshots_fos;
+
+drop table ${hist_db_name}.historical_snapshots_fos_tmp purge;
+
+INSERT INTO ${hist_db_name}.historical_snapshots_tmp
+SELECT * FROM ${hist_db_name_prev}.historical_snapshots;
+
+INSERT INTO ${hist_db_name}.historical_snapshots_tmp
+select
+    cast(${hist_date} as STRING),
+    count(distinct r.id),
+    r.type,
+    pf.publicly_funded,
+    r.access_mode,
+    r.gold,
+    r.green,
+    coalesce(gl.green_with_license,0),
+    h.is_hybrid,
+    b.is_bronze_oa,
+    d.in_diamond_journal,
+    t.is_transformative,
+    pr.refereed
+from ${stats_db_name}.result r
+    left outer join ${stats_db_name}.indi_pub_publicly_funded pf on pf.id=r.id
+    left outer join ${stats_db_name}.indi_pub_green_with_license gl on gl.id=r.id
+    left outer join ${stats_db_name}.indi_pub_bronze_oa b on b.id=r.id
+    left outer join ${stats_db_name}.indi_pub_diamond d on d.id=r.id
+    left outer join ${stats_db_name}.indi_pub_in_transformative t on t.id=r.id
+    left outer join ${stats_db_name}.indi_pub_hybrid h on h.id=r.id
+    left outer join ${stats_db_name}.result_refereed pr on pr.id=r.id
+group by r.green, r.gold, r.access_mode, r.type, pf.publicly_funded, gl.green_with_license, b.is_bronze_oa, d.in_diamond_journal, t.is_transformative, h.is_hybrid, pr.refereed;
+
+drop table if exists ${hist_db_name}.historical_snapshots purge;
+
+CREATE TABLE ${hist_db_name}.historical_snapshots STORED AS PARQUET AS
+SELECT * FROM ${hist_db_name}.historical_snapshots_tmp;
+
+drop table if exists ${monitor_db_name}.historical_snapshots purge;
+
+create table ${monitor_db_name}.historical_snapshots stored as parquet
+as select * from ${hist_db_name}.historical_snapshots;
+
+drop table ${hist_db_name}.historical_snapshots_tmp purge;
\ No newline at end of file
diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/BuildHistSnapsIrish.sql b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/BuildHistSnapsIrish.sql
new file mode 100644
index 000000000..95e811f64
--- /dev/null
+++ b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/BuildHistSnapsIrish.sql
@@ -0,0 +1,91 @@
+INSERT INTO ${hist_db_name}.historical_snapshots_fos_irish_tmp
+SELECT * FROM
${hist_db_name_prev}.historical_snapshots_irish_fos; + +INSERT INTO ${hist_db_name}.historical_snapshots_fos_irish_tmp +select + cast(${hist_date} as STRING), + count(distinct r.id), + r.type, + rf.lvl1, + rf.lvl2, + pf.publicly_funded, + r.access_mode, + r.gold, + r.green, + coalesce(gl.green_with_license,0), + h.is_hybrid, + b.is_bronze_oa, + d.in_diamond_journal, + t.is_transformative, + pr.refereed +from ${stats_irish_db_name}.result r + left outer join ${stats_irish_db_name}.result_fos rf on rf.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_publicly_funded pf on pf.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_green_with_license gl on gl.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_bronze_oa b on b.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_diamond d on d.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_in_transformative t on t.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_hybrid h on h.id=r.id + left outer join ${stats_irish_db_name}.result_refereed pr on pr.id=r.id +group by r.green, r.gold, r.access_mode, r.type, rf.lvl1,rf.lvl2, pf.publicly_funded,r.green, gl.green_with_license,b.is_bronze_oa,d.in_diamond_journal,t.is_transformative,h.is_hybrid,pr.refereed; + +drop table if exists ${hist_db_name}.historical_snapshots_irish_fos purge; + +CREATE TABLE ${hist_db_name}.historical_snapshots_irish_fos STORED AS PARQUET AS +SELECT * FROM ${hist_db_name}.historical_snapshots_fos_irish_tmp; + +drop table if exists ${monitor_irish_db_name}.historical_snapshots_irish_fos purge; + +create table ${monitor_irish_db_name}.historical_snapshots_irish_fos stored as parquet +as select * from ${hist_db_name}.historical_snapshots_irish_fos; + +drop table ${hist_db_name}.historical_snapshots_fos_irish_tmp purge; + +INSERT INTO ${hist_db_name}.historical_snapshots_irish_tmp +SELECT * FROM ${hist_db_name_prev}.historical_snapshots_irish; + +INSERT INTO ${hist_db_name}.historical_snapshots_irish_tmp +select + cast(${hist_date} as STRING), + count(distinct r.id), + r.type, + pf.publicly_funded, + r.access_mode, + r.gold, + r.green, + coalesce(gl.green_with_license,0), + h.is_hybrid, + b.is_bronze_oa, + d.in_diamond_journal, + t.is_transformative, + pr.refereed +from ${stats_irish_db_name}.result r + left outer join ${stats_irish_db_name}.indi_pub_publicly_funded pf on pf.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_green_with_license gl on gl.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_bronze_oa b on b.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_diamond d on d.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_in_transformative t on t.id=r.id + left outer join ${stats_irish_db_name}.indi_pub_hybrid h on h.id=r.id + left outer join ${stats_irish_db_name}.result_refereed pr on pr.id=r.id +group by r.green, r.gold, r.access_mode, r.type, pf.publicly_funded,r.green, gl.green_with_license,b.is_bronze_oa,d.in_diamond_journal,t.is_transformative,h.is_hybrid,pr.refereed; + + +drop table if exists ${hist_db_name}.historical_snapshots_irish purge; + +CREATE TABLE ${hist_db_name}.historical_snapshots_irish STORED AS PARQUET AS +SELECT * FROM ${hist_db_name}.historical_snapshots_irish_tmp; + +drop table if exists ${monitor_irish_db_name}.historical_snapshots_irish purge; + +create table ${monitor_irish_db_name}.historical_snapshots_irish stored as parquet +as select * from ${hist_db_name}.historical_snapshots_irish; + +drop table ${hist_db_name}.historical_snapshots_irish_tmp purge; + + +drop table 
if exists ${monitor_irish_db_name}.historical_snapshots_irish_fos purge; + +create table ${monitor_irish_db_name}.historical_snapshots_irish_fos stored as parquet +as select * from ${hist_db_name}.historical_snapshots_irish_fos; + +drop table ${hist_db_name}.historical_snapshots_fos_irish_tmp purge; \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/CreateDB.sql b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/CreateDB.sql new file mode 100644 index 000000000..18af135bf --- /dev/null +++ b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/scripts/CreateDB.sql @@ -0,0 +1,92 @@ +-------------------------------------------------------------- +-------------------------------------------------------------- +-- Historical Snapshots database creation +-------------------------------------------------------------- +-------------------------------------------------------------- + +DROP database IF EXISTS ${hist_db_name} CASCADE; +CREATE database ${hist_db_name}; + +drop table if exists ${hist_db_name}.historical_snapshots_fos_tmp purge; + +CREATE TABLE ${hist_db_name}.historical_snapshots_fos_tmp +( + hist_date STRING, + total INT, + type STRING, + lvl1 STRING, + lvl2 STRING, + publicly_funded INT, + accessrights STRING, + gold INT, + green INT, + green_with_license INT, + hybrid INT, + bronze INT, + diamond INT, + transformative INT, + peer_reviewed STRING +) +CLUSTERED BY (hist_date) INTO 100 buckets stored as orc tblproperties ('transactional' = 'true'); + +drop table if exists ${hist_db_name}.historical_snapshots_fos_irish_tmp purge; + +CREATE TABLE ${hist_db_name}.historical_snapshots_fos_irish_tmp +( + hist_date STRING, + total INT, + type STRING, + lvl1 STRING, + lvl2 STRING, + publicly_funded INT, + accessrights STRING, + gold INT, + green INT, + green_with_license INT, + hybrid INT, + bronze INT, + diamond INT, + transformative INT, + peer_reviewed STRING +) +CLUSTERED BY (hist_date) INTO 100 buckets stored as orc tblproperties ('transactional' = 'true'); + +drop table if exists ${hist_db_name}.historical_snapshots_tmp purge; + +CREATE TABLE ${hist_db_name}.historical_snapshots_tmp +( + hist_date STRING, + total INT, + type STRING, + publicly_funded INT, + accessrights STRING, + gold INT, + green INT, + green_with_license INT, + hybrid INT, + bronze INT, + diamond INT, + transformative INT, + peer_reviewed STRING +) +CLUSTERED BY (hist_date) INTO 100 buckets stored as orc tblproperties ('transactional' = 'true'); + +drop table if exists ${hist_db_name}.historical_snapshots_irish_tmp purge; + +CREATE TABLE ${hist_db_name}.historical_snapshots_irish_tmp +( + hist_date STRING, + total INT, + type STRING, + publicly_funded INT, + accessrights STRING, + gold INT, + green INT, + green_with_license INT, + hybrid INT, + bronze INT, + diamond INT, + transformative INT, + peer_reviewed STRING +) +CLUSTERED BY (hist_date) INTO 100 buckets stored as orc tblproperties ('transactional' = 'true'); \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/workflow.xml b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/workflow.xml new file mode 100644 index 000000000..8846fcdd7 --- /dev/null +++ 
b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/workflow.xml @@ -0,0 +1,159 @@ + + + + hist_db_name + the target hist database name + + + hist_db_name_prev + the hist database name of previous_month + + + + stats_db_name + the stats db name + + + stats_irish_db_name + the stats irish db name + + + monitor_db_name + the monitor db name + + + monitor_irish_db_name + the irish monitor db name + + + hist_db_prod_name + the production db + + + hist_db_shadow_name + the production shadow db + + + hist_date + the snaps date + + + hive_metastore_uris + hive server metastore URIs + + + hive_jdbc_url + hive server jdbc url + + + hive_timeout + the time period, in seconds, after which Hive fails a transaction if a Hive client has not sent a hearbeat. The default value is 300 seconds. + + + hadoop_user_name + user name of the wf owner + + + + + ${jobTracker} + ${nameNode} + + + hive.metastore.uris + ${hive_metastore_uris} + + + hive.txn.timeout + ${hive_timeout} + + + mapred.job.queue.name + analytics + + + + + + + + ${wf:conf('resumeFrom') eq 'CreateDB'} + ${wf:conf('resumeFrom') eq 'BuildHistSnaps'} + ${wf:conf('resumeFrom') eq 'BuildHistSnapsIrish'} + ${wf:conf('resumeFrom') eq 'Step2-copyDataToImpalaCluster'} + ${wf:conf('resumeFrom') eq 'Step3-finalizeImpalaCluster'} + + + + + + Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}] + + + + + ${hive_jdbc_url} + + hist_db_name=${hist_db_name} + + + + + + + + ${hive_jdbc_url} + + hist_db_name=${hist_db_name} + hist_db_name_prev=${hist_db_name_prev} + stats_db_name=${stats_db_name} + monitor_db_name=${monitor_db_name} + hist_date=${hist_date} + + + + + + + + ${hive_jdbc_url} + + hist_db_name=${hist_db_name} + hist_db_name_prev=${hist_db_name_prev} + stats_irish_db_name=${stats_irish_db_name} + monitor_irish_db_name=${monitor_irish_db_name} + hist_date=${hist_date} + + + + + + + ${jobTracker} + ${nameNode} + copyDataToImpalaCluster.sh + ${hist_db_name} + ${hadoop_user_name} + copyDataToImpalaCluster.sh + + + + + + + ${jobTracker} + ${nameNode} + finalizeImpalaCluster.sh + ${hist_db_name} + ${hist_db_prod_name} + ${hist_db_shadow_name} + ${monitor_db_prod_name} + ${monitor_irish_db_prod_name} + finalizeImpalaCluster.sh + + + + + + + diff --git a/dhp-workflows/dhp-stats-monitor-irish/pom.xml b/dhp-workflows/dhp-stats-monitor-irish/pom.xml new file mode 100644 index 000000000..6ab19dced --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-irish/pom.xml @@ -0,0 +1,32 @@ + + + + dhp-workflows + eu.dnetlib.dhp + 1.2.5-SNAPSHOT + + 4.0.0 + dhp-stats-monitor-irish + + + org.apache.spark + spark-core_2.11 + + + org.apache.spark + spark-sql_2.11 + + + + + + pl.project13.maven + git-commit-id-plugin + 2.1.11 + + false + + + + + diff --git a/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/config-default.xml b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/config-default.xml new file mode 100644 index 000000000..b2a1322e6 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/config-default.xml @@ -0,0 +1,30 @@ + + + jobTracker + ${jobTracker} + + + nameNode + ${nameNode} + + + oozie.use.system.libpath + true + + + oozie.action.sharelib.for.spark + spark2 + + + hive_metastore_uris + thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083 + + + hive_jdbc_url + 
jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000/;UseNativeQuery=1;?spark.executor.memory=22166291558;spark.yarn.executor.memoryOverhead=3225;spark.driver.memory=15596411699;spark.yarn.driver.memoryOverhead=1228 + + + oozie.wf.workflow.notification.url + {serviceUrl}/v1/oozieNotification/jobUpdate?jobId=$jobId%26status=$status + + \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/copyDataToImpalaCluster.sh b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/copyDataToImpalaCluster.sh new file mode 100644 index 000000000..2711d6e12 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/copyDataToImpalaCluster.sh @@ -0,0 +1,222 @@ +export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs +export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami) +if ! [ -L $link_folder ] +then + rm -Rf "$link_folder" + ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder} +fi + +export HADOOP_USER_NAME=$2 + +# Set the active HDFS node of OCEAN and IMPALA cluster. +OCEAN_HDFS_NODE='hdfs://nameservice1' +echo -e "\nOCEAN HDFS virtual-name which resolves automatically to the active-node: ${OCEAN_HDFS_NODE}" + +IMPALA_HDFS_NODE='' +COUNTER=0 +while [ $COUNTER -lt 3 ]; do + if hdfs dfs -test -e hdfs://impala-cluster-mn1.openaire.eu/tmp >/dev/null 2>&1; then + IMPALA_HDFS_NODE='hdfs://impala-cluster-mn1.openaire.eu:8020' + break + elif hdfs dfs -test -e hdfs://impala-cluster-mn2.openaire.eu/tmp >/dev/null 2>&1; then + IMPALA_HDFS_NODE='hdfs://impala-cluster-mn2.openaire.eu:8020' + break + else + IMPALA_HDFS_NODE='' + sleep 1 + fi + ((COUNTER++)) +done +if [ -z "$IMPALA_HDFS_NODE" ]; then + echo -e "\n\nERROR: PROBLEM WHEN SETTING THE HDFS-NODE FOR IMPALA CLUSTER! | AFTER ${COUNTER} RETRIES.\n\n" + exit 1 +fi +echo -e "Active IMPALA HDFS Node: ${IMPALA_HDFS_NODE} , after ${COUNTER} retries.\n\n" + +IMPALA_HOSTNAME='impala-cluster-dn1.openaire.eu' +IMPALA_CONFIG_FILE='/etc/impala_cluster/hdfs-site.xml' + +IMPALA_HDFS_DB_BASE_PATH="${IMPALA_HDFS_NODE}/user/hive/warehouse" + + +# Set sed arguments. +LOCATION_HDFS_NODE_SED_ARG="s|${OCEAN_HDFS_NODE}|${IMPALA_HDFS_NODE}|g" # This requires to be used with "sed -e" in order to have the "|" delimiter (as the "/" conflicts with the URIs) + +# Set the SED command arguments for column-names with reserved words: +DATE_SED_ARG_1='s/[[:space:]]\date[[:space:]]/\`date\`/g' +DATE_SED_ARG_2='s/\.date,/\.\`date\`,/g' # the "date" may be part of a larger field name like "datestamp" or "date_aggregated", so we need to be careful with what we are replacing. +DATE_SED_ARG_3='s/\.date[[:space:]]/\.\`date\` /g' + +HASH_SED_ARG_1='s/[[:space:]]\hash[[:space:]]/\`hash\`/g' +HASH_SED_ARG_2='s/\.hash,/\.\`hash\`,/g' +HASH_SED_ARG_3='s/\.hash[[:space:]]/\.\`hash\` /g' + +LOCATION_SED_ARG_1='s/[[:space:]]\location[[:space:]]/\`location\`/g' +LOCATION_SED_ARG_2='s/\.location,/\.\`location\`,/g' +LOCATION_SED_ARG_3='s/\.location[[:space:]]/\.\`location\` /g' + + +function copydb() { + db=$1 + echo -e "\nStart processing db: '${db}'..\n" + + # Delete the old DB from Impala cluster (if exists). 
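+  # impala-shell writes its log output to stderr; "|&" (short for "2>&1 |") funnels both
+  # streams into "tee", so the saved error.log can be grepped for WARN/ERROR/FAILED afterwards.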
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "drop database if exists ${db} cascade" |& tee error.log # impala-shell prints all logs to stderr, so we need to capture them and put them in a file, in order to perform "grep" on them later
+  log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"`
+  if [ -n "$log_errors" ]; then
+    echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN DROPPING THE OLD DATABASE! EXITING...\n\n"
+    rm -f error.log
+    return 1
+  fi
+
+  # Make Impala aware of the deletion of the old DB immediately.
+  sleep 1
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+
+  echo -e "\n\nCopying files of '${db}', from Ocean to Impala cluster..\n"
+  # Using max-bandwidth of: 50 * 100 Mb/s = 5 Gb/s
+  # Using max memory of: 50 * 6144 = 300 Gb
+  # Using 1MB as a buffer-size.
+  # The " -Ddistcp.dynamic.recordsPerChunk=50" arg is not available in our version of hadoop
+  # The "ug" args cannot be used as we get a "User does not belong to hive" error.
+  # The "p" argument cannot be used, as it blocks the files from being used, giving a "sticky bit"-error, even after applying chmod and chown on the files.
+  hadoop distcp -Dmapreduce.map.memory.mb=6144 -m 70 -bandwidth 150 \
+                -numListstatusThreads 40 \
+                -copybuffersize 1048576 \
+                -strategy dynamic \
+                -pb \
+                ${OCEAN_HDFS_NODE}/user/hive/warehouse/${db}.db ${IMPALA_HDFS_DB_BASE_PATH}
+
+  # Check the exit status of the "hadoop distcp" command.
+  if [ $? -eq 0 ]; then
+    echo -e "\nSuccessfully copied the files of '${db}'.\n"
+  else
+    echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}' WITH 'hadoop distcp'!\n\n"
+    rm -f error.log
+    return 2
+  fi
+
+  # In case we ever use this script for a writable DB (using inserts/updates), we should perform the following costly operation as well..
+  #hdfs dfs -conf ${IMPALA_CONFIG_FILE} -chmod -R 777 ${TEMP_SUBDIR_FULLPATH}/${db}.db
+
+  echo -e "\nCreating schema for db: '${db}'\n"
+
+  # create the new database (with the same name)
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create database ${db}"
+
+  # Make Impala aware of the creation of the new DB immediately.
+  sleep 1
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+  sleep 1
+  # Because "Hive" and "Impala" do not have compatible schemas, we cannot use the "show create table" output from hive to create the exact same table in impala.
+  # So, we have to find at least one parquet file (check that it's there) from each table in the ocean cluster, for impala to extract the table-schema itself from that file.
+
+  all_create_view_statements=()
+
+  entities_on_ocean=`hive -e "show tables in ${db};" | sed 's/WARN:.*//g'` # Get the tables and views without any potential "WARN" logs.
+  for i in ${entities_on_ocean[@]}; do # Use un-quoted values, as the elements are single words.
+    # Check if this is a view by showing the create-statement, which should print "create view" for a view and "create table" for a table. Unfortunately, there is no "show views" command.
+    create_entity_statement=`hive -e "show create table ${db}.${i};"` # It needs to happen in two stages, otherwise the "grep" is not able to match the multi-line statement.
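+    # For a view, the captured statement starts with "CREATE VIEW <db>.<name> AS SELECT ...",
+    # so a plain grep for 'CREATE VIEW' on the captured variable is enough to tell views apart from tables.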
+
+    create_view_statement_test=`echo -e "$create_entity_statement" | grep 'CREATE VIEW'`
+    if [ -n "$create_view_statement_test" ]; then
+      echo -e "\n'${i}' is a view, so we will save its 'create view' statement and execute it on Impala, after all tables have been created.\n"
+      create_view_statement=`echo -e "$create_entity_statement" | sed 's/WARN:.*//g' | sed 's/\`//g' \
+          | sed 's/"$/;/' | sed 's/^"//' | sed 's/\\"\\"/\"/g' | sed -e "${LOCATION_HDFS_NODE_SED_ARG}" | sed "${DATE_SED_ARG_1}" | sed "${HASH_SED_ARG_1}" | sed "${LOCATION_SED_ARG_1}" \
+          | sed "${DATE_SED_ARG_2}" | sed "${HASH_SED_ARG_2}" | sed "${LOCATION_SED_ARG_2}" \
+          | sed "${DATE_SED_ARG_3}" | sed "${HASH_SED_ARG_3}" | sed "${LOCATION_SED_ARG_3}"`
+      all_create_view_statements+=("$create_view_statement")
+    else
+      echo -e "\n'${i}' is a table, so we will check for its parquet files and create the table on Impala cluster.\n"
+      CURRENT_PRQ_FILE=`hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/${i}/" | grep -v 'Found' | grep -v '_impala_insert_staging' | head -1`
+      if [ -z "$CURRENT_PRQ_FILE" ]; then # If there is no parquet file inside.
+        echo -e "\nERROR: THE TABLE \"${i}\" HAD NO FILES TO GET THE SCHEMA FROM! IT'S EMPTY!\n\n"
+      else
+        impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.${i} like parquet '${CURRENT_PRQ_FILE}' stored as parquet;" |& tee error.log
+        log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"`
+        if [ -n "$log_errors" ]; then
+          echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN CREATING TABLE '${i}'!\n\n"
+        fi
+      fi
+    fi
+  done
+
+  echo -e "\nAll tables have been created, going to create the views..\n"
+
+  # Time to loop through the views and create them.
+  # At this point all table-schemas should have been created.
+
+  previous_num_of_views_to_retry=${#all_create_view_statements[@]}
+  if [[ $previous_num_of_views_to_retry -gt 0 ]]; then
+    echo -e "\nAll_create_view_statements:\n\n${all_create_view_statements[@]}\n" # DEBUG
+    # Make Impala aware of the new tables, so it knows them when creating the views.
+    sleep 1
+    impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+    sleep 1
+  else
+    echo -e "\nDB '${db}' does not contain any views.\n"
+  fi
+
+  level_counter=0
+  while [[ ${#all_create_view_statements[@]} -gt 0 ]]; do
+    ((level_counter++))
+    # The only accepted reason for a view not to be created is that it depends on another view which has not been created yet.
+    # In this case, we should retry creating this particular view again.
+    should_retry_create_view_statements=()
+
+    for create_view_statement in "${all_create_view_statements[@]}"; do # Here we use double quotes, as the elements are phrases instead of single words.
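+      # Without the double quotes, each multi-word statement would be word-split into separate loop items.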
+      impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "${create_view_statement}" |& tee error.log # impala-shell prints all logs to stderr, so we need to capture them and put them in a file, in order to perform "grep" on them later
+      specific_errors=`cat error.log | grep -E "FAILED: ParseException line 1:13 missing TABLE at 'view'|ERROR: AnalysisException: Could not resolve table reference:"`
+      if [ -n "$specific_errors" ]; then
+        echo -e "\nspecific_errors: ${specific_errors}\n"
+        echo -e "\nView '$(cat error.log | grep "CREATE VIEW " | sed 's/CREATE VIEW //g' | sed 's/ as select .*//g')' failed to be created, possibly because it depends on another view.\n"
+        should_retry_create_view_statements+=("$create_view_statement")
+      else
+        sleep 1 # Wait a bit for Impala to register that the view was created, before possibly referencing it by another view.
+      fi
+    done
+
+    new_num_of_views_to_retry=${#should_retry_create_view_statements[@]}
+    if [[ $new_num_of_views_to_retry -eq $previous_num_of_views_to_retry ]]; then
+      echo -e "\n\nERROR: THE NUMBER OF VIEWS TO RETRY HAS NOT BEEN REDUCED! THE SCRIPT IS LIKELY GOING INTO AN INFINITE LOOP! EXITING..\n\n"
+      return 3
+    elif [[ $new_num_of_views_to_retry -gt 0 ]]; then
+      echo -e "\nTo be retried \"create_view_statements\":\n\n${should_retry_create_view_statements[@]}\n"
+      previous_num_of_views_to_retry=$new_num_of_views_to_retry
+    else
+      echo -e "\nFinished creating views, for db: '${db}', in level-${level_counter}.\n"
+    fi
+    all_create_view_statements=("${should_retry_create_view_statements[@]}") # This is needed in any case, to either move forward with the remaining views or stop at 0 remaining views.
+  done
+
+  sleep 1
+  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+  sleep 1
+
+  echo -e "\nComputing stats for tables..\n"
+  entities_on_impala=`impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"`
+  for i in ${entities_on_impala[@]}; do # Use un-quoted values, as the elements are single words.
+    # Taking the create table statement from the Ocean cluster, just to check if it's a view, as the output is easier to parse than using impala-shell on the Impala cluster.
+    create_view_statement=`hive -e "show create table ${db}.${i};" | grep "CREATE VIEW"` # This grep works here, as we do not want to match multiple lines.
+    if [ -z "$create_view_statement" ]; then # If it's a table, then go load the data to it.
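+      # Views are skipped here: "compute stats" applies only to base tables, and the view
+      # definitions were already validated when they were created above.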
+ impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${i}"; + fi + done + + if [ "${entities_on_impala[@]}" == "${entities_on_ocean[@]}" ]; then + echo -e "\nAll entities have been copied to Impala cluster.\n" + else + echo -e "\n\nERROR: 1 OR MORE ENTITIES OF DB '${db}' FAILED TO BE COPIED TO IMPALA CLUSTER!\n\n" + rm -f error.log + return 4 + fi + + rm -f error.log + echo -e "\n\nFinished processing db: ${db}\n\n" +} + + +MONITOR_DB=$1 +#HADOOP_USER_NAME=$2 +copydb $MONITOR_DB + diff --git a/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/finalizeImpalaCluster.sh b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/finalizeImpalaCluster.sh new file mode 100644 index 000000000..38a2f61bc --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/finalizeImpalaCluster.sh @@ -0,0 +1,23 @@ +export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs +export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami) +if ! [ -L $link_folder ] +then + rm -Rf "$link_folder" + ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder} +fi + +SOURCE=$1 +PRODUCTION=$2 +echo ${SOURCE} +echo ${PRODUCTION} + +#echo "Updating ${PRODUCTION} monitor database old cluster" +#impala-shell -q "create database if not exists ${PRODUCTION}" +#impala-shell -d ${PRODUCTION} -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}./" | sed "s/$/;/" | impala-shell -c -f - +#impala-shell -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -c -f - + +echo "Updating ${PRODUCTION} monitor database" +impala-shell -i impala-cluster-dn1.openaire.eu -q "create database if not exists ${PRODUCTION}" +impala-shell -i impala-cluster-dn1.openaire.eu -d ${PRODUCTION} -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}./" | sed "s/$/;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f - +impala-shell -i impala-cluster-dn1.openaire.eu -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f - +echo "Production monitor db ready!" diff --git a/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/monitor_irish.sh b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/monitor_irish.sh new file mode 100644 index 000000000..27e399e98 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/monitor_irish.sh @@ -0,0 +1,28 @@ +export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs +export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami) +if ! 
[ -L $link_folder ]
+then
+    rm -Rf "$link_folder"
+    ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder}
+fi
+
+export SOURCE=$1
+export TARGET=$2
+export SHADOW=$3
+export SCRIPT_PATH=$4
+export GRAPHDB=$5
+
+
+export HIVE_OPTS="-hiveconf mapred.job.queue.name=analytics -hiveconf hive.spark.client.connect.timeout=120000ms -hiveconf hive.spark.client.server.connect.timeout=300000ms -hiveconf spark.executor.memory=19166291558 -hiveconf spark.yarn.executor.memoryOverhead=3225 -hiveconf spark.driver.memory=11596411699 -hiveconf spark.yarn.driver.memoryOverhead=1228"
+export HADOOP_USER_NAME="oozie"
+
+echo "Getting file from " $4
+hdfs dfs -copyToLocal $4
+
+#update Monitor DB IRISH
+#cat CreateDB.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2/g1" | sed "s/GRAPHDB/$3/g1" > foo
+cat buildIrishMonitorDB.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2/g1" | sed "s/GRAPHDB/$5/g1" > foo
+hive $HIVE_OPTS -f foo
+
+echo "Hive shell finished"
+
diff --git a/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/scripts/buildIrishMonitorDB.sql b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/scripts/buildIrishMonitorDB.sql new file mode 100644 index 000000000..3f0922020 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/scripts/buildIrishMonitorDB.sql @@ -0,0 +1,241 @@
+drop database if exists TARGET cascade;
+create database if not exists TARGET;
+
+create view if not exists TARGET.category as select * from SOURCE.category;
+create view if not exists TARGET.concept as select * from SOURCE.concept;
+create view if not exists TARGET.context as select * from SOURCE.context;
+create view if not exists TARGET.country as select * from SOURCE.country;
+create view if not exists TARGET.countrygdp as select * from SOURCE.countrygdp;
+create view if not exists TARGET.creation_date as select * from SOURCE.creation_date;
+--create view if not exists TARGET.funder as select * from SOURCE.funder;
+create view if not exists TARGET.fundref as select * from SOURCE.fundref;
+create view if not exists TARGET.rndexpenditure as select * from SOURCE.rndexpediture;
+create view if not exists TARGET.rndgdpexpenditure as select * from SOURCE.rndgdpexpenditure;
+create view if not exists TARGET.doctoratestudents as select * from SOURCE.doctoratestudents;
+create view if not exists TARGET.totalresearchers as select * from SOURCE.totalresearchers;
+create view if not exists TARGET.totalresearchersft as select * from SOURCE.totalresearchersft;
+create view if not exists TARGET.hrrst as select * from SOURCE.hrrst;
+
+drop table if exists TARGET.irish_funders;
+
+create TEMPORARY table TARGET.irish_funders as
+select distinct xpath_string(fundingtree[0].value, '//funder/name') as funder from GRAPHDB.project
+    where xpath_string(fundingtree[0].value, '//funder/jurisdiction')='IE';
+--create TEMPORARY table TARGET.irish_funders as
+--select distinct name as funder from SOURCE.fundref where country='IE';
+
+drop table if exists TARGET.result;
+
+create table TARGET.result stored as parquet as
+select distinct * from (
+    select r.*
+    from SOURCE.result r
+    join SOURCE.result_projects rp on rp.id=r.id
+    join SOURCE.project p on p.id=rp.project
+    join TARGET.irish_funders irf on irf.funder=p.funder
+    union all
+    select r.*
+    from SOURCE.result r
+    join SOURCE.result_organization ro on ro.id=r.id
+    join
SOURCE.organization o on o.id=ro.organization and o.country='IE' + union all + select r.* + from SOURCE.result r + join SOURCE.result_pids pid on pid.id=r.id + join stats_ext.transformative_facts tf on tf.doi=pid.pid + ) foo; + +create view if not exists TARGET.category as select * from SOURCE.category; +create view if not exists TARGET.concept as select * from SOURCE.concept; +create view if not exists TARGET.context as select * from SOURCE.context; +create view if not exists TARGET.country as select * from SOURCE.country; +create view if not exists TARGET.countrygdp as select * from SOURCE.countrygdp; +create view if not exists TARGET.creation_date as select * from SOURCE.creation_date; + +create table TARGET.funder stored as parquet as select * from SOURCE.funder where country='IE'; + +create view if not exists TARGET.fundref as select * from SOURCE.fundref; +create view if not exists TARGET.rndexpenditure as select * from SOURCE.rndexpediture; +create view if not exists TARGET.rndgdpexpenditure as select * from SOURCE.rndgdpexpenditure; +create view if not exists TARGET.doctoratestudents as select * from SOURCE.doctoratestudents; +create view if not exists TARGET.totalresearchers as select * from SOURCE.totalresearchers; +create view if not exists TARGET.totalresearchersft as select * from SOURCE.totalresearchersft; +create view if not exists TARGET.hrrst as select * from SOURCE.hrrst; +--create view if not exists TARGET.graduatedoctorates as select * from SOURCE.graduatedoctorates; + +create table TARGET.result_citations stored as parquet as select * from SOURCE.result_citations orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_references_oc stored as parquet as select * from SOURCE.result_references_oc orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_citations_oc stored as parquet as select * from SOURCE.result_citations_oc orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_classifications stored as parquet as select * from SOURCE.result_classifications orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_apc stored as parquet as select * from SOURCE.result_apc orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_concepts stored as parquet as select * from SOURCE.result_concepts orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_datasources stored as parquet as select * from SOURCE.result_datasources orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_fundercount stored as parquet as select * from SOURCE.result_fundercount orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_gold stored as parquet as select * from SOURCE.result_gold orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_greenoa stored as parquet as select * from SOURCE.result_greenoa orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_languages stored as parquet as select * from SOURCE.result_languages orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_licenses stored as parquet as select * from SOURCE.result_licenses orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create 
table TARGET.licenses_normalized STORED AS PARQUET as select * from SOURCE.licenses_normalized; + +create table TARGET.result_oids stored as parquet as select * from SOURCE.result_oids orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_organization stored as parquet as select * from SOURCE.result_organization orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_peerreviewed stored as parquet as select * from SOURCE.result_peerreviewed orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_pids stored as parquet as select * from SOURCE.result_pids orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_projectcount stored as parquet as select * from SOURCE.result_projectcount orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_projects stored as parquet as select * from SOURCE.result_projects orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_refereed stored as parquet as select * from SOURCE.result_refereed orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_sources stored as parquet as select * from SOURCE.result_sources orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_topics stored as parquet as select * from SOURCE.result_topics orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_fos stored as parquet as select * from SOURCE.result_fos orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_accessroute stored as parquet as select * from SOURCE.result_accessroute orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_instance stored as parquet as select * from SOURCE.result_instance orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.result_orcid stored as parquet as select * from SOURCE.result_orcid orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create view TARGET.foo1 as select * from SOURCE.result_result rr where rr.source in (select id from TARGET.result); +create view TARGET.foo2 as select * from SOURCE.result_result rr where rr.target in (select id from TARGET.result); +create table TARGET.result_result STORED AS PARQUET as select distinct * from (select * from TARGET.foo1 union all select * from TARGET.foo2) foufou; +drop view TARGET.foo1; +drop view TARGET.foo2; + +-- datasources +create view if not exists TARGET.datasource as select * from SOURCE.datasource; +create view if not exists TARGET.datasource_oids as select * from SOURCE.datasource_oids; +create view if not exists TARGET.datasource_organizations as select * from SOURCE.datasource_organizations; +create view if not exists TARGET.datasource_sources as select * from SOURCE.datasource_sources; + +create table TARGET.datasource_results stored as parquet as select id as result, datasource as id from TARGET.result_datasources; + +-- organizations +create view if not exists TARGET.organization as select * from SOURCE.organization; +create view if not exists TARGET.organization_datasources as select * from SOURCE.organization_datasources; +create view if not exists TARGET.organization_pids as select * from SOURCE.organization_pids; +create view if not 
exists TARGET.organization_projects as select * from SOURCE.organization_projects; +create view if not exists TARGET.organization_sources as select * from SOURCE.organization_sources; + +-- projects +create view if not exists TARGET.project as select * from SOURCE.project; +create view if not exists TARGET.project_oids as select * from SOURCE.project_oids; +create view if not exists TARGET.project_organizations as select * from SOURCE.project_organizations; +create view if not exists TARGET.project_resultcount as select * from SOURCE.project_resultcount; +create view if not exists TARGET.project_classification as select * from SOURCE.project_classification; +create view if not exists TARGET.project_organization_contribution as select * from SOURCE.project_organization_contribution; + +create table TARGET.project_results stored as parquet as select id as result, project as id from TARGET.result_projects; + + +-- indicators +-- Sprint 1 ---- +create table TARGET.indi_pub_green_oa stored as parquet as select * from SOURCE.indi_pub_green_oa orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_grey_lit stored as parquet as select * from SOURCE.indi_pub_grey_lit orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_doi_from_crossref stored as parquet as select * from SOURCE.indi_pub_doi_from_crossref orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +-- Sprint 2 ---- +create table TARGET.indi_result_has_cc_licence stored as parquet as select * from SOURCE.indi_result_has_cc_licence orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_result_has_cc_licence_url stored as parquet as select * from SOURCE.indi_result_has_cc_licence_url orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_has_abstract stored as parquet as select * from SOURCE.indi_pub_has_abstract orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_result_with_orcid stored as parquet as select * from SOURCE.indi_result_with_orcid orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +---- Sprint 3 ---- +create table TARGET.indi_funded_result_with_fundref stored as parquet as select * from SOURCE.indi_funded_result_with_fundref orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create view TARGET.indi_result_org_collab as select * from SOURCE.indi_result_org_collab; +create view TARGET.indi_result_org_country_collab as select * from SOURCE.indi_result_org_country_collab; +create view TARGET.indi_project_collab_org as select * from SOURCE.indi_project_collab_org; +create view TARGET.indi_project_collab_org_country as select * from SOURCE.indi_project_collab_org_country; +create view TARGET.indi_funder_country_collab as select * from SOURCE.indi_funder_country_collab; +create view TARGET.indi_result_country_collab as select * from SOURCE.indi_result_country_collab; +---- Sprint 4 ---- +create table TARGET.indi_pub_diamond stored as parquet as select * from SOURCE.indi_pub_diamond orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_in_transformative stored as parquet as select * from SOURCE.indi_pub_in_transformative orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_closed_other_open stored as parquet as select * from 
SOURCE.indi_pub_closed_other_open orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +---- Sprint 5 ---- +create table TARGET.indi_result_no_of_copies stored as parquet as select * from SOURCE.indi_result_no_of_copies orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +---- Sprint 6 ---- +create table TARGET.indi_pub_hybrid_oa_with_cc stored as parquet as select * from SOURCE.indi_pub_hybrid_oa_with_cc orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_bronze_oa stored as parquet as select * from SOURCE.indi_pub_bronze_oa orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_downloads stored as parquet as select * from SOURCE.indi_pub_downloads orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); + +create table TARGET.indi_pub_downloads_datasource stored as parquet as select * from SOURCE.indi_pub_downloads_datasource orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); + +create table TARGET.indi_pub_downloads_year stored as parquet as select * from SOURCE.indi_pub_downloads_year orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); + +create table TARGET.indi_pub_downloads_datasource_year stored as parquet as select * from SOURCE.indi_pub_downloads_datasource_year orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); + +---- Sprint 7 ---- +create table TARGET.indi_pub_gold_oa stored as parquet as select * from SOURCE.indi_pub_gold_oa orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_hybrid stored as parquet as select * from SOURCE.indi_pub_hybrid orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create view TARGET.indi_org_fairness as select * from SOURCE.indi_org_fairness; +create view TARGET.indi_org_fairness_pub_pr as select * from SOURCE.indi_org_fairness_pub_pr; +create view TARGET.indi_org_fairness_pub_year as select * from SOURCE.indi_org_fairness_pub_year; +create view TARGET.indi_org_fairness_pub as select * from SOURCE.indi_org_fairness_pub; +create view TARGET.indi_org_fairness_year as select * from SOURCE.indi_org_fairness_year; +create view TARGET.indi_org_findable_year as select * from SOURCE.indi_org_findable_year; +create view TARGET.indi_org_findable as select * from SOURCE.indi_org_findable; +create view TARGET.indi_org_openess as select * from SOURCE.indi_org_openess; +create view TARGET.indi_org_openess_year as select * from SOURCE.indi_org_openess_year; +create table TARGET.indi_pub_has_preprint stored as parquet as select * from SOURCE.indi_pub_has_preprint orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_in_subscribed stored as parquet as select * from SOURCE.indi_pub_in_subscribed orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_result_with_pid stored as parquet as select * from SOURCE.indi_result_with_pid orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_impact_measures stored as parquet as select * from SOURCE.indi_impact_measures orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_pub_interdisciplinarity stored as parquet as select * from SOURCE.indi_pub_interdisciplinarity orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table 
TARGET.result_apc_affiliations stored as parquet as select * from SOURCE.result_apc_affiliations orig where exists (select 1 from TARGET.result r where r.id=orig.id);
+
+create table TARGET.indi_is_project_result_after stored as parquet as select * from SOURCE.indi_is_project_result_after orig where exists (select 1 from TARGET.result r where r.id=orig.result_id);
+create view TARGET.indi_is_funder_plan_s as select * from SOURCE.indi_is_funder_plan_s;
+create view TARGET.indi_funder_fairness as select * from SOURCE.indi_funder_fairness;
+create view TARGET.indi_funder_openess as select * from SOURCE.indi_funder_openess;
+create view TARGET.indi_funder_findable as select * from SOURCE.indi_funder_findable;
+create view TARGET.indi_ris_fairness as select * from SOURCE.indi_ris_fairness;
+create view TARGET.indi_ris_openess as select * from SOURCE.indi_ris_openess;
+create view TARGET.indi_ris_findable as select * from SOURCE.indi_ris_findable;
+
+create table TARGET.indi_pub_green_with_license stored as parquet as select * from SOURCE.indi_pub_green_with_license orig where exists (select 1 from TARGET.result r where r.id=orig.id);
+create table TARGET.result_country stored as parquet as select * from SOURCE.result_country orig where exists (select 1 from TARGET.result r where r.id=orig.id);
+create table TARGET.indi_pub_publicly_funded stored as parquet as select * from SOURCE.indi_pub_publicly_funded orig where exists (select 1 from TARGET.result r where r.id=orig.id);
+
+create table TARGET.indi_result_oa_with_license stored as parquet as select * from SOURCE.indi_result_oa_with_license orig where exists (select 1 from TARGET.result r where r.id=orig.id);
+create table TARGET.indi_result_oa_without_license stored as parquet as select * from SOURCE.indi_result_oa_without_license orig where exists (select 1 from TARGET.result r where r.id=orig.id);
+
+create table TARGET.indi_result_under_transformative stored as parquet as select * from SOURCE.indi_result_under_transformative orig where exists (select 1 from TARGET.result r where r.id=orig.id);
\ No newline at end of file
diff --git a/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/workflow.xml b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/workflow.xml new file mode 100644 index 000000000..e49552c60 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/workflow.xml @@ -0,0 +1,118 @@
+
+
+
+        stats_db_name
+        the target stats database name
+
+        graph_db_name
+        the graph database name
+
+        monitor_irish_db_name
+        the target monitor db name
+
+        monitor_irish_db_prod_name
+        the name of the production monitor db
+
+        monitor_irish_db_shadow_name
+        the name of the shadow monitor db
+
+        hive_metastore_uris
+        hive server metastore URIs
+
+        hive_jdbc_url
+        hive server jdbc url
+
+        hive_timeout
+        the time period, in seconds, after which Hive fails a transaction if a Hive client has not sent a heartbeat. The default value is 300 seconds.
+ + + hadoop_user_name + user name of the wf owner + + + + + ${jobTracker} + ${nameNode} + + + hive.metastore.uris + ${hive_metastore_uris} + + + hive.txn.timeout + ${hive_timeout} + + + mapred.job.queue.name + analytics + + + + + + + + ${wf:conf('resumeFrom') eq 'Step1-buildIrishMonitorDB'} + ${wf:conf('resumeFrom') eq 'Step2-copyDataToImpalaCluster'} + ${wf:conf('resumeFrom') eq 'Step3-finalizeImpalaCluster'} + + + + + + Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}] + + + + + ${jobTracker} + ${nameNode} + monitor_irish.sh + ${stats_db_name} + ${monitor_irish_db_name} + ${monitor_irish_db_shadow_name} + ${wf:appPath()}/scripts/buildIrishMonitorDB.sql + ${graph_db_name} + monitor_irish.sh + + + + + + + + ${jobTracker} + ${nameNode} + copyDataToImpalaCluster.sh + ${monitor_irish_db_name} + ${hadoop_user_name} + copyDataToImpalaCluster.sh + + + + + + + + ${jobTracker} + ${nameNode} + finalizeImpalaCluster.sh + ${monitor_irish_db_name} + ${monitor_irish_db_prod_name} + ${monitor_irish_db_shadow_name} + finalizeImpalaCluster.sh + + + + + + + diff --git a/dhp-workflows/dhp-stats-monitor-update/pom.xml b/dhp-workflows/dhp-stats-monitor-update/pom.xml new file mode 100644 index 000000000..f2bc35f8d --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/pom.xml @@ -0,0 +1,32 @@ + + + + dhp-workflows + eu.dnetlib.dhp + 1.2.5-SNAPSHOT + + 4.0.0 + dhp-stats-monitor-update + + + org.apache.spark + spark-core_2.11 + + + org.apache.spark + spark-sql_2.11 + + + + + + pl.project13.maven + git-commit-id-plugin + 2.1.11 + + false + + + + + diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/config-default.xml b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/config-default.xml new file mode 100644 index 000000000..b2a1322e6 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/config-default.xml @@ -0,0 +1,30 @@ + + + jobTracker + ${jobTracker} + + + nameNode + ${nameNode} + + + oozie.use.system.libpath + true + + + oozie.action.sharelib.for.spark + spark2 + + + hive_metastore_uris + thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083 + + + hive_jdbc_url + jdbc:hive2://iis-cdh5-test-m3.ocean.icm.edu.pl:10000/;UseNativeQuery=1;?spark.executor.memory=22166291558;spark.yarn.executor.memoryOverhead=3225;spark.driver.memory=15596411699;spark.yarn.driver.memoryOverhead=1228 + + + oozie.wf.workflow.notification.url + {serviceUrl}/v1/oozieNotification/jobUpdate?jobId=$jobId%26status=$status + + \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/copyDataToImpalaCluster.sh b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/copyDataToImpalaCluster.sh new file mode 100644 index 000000000..5ad9df762 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/copyDataToImpalaCluster.sh @@ -0,0 +1,223 @@ +export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs +export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami) +if ! [ -L $link_folder ] +then + rm -Rf "$link_folder" + ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder} +fi + +export HADOOP_USER_NAME=$2 + +# Set the active HDFS node of OCEAN and IMPALA cluster. 
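+# The Impala cluster's HDFS runs with two master nodes ("mn1" and "mn2"), so either one may be
+# the active namenode at any given moment. Instead of hardcoding one, the loop below probes both
+# (with a few retries, in case a failover is in progress) and keeps whichever answers.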
+OCEAN_HDFS_NODE='hdfs://nameservice1'
+echo -e "\nOCEAN HDFS virtual-name which resolves automatically to the active-node: ${OCEAN_HDFS_NODE}"
+
+IMPALA_HDFS_NODE=''
+COUNTER=0
+while [ $COUNTER -lt 3 ]; do
+    if hdfs dfs -test -e hdfs://impala-cluster-mn1.openaire.eu/tmp >/dev/null 2>&1; then
+        IMPALA_HDFS_NODE='hdfs://impala-cluster-mn1.openaire.eu:8020'
+        break
+    elif hdfs dfs -test -e hdfs://impala-cluster-mn2.openaire.eu/tmp >/dev/null 2>&1; then
+        IMPALA_HDFS_NODE='hdfs://impala-cluster-mn2.openaire.eu:8020'
+        break
+    else
+        IMPALA_HDFS_NODE=''
+        sleep 1
+    fi
+    ((COUNTER++))
+done
+if [ -z "$IMPALA_HDFS_NODE" ]; then
+    echo -e "\n\nERROR: PROBLEM WHEN SETTING THE HDFS-NODE FOR THE IMPALA CLUSTER, AFTER ${COUNTER} RETRIES!\n\n"
+    exit 1
+fi
+echo -e "Active IMPALA HDFS Node: ${IMPALA_HDFS_NODE} , after ${COUNTER} retries.\n\n"
+
+IMPALA_HOSTNAME='impala-cluster-dn1.openaire.eu'
+IMPALA_CONFIG_FILE='/etc/impala_cluster/hdfs-site.xml'
+
+IMPALA_HDFS_DB_BASE_PATH="${IMPALA_HDFS_NODE}/user/hive/warehouse"
+
+
+# Set sed arguments.
+LOCATION_HDFS_NODE_SED_ARG="s|${OCEAN_HDFS_NODE}|${IMPALA_HDFS_NODE}|g" # This needs to be used with "sed -e", in order to use the "|" delimiter (the "/" delimiter conflicts with the URIs).
+
+# Set the SED command arguments for column-names with reserved words:
+DATE_SED_ARG_1='s/[[:space:]]\date[[:space:]]/\`date\`/g'
+DATE_SED_ARG_2='s/\.date,/\.\`date\`,/g' # "date" may be part of a larger field name, like "datestamp" or "date_aggregated", so we need to be careful with what we replace.
+DATE_SED_ARG_3='s/\.date[[:space:]]/\.\`date\` /g'
+
+HASH_SED_ARG_1='s/[[:space:]]\hash[[:space:]]/\`hash\`/g'
+HASH_SED_ARG_2='s/\.hash,/\.\`hash\`,/g'
+HASH_SED_ARG_3='s/\.hash[[:space:]]/\.\`hash\` /g'
+
+LOCATION_SED_ARG_1='s/[[:space:]]\location[[:space:]]/\`location\`/g'
+LOCATION_SED_ARG_2='s/\.location,/\.\`location\`,/g'
+LOCATION_SED_ARG_3='s/\.location[[:space:]]/\.\`location\` /g'
+
+
+function copydb() {
+    db=$1
+    echo -e "\nStart processing db: '${db}'..\n"
+
+    # Delete the old DB from the Impala cluster (if it exists).
+    impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "drop database if exists ${db} cascade" |& tee error.log # impala-shell prints all logs in stderr, so we need to capture them and put them in a file, in order to perform "grep" on them later
+    log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"`
+    if [ -n "$log_errors" ]; then
+        echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN DROPPING THE OLD DATABASE! EXITING...\n\n"
+        rm -f error.log
+        return 1
+    fi
+
+    # Make Impala aware of the deletion of the old DB immediately.
+    sleep 1
+    impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+
+    echo -e "\n\nCopying files of '${db}', from Ocean to Impala cluster..\n"
+    # Using max bandwidth of: 70 * 150 MB/s = 10.5 GB/s
+    # Using max memory of: 70 * 6144 MB = 420 GB
+    # Using 1 MB as the buffer-size.
+    # The "-Ddistcp.dynamic.recordsPerChunk=50" arg is not available in our version of hadoop.
+    # The "ug" args cannot be used, as we get a "User does not belong to hive" error.
+    # The "p" argument cannot be used, as it blocks the files from being used, giving a "sticky bit" error, even after applying chmod and chown on the files.
+    hadoop distcp -Dmapreduce.map.memory.mb=6144 -m 70 -bandwidth 150 \
+        -numListstatusThreads 40 \
+        -copybuffersize 1048576 \
+        -strategy dynamic \
+        -pb \
+        ${OCEAN_HDFS_NODE}/user/hive/warehouse/${db}.db ${IMPALA_HDFS_DB_BASE_PATH}
+
+    # Check the exit status of the "hadoop distcp" command.
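+    # (Note: "$?" holds the status of the most recently executed command only, so it has to be
+    # captured right away; even an intervening "echo" would overwrite it.)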
+    rc=$?
+    if [ $rc -eq 0 ]; then
+        echo -e "\nSuccessfully copied the files of '${db}'.\n"
+    else
+        echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}' WITH 'hadoop distcp'. GOT EXIT STATUS: ${rc}\n\n"
+        rm -f error.log
+        return 2
+    fi
+
+    # In case we ever use this script for a writable DB (using inserts/updates), we should perform the following costly operation as well..
+    #hdfs dfs -conf ${IMPALA_CONFIG_FILE} -chmod -R 777 ${TEMP_SUBDIR_FULLPATH}/${db}.db
+
+    echo -e "\nCreating schema for db: '${db}'\n"
+
+    # Create the new database (with the same name).
+    impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create database ${db}"
+
+    # Make Impala aware of the creation of the new DB immediately.
+    sleep 1
+    impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+    sleep 1
+    # Because "Hive" and "Impala" do not have compatible schemas, we cannot use the "show create table" output from Hive to create the exact same table in Impala.
+    # So, we have to find at least one parquet file of each table (checking that it exists) on the Ocean cluster, for Impala to extract the table-schema itself from that file.
+
+    all_create_view_statements=()
+
+    entities_on_ocean=`hive -e "show tables in ${db};" | sed 's/WARN:.*//g'` # Get the tables and views, without any potential "WARN" logs.
+    for i in ${entities_on_ocean[@]}; do # Use un-quoted values, as the elements are single words.
+        # Check if this is a view, by inspecting its create-statement: it prints "create view" for a view and "create table" otherwise. Unfortunately, there is no "show views" command.
+        create_entity_statement=`hive -e "show create table ${db}.${i};"` # This needs to happen in two stages, otherwise "grep" is not able to match a multi-line statement.
+
+        create_view_statement_test=`echo -e "$create_entity_statement" | grep 'CREATE VIEW'`
+        if [ -n "$create_view_statement_test" ]; then
+            echo -e "\n'${i}' is a view, so we will save its 'create view' statement and execute it on Impala, after all tables have been created.\n"
+            create_view_statement=`echo -e "$create_entity_statement" | sed 's/WARN:.*//g' | sed 's/\`//g' \
+                | sed 's/"$/;/' | sed 's/^"//' | sed 's/\\"\\"/\"/g' | sed -e "${LOCATION_HDFS_NODE_SED_ARG}" | sed "${DATE_SED_ARG_1}" | sed "${HASH_SED_ARG_1}" | sed "${LOCATION_SED_ARG_1}" \
+                | sed "${DATE_SED_ARG_2}" | sed "${HASH_SED_ARG_2}" | sed "${LOCATION_SED_ARG_2}" \
+                | sed "${DATE_SED_ARG_3}" | sed "${HASH_SED_ARG_3}" | sed "${LOCATION_SED_ARG_3}"`
+            all_create_view_statements+=("$create_view_statement")
+        else
+            echo -e "\n'${i}' is a table, so we will check for its parquet files and create the table on the Impala cluster.\n"
+            CURRENT_PRQ_FILE=`hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/${i}/" | grep -v 'Found' | grep -v '_impala_insert_staging' | head -1`
+            if [ -z "$CURRENT_PRQ_FILE" ]; then # If there is no parquet file inside.
+                echo -e "\nERROR: THE TABLE \"${i}\" HAD NO FILES TO GET THE SCHEMA FROM! IT'S EMPTY!\n\n"
+            else
+                impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.${i} like parquet '${CURRENT_PRQ_FILE}' stored as parquet;" |& tee error.log
+                log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"`
+                if [ -n "$log_errors" ]; then
+                    echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN CREATING TABLE '${i}'!\n\n"
+                fi
+            fi
+        fi
+    done
+
+    echo -e "\nAll tables have been created, going to create the views..\n"
+
+    # Time to loop through the views and create them.
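+    # "show tables" returns the views in no particular dependency order, yet a view can only be
+    # created after every view it selects from. A hypothetical example of why one pass is not enough:
+    #   create view db.v_b as select * from db.v_a;   -- fails while db.v_a does not exist yet
+    #   create view db.v_a as select * from db.t;     -- succeeds (reads a plain table)
+    #   create view db.v_b as select * from db.v_a;   -- succeeds on the retry pass
+    # So each iteration ("level") below creates every view it can and re-queues the failures,
+    # until either none remain or a full pass makes no progress.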
+    # At this point, all table-schemas should have been created.
+
+    previous_num_of_views_to_retry=${#all_create_view_statements[@]}
+    if [[ $previous_num_of_views_to_retry -gt 0 ]]; then
+        echo -e "\nAll_create_view_statements:\n\n${all_create_view_statements[@]}\n" # DEBUG
+        # Make Impala aware of the new tables, so it knows them when creating the views.
+        sleep 1
+        impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+        sleep 1
+    else
+        echo -e "\nDB '${db}' does not contain any views.\n"
+    fi
+
+    level_counter=0
+    while [[ ${#all_create_view_statements[@]} -gt 0 ]]; do
+        ((level_counter++))
+        # The only accepted reason for a view to fail to be created, is that it depends on another view which has not been created yet.
+        # In that case, we should retry creating this particular view again.
+        should_retry_create_view_statements=()
+
+        for create_view_statement in "${all_create_view_statements[@]}"; do # Here we use double quotes, as the elements are whole phrases, not single words.
+            impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "${create_view_statement}" |& tee error.log # impala-shell prints all logs in stderr, so we need to capture them and put them in a file, in order to perform "grep" on them later
+            specific_errors=`cat error.log | grep -E "FAILED: ParseException line 1:13 missing TABLE at 'view'|ERROR: AnalysisException: Could not resolve table reference:"`
+            if [ -n "$specific_errors" ]; then
+                echo -e "\nspecific_errors: ${specific_errors}\n"
+                echo -e "\nView '$(cat error.log | grep "CREATE VIEW " | sed 's/CREATE VIEW //g' | sed 's/ as select .*//g')' failed to be created, possibly because it depends on another view.\n"
+                should_retry_create_view_statements+=("$create_view_statement")
+            else
+                sleep 1 # Wait a bit for Impala to register that the view was created, before it is possibly referenced by another view.
+            fi
+        done
+
+        new_num_of_views_to_retry=${#should_retry_create_view_statements[@]}
+        if [[ $new_num_of_views_to_retry -eq $previous_num_of_views_to_retry ]]; then
+            echo -e "\n\nERROR: THE NUMBER OF VIEWS TO RETRY HAS NOT BEEN REDUCED! THE SCRIPT IS LIKELY GOING INTO AN INFINITE LOOP! EXITING..\n\n"
+            return 3
+        elif [[ $new_num_of_views_to_retry -gt 0 ]]; then
+            echo -e "\nTo be retried \"create_view_statements\":\n\n${should_retry_create_view_statements[@]}\n"
+            previous_num_of_views_to_retry=$new_num_of_views_to_retry
+        else
+            echo -e "\nFinished creating views, for db: '${db}', in level-${level_counter}.\n"
+        fi
+        all_create_view_statements=("${should_retry_create_view_statements[@]}") # This is needed in any case, to either move forward with the remaining views or stop at 0 remaining views.
+    done
+
+    sleep 1
+    impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA"
+    sleep 1
+
+    echo -e "\nComputing stats for tables..\n"
+    entities_on_impala=`impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"`
+    for i in ${entities_on_impala[@]}; do # Use un-quoted values, as the elements are single words.
+        # Take the create-statement from the Ocean cluster, just to check if it's a view, as its output is easier to parse than the one from impala-shell on the Impala cluster.
+        create_view_statement=`hive -e "show create table ${db}.${i};" | grep "CREATE VIEW"` # This grep works here, as we do not need to match multiple lines.
+        if [ -z "$create_view_statement" ]; then # If it's a table, then compute stats for it.
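+            # ("compute stats" is the Impala-side counterpart of the "ANALYZE TABLE ... COMPUTE STATISTICS"
+            # statements kept commented-out in the Hive SQL scripts of this module.)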
+            impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${i}";
+        fi
+    done
+
+    if [ "${entities_on_impala[@]}" == "${entities_on_ocean[@]}" ]; then
+        echo -e "\nAll entities have been copied to Impala cluster.\n"
+    else
+        echo -e "\n\nERROR: 1 OR MORE ENTITIES OF DB '${db}' FAILED TO BE COPIED TO IMPALA CLUSTER!\n\n"
+        rm -f error.log
+        return 4
+    fi
+
+    rm -f error.log
+    echo -e "\n\nFinished processing db: ${db}\n\n"
+}
+
+
+MONITOR_DB=$1
+
+copydb $MONITOR_DB'_institutions'
+copydb $MONITOR_DB
+
diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/finalizeImpalaCluster.sh b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/finalizeImpalaCluster.sh new file mode 100644 index 000000000..cb5452154 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/finalizeImpalaCluster.sh @@ -0,0 +1,57 @@
+export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs
+export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami)
+if ! [ -L $link_folder ]
+then
+    rm -Rf "$link_folder"
+    ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder}
+fi
+#
+#function createShadowDB() {
+#    SOURCE=$1
+#    SHADOW=$2
+#
+#    # drop views from db
+#    for i in `impala-shell -i impala-cluster-dn1.openaire.eu -d ${SHADOW} --delimited -q "show tables"`;
+#        do
+#            `impala-shell -i impala-cluster-dn1.openaire.eu -d ${SHADOW} -q "drop view $i;"`;
+#        done
+#
+#    impala-shell -i impala-cluster-dn1.openaire.eu -q "drop database ${SHADOW} CASCADE";
+#    impala-shell -i impala-cluster-dn1.openaire.eu -q "create database if not exists ${SHADOW}";
+##    impala-shell -i impala-cluster-dn1.openaire.eu -d ${SHADOW} -q "show tables" | sed "s/^/drop view if exists ${SHADOW}./" | sed "s/$/;/" | impala-shell -i impala-cluster-dn1.openaire.eu -f -
+#    impala-shell -i impala-cluster-dn1.openaire.eu -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${SHADOW}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -i impala-cluster-dn1.openaire.eu -f -
+#}
+#
+#MONITOR_DB=$1
+#MONITOR_DB_SHADOW=$2
+#
+#createShadowDB $MONITOR_DB'_institutions' $MONITOR_DB'_institutions_shadow'
+#createShadowDB $MONITOR_DB $MONITOR_DB'_shadow'
+
+SOURCE=$1
+PRODUCTION=$2
+echo ${SOURCE}
+echo ${PRODUCTION}
+
+#echo "Updating ${PRODUCTION} monitor database old cluster"
+#impala-shell -q "create database if not exists ${PRODUCTION}"
+#impala-shell -d ${PRODUCTION} -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}./" | sed "s/$/;/" | impala-shell -c -f -
+#impala-shell -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -c -f -
+#
+#echo "Updating ${PRODUCTION}_institutions database old cluster"
+#impala-shell -q "create database if not exists ${PRODUCTION}_institutions"
+#impala-shell -d ${PRODUCTION}_institutions -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}_institutions./" | sed "s/$/;/" | impala-shell -c -f -
+#impala-shell -d ${SOURCE}_institutions -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}_institutions.\1 as select * from ${SOURCE}_institutions.\1;/" | impala-shell -c -f -
+#echo "Production institutions db ready!"
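+# The active pipelines below regenerate ${PRODUCTION} as a thin layer of views over ${SOURCE}:
+# "show tables" lists every entity of one db, each name is rewritten by "sed" into a DDL
+# statement, and the statements are piped back into impala-shell ("-f -" reads them from stdin).
+# For an illustrative table name "result", the two generated statements would be:
+#   drop view if exists ${PRODUCTION}.result;
+#   create view ${PRODUCTION}.result as select * from ${SOURCE}.result;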
+ +echo "Updating ${PRODUCTION} monitor database" +impala-shell -i impala-cluster-dn1.openaire.eu -q "create database if not exists ${PRODUCTION}" +impala-shell -i impala-cluster-dn1.openaire.eu -d ${PRODUCTION} -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}./" | sed "s/$/;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f - +impala-shell -i impala-cluster-dn1.openaire.eu -d ${SOURCE} -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}.\1 as select * from ${SOURCE}.\1;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f - +echo "Production monitor db ready!" + +echo "Updating ${PRODUCTION}_institutions database" +impala-shell -i impala-cluster-dn1.openaire.eu -q "create database if not exists ${PRODUCTION}_institutions" +impala-shell -i impala-cluster-dn1.openaire.eu -d ${PRODUCTION}_institutions -q "show tables" --delimited | sed "s/^/drop view if exists ${PRODUCTION}_institutions./" | sed "s/$/;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f - +impala-shell -i impala-cluster-dn1.openaire.eu -d ${SOURCE}_institutions -q "show tables" --delimited | sed "s/\(.*\)/create view ${PRODUCTION}_institutions.\1 as select * from ${SOURCE}_institutions.\1;/" | impala-shell -i impala-cluster-dn1.openaire.eu -c -f - +echo "Production insitutions db ready!" \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/monitor.sh b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/monitor.sh new file mode 100644 index 000000000..50f5983e4 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/monitor.sh @@ -0,0 +1,60 @@ +export PYTHON_EGG_CACHE=/home/$(whoami)/.python-eggs +export link_folder=/tmp/impala-shell-python-egg-cache-$(whoami) +if ! 
[ -L $link_folder ]
+then
+    rm -Rf "$link_folder"
+    ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder}
+fi
+
+export SOURCE=$1
+export TARGET=$2
+export SHADOW=$3
+export SCRIPT_PATH=$4
+export SCRIPT_PATH2=$5
+export SCRIPT_PATH3=$6
+
+export HIVE_OPTS="-hiveconf mapred.job.queue.name=analytics -hiveconf hive.spark.client.connect.timeout=120000ms -hiveconf hive.spark.client.server.connect.timeout=300000ms -hiveconf spark.executor.memory=19166291558 -hiveconf spark.yarn.executor.memoryOverhead=3225 -hiveconf spark.driver.memory=11596411699 -hiveconf spark.yarn.driver.memoryOverhead=1228"
+export HADOOP_USER_NAME="oozie"
+
+echo "Getting file from " $4
+hdfs dfs -copyToLocal $4
+
+echo "Getting file from " $5
+hdfs dfs -copyToLocal $5
+
+echo "Getting file from " $6
+hdfs dfs -copyToLocal $6
+
+#update Monitor DB
+cat updateMonitorDBAll.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2/g1" > foo
+hive $HIVE_OPTS -f foo
+
+#update Institutions DB
+cat updateMonitorDB_institutions.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2_institutions/g1" > foo
+hive $HIVE_OPTS -f foo
+cat updateMonitorDB.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2_institutions/g1" > foo
+hive $HIVE_OPTS -f foo
+
+
+
+echo "Hive shell finished"
+
+#echo "Updating shadow monitor institutions database"
+#hive -e "drop database if exists ${SHADOW}_institutions cascade"
+#hive -e "create database if not exists ${SHADOW}_institutions"
+#hive $HIVE_OPTS --database ${2}_institutions -e "show tables" | grep -v WARN | sed "s/\(.*\)/create view ${SHADOW}_institutions.\1 as select * from ${2}_institutions.\1;/" > foo
+#hive -f foo
+#echo "Shadow db monitor institutions ready!"
+#
+##update Monitor DB
+#cat updateMonitorDBAll.sql | sed "s/SOURCE/$1/g" | sed "s/TARGET/$2/g1" > foo
+#hive $HIVE_OPTS -f foo
+#
+#echo "Hive shell finished"
+#
+#echo "Updating shadow monitor database"
+#hive -e "drop database if exists ${SHADOW} cascade"
+#hive -e "create database if not exists ${SHADOW}"
+#hive $HIVE_OPTS --database ${2} -e "show tables" | grep -v WARN | sed "s/\(.*\)/create view ${SHADOW}.\1 as select * from ${2}.\1;/" > foo
+#hive -f foo
+#echo "Shadow db monitor institutions ready!"
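+# Illustrative invocation (the db names below are placeholders; the real values and the three
+# HDFS script paths are passed in by the Oozie shell action of this module's workflow):
+#   ./monitor.sh stats_db monitor_db monitor_db_shadow \
+#       hdfs:///path/to/updateMonitorDBAll.sql \
+#       hdfs:///path/to/updateMonitorDB.sql \
+#       hdfs:///path/to/updateMonitorDB_institutions.sql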
diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDB.sql b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDB.sql new file mode 100644 index 000000000..321fba87a --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDB.sql @@ -0,0 +1,278 @@ +--drop database if exists TARGET cascade; +--create database if not exists TARGET; +-- +--create view if not exists TARGET.category as select * from SOURCE.category; +--create view if not exists TARGET.concept as select * from SOURCE.concept; +--create view if not exists TARGET.context as select * from SOURCE.context; +--create view if not exists TARGET.country as select * from SOURCE.country; +--create view if not exists TARGET.countrygdp as select * from SOURCE.countrygdp; +--create view if not exists TARGET.creation_date as select * from SOURCE.creation_date; +--create view if not exists TARGET.funder as select * from SOURCE.funder; +--create view if not exists TARGET.fundref as select * from SOURCE.fundref; +--create view if not exists TARGET.rndexpenditure as select * from SOURCE.rndexpediture; +--create view if not exists TARGET.rndgdpexpenditure as select * from SOURCE.rndgdpexpenditure; +--create view if not exists TARGET.doctoratestudents as select * from SOURCE.doctoratestudents; +--create view if not exists TARGET.totalresearchers as select * from SOURCE.totalresearchers; +--create view if not exists TARGET.totalresearchersft as select * from SOURCE.totalresearchersft; +--create view if not exists TARGET.hrrst as select * from SOURCE.hrrst; +-- +--create table TARGET.result stored as parquet as +-- select distinct * from ( +-- select * from SOURCE.result r where exists (select 1 from SOURCE.result_projects rp join SOURCE.project p on rp.project=p.id where rp.id=r.id) +-- union all +-- select * from SOURCE.result r where exists (select 1 from SOURCE.result_concepts rc where rc.id=r.id) +-- union all +-- select * from SOURCE.result r where exists (select 1 from SOURCE.result_organization ro where ro.id=r.id and ro.organization in ( +-- 'openorgs____::b84450f9864182c67b8611b5593f4250', --"Athena Research and Innovation Center In Information Communication & Knowledge Technologies', --ARC" +-- 'openorgs____::d41cf6bd4ab1b1362a44397e0b95c975', --National Research Council +-- 'openorgs____::d2a09b9d5eabb10c95f9470e172d05d2', --??? Not exists ?? 
+--        'openorgs____::d169c7407dd417152596908d48c11460',    --Masaryk University
+--        'openorgs____::1ec924b1759bb16d0a02f2dad8689b21',    --University of Belgrade
+--        'openorgs____::0ae431b820e4c33db8967fbb2b919150',    --University of Helsinki
+--        'openorgs____::759d59f05d77188faee99b7493b46805',    --University of Minho
+--        'openorgs____::cad284878801b9465fa51a95b1d779db',    --Universidad Politécnica de Madrid
+--        'openorgs____::eadc8da90a546e98c03f896661a2e4d4',    --University of Göttingen
+--        'openorgs____::c0286313e36479eff8676dba9b724b40',    --National and Kapodistrian University of Athens
+--        --    'openorgs____::c80a8243a5e5c620d7931c88d93bf17a',    --Université Paris Diderot
+--        'openorgs____::c08634f0a6b0081c3dc6e6c93a4314f3',    --Bielefeld University
+--        'openorgs____::6fc85e4a8f7ecaf4b0c738d010e967ea',    --University of Southern Denmark
+--        'openorgs____::3d6122f87f9a97a99d8f6e3d73313720',    --Humboldt-Universität zu Berlin
+--        'openorgs____::16720ada63d0fa8ca41601feae7d1aa5',    --TU Darmstadt
+--        'openorgs____::ccc0a066b56d2cfaf90c2ae369df16f5',    --KU Leuven
+--        'openorgs____::4c6f119632adf789746f0a057ed73e90',    --University of the Western Cape
+--        'openorgs____::ec3665affa01aeafa28b7852c4176dbd',    --Rudjer Boskovic Institute
+--        'openorgs____::5f31346d444a7f06a28c880fb170b0f6',    --Ghent University
+--        'openorgs____::2dbe47117fd5409f9c61620813456632',    --University of Luxembourg
+--        'openorgs____::6445d7758d3a40c4d997953b6632a368',    --National Institute of Informatics (NII)
+--        'openorgs____::b77c01aa15de3675da34277d48de2ec1',    -- Valencia Catholic University Saint Vincent Martyr
+--        'openorgs____::7fe2f66cdc43983c6b24816bfe9cf6a0',    -- University of Warsaw
+--        'openorgs____::15e7921fc50d9aa1229a82a84429419e',    -- University Of Thessaly
+--        'openorgs____::11f7919dadc8f8a7251af54bba60c956',    -- Technical University of Crete
+--        'openorgs____::84f0c5f5dbb6daf42748485924efde4b',    -- University of Piraeus
+--        'openorgs____::4ac562f0376fce3539504567649cb373',    -- University of Patras
+--        'openorgs____::3e8d1f8c3f6cd7f418b09f1f58b4873b',    -- Aristotle University of Thessaloniki
+--        'openorgs____::3fcef6e1c469c10f2a84b281372c9814',    -- World Bank
+--        'openorgs____::1698a2eb1885ef8adb5a4a969e745ad3',    -- École des Ponts ParisTech
+--        'openorgs____::e15adb13c4dadd49de4d35c39b5da93a',    -- Nanyang Technological University
+--        'openorgs____::4b34103bde246228fcd837f5f1bf4212',    -- Autonomous University of Barcelona
+--        'openorgs____::72ec75fcfc4e0df1a76dc4c49007fceb',    -- McMaster University
+--        'openorgs____::51c7fc556e46381734a25a6fbc3fd398',    -- University of Modena and Reggio Emilia
+--        'openorgs____::235d7f9ad18ecd7e6dc62ea4990cb9db',    -- Bilkent University
+--        'openorgs____::31f2fa9e05b49d4cf40a19c3fed8eb06',    -- Saints Cyril and Methodius University of Skopje
+--        'openorgs____::db7686f30f22cbe73a4fde872ce812a6',    -- University of Milan
+--        'openorgs____::b8b8ca674452579f3f593d9f5e557483',    -- University College Cork
+--        'openorgs____::38d7097854736583dde879d12dacafca',    -- Brown University
+--        'openorgs____::57784c9e047e826fefdb1ef816120d92',    --Arts et Métiers ParisTech
+--        'openorgs____::2530baca8a15936ba2e3297f2bce2e7e',    -- University of Cape Town
+--        'openorgs____::d11f981828c485cd23d93f7f24f24db1',    -- Technological University Dublin
+--        'openorgs____::5e6bf8962665cdd040341171e5c631d8',    -- Delft University of Technology
+--        'openorgs____::846cb428d3f52a445f7275561a7beb5d',    -- University of Manitoba
+--        'openorgs____::eb391317ed0dc684aa81ac16265de041',    -- Universitat Rovira i Virgili
+--        'openorgs____::66aa9fc2fceb271423dfabcc38752dc0',    --
Lund University +-- 'openorgs____::3cff625a4370d51e08624cc586138b2f' -- IMT Atlantique +-- ) )) foo; +-- +--ANALYZE TABLE TARGET.result COMPUTE STATISTICS; + +create view if not exists TARGET.category as select * from SOURCE.category; +create view if not exists TARGET.concept as select * from SOURCE.concept; +create view if not exists TARGET.context as select * from SOURCE.context; +create view if not exists TARGET.country as select * from SOURCE.country; +create view if not exists TARGET.countrygdp as select * from SOURCE.countrygdp; +create view if not exists TARGET.creation_date as select * from SOURCE.creation_date; +create view if not exists TARGET.funder as select * from SOURCE.funder; +create view if not exists TARGET.fundref as select * from SOURCE.fundref; +create view if not exists TARGET.rndexpenditure as select * from SOURCE.rndexpediture; +create view if not exists TARGET.rndgdpexpenditure as select * from SOURCE.rndgdpexpenditure; +create view if not exists TARGET.doctoratestudents as select * from SOURCE.doctoratestudents; +create view if not exists TARGET.totalresearchers as select * from SOURCE.totalresearchers; +create view if not exists TARGET.totalresearchersft as select * from SOURCE.totalresearchersft; +create view if not exists TARGET.hrrst as select * from SOURCE.hrrst; +--create view if not exists TARGET.graduatedoctorates as select * from SOURCE.graduatedoctorates; + +create table TARGET.result_citations stored as parquet as select * from SOURCE.result_citations orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_citations COMPUTE STATISTICS; + +create table TARGET.result_references_oc stored as parquet as select * from SOURCE.result_references_oc orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_references_oc COMPUTE STATISTICS; + +create table TARGET.result_citations_oc stored as parquet as select * from SOURCE.result_citations_oc orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_citations_oc COMPUTE STATISTICS; + +create table TARGET.result_classifications stored as parquet as select * from SOURCE.result_classifications orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_classifications COMPUTE STATISTICS; + +create table TARGET.result_apc stored as parquet as select * from SOURCE.result_apc orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_apc COMPUTE STATISTICS; + +create table TARGET.result_concepts stored as parquet as select * from SOURCE.result_concepts orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_concepts COMPUTE STATISTICS; + +create table TARGET.result_datasources stored as parquet as select * from SOURCE.result_datasources orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_datasources COMPUTE STATISTICS; + +create table TARGET.result_fundercount stored as parquet as select * from SOURCE.result_fundercount orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_fundercount COMPUTE STATISTICS; + +create table TARGET.result_gold stored as parquet as select * from SOURCE.result_gold orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_gold COMPUTE STATISTICS; + +create table TARGET.result_greenoa stored as parquet 
as select * from SOURCE.result_greenoa orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_greenoa COMPUTE STATISTICS; + +create table TARGET.result_languages stored as parquet as select * from SOURCE.result_languages orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_languages COMPUTE STATISTICS; + +create table TARGET.result_licenses stored as parquet as select * from SOURCE.result_licenses orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_licenses COMPUTE STATISTICS; + +create table TARGET.licenses_normalized STORED AS PARQUET as select * from SOURCE.licenses_normalized; +--ANALYZE TABLE TARGET.licenses_normalized COMPUTE STATISTICS; + +create table TARGET.result_oids stored as parquet as select * from SOURCE.result_oids orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_oids COMPUTE STATISTICS; + +create table TARGET.result_organization stored as parquet as select * from SOURCE.result_organization orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_organization COMPUTE STATISTICS; + +create table TARGET.result_peerreviewed stored as parquet as select * from SOURCE.result_peerreviewed orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_peerreviewed COMPUTE STATISTICS; + +create table TARGET.result_pids stored as parquet as select * from SOURCE.result_pids orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_pids COMPUTE STATISTICS; + +create table TARGET.result_projectcount stored as parquet as select * from SOURCE.result_projectcount orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_projectcount COMPUTE STATISTICS; + +create table TARGET.result_projects stored as parquet as select * from SOURCE.result_projects orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_projects COMPUTE STATISTICS; + +create table TARGET.result_refereed stored as parquet as select * from SOURCE.result_refereed orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_refereed COMPUTE STATISTICS; + +create table TARGET.result_sources stored as parquet as select * from SOURCE.result_sources orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_sources COMPUTE STATISTICS; + +create table TARGET.result_topics stored as parquet as select * from SOURCE.result_topics orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_topics COMPUTE STATISTICS; + +create table TARGET.result_fos stored as parquet as select * from SOURCE.result_fos orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_fos COMPUTE STATISTICS; + +create table TARGET.result_accessroute stored as parquet as select * from SOURCE.result_accessroute orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_accessroute COMPUTE STATISTICS; + +create table TARGET.result_instance stored as parquet as select * from SOURCE.result_instance orig where exists (select 1 from TARGET.result r where r.id=orig.id); +create table TARGET.result_orcid stored as parquet as select * from SOURCE.result_orcid orig 
where exists (select 1 from TARGET.result r where r.id=orig.id); + +create view TARGET.foo1 as select * from SOURCE.result_result rr where rr.source in (select id from TARGET.result); +create view TARGET.foo2 as select * from SOURCE.result_result rr where rr.target in (select id from TARGET.result); +create table TARGET.result_result STORED AS PARQUET as select distinct * from (select * from TARGET.foo1 union all select * from TARGET.foo2) foufou; +drop view TARGET.foo1; +drop view TARGET.foo2; +--ANALYZE TABLE TARGET.result_result COMPUTE STATISTICS; + +-- datasources +create view if not exists TARGET.datasource as select * from SOURCE.datasource; +create view if not exists TARGET.datasource_oids as select * from SOURCE.datasource_oids; +create view if not exists TARGET.datasource_organizations as select * from SOURCE.datasource_organizations; +create view if not exists TARGET.datasource_sources as select * from SOURCE.datasource_sources; + +create table TARGET.datasource_results stored as parquet as select id as result, datasource as id from TARGET.result_datasources; +--ANALYZE TABLE TARGET.datasource_results COMPUTE STATISTICS; + +-- organizations +create view if not exists TARGET.organization as select * from SOURCE.organization; +create view if not exists TARGET.organization_datasources as select * from SOURCE.organization_datasources; +create view if not exists TARGET.organization_pids as select * from SOURCE.organization_pids; +create view if not exists TARGET.organization_projects as select * from SOURCE.organization_projects; +create view if not exists TARGET.organization_sources as select * from SOURCE.organization_sources; + +-- projects +create view if not exists TARGET.project as select * from SOURCE.project; +create view if not exists TARGET.project_oids as select * from SOURCE.project_oids; +create view if not exists TARGET.project_organizations as select * from SOURCE.project_organizations; +create view if not exists TARGET.project_resultcount as select * from SOURCE.project_resultcount; +create view if not exists TARGET.project_classification as select * from SOURCE.project_classification; +create view if not exists TARGET.project_organization_contribution as select * from SOURCE.project_organization_contribution; + +create table TARGET.project_results stored as parquet as select id as result, project as id from TARGET.result_projects; +--ANALYZE TABLE TARGET.project_results COMPUTE STATISTICS; + +-- indicators +-- Sprint 1 ---- +create table TARGET.indi_pub_green_oa stored as parquet as select * from SOURCE.indi_pub_green_oa orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_green_oa COMPUTE STATISTICS; +create table TARGET.indi_pub_grey_lit stored as parquet as select * from SOURCE.indi_pub_grey_lit orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_grey_lit COMPUTE STATISTICS; +create table TARGET.indi_pub_doi_from_crossref stored as parquet as select * from SOURCE.indi_pub_doi_from_crossref orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_doi_from_crossref COMPUTE STATISTICS; +-- Sprint 2 ---- +create table TARGET.indi_result_has_cc_licence stored as parquet as select * from SOURCE.indi_result_has_cc_licence orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_has_cc_licence COMPUTE STATISTICS; +create table TARGET.indi_result_has_cc_licence_url stored as 
parquet as select * from SOURCE.indi_result_has_cc_licence_url orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_has_cc_licence_url COMPUTE STATISTICS; +create table TARGET.indi_pub_has_abstract stored as parquet as select * from SOURCE.indi_pub_has_abstract orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_has_abstract COMPUTE STATISTICS; +create table TARGET.indi_result_with_orcid stored as parquet as select * from SOURCE.indi_result_with_orcid orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_with_orcid COMPUTE STATISTICS; +---- Sprint 3 ---- +create table TARGET.indi_funded_result_with_fundref stored as parquet as select * from SOURCE.indi_funded_result_with_fundref orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_funded_result_with_fundref COMPUTE STATISTICS; +create view TARGET.indi_result_org_collab as select * from SOURCE.indi_result_org_collab; +create view TARGET.indi_result_org_country_collab as select * from SOURCE.indi_result_org_country_collab; +create view TARGET.indi_project_collab_org as select * from SOURCE.indi_project_collab_org; +create view TARGET.indi_project_collab_org_country as select * from SOURCE.indi_project_collab_org_country; +create view TARGET.indi_funder_country_collab as select * from SOURCE.indi_funder_country_collab; +create view TARGET.indi_result_country_collab as select * from SOURCE.indi_result_country_collab; +---- Sprint 4 ---- +create table TARGET.indi_pub_diamond stored as parquet as select * from SOURCE.indi_pub_diamond orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_diamond COMPUTE STATISTICS; +create table TARGET.indi_pub_in_transformative stored as parquet as select * from SOURCE.indi_pub_in_transformative orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_in_transformative COMPUTE STATISTICS; +create table TARGET.indi_pub_closed_other_open stored as parquet as select * from SOURCE.indi_pub_closed_other_open orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_closed_other_open COMPUTE STATISTICS; +---- Sprint 5 ---- +create table TARGET.indi_result_no_of_copies stored as parquet as select * from SOURCE.indi_result_no_of_copies orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_no_of_copies COMPUTE STATISTICS; +---- Sprint 6 ---- +create table TARGET.indi_pub_hybrid_oa_with_cc stored as parquet as select * from SOURCE.indi_pub_hybrid_oa_with_cc orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_hybrid_oa_with_cc COMPUTE STATISTICS; +create table TARGET.indi_pub_bronze_oa stored as parquet as select * from SOURCE.indi_pub_bronze_oa orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_bronze_oa COMPUTE STATISTICS; +create table TARGET.indi_pub_downloads stored as parquet as select * from SOURCE.indi_pub_downloads orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); +--ANALYZE TABLE TARGET.indi_pub_downloads COMPUTE STATISTICS; +create table TARGET.indi_pub_downloads_datasource stored as parquet as select * from SOURCE.indi_pub_downloads_datasource orig where exists (select 1 from TARGET.result r where 
r.id=orig.result_id); +--ANALYZE TABLE TARGET.indi_pub_downloads_datasource COMPUTE STATISTICS; +create table TARGET.indi_pub_downloads_year stored as parquet as select * from SOURCE.indi_pub_downloads_year orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); +--ANALYZE TABLE TARGET.indi_pub_downloads_year COMPUTE STATISTICS; +create table TARGET.indi_pub_downloads_datasource_year stored as parquet as select * from SOURCE.indi_pub_downloads_datasource_year orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); +--ANALYZE TABLE TARGET.indi_pub_downloads_datasource_year COMPUTE STATISTICS; +---- Sprint 7 ---- +create table TARGET.indi_pub_gold_oa stored as parquet as select * from SOURCE.indi_pub_gold_oa orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_gold_oa COMPUTE STATISTICS; +create table TARGET.indi_pub_hybrid stored as parquet as select * from SOURCE.indi_pub_hybrid orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_hybrid COMPUTE STATISTICS; +create view TARGET.indi_org_fairness as select * from SOURCE.indi_org_fairness; +create view TARGET.indi_org_fairness_pub_pr as select * from SOURCE.indi_org_fairness_pub_pr; +create view TARGET.indi_org_fairness_pub_year as select * from SOURCE.indi_org_fairness_pub_year; +create view TARGET.indi_org_fairness_pub as select * from SOURCE.indi_org_fairness_pub; +create view TARGET.indi_org_fairness_year as select * from SOURCE.indi_org_fairness_year; +create view TARGET.indi_org_findable_year as select * from SOURCE.indi_org_findable_year; +create view TARGET.indi_org_findable as select * from SOURCE.indi_org_findable; +create view TARGET.indi_org_openess as select * from SOURCE.indi_org_openess; +create view TARGET.indi_org_openess_year as select * from SOURCE.indi_org_openess_year; +create table TARGET.indi_pub_has_preprint stored as parquet as select * from SOURCE.indi_pub_has_preprint orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_has_preprint COMPUTE STATISTICS; +create table TARGET.indi_pub_in_subscribed stored as parquet as select * from SOURCE.indi_pub_in_subscribed orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_in_subscribed COMPUTE STATISTICS; +create table TARGET.indi_result_with_pid stored as parquet as select * from SOURCE.indi_result_with_pid orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_with_pid COMPUTE STATISTICS; +create table TARGET.indi_impact_measures stored as parquet as select * from SOURCE.indi_impact_measures orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_impact_measures COMPUTE STATISTICS; +create table TARGET.indi_pub_interdisciplinarity stored as parquet as select * from SOURCE.indi_pub_interdisciplinarity orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_interdisciplinarity COMPUTE STATISTICS; +create table TARGET.result_apc_affiliations stored as parquet as select * from SOURCE.result_apc_affiliations orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_apc_affiliations COMPUTE STATISTICS; +create table TARGET.indi_is_project_result_after stored as parquet as select * from SOURCE.indi_is_project_result_after orig where exists (select 1 from 
TARGET.result r where r.id=orig.result_id); +create table TARGET.indi_is_funder_plan_s stored as parquet as select * from SOURCE.indi_is_funder_plan_s orig where exists (select 1 from TARGET.result r where r.id=orig.id); diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDBAll.sql b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDBAll.sql new file mode 100644 index 000000000..35ab42029 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDBAll.sql @@ -0,0 +1,297 @@ +drop database if exists TARGET cascade; +create database if not exists TARGET; + +create view if not exists TARGET.category as select * from SOURCE.category; +create view if not exists TARGET.concept as select * from SOURCE.concept; +create view if not exists TARGET.context as select * from SOURCE.context; +create view if not exists TARGET.country as select * from SOURCE.country; +create view if not exists TARGET.countrygdp as select * from SOURCE.countrygdp; +create view if not exists TARGET.creation_date as select * from SOURCE.creation_date; +create view if not exists TARGET.funder as select * from SOURCE.funder; +create view if not exists TARGET.fundref as select * from SOURCE.fundref; +create view if not exists TARGET.rndexpenditure as select * from SOURCE.rndexpediture; +create view if not exists TARGET.rndgdpexpenditure as select * from SOURCE.rndgdpexpenditure; +create view if not exists TARGET.doctoratestudents as select * from SOURCE.doctoratestudents; +create view if not exists TARGET.totalresearchers as select * from SOURCE.totalresearchers; +create view if not exists TARGET.totalresearchersft as select * from SOURCE.totalresearchersft; +create view if not exists TARGET.hrrst as select * from SOURCE.hrrst; +--create view if not exists TARGET.graduatedoctorates as select * from SOURCE.graduatedoctorates; + +create table TARGET.result stored as parquet as + select distinct * from ( + select * from SOURCE.result r where exists (select 1 from SOURCE.result_projects rp join SOURCE.project p on rp.project=p.id where rp.id=r.id) + union all + select * from SOURCE.result r where exists (select 1 from SOURCE.result_concepts rc where rc.id=r.id) + union all + select * from SOURCE.result r where exists (select 1 from SOURCE.result_organization ro where ro.id=r.id and ro.organization in ( + 'openorgs____::b84450f9864182c67b8611b5593f4250', --"Athena Research and Innovation Center In Information Communication & Knowledge Technologies', --ARC" + 'openorgs____::d41cf6bd4ab1b1362a44397e0b95c975', --National Research Council + 'openorgs____::d2a09b9d5eabb10c95f9470e172d05d2', --??? Not exists ?? 
+ 'openorgs____::d169c7407dd417152596908d48c11460', --Masaryk University + 'openorgs____::1ec924b1759bb16d0a02f2dad8689b21', --University of Belgrade + 'openorgs____::0ae431b820e4c33db8967fbb2b919150', --University of Helsinki + 'openorgs____::759d59f05d77188faee99b7493b46805', --University of Minho + 'openorgs____::cad284878801b9465fa51a95b1d779db', --Universidad Politécnica de Madrid + 'openorgs____::eadc8da90a546e98c03f896661a2e4d4', --University of Göttingen + 'openorgs____::c0286313e36479eff8676dba9b724b40', --National and Kapodistrian University of Athens + -- 'openorgs____::c80a8243a5e5c620d7931c88d93bf17a', --Université Paris Diderot + 'openorgs____::c08634f0a6b0081c3dc6e6c93a4314f3', --Bielefeld University + 'openorgs____::6fc85e4a8f7ecaf4b0c738d010e967ea', --University of Southern Denmark + 'openorgs____::3d6122f87f9a97a99d8f6e3d73313720', --Humboldt-Universität zu Berlin + 'openorgs____::16720ada63d0fa8ca41601feae7d1aa5', --TU Darmstadt + 'openorgs____::ccc0a066b56d2cfaf90c2ae369df16f5', --KU Leuven + 'openorgs____::4c6f119632adf789746f0a057ed73e90', --University of the Western Cape + 'openorgs____::ec3665affa01aeafa28b7852c4176dbd', --Rudjer Boskovic Institute + 'openorgs____::5f31346d444a7f06a28c880fb170b0f6', --Ghent University + 'openorgs____::2dbe47117fd5409f9c61620813456632', --University of Luxembourg + 'openorgs____::6445d7758d3a40c4d997953b6632a368', --National Institute of Informatics (NII) + 'openorgs____::b77c01aa15de3675da34277d48de2ec1', -- Valencia Catholic University Saint Vincent Martyr + 'openorgs____::7fe2f66cdc43983c6b24816bfe9cf6a0', -- University of Warsaw + 'openorgs____::15e7921fc50d9aa1229a82a84429419e', -- University Of Thessaly + 'openorgs____::11f7919dadc8f8a7251af54bba60c956', -- Technical University of Crete + 'openorgs____::84f0c5f5dbb6daf42748485924efde4b', -- University of Piraeus + 'openorgs____::4ac562f0376fce3539504567649cb373', -- University of Patras + 'openorgs____::3e8d1f8c3f6cd7f418b09f1f58b4873b', -- Aristotle University of Thessaloniki + 'openorgs____::3fcef6e1c469c10f2a84b281372c9814', -- World Bank + 'openorgs____::1698a2eb1885ef8adb5a4a969e745ad3', -- École des Ponts ParisTech + 'openorgs____::e15adb13c4dadd49de4d35c39b5da93a', -- Nanyang Technological University + 'openorgs____::4b34103bde246228fcd837f5f1bf4212', -- Autonomous University of Barcelona + 'openorgs____::72ec75fcfc4e0df1a76dc4c49007fceb', -- McMaster University + 'openorgs____::51c7fc556e46381734a25a6fbc3fd398', -- University of Modena and Reggio Emilia + 'openorgs____::235d7f9ad18ecd7e6dc62ea4990cb9db', -- Bilkent University + 'openorgs____::31f2fa9e05b49d4cf40a19c3fed8eb06', -- Saints Cyril and Methodius University of Skopje + 'openorgs____::db7686f30f22cbe73a4fde872ce812a6', -- University of Milan + 'openorgs____::b8b8ca674452579f3f593d9f5e557483', -- University College Cork + 'openorgs____::38d7097854736583dde879d12dacafca', -- Brown University + 'openorgs____::57784c9e047e826fefdb1ef816120d92', --Arts et Métiers ParisTech + 'openorgs____::2530baca8a15936ba2e3297f2bce2e7e', -- University of Cape Town + 'openorgs____::d11f981828c485cd23d93f7f24f24db1', -- Technological University Dublin + 'openorgs____::5e6bf8962665cdd040341171e5c631d8', -- Delft University of Technology + 'openorgs____::846cb428d3f52a445f7275561a7beb5d', -- University of Manitoba + 'openorgs____::eb391317ed0dc684aa81ac16265de041', -- Universitat Rovira i Virgili + 'openorgs____::66aa9fc2fceb271423dfabcc38752dc0', -- Lund University + 'openorgs____::3cff625a4370d51e08624cc586138b2f', -- IMT
Atlantique + 'openorgs____::c0b262bd6eab819e4c994914f9c010e2', -- National Institute of Geophysics and Volcanology + 'openorgs____::1624ff7c01bb641b91f4518539a0c28a', -- Vrije Universiteit Amsterdam + 'openorgs____::4d4051b56708688235252f1d8fddb8c1', -- Iscte - Instituto Universitário de Lisboa + 'openorgs____::ab4ac74c35fa5dada770cf08e5110fab', -- Universidade Católica Portuguesa + 'openorgs____::4d4051b56708688235252f1d8fddb8c1', -- Iscte - Instituto Universitário de Lisboa + 'openorgs____::5d55fb216b14691cf68218daf5d78cd9', -- Munster Technological University + 'openorgs____::0fccc7640f0cb44d5cd1b06b312a06b9', -- Cardiff University + 'openorgs____::8839b55dae0c84d56fd533f52d5d483a', -- Leibniz Institute of Ecological Urban and Regional Development + 'openorgs____::526468206bca24c1c90da6a312295cf4', -- Cyprus University of Technology + 'openorgs____::b5ca9d4340e26454e367e2908ef3872f', -- Alma Mater Studiorum University of Bologna + 'openorgs____::a6340e6ecf60f6bba163659df985b0f2' -- TU Dresden + ))) foo; + +--ANALYZE TABLE TARGET.result COMPUTE STATISTICS; + +create view if not exists TARGET.category as select * from SOURCE.category; +create view if not exists TARGET.concept as select * from SOURCE.concept; +create view if not exists TARGET.context as select * from SOURCE.context; +create view if not exists TARGET.country as select * from SOURCE.country; +create view if not exists TARGET.countrygdp as select * from SOURCE.countrygdp; +create view if not exists TARGET.creation_date as select * from SOURCE.creation_date; +create view if not exists TARGET.funder as select * from SOURCE.funder; +create view if not exists TARGET.fundref as select * from SOURCE.fundref; +create view if not exists TARGET.rndexpenditure as select * from SOURCE.rndexpediture; +create view if not exists TARGET.rndgdpexpenditure as select * from SOURCE.rndgdpexpenditure; +create view if not exists TARGET.doctoratestudents as select * from SOURCE.doctoratestudents; +create view if not exists TARGET.totalresearchers as select * from SOURCE.totalresearchers; +create view if not exists TARGET.totalresearchersft as select * from SOURCE.totalresearchersft; +create view if not exists TARGET.hrrst as select * from SOURCE.hrrst; +--create view if not exists TARGET.graduatedoctorates as select * from SOURCE.graduatedoctorates; + +create table TARGET.result_citations stored as parquet as select * from SOURCE.result_citations orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_citations COMPUTE STATISTICS; + +create table TARGET.result_references_oc stored as parquet as select * from SOURCE.result_references_oc orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_references_oc COMPUTE STATISTICS; + +create table TARGET.result_citations_oc stored as parquet as select * from SOURCE.result_citations_oc orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_citations_oc COMPUTE STATISTICS; + +create table TARGET.result_classifications stored as parquet as select * from SOURCE.result_classifications orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_classifications COMPUTE STATISTICS; + +create table TARGET.result_apc stored as parquet as select * from SOURCE.result_apc orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_apc COMPUTE STATISTICS; + +create table TARGET.result_concepts stored as 
parquet as select * from SOURCE.result_concepts orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_concepts COMPUTE STATISTICS; + +create table TARGET.result_datasources stored as parquet as select * from SOURCE.result_datasources orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_datasources COMPUTE STATISTICS; + +create table TARGET.result_fundercount stored as parquet as select * from SOURCE.result_fundercount orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_fundercount COMPUTE STATISTICS; + +create table TARGET.result_gold stored as parquet as select * from SOURCE.result_gold orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_gold COMPUTE STATISTICS; + +create table TARGET.result_greenoa stored as parquet as select * from SOURCE.result_greenoa orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_greenoa COMPUTE STATISTICS; + +create table TARGET.result_languages stored as parquet as select * from SOURCE.result_languages orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_languages COMPUTE STATISTICS; + +create table TARGET.result_licenses stored as parquet as select * from SOURCE.result_licenses orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_licenses COMPUTE STATISTICS; + +create table TARGET.licenses_normalized STORED AS PARQUET as select * from SOURCE.licenses_normalized; +--ANALYZE TABLE TARGET.licenses_normalized COMPUTE STATISTICS; + +create table TARGET.result_oids stored as parquet as select * from SOURCE.result_oids orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_oids COMPUTE STATISTICS; + +create table TARGET.result_organization stored as parquet as select * from SOURCE.result_organization orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_organization COMPUTE STATISTICS; + +create table TARGET.result_peerreviewed stored as parquet as select * from SOURCE.result_peerreviewed orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_peerreviewed COMPUTE STATISTICS; + +create table TARGET.result_pids stored as parquet as select * from SOURCE.result_pids orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_pids COMPUTE STATISTICS; + +create table TARGET.result_projectcount stored as parquet as select * from SOURCE.result_projectcount orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_projectcount COMPUTE STATISTICS; + +create table TARGET.result_projects stored as parquet as select * from SOURCE.result_projects orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_projects COMPUTE STATISTICS; + +create table TARGET.result_refereed stored as parquet as select * from SOURCE.result_refereed orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_refereed COMPUTE STATISTICS; + +create table TARGET.result_sources stored as parquet as select * from SOURCE.result_sources orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_sources COMPUTE STATISTICS; + +create 
table TARGET.result_topics stored as parquet as select * from SOURCE.result_topics orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_topics COMPUTE STATISTICS; + +create table TARGET.result_fos stored as parquet as select * from SOURCE.result_fos orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_fos COMPUTE STATISTICS; + +create table TARGET.result_accessroute stored as parquet as select * from SOURCE.result_accessroute orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_accessroute COMPUTE STATISTICS; + +create table TARGET.result_instance stored as parquet as select * from SOURCE.result_instance orig where exists (select 1 from TARGET.result r where r.id=orig.id); +create table TARGET.result_orcid stored as parquet as select * from SOURCE.result_orcid orig where exists (select 1 from TARGET.result r where r.id=orig.id); + + +create view TARGET.foo1 as select * from SOURCE.result_result rr where rr.source in (select id from TARGET.result); +create view TARGET.foo2 as select * from SOURCE.result_result rr where rr.target in (select id from TARGET.result); +create table TARGET.result_result STORED AS PARQUET as select distinct * from (select * from TARGET.foo1 union all select * from TARGET.foo2) foufou; +drop view TARGET.foo1; +drop view TARGET.foo2; +--ANALYZE TABLE TARGET.result_result COMPUTE STATISTICS; + +-- datasources +create view if not exists TARGET.datasource as select * from SOURCE.datasource; +create view if not exists TARGET.datasource_oids as select * from SOURCE.datasource_oids; +create view if not exists TARGET.datasource_organizations as select * from SOURCE.datasource_organizations; +create view if not exists TARGET.datasource_sources as select * from SOURCE.datasource_sources; + +create table TARGET.datasource_results stored as parquet as select id as result, datasource as id from TARGET.result_datasources; +--ANALYZE TABLE TARGET.datasource_results COMPUTE STATISTICS; + +-- organizations +create view if not exists TARGET.organization as select * from SOURCE.organization; +create view if not exists TARGET.organization_datasources as select * from SOURCE.organization_datasources; +create view if not exists TARGET.organization_pids as select * from SOURCE.organization_pids; +create view if not exists TARGET.organization_projects as select * from SOURCE.organization_projects; +create view if not exists TARGET.organization_sources as select * from SOURCE.organization_sources; + +-- projects +create view if not exists TARGET.project as select * from SOURCE.project; +create view if not exists TARGET.project_oids as select * from SOURCE.project_oids; +create view if not exists TARGET.project_organizations as select * from SOURCE.project_organizations; +create view if not exists TARGET.project_resultcount as select * from SOURCE.project_resultcount; +create view if not exists TARGET.project_classification as select * from SOURCE.project_classification; +create view if not exists TARGET.project_organization_contribution as select * from SOURCE.project_organization_contribution; + +create table TARGET.project_results stored as parquet as select id as result, project as id from TARGET.result_projects; +--ANALYZE TABLE TARGET.project_results COMPUTE STATISTICS; + +-- indicators +-- Sprint 1 ---- +create table TARGET.indi_pub_green_oa stored as parquet as select * from SOURCE.indi_pub_green_oa orig where exists (select 1 from 
TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_green_oa COMPUTE STATISTICS; +create table TARGET.indi_pub_grey_lit stored as parquet as select * from SOURCE.indi_pub_grey_lit orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_grey_lit COMPUTE STATISTICS; +create table TARGET.indi_pub_doi_from_crossref stored as parquet as select * from SOURCE.indi_pub_doi_from_crossref orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_doi_from_crossref COMPUTE STATISTICS; +-- Sprint 2 ---- +create table TARGET.indi_result_has_cc_licence stored as parquet as select * from SOURCE.indi_result_has_cc_licence orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_has_cc_licence COMPUTE STATISTICS; +create table TARGET.indi_result_has_cc_licence_url stored as parquet as select * from SOURCE.indi_result_has_cc_licence_url orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_has_cc_licence_url COMPUTE STATISTICS; +create table TARGET.indi_pub_has_abstract stored as parquet as select * from SOURCE.indi_pub_has_abstract orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_has_abstract COMPUTE STATISTICS; +create table TARGET.indi_result_with_orcid stored as parquet as select * from SOURCE.indi_result_with_orcid orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_with_orcid COMPUTE STATISTICS; +---- Sprint 3 ---- +create table TARGET.indi_funded_result_with_fundref stored as parquet as select * from SOURCE.indi_funded_result_with_fundref orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_funded_result_with_fundref COMPUTE STATISTICS; +create view TARGET.indi_result_org_collab as select * from SOURCE.indi_result_org_collab; +create view TARGET.indi_result_org_country_collab as select * from SOURCE.indi_result_org_country_collab; +create view TARGET.indi_project_collab_org as select * from SOURCE.indi_project_collab_org; +create view TARGET.indi_project_collab_org_country as select * from SOURCE.indi_project_collab_org_country; +create view TARGET.indi_funder_country_collab as select * from SOURCE.indi_funder_country_collab; +create view TARGET.indi_result_country_collab as select * from SOURCE.indi_result_country_collab; +---- Sprint 4 ---- +create table TARGET.indi_pub_diamond stored as parquet as select * from SOURCE.indi_pub_diamond orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_diamond COMPUTE STATISTICS; +create table TARGET.indi_pub_in_transformative stored as parquet as select * from SOURCE.indi_pub_in_transformative orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_in_transformative COMPUTE STATISTICS; +create table TARGET.indi_pub_closed_other_open stored as parquet as select * from SOURCE.indi_pub_closed_other_open orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_closed_other_open COMPUTE STATISTICS; +---- Sprint 5 ---- +create table TARGET.indi_result_no_of_copies stored as parquet as select * from SOURCE.indi_result_no_of_copies orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_no_of_copies COMPUTE STATISTICS; 
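+-- Note: the indicator tables in this script are all materialized with the same guard,
+-- keeping only rows whose result survived the TARGET.result selection above
+-- (the downloads indicators key on result_id instead of id).
+-- A minimal sketch of the idiom ('indi_example' is a placeholder, not a real table):
+--   create table TARGET.indi_example stored as parquet as
+--   select * from SOURCE.indi_example orig
+--   where exists (select 1 from TARGET.result r where r.id=orig.id);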
+---- Sprint 6 ---- +create table TARGET.indi_pub_hybrid_oa_with_cc stored as parquet as select * from SOURCE.indi_pub_hybrid_oa_with_cc orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_hybrid_oa_with_cc COMPUTE STATISTICS; +create table TARGET.indi_pub_bronze_oa stored as parquet as select * from SOURCE.indi_pub_bronze_oa orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_bronze_oa COMPUTE STATISTICS; +create table TARGET.indi_pub_downloads stored as parquet as select * from SOURCE.indi_pub_downloads orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); +--ANALYZE TABLE TARGET.indi_pub_downloads COMPUTE STATISTICS; +create table TARGET.indi_pub_downloads_datasource stored as parquet as select * from SOURCE.indi_pub_downloads_datasource orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); +--ANALYZE TABLE TARGET.indi_pub_downloads_datasource COMPUTE STATISTICS; +create table TARGET.indi_pub_downloads_year stored as parquet as select * from SOURCE.indi_pub_downloads_year orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); +--ANALYZE TABLE TARGET.indi_pub_downloads_year COMPUTE STATISTICS; +create table TARGET.indi_pub_downloads_datasource_year stored as parquet as select * from SOURCE.indi_pub_downloads_datasource_year orig where exists (select 1 from TARGET.result r where r.id=orig.result_id); +--ANALYZE TABLE TARGET.indi_pub_downloads_datasource_year COMPUTE STATISTICS; +---- Sprint 7 ---- +create table TARGET.indi_pub_gold_oa stored as parquet as select * from SOURCE.indi_pub_gold_oa orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_gold_oa COMPUTE STATISTICS; +create table TARGET.indi_pub_hybrid stored as parquet as select * from SOURCE.indi_pub_hybrid orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_hybrid COMPUTE STATISTICS; +create view TARGET.indi_org_fairness as select * from SOURCE.indi_org_fairness; +create view TARGET.indi_org_fairness_pub_pr as select * from SOURCE.indi_org_fairness_pub_pr; +create view TARGET.indi_org_fairness_pub_year as select * from SOURCE.indi_org_fairness_pub_year; +create view TARGET.indi_org_fairness_pub as select * from SOURCE.indi_org_fairness_pub; +create view TARGET.indi_org_fairness_year as select * from SOURCE.indi_org_fairness_year; +create view TARGET.indi_org_findable_year as select * from SOURCE.indi_org_findable_year; +create view TARGET.indi_org_findable as select * from SOURCE.indi_org_findable; +create view TARGET.indi_org_openess as select * from SOURCE.indi_org_openess; +create view TARGET.indi_org_openess_year as select * from SOURCE.indi_org_openess_year; +create table TARGET.indi_pub_has_preprint stored as parquet as select * from SOURCE.indi_pub_has_preprint orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_has_preprint COMPUTE STATISTICS; +create table TARGET.indi_pub_in_subscribed stored as parquet as select * from SOURCE.indi_pub_in_subscribed orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_in_subscribed COMPUTE STATISTICS; +create table TARGET.indi_result_with_pid stored as parquet as select * from SOURCE.indi_result_with_pid orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_result_with_pid 
COMPUTE STATISTICS; +create table TARGET.indi_impact_measures stored as parquet as select * from SOURCE.indi_impact_measures orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_impact_measures COMPUTE STATISTICS; +create table TARGET.indi_pub_interdisciplinarity stored as parquet as select * from SOURCE.indi_pub_interdisciplinarity orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.indi_pub_interdisciplinarity COMPUTE STATISTICS; +create table TARGET.result_apc_affiliations stored as parquet as select * from SOURCE.result_apc_affiliations orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--ANALYZE TABLE TARGET.result_apc_affiliations COMPUTE STATISTICS; +--create table TARGET.indi_is_project_result_after stored as parquet as select * from SOURCE.indi_is_project_result_after orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--create table TARGET.indi_is_funder_plan_s stored as parquet as select * from SOURCE.indi_is_funder_plan_s orig where exists (select 1 from TARGET.result r where r.id=orig.id); +--create view TARGET.indi_funder_fairness as select * from SOURCE.indi_funder_fairness; +--create view TARGET.indi_funder_openess as select * from SOURCE.indi_funder_openess; +--create view TARGET.indi_funder_findable as select * from SOURCE.indi_funder_findable; +--create view TARGET.indi_ris_fairness as select * from SOURCE.indi_ris_fairness; +--create view TARGET.indi_ris_openess as select * from SOURCE.indi_ris_openess; +--create view TARGET.indi_ris_findable as select * from SOURCE.indi_ris_findable; \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDB_institutions.sql b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDB_institutions.sql new file mode 100644 index 000000000..5ab8c88b5 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/scripts/updateMonitorDB_institutions.sql @@ -0,0 +1,67 @@ +drop database if exists TARGET cascade; +create database if not exists TARGET; + +create table TARGET.result stored as parquet as + select distinct * from ( + select * from SOURCE.result r where exists (select 1 from SOURCE.result_organization ro where ro.id=r.id and ro.organization in ( + 'openorgs____::b84450f9864182c67b8611b5593f4250', --"Athena Research and Innovation Center In Information Communication & Knowledge Technologies', --ARC" + 'openorgs____::d41cf6bd4ab1b1362a44397e0b95c975', --National Research Council + 'openorgs____::d2a09b9d5eabb10c95f9470e172d05d2', --??? Not exists ?? 
+ 'openorgs____::d169c7407dd417152596908d48c11460', --Masaryk University + 'openorgs____::1ec924b1759bb16d0a02f2dad8689b21', --University of Belgrade + 'openorgs____::0ae431b820e4c33db8967fbb2b919150', --University of Helsinki + 'openorgs____::759d59f05d77188faee99b7493b46805', --University of Minho + 'openorgs____::cad284878801b9465fa51a95b1d779db', --Universidad Politécnica de Madrid + 'openorgs____::eadc8da90a546e98c03f896661a2e4d4', --University of Göttingen + 'openorgs____::c0286313e36479eff8676dba9b724b40', --National and Kapodistrian University of Athens + -- 'openorgs____::c80a8243a5e5c620d7931c88d93bf17a', --Université Paris Diderot + 'openorgs____::c08634f0a6b0081c3dc6e6c93a4314f3', --Bielefeld University + 'openorgs____::6fc85e4a8f7ecaf4b0c738d010e967ea', --University of Southern Denmark + 'openorgs____::3d6122f87f9a97a99d8f6e3d73313720', --Humboldt-Universität zu Berlin + 'openorgs____::16720ada63d0fa8ca41601feae7d1aa5', --TU Darmstadt + 'openorgs____::ccc0a066b56d2cfaf90c2ae369df16f5', --KU Leuven + 'openorgs____::4c6f119632adf789746f0a057ed73e90', --University of the Western Cape + 'openorgs____::ec3665affa01aeafa28b7852c4176dbd', --Rudjer Boskovic Institute + 'openorgs____::5f31346d444a7f06a28c880fb170b0f6', --Ghent University + 'openorgs____::2dbe47117fd5409f9c61620813456632', --University of Luxembourg + 'openorgs____::6445d7758d3a40c4d997953b6632a368', --National Institute of Informatics (NII) + 'openorgs____::b77c01aa15de3675da34277d48de2ec1', -- Valencia Catholic University Saint Vincent Martyr + 'openorgs____::7fe2f66cdc43983c6b24816bfe9cf6a0', -- University of Warsaw + 'openorgs____::15e7921fc50d9aa1229a82a84429419e', -- University Of Thessaly + 'openorgs____::11f7919dadc8f8a7251af54bba60c956', -- Technical University of Crete + 'openorgs____::84f0c5f5dbb6daf42748485924efde4b', -- University of Piraeus + 'openorgs____::4ac562f0376fce3539504567649cb373', -- University of Patras + 'openorgs____::3e8d1f8c3f6cd7f418b09f1f58b4873b', -- Aristotle University of Thessaloniki + 'openorgs____::3fcef6e1c469c10f2a84b281372c9814', -- World Bank + 'openorgs____::1698a2eb1885ef8adb5a4a969e745ad3', -- École des Ponts ParisTech + 'openorgs____::e15adb13c4dadd49de4d35c39b5da93a', -- Nanyang Technological University + 'openorgs____::4b34103bde246228fcd837f5f1bf4212', -- Autonomous University of Barcelona + 'openorgs____::72ec75fcfc4e0df1a76dc4c49007fceb', -- McMaster University + 'openorgs____::51c7fc556e46381734a25a6fbc3fd398', -- University of Modena and Reggio Emilia + 'openorgs____::235d7f9ad18ecd7e6dc62ea4990cb9db', -- Bilkent University + 'openorgs____::31f2fa9e05b49d4cf40a19c3fed8eb06', -- Saints Cyril and Methodius University of Skopje + 'openorgs____::db7686f30f22cbe73a4fde872ce812a6', -- University of Milan + 'openorgs____::b8b8ca674452579f3f593d9f5e557483', -- University College Cork + 'openorgs____::38d7097854736583dde879d12dacafca', -- Brown University + 'openorgs____::57784c9e047e826fefdb1ef816120d92', --Arts et Métiers ParisTech + 'openorgs____::2530baca8a15936ba2e3297f2bce2e7e', -- University of Cape Town + 'openorgs____::d11f981828c485cd23d93f7f24f24db1', -- Technological University Dublin + 'openorgs____::5e6bf8962665cdd040341171e5c631d8', -- Delft University of Technology + 'openorgs____::846cb428d3f52a445f7275561a7beb5d', -- University of Manitoba + 'openorgs____::eb391317ed0dc684aa81ac16265de041', -- Universitat Rovira i Virgili + 'openorgs____::66aa9fc2fceb271423dfabcc38752dc0', -- Lund University + 'openorgs____::3cff625a4370d51e08624cc586138b2f', -- IMT
Atlantique + 'openorgs____::c0b262bd6eab819e4c994914f9c010e2', -- National Institute of Geophysics and Volcanology + 'openorgs____::1624ff7c01bb641b91f4518539a0c28a', -- Vrije Universiteit Amsterdam + 'openorgs____::4d4051b56708688235252f1d8fddb8c1', --Iscte - Instituto Universitário de Lisboa + 'openorgs____::ab4ac74c35fa5dada770cf08e5110fab', -- Universidade Católica Portuguesa + 'openorgs____::4d4051b56708688235252f1d8fddb8c1', -- Iscte - Instituto Universitário de Lisboa + 'openorgs____::5d55fb216b14691cf68218daf5d78cd9', -- Munster Technological University + 'openorgs____::0fccc7640f0cb44d5cd1b06b312a06b9', -- Cardiff University + 'openorgs____::8839b55dae0c84d56fd533f52d5d483a', -- Leibniz Institute of Ecological Urban and Regional Development + 'openorgs____::526468206bca24c1c90da6a312295cf4', -- Cyprus University of Technology + 'openorgs____::b5ca9d4340e26454e367e2908ef3872f', -- Alma Mater Studiorum University of Bologna + 'openorgs____::a6340e6ecf60f6bba163659df985b0f2' -- TU Dresden + ))) foo; + +--ANALYZE TABLE TARGET.result COMPUTE STATISTICS; \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/workflow.xml b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/workflow.xml new file mode 100644 index 000000000..0542c0c87 --- /dev/null +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/workflow.xml @@ -0,0 +1,111 @@ + + + + stats_db_name + the target stats database name + + + monitor_db_name + the target monitor db name + + + monitor_db_shadow_name + the name of the shadow monitor db + + + hive_metastore_uris + hive server metastore URIs + + + hive_jdbc_url + hive server jdbc url + + + hive_timeout + the time period, in seconds, after which Hive fails a transaction if a Hive client has not sent a heartbeat. The default value is 300 seconds.
+ + + hadoop_user_name + user name of the wf owner + + + + + ${jobTracker} + ${nameNode} + + + hive.metastore.uris + ${hive_metastore_uris} + + + hive.txn.timeout + ${hive_timeout} + + + mapred.job.queue.name + analytics + + + + + + + + ${wf:conf('resumeFrom') eq 'Step1-updateMonitorDB'} + ${wf:conf('resumeFrom') eq 'Step2-copyDataToImpalaCluster'} + ${wf:conf('resumeFrom') eq 'Step3-finalizeImpalaCluster'} + + + + + + Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}] + + + + + ${jobTracker} + ${nameNode} + monitor.sh + ${stats_db_name} + ${monitor_db_name} + ${monitor_db_shadow_name} + ${wf:appPath()}/scripts/updateMonitorDB_institutions.sql + ${wf:appPath()}/scripts/updateMonitorDB.sql + ${wf:appPath()}/scripts/updateMonitorDBAll.sql + monitor.sh + + + + + + + + ${jobTracker} + ${nameNode} + copyDataToImpalaCluster.sh + ${monitor_db_name} + ${hadoop_user_name} + copyDataToImpalaCluster.sh + + + + + + + + ${jobTracker} + ${nameNode} + finalizeImpalaCluster.sh + ${monitor_db_name} + ${monitor_db_prod_name} + ${monitor_db_shadow_name} + finalizeImpalaCluster.sh + + + + + + + diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/contexts.sh b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/contexts.sh index a436d0380..971b0da3f 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/contexts.sh +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/contexts.sh @@ -35,12 +35,20 @@ export HADOOP_USER="oozie" export HADOOP_USER_NAME="oozie" echo "Creating and populating impala tables" -hive $HIVE_OPTS -e "create table ${TARGET_DB}.context (id string, name string) row format delimited fields terminated by ','" -hive $HIVE_OPTS -e "create table ${TARGET_DB}.category (context string, id string, name string) row format delimited fields terminated by ','" -hive $HIVE_OPTS -e "create table ${TARGET_DB}.concept (category string, id string, name string) row format delimited fields terminated by ','" -hive $HIVE_OPTS -e "load data inpath '${TMP}/contexts.csv' into table ${TARGET_DB}.context" -hive $HIVE_OPTS -e "load data inpath '${TMP}/categories.csv' into table ${TARGET_DB}.category" -hive $HIVE_OPTS -e "load data inpath '${TMP}/concepts.csv' into table ${TARGET_DB}.concept" +hive $HIVE_OPTS -e "create table ${TARGET_DB}.context_csv (id string, name string) row format delimited fields terminated by ','" +hive $HIVE_OPTS -e "load data inpath '${TMP}/contexts.csv' into table ${TARGET_DB}.context_csv" +hive $HIVE_OPTS -e "create table ${TARGET_DB}.context stored as parquet as select * from ${TARGET_DB}.context_csv" +hive $HIVE_OPTS -e "drop table ${TARGET_DB}.context_csv purge" + +hive $HIVE_OPTS -e "create table ${TARGET_DB}.category_csv (context string, id string, name string) row format delimited fields terminated by ','" +hive $HIVE_OPTS -e "load data inpath '${TMP}/categories.csv' into table ${TARGET_DB}.category_csv" +hive $HIVE_OPTS -e "create table ${TARGET_DB}.category stored as parquet as select * from ${TARGET_DB}.category_csv" +hive $HIVE_OPTS -e "drop table ${TARGET_DB}.category_csv purge" + +hive $HIVE_OPTS -e "create table ${TARGET_DB}.concept_csv (category string, id string, name string) row format delimited fields terminated by ','" +hive $HIVE_OPTS -e "load data inpath '${TMP}/concepts.csv' into table ${TARGET_DB}.concept_csv" +hive $HIVE_OPTS -e "create table ${TARGET_DB}.concept stored as 
parquet as select * from ${TARGET_DB}.concept_csv" +hive $HIVE_OPTS -e "drop table ${TARGET_DB}.concept_csv purge" echo "Cleaning up" rm concepts.csv diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/copyDataToImpalaCluster.sh b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/copyDataToImpalaCluster.sh index 18ff6dca8..c2324b912 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/copyDataToImpalaCluster.sh +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/copyDataToImpalaCluster.sh @@ -6,68 +6,215 @@ then ln -sfn ${PYTHON_EGG_CACHE}${link_folder} ${link_folder} fi + +# Set the active HDFS node of the OCEAN and IMPALA clusters. +OCEAN_HDFS_NODE='hdfs://nameservice1' +echo -e "\nOCEAN HDFS virtual-name which resolves automatically to the active-node: ${OCEAN_HDFS_NODE}" + +IMPALA_HDFS_NODE='' +COUNTER=0 +while [ $COUNTER -lt 3 ]; do + if hdfs dfs -test -e hdfs://impala-cluster-mn1.openaire.eu/tmp >/dev/null 2>&1; then + IMPALA_HDFS_NODE='hdfs://impala-cluster-mn1.openaire.eu:8020' + break + elif hdfs dfs -test -e hdfs://impala-cluster-mn2.openaire.eu/tmp >/dev/null 2>&1; then + IMPALA_HDFS_NODE='hdfs://impala-cluster-mn2.openaire.eu:8020' + break + else + IMPALA_HDFS_NODE='' + sleep 1 + fi + ((COUNTER++)) +done +if [ -z "$IMPALA_HDFS_NODE" ]; then + echo -e "\n\nERROR: PROBLEM WHEN SETTING THE HDFS-NODE FOR IMPALA CLUSTER! | AFTER ${COUNTER} RETRIES.\n\n" + exit 1 +fi +echo -e "Active IMPALA HDFS Node: ${IMPALA_HDFS_NODE} , after ${COUNTER} retries.\n\n" + +IMPALA_HOSTNAME='impala-cluster-dn1.openaire.eu' +IMPALA_CONFIG_FILE='/etc/impala_cluster/hdfs-site.xml' + +IMPALA_HDFS_DB_BASE_PATH="${IMPALA_HDFS_NODE}/user/hive/warehouse" + +# Set sed arguments. +LOCATION_HDFS_NODE_SED_ARG="s|${OCEAN_HDFS_NODE}|${IMPALA_HDFS_NODE}|g" # This needs to be used with "sed -e" in order to have the "|" delimiter (as the "/" conflicts with the URIs) + +# Set the SED command arguments for column-names with reserved words: +DATE_SED_ARG_1='s/[[:space:]]\date[[:space:]]/\`date\`/g' +DATE_SED_ARG_2='s/\.date,/\.\`date\`,/g' # the "date" may be part of a larger field name like "datestamp" or "date_aggregated", so we need to be careful with what we are replacing. +DATE_SED_ARG_3='s/\.date[[:space:]]/\.\`date\` /g' + +HASH_SED_ARG_1='s/[[:space:]]\hash[[:space:]]/\`hash\`/g' +HASH_SED_ARG_2='s/\.hash,/\.\`hash\`,/g' +HASH_SED_ARG_3='s/\.hash[[:space:]]/\.\`hash\` /g' + +LOCATION_SED_ARG_1='s/[[:space:]]\location[[:space:]]/\`location\`/g' +LOCATION_SED_ARG_2='s/\.location,/\.\`location\`,/g' +LOCATION_SED_ARG_3='s/\.location[[:space:]]/\.\`location\` /g' + + export HADOOP_USER_NAME=$6 export PROD_USAGE_STATS_DB="openaire_prod_usage_stats" + + function copydb() { db=$1 - FILE=("hive_wf_tmp_"$RANDOM) - hdfs dfs -mkdir hdfs://impala-cluster-mn1.openaire.eu:8020/tmp/$FILE/ - # copy the databases from ocean to impala + echo -e "\nStart processing db: '${db}'..\n" - echo "copying $db" - hadoop distcp -Dmapreduce.map.memory.mb=6144 -pb hdfs://nameservice1/user/hive/warehouse/${db}.db hdfs://impala-cluster-mn1.openaire.eu:8020/tmp/$FILE/ + # Delete the old DB from Impala cluster (if exists).
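+ # Each run rebuilds the DB from scratch on the Impala side: dropping it first
+ # ensures no tables or views from a previous (possibly failed) copy can linger.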
+ impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "drop database if exists ${db} cascade" |& tee error.log # impala-shell prints all logs in stderr, so wee need to capture them and put them in a file, in order to perform "grep" on them later + log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"` + if [ -n "$log_errors" ]; then + echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN DROPPING THE OLD DATABASE! EXITING...\n\n" + rm -f error.log + return 1 + fi - # change ownership to impala - hdfs dfs -conf /etc/impala_cluster/hdfs-site.xml -chmod -R 777 /tmp/$FILE/${db}.db + # Make Impala aware of the deletion of the old DB immediately. + sleep 1 + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - # drop tables from db - for i in `impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -d ${db} --delimited -q "show tables"`; - do - `impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} -q "drop table $i;"`; - done + echo -e "\n\nCopying files of '${db}', from Ocean to Impala cluster..\n" + # Using max-bandwidth of: 50 * 100 Mb/s = 5 Gb/s + # Using max memory of: 50 * 6144 = 300 Gb + # Using 1MB as a buffer-size. + # The " -Ddistcp.dynamic.recordsPerChunk=50" arg is not available in our version of hadoop + # The "ug" args cannot be used as we get a "User does not belong to hive" error. + # The "p" argument cannot be used, as it blocks the files from being used, giving a "sticky bit"-error, even after applying chmod and chown onm the files. + hadoop distcp -Dmapreduce.map.memory.mb=6144 -m 70 -bandwidth 150 \ + -numListstatusThreads 40 \ + -copybuffersize 1048576 \ + -strategy dynamic \ + -pb \ + ${OCEAN_HDFS_NODE}/user/hive/warehouse/${db}.db ${IMPALA_HDFS_DB_BASE_PATH} - # drop views from db - for i in `impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -d ${db} --delimited -q "show tables"`; - do - `impala-shell -i impala-cluster-dn1.openaire.eu -d ${db} -q "drop view $i;"`; - done + # Check the exit status of the "hadoop distcp" command. + if [ $? -eq 0 ]; then + echo -e "\nSuccessfully copied the files of '${db}'.\n" + else + echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT WITH EXIT STATUS: $?\n\n" + rm -f error.log + return 2 + fi - # delete the database - impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -q "drop database if exists ${db} cascade"; + # In case we ever use this script for a writable DB (using inserts/updates), we should perform the following costly operation as well.. + #hdfs dfs -conf ${IMPALA_CONFIG_FILE} -chmod -R 777 ${TEMP_SUBDIR_FULLPATH}/${db}.db - # create the databases - impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -q "create database ${db}"; + echo -e "\nCreating schema for db: '${db}'\n" - impala-shell --user $HADOOP_USER_NAME -q "INVALIDATE METADATA" - echo "creating schema for ${db}" - for (( k = 0; k < 5; k ++ )); do - for i in `impala-shell --user $HADOOP_USER_NAME -d ${db} --delimited -q "show tables"`; - do - impala-shell --user $HADOOP_USER_NAME -d ${db} --delimited -q "show create table $i"; - done | sed 's/"$/;/' | sed 's/^"//' | sed 's/[[:space:]]\date[[:space:]]/`date`/g' | impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -c -f - + # create the new database (with the same name) + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create database ${db}" + + # Make Impala aware of the creation of the new DB immediately. 
+ sleep 1 + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" + sleep 1 + # Because "Hive" and "Impala" do not have compatible schemas, we cannot use the "show create table " output from hive to create the exact same table in impala. + # So, we have to find at least one parquet file (check if it's there) from the table in the ocean cluster, so that Impala can extract the table-schema from that file. + + all_create_view_statements=() + + entities_on_ocean=`hive -e "show tables in ${db};" | sed 's/WARN:.*//g'` # Get the tables and views without any potential "WARN" logs. + for i in ${entities_on_ocean[@]}; do # Use un-quoted values, as the elements are single-words. + # Check if this is a view by showing the create-statement where it should print "create view" for a view, not the "create table". Unfortunately, there is no "show views" command. + create_entity_statement=`hive -e "show create table ${db}.${i};"` # It needs to happen in two stages, otherwise the "grep" is not able to match a multi-line statement. + + create_view_statement_test=`echo -e "$create_entity_statement" | grep 'CREATE VIEW'` + if [ -n "$create_view_statement_test" ]; then + echo -e "\n'${i}' is a view, so we will save its 'create view' statement and execute it on Impala, after all tables have been created.\n" + create_view_statement=`echo -e "$create_entity_statement" | sed 's/WARN:.*//g' | sed 's/\`//g' \ + | sed 's/"$/;/' | sed 's/^"//' | sed 's/\\"\\"/\"/g' | sed -e "${LOCATION_HDFS_NODE_SED_ARG}" | sed "${DATE_SED_ARG_1}" | sed "${HASH_SED_ARG_1}" | sed "${LOCATION_SED_ARG_1}" \ + | sed "${DATE_SED_ARG_2}" | sed "${HASH_SED_ARG_2}" | sed "${LOCATION_SED_ARG_2}" \ + | sed "${DATE_SED_ARG_3}" | sed "${HASH_SED_ARG_3}" | sed "${LOCATION_SED_ARG_3}"` + all_create_view_statements+=("$create_view_statement") + else + echo -e "\n'${i}' is a table, so we will check for its parquet files and create the table on Impala cluster.\n" + CURRENT_PRQ_FILE=`hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/${i}/" | grep -v 'Found' | grep -v '_impala_insert_staging' | head -1` + if [ -z "$CURRENT_PRQ_FILE" ]; then # If there is no parquet-file inside. + echo -e "\nERROR: THE TABLE \"${i}\" HAD NO FILES TO GET THE SCHEMA FROM!
IT'S EMPTY!\n\n" + else + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.${i} like parquet '${CURRENT_PRQ_FILE}' stored as parquet;" |& tee error.log + log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"` + if [ -n "$log_errors" ]; then + echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN CREATING TABLE '${i}'!\n\n" + fi + fi + fi done -# for i in `impala-shell --user $HADOOP_USER_NAME -d ${db} --delimited -q "show tables"`; -# do -# impala-shell --user $HADOOP_USER_NAME -d ${db} --delimited -q "show create table $i"; -# done | sed 's/"$/;/' | sed 's/^"//' | sed 's/[[:space:]]\date[[:space:]]/`date`/g' | impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -c -f - -# -# # run the same command twice because we may have failures in the first run (due to views pointing to the same db) -# for i in `impala-shell --user $HADOOP_USER_NAME -d ${db} --delimited -q "show tables"`; -# do -# impala-shell --user $HADOOP_USER_NAME -d ${db} --delimited -q "show create table $i"; -# done | sed 's/"$/;/' | sed 's/^"//' | sed 's/[[:space:]]\date[[:space:]]/`date`/g' | impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -c -f - + echo -e "\nAll tables have been created, going to create the views..\n" - # load the data from /tmp in the respective tables - echo "copying data in tables and computing stats" - for i in `impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -d ${db} --delimited -q "show tables"`; - do - impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -d ${db} -q "load data inpath '/tmp/$FILE/${db}.db/$i' into table $i"; - impala-shell --user $HADOOP_USER_NAME -i impala-cluster-dn1.openaire.eu -d ${db} -q "compute stats $i"; - done + # Time to loop through the views and create them. + # At this point all table-schemas should have been created. - # deleting the remaining directory from hdfs -hdfs dfs -conf /etc/impala_cluster/hdfs-site.xml -rm -R /tmp/$FILE/${db}.db + previous_num_of_views_to_retry=${#all_create_view_statements[@]} + if [[ $previous_num_of_views_to_retry -gt 0 ]]; then + echo -e "\nAll_create_view_statements:\n\n${all_create_view_statements[@]}\n" # DEBUG + # Make Impala aware of the new tables, so it knows them when creating the views. + sleep 1 + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" + sleep 1 + else + echo -e "\nDB '${db}' does not contain any views.\n" + fi + + level_counter=0 + while [[ ${#all_create_view_statements[@]} -gt 0 ]]; do + ((level_counter++)) + # The only accepted reason for a view to not be created is that it depends on another view which has not been created yet. + # In this case, we should retry creating this particular view again. + should_retry_create_view_statements=() + + for create_view_statement in "${all_create_view_statements[@]}"; do # Here we use double quotes, as the elements are phrases, instead of single-words.
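+ # Each statement is tried once per pass; a failure matching the "missing
+ # dependency" error patterns below simply queues the view for the next pass.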
+ impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "${create_view_statement}" |& tee error.log # impala-shell prints all logs in stderr, so wee need to capture them and put them in a file, in order to perform "grep" on them later + specific_errors=`cat error.log | grep -E "FAILED: ParseException line 1:13 missing TABLE at 'view'|ERROR: AnalysisException: Could not resolve table reference:"` + if [ -n "$specific_errors" ]; then + echo -e "\nspecific_errors: ${specific_errors}\n" + echo -e "\nView '$(cat error.log | grep "CREATE VIEW " | sed 's/CREATE VIEW //g' | sed 's/ as select .*//g')' failed to be created, possibly because it depends on another view.\n" + should_retry_create_view_statements+=("$create_view_statement") + else + sleep 1 # Wait a bit for Impala to register that the view was created, before possibly referencing it by another view. + fi + done + + new_num_of_views_to_retry=${#should_retry_create_view_statements} + if [[ $new_num_of_views_to_retry -eq $previous_num_of_views_to_retry ]]; then + echo -e "\n\nERROR: THE NUMBER OF VIEWS TO RETRY HAS NOT BEEN REDUCED! THE SCRIPT IS LIKELY GOING TO AN INFINITE-LOOP! EXITING..\n\n" + return 3 + elif [[ $new_num_of_views_to_retry -gt 0 ]]; then + echo -e "\nTo be retried \"create_view_statements\":\n\n${should_retry_create_view_statements[@]}\n" + previous_num_of_views_to_retry=$new_num_of_views_to_retry + else + echo -e "\nFinished creating views, for db: '${db}', in level-${level_counter}.\n" + fi + all_create_view_statements=("${should_retry_create_view_statement[@]}") # This is needed in any case to either move forward with the rest of the views or stop at 0 remaining views. + done + + sleep 1 + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" + sleep 1 + + echo -e "\nComputing stats for tables..\n" + entities_on_impala=`impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"` + for i in ${entities_on_impala[@]}; do # Use un-quoted values, as the elemetns are single-words. + # Taking the create table statement from the Ocean cluster, just to check if its a view, as the output is easier than using impala-shell from Impala cluster. + create_view_statement=`hive -e "show create table ${db}.${i};" | grep "CREATE VIEW"` # This grep works here, as we do not want to match multiple-lines. + if [ -z "$create_view_statement" ]; then # If it's a table, then go load the data to it. 
+ impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${i}"; + fi + done + + if [[ "${entities_on_impala[*]}" == "${entities_on_ocean[*]}" ]]; then + echo -e "\nAll entities have been copied to Impala cluster.\n" + else + echo -e "\n\nERROR: 1 OR MORE ENTITIES OF DB '${db}' FAILED TO BE COPIED TO IMPALA CLUSTER!\n\n" + rm -f error.log + return 4 + fi + + rm -f error.log + echo -e "\n\nFinished processing db: ${db}\n\n" } STATS_DB=$1 diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/monitor.sh b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/monitor.sh index 872456973..a5b6a54cb 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/monitor.sh +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/monitor.sh @@ -85,12 +85,12 @@ hive $HIVE_OPTS --database ${2}_funded -e "show tables" | grep -v WARN | sed "s/ hive -f foo echo "Updated shadow monitor funded database" -echo "Updating shadow monitor insitutions database" +echo "Updating shadow monitor institutions database" hive -e "drop database if exists ${SHADOW}_institutions cascade" hive -e "create database if not exists ${SHADOW}_institutions" hive $HIVE_OPTS --database ${2}_institutions -e "show tables" | grep -v WARN | sed "s/\(.*\)/create view ${SHADOW}_institutions.\1 as select * from ${2}_institutions.\1;/" > foo hive -f foo -echo "Shadow db monitor insitutions ready!" +echo "Shadow db monitor institutions ready!" echo "Updating shadow monitor RIs database" for i in $contexts diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step13.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step13.sql index 6493fa7d0..8c1dbdc4d 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step13.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step13.sql @@ -69,7 +69,7 @@ SELECT * FROM ${stats_db_name}.otherresearchproduct_sources; DROP TABLE IF EXISTS ${stats_db_name}.result_orcid purge; CREATE TABLE IF NOT EXISTS ${stats_db_name}.result_orcid STORED AS PARQUET as -select distinct res.id, regexp_replace(res.orcid, 'http://orcid.org/' ,'') as orcid +select distinct res.id, upper(regexp_replace(res.orcid, 'http://orcid.org/' ,'')) as orcid from ( SELECT substr(res.id, 4) as id, auth_pid.value as orcid FROM ${openaire_db_name}.result res diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql index 066b197e6..ce6b6cc2f 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql @@ -7,32 +7,76 @@ ------------------------------------------------------ DROP TABLE IF EXISTS ${stats_db_name}.publication_refereed purge; - CREATE TABLE IF NOT EXISTS ${stats_db_name}.publication_refereed STORED AS PARQUET as -select substr(r.id, 4) as id, inst.refereed.classname as refereed -from ${openaire_db_name}.publication r lateral view explode(r.instance) instances as inst -where
r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE; +with peer_reviewed as ( + select distinct substr(r.id, 4) as id, inst.refereed.classname as refereed + from ${openaire_db_name}.publication r lateral view explode(r.instance) instances as inst + where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE and inst.refereed.classname='peerReviewed'), +non_peer_reviewed as ( + select distinct substr(r.id, 4) as id, inst.refereed.classname as refereed + from ${openaire_db_name}.publication r lateral view explode(r.instance) instances as inst + where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE and inst.refereed.classname='nonPeerReviewed') +select distinct * +from ( + select peer_reviewed.* from peer_reviewed + union all + select non_peer_reviewed.* from non_peer_reviewed + left join peer_reviewed on peer_reviewed.id=non_peer_reviewed.id + where peer_reviewed.id is null) pr; DROP TABLE IF EXISTS ${stats_db_name}.dataset_refereed purge; - CREATE TABLE IF NOT EXISTS ${stats_db_name}.dataset_refereed STORED AS PARQUET as -select substr(r.id, 4) as id, inst.refereed.classname as refereed -from ${openaire_db_name}.dataset r lateral view explode(r.instance) instances as inst -where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE; +with peer_reviewed as ( + select distinct substr(r.id, 4) as id, inst.refereed.classname as refereed + from ${openaire_db_name}.dataset r lateral view explode(r.instance) instances as inst + where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE and inst.refereed.classname='peerReviewed'), +non_peer_reviewed as ( + select distinct substr(r.id, 4) as id, inst.refereed.classname as refereed + from ${openaire_db_name}.dataset r lateral view explode(r.instance) instances as inst + where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE and inst.refereed.classname='nonPeerReviewed') +select distinct * +from ( + select peer_reviewed.* from peer_reviewed + union all + select non_peer_reviewed.* from non_peer_reviewed + left join peer_reviewed on peer_reviewed.id=non_peer_reviewed.id + where peer_reviewed.id is null) pr; DROP TABLE IF EXISTS ${stats_db_name}.software_refereed purge; - CREATE TABLE IF NOT EXISTS ${stats_db_name}.software_refereed STORED AS PARQUET as -select substr(r.id, 4) as id, inst.refereed.classname as refereed -from ${openaire_db_name}.software r lateral view explode(r.instance) instances as inst -where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE; +with peer_reviewed as ( + select distinct substr(r.id, 4) as id, inst.refereed.classname as refereed + from ${openaire_db_name}.software r lateral view explode(r.instance) instances as inst + where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE and inst.refereed.classname='peerReviewed'), +non_peer_reviewed as ( + select distinct substr(r.id, 4) as id, inst.refereed.classname as refereed + from ${openaire_db_name}.software r lateral view explode(r.instance) instances as inst + where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE and inst.refereed.classname='nonPeerReviewed') +select distinct * +from ( + select peer_reviewed.* from peer_reviewed + union all + select non_peer_reviewed.* from non_peer_reviewed + left join peer_reviewed on peer_reviewed.id=non_peer_reviewed.id + where peer_reviewed.id is null) pr; DROP TABLE IF EXISTS ${stats_db_name}.otherresearchproduct_refereed purge; - CREATE TABLE IF NOT EXISTS 
${stats_db_name}.otherresearchproduct_refereed STORED AS PARQUET as -select substr(r.id, 4) as id, inst.refereed.classname as refereed -from ${openaire_db_name}.otherresearchproduct r lateral view explode(r.instance) instances as inst -where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE; +with peer_reviewed as ( + select distinct substr(r.id, 4) as id, inst.refereed.classname as refereed + from ${openaire_db_name}.otherresearchproduct r lateral view explode(r.instance) instances as inst + where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE and inst.refereed.classname='peerReviewed'), +non_peer_reviewed as ( + select distinct substr(r.id, 4) as id, inst.refereed.classname as refereed + from ${openaire_db_name}.otherresearchproduct r lateral view explode(r.instance) instances as inst + where r.datainfo.deletedbyinference=false and r.datainfo.invisible = FALSE and inst.refereed.classname='nonPeerReviewed') +select distinct * +from ( + select peer_reviewed.* from peer_reviewed + union all + select non_peer_reviewed.* from non_peer_reviewed + left join peer_reviewed on peer_reviewed.id=non_peer_reviewed.id + where peer_reviewed.id is null) pr; CREATE VIEW IF NOT EXISTS ${stats_db_name}.result_refereed as select * from ${stats_db_name}.publication_refereed @@ -60,4 +104,4 @@ rel.properties[1].value apc_currency from ${openaire_db_name}.relation rel join ${openaire_db_name}.organization o on o.id=rel.source join ${openaire_db_name}.result r on r.id=rel.target -where rel.subreltype = 'affiliation' and rel.datainfo.deletedbyinference = false and size(rel.properties)>0; +where rel.subreltype = 'affiliation' and rel.datainfo.deletedbyinference = false and size(rel.properties)>0; \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql index 2c606fb92..6ed686a05 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql @@ -50,13 +50,13 @@ select distinct r.* from ( select substr(r.id, 4) as id, inst.accessright.classname as accessright, inst.accessright.openaccessroute as accessright_uw, substr(inst.collectedfrom.key, 4) as collectedfrom, substr(inst.hostedby.key, 4) as hostedby, inst.dateofacceptance.value as dateofacceptance, inst.license.value as license, p.qualifier.classname as pidtype, p.value as pid - from ${openaire_db_name}.result r lateral view explode(r.instance) instances as inst lateral view explode(inst.pid) pids as p) r + from ${openaire_db_name}.result r lateral view explode(r.instance) instances as inst lateral view outer explode(inst.pid) pids as p) r join ${stats_db_name}.result res on res.id=r.id; DROP TABLE IF EXISTS ${stats_db_name}.result_apc purge; create table if not exists ${stats_db_name}.result_apc STORED AS PARQUET as -select r.id, r.amount, r.currency +select distinct r.id, r.amount, r.currency from ( select substr(r.id, 4) as id, cast(inst.processingchargeamount.value as float) as amount, inst.processingchargecurrency.value as currency from ${openaire_db_name}.result r lateral view explode(r.instance) instances as inst) r diff --git 
a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index f13b2500c..ca5efccce 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -1,390 +1,255 @@ -- Sprint 1 ---- drop table if exists ${stats_db_name}.indi_pub_green_oa purge; /*EOS*/ - ---create table if not exists ${stats_db_name}.indi_pub_green_oa stored as parquet as ---select distinct p.id, coalesce(green_oa, 0) as green_oa ---from ${stats_db_name}.publication p --- left outer join ( --- select p.id, 1 as green_oa --- from ${stats_db_name}.publication p --- join ${stats_db_name}.result_instance ri on ri.id = p.id --- join ${stats_db_name}.datasource on datasource.id = ri.hostedby --- where datasource.type like '%Repository%' --- and (ri.accessright = 'Open Access' --- or ri.accessright = 'Embargo' or ri.accessright = 'Open Source')) tmp --- on p.id= tmp.id; create table if not exists ${stats_db_name}.indi_pub_green_oa stored as parquet as select distinct p.id, coalesce(green_oa, 0) as green_oa from ${stats_db_name}.publication p - left outer join ( +left outer join ( select p.id, 1 as green_oa from ${stats_db_name}.publication p - join ${stats_db_name}.result_instance ri on ri.id = p.id - join ${stats_db_name}.datasource on datasource.id = ri.hostedby - where datasource.type like '%Repository%' - and (ri.accessright = 'Open Access' - or ri.accessright = 'Embargo' or ri.accessright = 'Open Source') and datasource.name!='Other') tmp - on p.id= tmp.id; /*EOS*/ + join ${stats_db_name}.result_instance ri on ri.id = p.id + join ${stats_db_name}.datasource on datasource.id = ri.hostedby + where datasource.type like '%Repository%' and (ri.accessright = 'Open Access' or ri.accessright = 'Embargo' or ri.accessright = 'Open Source') and datasource.name!='Other') tmp on p.id= tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_grey_lit purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_grey_lit stored as parquet as select distinct p.id, coalesce(grey_lit, 0) as grey_lit from ${stats_db_name}.publication p - left outer join ( +left outer join ( select p.id, 1 as grey_lit from ${stats_db_name}.publication p - join ${stats_db_name}.result_classifications rt on rt.id = p.id - where rt.type not in ('Article','Part of book or chapter of book','Book','Doctoral thesis','Master thesis','Data Paper', 'Thesis', 'Bachelor thesis', 'Conference object') and - not exists (select 1 from ${stats_db_name}.result_classifications rc where type ='Other literature type' - and rc.id=p.id)) tmp on p.id=tmp.id; /*EOS*/ + join ${stats_db_name}.result_classifications rt on rt.id = p.id + where rt.type not in ('Article','Part of book or chapter of book','Book','Doctoral thesis','Master thesis','Data Paper', 'Thesis', 'Bachelor thesis', 'Conference object') + and not exists (select 1 from ${stats_db_name}.result_classifications rc where type ='Other literature type' and rc.id=p.id)) tmp on p.id=tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_doi_from_crossref purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_doi_from_crossref stored as parquet as select distinct p.id, 
coalesce(doi_from_crossref, 0) as doi_from_crossref from ${stats_db_name}.publication p - left outer join - (select ri.id, 1 as doi_from_crossref from ${stats_db_name}.result_instance ri - join ${stats_db_name}.datasource d on d.id = ri.collectedfrom - where pidtype='Digital Object Identifier' and d.name ='Crossref') tmp - on tmp.id=p.id; /*EOS*/ +left outer join ( + select ri.id, 1 as doi_from_crossref from ${stats_db_name}.result_instance ri + join ${stats_db_name}.datasource d on d.id = ri.collectedfrom + where pidtype='Digital Object Identifier' and d.name ='Crossref') tmp on tmp.id=p.id; /*EOS*/ -- Sprint 2 ---- drop table if exists ${stats_db_name}.indi_result_has_cc_licence purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_result_has_cc_licence stored as parquet as select distinct r.id, (case when lic='' or lic is null then 0 else 1 end) as has_cc_license from ${stats_db_name}.result r -left outer join (select r.id, license.type as lic from ${stats_db_name}.result r - join ${stats_db_name}.result_licenses as license on license.id = r.id - where lower(license.type) LIKE '%creativecommons.org%' OR lower(license.type) LIKE '%cc-%') tmp - on r.id= tmp.id; /*EOS*/ +left outer join ( + select r.id, license.type as lic from ${stats_db_name}.result r + join ${stats_db_name}.result_licenses as license on license.id = r.id + where lower(license.type) LIKE '%creativecommons.org%' OR lower(license.type) LIKE '%cc %') tmp on r.id= tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_result_has_cc_licence_url purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_result_has_cc_licence_url stored as parquet as select distinct r.id, case when lic_host='' or lic_host is null then 0 else 1 end as has_cc_license_url from ${stats_db_name}.result r - left outer join (select r.id, lower(parse_url(license.type, "HOST")) as lic_host - from ${stats_db_name}.result r - join ${stats_db_name}.result_licenses as license on license.id = r.id - WHERE lower(parse_url(license.type, "HOST")) = "creativecommons.org") tmp - on r.id= tmp.id; /*EOS*/ +left outer join ( + select r.id, lower(parse_url(license.type, "HOST")) as lic_host + from ${stats_db_name}.result r + join ${stats_db_name}.result_licenses as license on license.id = r.id + WHERE lower(parse_url(license.type, "HOST")) = "creativecommons.org") tmp on r.id= tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_has_abstract purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_has_abstract stored as parquet as select distinct publication.id, cast(coalesce(abstract, true) as int) has_abstract from ${stats_db_name}.publication; /*EOS*/ drop table if exists ${stats_db_name}.indi_result_with_orcid purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_result_with_orcid stored as parquet as select distinct r.id, coalesce(has_orcid, 0) as has_orcid from ${stats_db_name}.result r - left outer join (select id, 1 as has_orcid from ${stats_db_name}.result_orcid) tmp - on r.id= tmp.id; /*EOS*/ +left outer join ( + select id, 1 as has_orcid from ${stats_db_name}.result_orcid) tmp on r.id= tmp.id; /*EOS*/ + ---- Sprint 3 ---- - drop table if exists ${stats_db_name}.indi_funded_result_with_fundref purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_funded_result_with_fundref stored as parquet as select distinct r.result as id, coalesce(fundref, 0) as fundref from ${stats_db_name}.project_results r - left outer join (select distinct result, 1 as fundref from ${stats_db_name}.project_results 
- where provenance='Harvested') tmp - on r.result= tmp.result; /*EOS*/ - --- create table indi_result_org_collab stored as parquet as --- select o1.organization org1, o2.organization org2, count(distinct o1.id) as collaborations --- from result_organization as o1 --- join result_organization as o2 on o1.id=o2.id and o1.organization!=o2.organization --- group by o1.organization, o2.organization; --- --- compute stats indi_result_org_collab; --- -create TEMPORARY VIEW tmp AS SELECT ro.organization organization, ro.id, o.name from ${stats_db_name}.result_organization ro -join ${stats_db_name}.organization o on o.id=ro.organization where o.name is not null; /*EOS*/ +left outer join ( + select distinct result, 1 as fundref from ${stats_db_name}.project_results where provenance='Harvested') tmp on r.result= tmp.result; /*EOS*/ drop table if exists ${stats_db_name}.indi_result_org_collab purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_result_org_collab stored as parquet as -select o1.organization org1, o1.name org1name1, o2.organization org2, o2.name org2name2, count(o1.id) as collaborations -from tmp as o1 -join tmp as o2 where o1.id=o2.id and o1.organization!=o2.organization and o1.name!=o2.name -group by o1.organization, o2.organization, o1.name, o2.name; /*EOS*/ - -DROP VIEW if exists tmp; /*EOS*/ - -create TEMPORARY VIEW tmp AS -select distinct ro.organization organization, ro.id, o.name, o.country from ${stats_db_name}.result_organization ro -join ${stats_db_name}.organization o on o.id=ro.organization where country <> 'UNKNOWN' and o.name is not null; /*EOS*/ + WITH tmp AS ( + SELECT ro.organization organization, ro.id, o.name + from ${stats_db_name}.result_organization ro + join ${stats_db_name}.organization o on o.id=ro.organization where o.name is not null) + select o1.organization org1, o1.name org1name1, o2.organization org2, o2.name org2name2, count(o1.id) as collaborations + from tmp as o1 + join tmp as o2 where o1.id=o2.id and o1.organization!=o2.organization and o1.name!=o2.name + group by o1.organization, o2.organization, o1.name, o2.name; /*EOS*/ drop table if exists ${stats_db_name}.indi_result_org_country_collab purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_result_org_country_collab stored as parquet as -select o1.organization org1,o1.name org1name1, o2.country country2, count(o1.id) as collaborations -from tmp as o1 join tmp as o2 on o1.id=o2.id -where o1.id=o2.id and o1.country!=o2.country -group by o1.organization, o1.id, o1.name, o2.country; /*EOS*/ - -drop table if exists tmp purge; /*EOS*/ - -create TEMPORARY VIEW tmp AS -select o.id organization, o.name, ro.project as project from ${stats_db_name}.organization o - join ${stats_db_name}.organization_projects ro on o.id=ro.id where o.name is not null; /*EOS*/ + WITH tmp AS ( + select distinct ro.organization organization, ro.id, o.name, o.country + from ${stats_db_name}.result_organization ro + join ${stats_db_name}.organization o on o.id=ro.organization + where country <> 'UNKNOWN' and o.name is not null) + select o1.organization org1,o1.name org1name1, o2.country country2, count(o1.id) as collaborations + from tmp as o1 join tmp as o2 on o1.id=o2.id + where o1.id=o2.id and o1.country!=o2.country + group by o1.organization, o1.id, o1.name, o2.country; /*EOS*/ drop table if exists ${stats_db_name}.indi_project_collab_org purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_project_collab_org stored as parquet as -select o1.organization org1,o1.name orgname1, 
o2.organization org2, o2.name orgname2, count(distinct o1.project) as collaborations -from tmp as o1 - join tmp as o2 on o1.project=o2.project -where o1.organization<>o2.organization and o1.name<>o2.name -group by o1.name,o2.name, o1.organization, o2.organization; /*EOS*/ - -DROP VIEW if exists tmp; /*EOS*/ - -create TEMPORARY VIEW tmp AS -select o.id organization, o.name, o.country , ro.project as project from ${stats_db_name}.organization o - join ${stats_db_name}.organization_projects ro on o.id=ro.id - and o.country <> 'UNKNOWN' and o.name is not null; /*EOS*/ + WITH tmp AS ( + select o.id organization, o.name, ro.project as project + from ${stats_db_name}.organization o + join ${stats_db_name}.organization_projects ro on o.id=ro.id where o.name is not null) + select o1.organization org1,o1.name orgname1, o2.organization org2, o2.name orgname2, count(distinct o1.project) as collaborations + from tmp as o1 + join tmp as o2 on o1.project=o2.project + where o1.organization<>o2.organization and o1.name<>o2.name + group by o1.name,o2.name, o1.organization, o2.organization; /*EOS*/ drop table if exists ${stats_db_name}.indi_project_collab_org_country purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_project_collab_org_country stored as parquet as -select o1.organization org1,o1.name org1name, o2.country country2, count(distinct o1.project) as collaborations -from tmp as o1 - join tmp as o2 on o1.project=o2.project -where o1.organization<>o2.organization and o1.country<>o2.country -group by o1.organization, o2.country, o1.name; /*EOS*/ - -DROP VIEW if exists tmp; /*EOS*/ + WITH tmp AS ( + select o.id organization, o.name, o.country , ro.project as project + from ${stats_db_name}.organization o + join ${stats_db_name}.organization_projects ro on o.id=ro.id and o.country <> 'UNKNOWN' and o.name is not null) + select o1.organization org1,o1.name org1name, o2.country country2, count(distinct o1.project) as collaborations + from tmp as o1 + join tmp as o2 on o1.project=o2.project + where o1.organization<>o2.organization and o1.country<>o2.country + group by o1.organization, o2.country, o1.name; /*EOS*/ drop table if exists ${stats_db_name}.indi_funder_country_collab purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_funder_country_collab stored as parquet as with tmp as (select funder, project, country from ${stats_db_name}.organization_projects op join ${stats_db_name}.organization o on o.id=op.id join ${stats_db_name}.project p on p.id=op.project where country <> 'UNKNOWN') -select f1.funder, f1.country as country1, f2.country as country2, count(distinct f1.project) as collaborations -from tmp as f1 - join tmp as f2 on f1.project=f2.project -where f1.country<>f2.country -group by f1.funder, f2.country, f1.country; /*EOS*/ - -create TEMPORARY VIEW tmp AS -select distinct country, ro.id as result from ${stats_db_name}.organization o - join ${stats_db_name}.result_organization ro on o.id=ro.organization - where country <> 'UNKNOWN' and o.name is not null; /*EOS*/ + select f1.funder, f1.country as country1, f2.country as country2, count(distinct f1.project) as collaborations + from tmp as f1 + join tmp as f2 on f1.project=f2.project + where f1.country<>f2.country + group by f1.funder, f2.country, f1.country; /*EOS*/ drop table if exists ${stats_db_name}.indi_result_country_collab purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_result_country_collab stored as parquet as -select o1.country country1, o2.country country2, count(o1.result) as 
collaborations -from tmp as o1 - join tmp as o2 on o1.result=o2.result -where o1.country<>o2.country -group by o1.country, o2.country; /*EOS*/ + WITH tmp AS ( + select distinct country, ro.id as result from ${stats_db_name}.organization o + join ${stats_db_name}.result_organization ro on o.id=ro.organization + where country <> 'UNKNOWN' and o.name is not null) + select o1.country country1, o2.country country2, count(o1.result) as collaborations + from tmp as o1 + join tmp as o2 on o1.result=o2.result + where o1.country<>o2.country + group by o1.country, o2.country; /*EOS*/ -DROP VIEW if exists tmp; /*EOS*/ ---- Sprint 4 ---- drop table if exists ${stats_db_name}.indi_pub_diamond purge; /*EOS*/ - ---create table if not exists ${stats_db_name}.indi_pub_diamond stored as parquet as ---select distinct pd.id, coalesce(in_diamond_journal, 0) as in_diamond_journal ---from ${stats_db_name}.publication_datasources pd --- left outer join ( --- select pd.id, 1 as in_diamond_journal from ${stats_db_name}.publication_datasources pd --- join ${stats_db_name}.datasource d on d.id=pd.datasource --- join STATS_EXT.plan_s_jn ps where (ps.issn_print=d.issn_printed and ps.issn_online=d.issn_online) --- and (ps.journal_is_in_doaj=true or ps.journal_is_oa=true) and ps.has_apc=false) tmp --- on pd.id=tmp.id; - create table if not exists ${stats_db_name}.indi_pub_diamond stored as parquet as -select distinct pd.id, coalesce(in_diamond_journal, 0) as in_diamond_journal -from ${stats_db_name}.publication_datasources pd -left outer join (select pd.id, 1 as in_diamond_journal from ${stats_db_name}.publication_datasources pd -join ${stats_db_name}.datasource d on d.id=pd.datasource -join STATS_EXT.plan_s_jn ps where (ps.issn_print=d.issn_printed and ps.issn_online=d.issn_online) -and (ps.journal_is_in_doaj=true or ps.journal_is_oa=true) and ps.has_apc=false) tmp -on pd.id=tmp.id; /*EOS*/ + select distinct pd.id, coalesce(in_diamond_journal, 0) as in_diamond_journal + from ${stats_db_name}.publication_datasources pd + left outer join ( + select pd.id, 1 as in_diamond_journal + from ${stats_db_name}.publication_datasources pd + join ${stats_db_name}.datasource d on d.id=pd.datasource + join STATS_EXT.plan_s_jn ps where (ps.issn_print=d.issn_printed and ps.issn_online=d.issn_online) + and (ps.journal_is_in_doaj=true or ps.journal_is_oa=true) and ps.has_apc=false) tmp on pd.id=tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_in_transformative purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_in_transformative stored as parquet as -select distinct pd.id, coalesce(is_transformative, 0) as is_transformative -from ${stats_db_name}.publication pd - left outer join ( - select pd.id, 1 as is_transformative from ${stats_db_name}.publication_datasources pd - join ${stats_db_name}.datasource d on d.id=pd.datasource - join STATS_EXT.plan_s_jn ps where (ps.issn_print=d.issn_printed and ps.issn_online=d.issn_online) - and ps.is_transformative_journal=true) tmp - on pd.id=tmp.id; /*EOS*/ + select distinct pd.id, coalesce(is_transformative, 0) as is_transformative + from ${stats_db_name}.publication pd + left outer join ( + select pd.id, 1 as is_transformative + from ${stats_db_name}.publication_datasources pd + join ${stats_db_name}.datasource d on d.id=pd.datasource + join STATS_EXT.plan_s_jn ps where (ps.issn_print=d.issn_printed and ps.issn_online=d.issn_online) + and ps.is_transformative_journal=true) tmp on pd.id=tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_closed_other_open 
purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_closed_other_open stored as parquet as -select distinct ri.id, coalesce(pub_closed_other_open, 0) as pub_closed_other_open from ${stats_db_name}.result_instance ri - left outer join - (select ri.id, 1 as pub_closed_other_open from ${stats_db_name}.result_instance ri - join ${stats_db_name}.publication p on p.id=ri.id - join ${stats_db_name}.datasource d on ri.hostedby=d.id - where d.type like '%Journal%' and ri.accessright='Closed Access' and - (p.bestlicence='Open Access' or p.bestlicence='Open Source')) tmp - on tmp.id=ri.id; /*EOS*/ + select distinct ri.id, coalesce(pub_closed_other_open, 0) as pub_closed_other_open + from ${stats_db_name}.result_instance ri + left outer join ( + select ri.id, 1 as pub_closed_other_open + from ${stats_db_name}.result_instance ri + join ${stats_db_name}.publication p on p.id=ri.id + join ${stats_db_name}.datasource d on ri.hostedby=d.id + where d.type like '%Journal%' and ri.accessright='Closed Access' and + (p.bestlicence='Open Access' or p.bestlicence='Open Source')) tmp on tmp.id=ri.id; /*EOS*/ + ---- Sprint 5 ---- drop table if exists ${stats_db_name}.indi_result_no_of_copies purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_result_no_of_copies stored as parquet as -select id, count(id) as number_of_copies from ${stats_db_name}.result_instance group by id; /*EOS*/ + select id, count(id) as number_of_copies + from ${stats_db_name}.result_instance + group by id; /*EOS*/ ---- Sprint 6 ---- drop table if exists ${stats_db_name}.indi_pub_downloads purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_downloads stored as parquet as -SELECT result_id, sum(downloads) no_downloads from openaire_prod_usage_stats.usage_stats - join ${stats_db_name}.publication on result_id=id -where downloads>0 -GROUP BY result_id -order by no_downloads desc; /*EOS*/ - ---ANALYZE TABLE ${stats_db_name}.indi_pub_downloads COMPUTE STATISTICS; + SELECT result_id, sum(downloads) no_downloads + from openaire_prod_usage_stats.usage_stats + join ${stats_db_name}.publication on result_id=id + where downloads>0 + GROUP BY result_id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_downloads_datasource purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_downloads_datasource stored as parquet as -SELECT result_id, repository_id, sum(downloads) no_downloads from openaire_prod_usage_stats.usage_stats - join ${stats_db_name}.publication on result_id=id -where downloads>0 -GROUP BY result_id, repository_id -order by result_id; /*EOS*/ + SELECT result_id, repository_id, sum(downloads) no_downloads + from openaire_prod_usage_stats.usage_stats + join ${stats_db_name}.publication on result_id=id + where downloads>0 + GROUP BY result_id, repository_id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_downloads_year purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_downloads_year stored as parquet as -SELECT result_id, cast(substring(us.`date`, 1,4) as int) as `year`, sum(downloads) no_downloads -from openaire_prod_usage_stats.usage_stats us -join ${stats_db_name}.publication on result_id=id where downloads>0 -GROUP BY result_id, substring(us.`date`, 1,4); /*EOS*/ + SELECT result_id, cast(substring(us.`date`, 1,4) as int) as `year`, sum(downloads) no_downloads + from openaire_prod_usage_stats.usage_stats us + join ${stats_db_name}.publication on result_id=id where downloads>0 + GROUP BY result_id, substring(us.`date`, 1,4); /*EOS*/ drop table if 
exists ${stats_db_name}.indi_pub_downloads_datasource_year purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_downloads_datasource_year stored as parquet as -SELECT result_id, cast(substring(us.`date`, 1,4) as int) as `year`, repository_id, sum(downloads) no_downloads from openaire_prod_usage_stats.usage_stats us -join ${stats_db_name}.publication on result_id=id -where downloads>0 -GROUP BY result_id, repository_id, substring(us.`date`, 1,4); /*EOS*/ + SELECT result_id, cast(substring(us.`date`, 1,4) as int) as `year`, repository_id, sum(downloads) no_downloads + from openaire_prod_usage_stats.usage_stats us + join ${stats_db_name}.publication on result_id=id + where downloads>0 + GROUP BY result_id, repository_id, substring(us.`date`, 1,4); /*EOS*/ + ---- Sprint 7 ---- drop table if exists ${stats_db_name}.indi_pub_gold_oa purge; /*EOS*/ - ---create table if not exists ${stats_db_name}.indi_pub_gold_oa stored as parquet as --- WITH gold_oa AS ( SELECT --- issn_l, --- journal_is_in_doaj, --- journal_is_oa, --- issn_1 as issn --- FROM --- STATS_EXT.oa_journals --- WHERE --- issn_1 != "" --- UNION --- ALL SELECT --- issn_l, --- journal_is_in_doaj, --- journal_is_oa, --- issn_2 as issn --- FROM --- STATS_EXT.oa_journals --- WHERE --- issn_2 != "" ), issn AS ( SELECT --- * --- FROM ---( SELECT --- id, --- issn_printed as issn --- FROM --- ${stats_db_name}.datasource --- WHERE --- issn_printed IS NOT NULL --- UNION ALL --- SELECT --- id, --- issn_online as issn --- FROM --- ${stats_db_name}.datasource --- WHERE --- issn_online IS NOT NULL or id like '%doajarticles%') as issn --- WHERE --- LENGTH(issn) > 7) ---SELECT --- DISTINCT pd.id, coalesce(is_gold, 0) as is_gold ---FROM --- ${stats_db_name}.publication_datasources pd --- left outer join( --- select pd.id, 1 as is_gold FROM ${stats_db_name}.publication_datasources pd --- JOIN issn on issn.id=pd.datasource --- JOIN gold_oa on issn.issn = gold_oa.issn) tmp --- on pd.id=tmp.id; - ---create table if not exists ${stats_db_name}.indi_pub_gold_oa stored as parquet as ---with gold_oa as ( ---SELECT issn,issn_l from stats_ext.issn_gold_oa_dataset_v5), ---issn AS (SELECT * FROM ---(SELECT id,issn_printed as issn FROM ${stats_db_name}.datasource ---WHERE issn_printed IS NOT NULL ---UNION ALL ---SELECT id, issn_online as issn FROM ${stats_db_name}.datasource ---WHERE issn_online IS NOT NULL or id like '%doajarticles%') as issn ---WHERE LENGTH(issn) > 7), ---alljournals AS(select issn, issn_l from stats_ext.alljournals ---where journal_is_in_doaj=true or journal_is_oa=true) ---SELECT DISTINCT pd.id, coalesce(is_gold, 0) as is_gold ---FROM ${stats_db_name}.publication_datasources pd ---left outer join ( ---select pd.id, 1 as is_gold FROM ${stats_db_name}.publication_datasources pd ---JOIN issn on issn.id=pd.datasource ---JOIN gold_oa on issn.issn = gold_oa.issn ---join alljournals on issn.issn=alljournals.issn ---left outer join ${stats_db_name}.result_instance ri on ri.id=pd.id ---and ri.accessright!='Closed Access' and ri.accessright_uw='gold') tmp ---on pd.id=tmp.id; create table if not exists ${stats_db_name}.indi_pub_gold_oa stored as parquet as -with gold_oa as ( -select distinct issn from ( - SELECT issn_l as issn from stats_ext.issn_gold_oa_dataset_v5 - UNION ALL - SELECT issn as issn from stats_ext.issn_gold_oa_dataset_v5 - UNION ALL - select issn from stats_ext.alljournals where journal_is_in_doaj=true or journal_is_oa=true - UNION ALL - select issn_l as issn from stats_ext.alljournals where journal_is_in_doaj=true or 
journal_is_oa=true) foo), -dd as ( -select distinct * from ( - select id, issn_printed as issn from ${stats_db_name}.datasource d where d.id like '%doajarticles%' - UNION ALL - select id, issn_online as issn from ${stats_db_name}.datasource d where d.id like '%doajarticles%' - UNION ALL - select id, issn_printed as issn from ${stats_db_name}.datasource d join gold_oa on gold_oa.issn=d.issn_printed - UNION ALL - select id, issn_online as issn from ${stats_db_name}.datasource d join gold_oa on gold_oa.issn=d.issn_online) foo -) -SELECT DISTINCT pd.id, coalesce(is_gold, 0) as is_gold -FROM ${stats_db_name}.publication_datasources pd -left outer join ( - select pd.id, 1 as is_gold - FROM ${stats_db_name}.publication_datasources pd - join dd on dd.id=pd.datasource - left outer join ${stats_db_name}.result_accessroute ra on ra.id = pd.id where ra.accessroute = 'gold') tmp on tmp.id=pd.id; /*EOS*/ + with gold_oa as ( + select distinct issn from ( + SELECT issn_l as issn from stats_ext.issn_gold_oa_dataset_v5 + UNION ALL + SELECT issn as issn from stats_ext.issn_gold_oa_dataset_v5 + UNION ALL + select issn from stats_ext.alljournals where journal_is_in_doaj=true or journal_is_oa=true + UNION ALL + select issn_l as issn from stats_ext.alljournals where journal_is_in_doaj=true or journal_is_oa=true) foo), + dd as ( + select distinct * from ( + select id, issn_printed as issn from ${stats_db_name}.datasource d where d.id like '%doajarticles%' + UNION ALL + select id, issn_online as issn from ${stats_db_name}.datasource d where d.id like '%doajarticles%' + UNION ALL + select id, issn_printed as issn from ${stats_db_name}.datasource d left semi join gold_oa on gold_oa.issn=d.issn_printed + UNION ALL + select id, issn_online as issn from ${stats_db_name}.datasource d left semi join gold_oa on gold_oa.issn=d.issn_online) foo + ) + SELECT DISTINCT pd.id, coalesce(is_gold, 0) as is_gold + FROM ${stats_db_name}.publication_datasources pd + left outer join ( + select pd.id, 1 as is_gold + FROM ${stats_db_name}.publication_datasources pd + left semi join dd on dd.id=pd.datasource + left outer join ${stats_db_name}.result_accessroute ra on ra.id = pd.id where ra.accessroute = 'gold') tmp on tmp.id=pd.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc stored as parquet as WITH hybrid_oa AS ( SELECT issn_l, journal_is_in_doaj, journal_is_oa, issn_print as issn @@ -395,121 +260,57 @@ create table if not exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc stored as FROM STATS_EXT.plan_s_jn WHERE issn_online != "" and (journal_is_in_doaj = FALSE OR journal_is_oa = FALSE)), issn AS ( - SELECT * - FROM ( - SELECT id, issn_printed as issn - FROM ${stats_db_name}.datasource - WHERE issn_printed IS NOT NULL - UNION ALL - SELECT id,issn_online as issn - FROM ${stats_db_name}.datasource - WHERE issn_online IS NOT NULL ) as issn - WHERE LENGTH(issn) > 7) -SELECT DISTINCT pd.id, coalesce(is_hybrid_oa, 0) as is_hybrid_oa -FROM ${stats_db_name}.publication_datasources pd - LEFT OUTER JOIN ( - SELECT pd.id, 1 as is_hybrid_oa from ${stats_db_name}.publication_datasources pd - JOIN ${stats_db_name}.datasource d on d.id=pd.datasource - JOIN issn on issn.id=pd.datasource - JOIN hybrid_oa ON issn.issn = hybrid_oa.issn - JOIN ${stats_db_name}.indi_result_has_cc_licence cc on pd.id=cc.id - JOIN ${stats_db_name}.indi_pub_gold_oa ga on pd.id=ga.id - where cc.has_cc_license=1 and ga.is_gold=0) tmp on pd.id=tmp.id; /*EOS*/ + 
SELECT * + FROM ( + SELECT id, issn_printed as issn + FROM ${stats_db_name}.datasource + WHERE issn_printed IS NOT NULL + UNION ALL + SELECT id,issn_online as issn + FROM ${stats_db_name}.datasource + WHERE issn_online IS NOT NULL ) as issn + WHERE LENGTH(issn) > 7) + SELECT DISTINCT pd.id, coalesce(is_hybrid_oa, 0) as is_hybrid_oa + FROM ${stats_db_name}.publication_datasources pd + LEFT OUTER JOIN ( + SELECT pd.id, 1 as is_hybrid_oa from ${stats_db_name}.publication_datasources pd + JOIN ${stats_db_name}.datasource d on d.id=pd.datasource + JOIN issn on issn.id=pd.datasource + JOIN hybrid_oa ON issn.issn = hybrid_oa.issn + JOIN ${stats_db_name}.indi_result_has_cc_licence cc on pd.id=cc.id + JOIN ${stats_db_name}.indi_pub_gold_oa ga on pd.id=ga.id where cc.has_cc_license=1 and ga.is_gold=0) tmp on pd.id=tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_hybrid purge; /*EOS*/ - ---create table if not exists ${stats_db_name}.indi_pub_hybrid stored as parquet as --- WITH gold_oa AS ( SELECT --- issn_l, --- journal_is_in_doaj, --- journal_is_oa, --- issn_1 as issn, --- has_apc --- FROM --- STATS_EXT.oa_journals --- WHERE --- issn_1 != "" --- UNION --- ALL SELECT --- issn_l, --- journal_is_in_doaj, --- journal_is_oa, --- issn_2 as issn, --- has_apc --- FROM --- STATS_EXT.oa_journals --- WHERE --- issn_2 != "" ), issn AS ( SELECT --- * --- FROM ---( SELECT --- id, --- issn_printed as issn --- FROM --- ${stats_db_name}.datasource --- WHERE --- issn_printed IS NOT NULL --- UNION ALL --- SELECT --- id, --- issn_online as issn --- FROM --- ${stats_db_name}.datasource --- WHERE --- issn_online IS NOT NULL or id like '%doajarticles%') as issn --- WHERE --- LENGTH(issn) > 7) ---select distinct pd.id, coalesce(is_hybrid, 0) as is_hybrid ---from ${stats_db_name}.publication_datasources pd --- left outer join ( --- select pd.id, 1 as is_hybrid from ${stats_db_name}.publication_datasources pd --- join ${stats_db_name}.datasource d on d.id=pd.datasource --- join issn on issn.id=pd.datasource --- join gold_oa on issn.issn=gold_oa.issn --- where (gold_oa.journal_is_in_doaj=false or gold_oa.journal_is_oa=false))tmp --- on pd.id=tmp.id; - ---create table if not exists ${stats_db_name}.indi_pub_hybrid stored as parquet as ---select distinct pd.id,coalesce(is_hybrid,0) is_hybrid from ${stats_db_name}.publication_datasources pd ---left outer join (select pd.id, 1 as is_hybrid from ${stats_db_name}.publication_datasources pd ---join ${stats_db_name}.datasource d on pd.datasource=d.id ---join ${stats_db_name}.result_instance ri on ri.id=pd.id ---join ${stats_db_name}.indi_pub_gold_oa indi_gold on indi_gold.id=pd.id ---join ${stats_db_name}.result_accessroute ra on ra.id=pd.id ---where d.type like '%Journal%' and ri.accessright!='Closed Access' and (ri.accessright_uw!='gold' ---or indi_gold.is_gold=0) and (ra.accessroute='hybrid' or ri.license is not null)) tmp ---on pd.id=tmp.id; - create table if not exists ${stats_db_name}.indi_pub_hybrid stored as parquet as select distinct pd.id,coalesce(is_hybrid,0) is_hybrid from ${stats_db_name}.publication pd -left outer join (select pd.id, 1 as is_hybrid from ${stats_db_name}.publication pd -join ${stats_db_name}.result_instance ri on ri.id=pd.id -join ${stats_db_name}.indi_pub_gold_oa indi_gold on indi_gold.id=pd.id -join ${stats_db_name}.result_accessroute ra on ra.id=pd.id -join ${stats_db_name}.datasource d on d.id=ri.hostedby -where indi_gold.is_gold=0 and ((d.type like '%Journal%' and ri.accessright!='Closed Access' and ri.accessright!='Restricted' and 
ri.license is not null) or -ra.accessroute='hybrid'))tmp -on pd.id=tmp.id; /*EOS*/ +left outer join ( + select pd.id, 1 as is_hybrid from ${stats_db_name}.publication pd + join ${stats_db_name}.result_instance ri on ri.id=pd.id + join ${stats_db_name}.indi_pub_gold_oa indi_gold on indi_gold.id=pd.id + join ${stats_db_name}.result_accessroute ra on ra.id=pd.id + join ${stats_db_name}.datasource d on d.id=ri.hostedby + where indi_gold.is_gold=0 and ((d.type like '%Journal%' and ri.accessright!='Closed Access' and ri.accessright!='Restricted' and ri.license is not null) or ra.accessroute='hybrid')) tmp on pd.id=tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_org_fairness purge; /*EOS*/ - create table if not exists ${stats_db_name}.indi_org_fairness stored as parquet as --return results with PIDs, and rich metadata group by organization - with result_fair as - (select ro.organization organization, count(distinct ro.id) no_result_fair from ${stats_db_name}.result_organization ro - join ${stats_db_name}.result r on r.id=ro.id + with result_fair as ( + select ro.organization organization, count(distinct ro.id) no_result_fair + from ${stats_db_name}.result_organization ro + join ${stats_db_name}.result r on r.id=ro.id --join result_pids rp on r.id=rp.id - where (title is not null) and (publisher is not null) and (abstract=true) and (year is not null) and (authors>0) and cast(year as int)>2003 - group by ro.organization), + where (title is not null) and (publisher is not null) and (abstract=true) and (year is not null) and (authors>0) and cast(year as int)>2003 + group by ro.organization), --return all results group by organization - allresults as (select ro.organization, count(distinct ro.id) no_allresults from ${stats_db_name}.result_organization ro - join ${stats_db_name}.result r on r.id=ro.id - where cast(year as int)>2003 - group by ro.organization) + allresults as ( + select ro.organization, count(distinct ro.id) no_allresults from ${stats_db_name}.result_organization ro + join ${stats_db_name}.result r on r.id=ro.id + where cast(year as int)>2003 + group by ro.organization) --return results_fair/all_results -select allresults.organization, result_fair.no_result_fair/allresults.no_allresults org_fairness -from allresults - join result_fair on result_fair.organization=allresults.organization; /*EOS*/ + select allresults.organization, result_fair.no_result_fair/allresults.no_allresults org_fairness + from allresults + join result_fair on result_fair.organization=allresults.organization; /*EOS*/ CREATE TEMPORARY VIEW result_fair as select ro.organization organization, count(distinct ro.id) no_result_fair @@ -859,19 +660,6 @@ drop view pub_fos_totals; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_bronze_oa purge; /*EOS*/ ---create table if not exists ${stats_db_name}.indi_pub_bronze_oa stored as parquet as ---select distinct p.id, coalesce(is_bronze_oa,0) as is_bronze_oa ---from ${stats_db_name}.publication p ---left outer join ---(select p.id, 1 as is_bronze_oa from ${stats_db_name}.publication p ---join ${stats_db_name}.indi_result_has_cc_licence cc on cc.id=p.id ---join ${stats_db_name}.indi_pub_gold_oa ga on ga.id=p.id ---join ${stats_db_name}.result_instance ri on ri.id=p.id ---join ${stats_db_name}.datasource d on d.id=ri.hostedby ---where cc.has_cc_license=0 and ga.is_gold=0 ---and (d.type='Journal' or d.type='Journal Aggregator/Publisher') ---and ri.accessright='Open Access') tmp on tmp.id=p.id; - create table ${stats_db_name}.indi_pub_bronze_oa stored as parquet as 
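-- Bronze OA: journal-hosted publications readable as 'Open Access' that are neither gold OA nor carry a CC license.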
select distinct pd.id,coalesce(is_bronze_oa,0) is_bronze_oa from ${stats_db_name}.publication pd left outer join (select pd.id, 1 as is_bronze_oa from ${stats_db_name}.publication pd @@ -1189,7 +977,7 @@ union all select pf.name from stats_ext.insitutions_for_publicly_funded pf join ${stats_db_name}.project p on p.funder=pf.name union all -select pf.name from stats_ext.insitutions_for_publicly_funded pf +select op.name from stats_ext.insitutions_for_publicly_funded pf join org_names_pids op on (op.name=pf.name or op.pid=pf.ror) and pf.publicly_funded='yes') foo) select distinct p.id, coalesce(publicly_funded, 0) as publicly_funded @@ -1199,3 +987,58 @@ select distinct ro.id, 1 as publicly_funded from ${stats_db_name}.result_organiz join ${stats_db_name}.organization o on o.id=ro.organization join publicly_funded_orgs pfo on o.name=pfo.name) tmp on p.id=tmp.id; /*EOS*/ +drop table if exists ${stats_db_name}.indi_pub_green_with_license purge; /*EOS*/ +create table ${stats_db_name}.indi_pub_green_with_license stored as parquet as +select distinct p.id, coalesce(green_with_license, 0) as green_with_license +from ${stats_db_name}.publication p +left outer join ( + select distinct p.id, 1 as green_with_license from ${stats_db_name}.publication p + join ${stats_db_name}.result_instance ri on ri.id = p.id + join ${stats_db_name}.datasource on datasource.id = ri.hostedby + where ri.license is not null and datasource.type like '%Repository%' and datasource.name!='Other') tmp on p.id= tmp.id; /*EOS*/ + +drop table if exists ${stats_db_name}.result_country purge; /*EOS*/ + +create table ${stats_db_name}.result_country stored as parquet as +select distinct * +from ( + select ro.id, o.country + from ${stats_db_name}.result_organization ro + left outer join ${stats_db_name}.organization o on o.id=ro.organization + union all + select rp.id, f.country + from ${stats_db_name}.result_projects + left outer join ${stats_db_name}.project p on p.id=rp.project + left outer join ${stats_db_name}.funder f on f.name=p.funder + ) rc +where rc.country is not null; /*EOS*/ + +drop table if exists ${stats_db_name}.indi_result_oa_with_license purge; /*EOS*/ +create table ${stats_db_name}.indi_result_oa_with_license stored as parquet as +select distinct r.id, coalesce(oa_with_license,0) as oa_with_license +from ${stats_db_name}.result r +left outer join (select distinct r.id, 1 as oa_with_license from ${stats_db_name}.result r +join ${stats_db_name}.result_licenses rl on rl.id=r.id where r.bestlicence='Open Access') tmp on r.id=tmp.id; /*EOS*/ + +drop table if exists ${stats_db_name}.indi_result_oa_without_license purge; /*EOS*/ +create table ${stats_db_name}.indi_result_oa_without_license stored as parquet as +with without_license as +(select distinct id from ${stats_db_name}.indi_result_oa_with_license +where oa_with_license=0) +select distinct r.id, coalesce(oa_without_license,0) as oa_without_license +from ${stats_db_name}.result r +left outer join (select distinct r.id, 1 as oa_without_license +from ${stats_db_name}.result r +join without_license wl on wl.id=r.id +where r.bestlicence='Open Access') tmp on r.id=tmp.id; /*EOS*/ + +drop table if exists ${stats_db_name}.indi_result_under_transformative purge; /*EOS*/ +create table ${stats_db_name}.indi_result_under_transformative stored as parquet as +with transformative_dois as ( + select distinct doi from stats_ext.transformative_facts) +select distinct r.id, coalesce(under_transformative,0) as under_transformative +from ${stats_db_name}.result r +left outer join ( + 
select distinct rp.id, 1 as under_transformative + from ${stats_db_name}.result_pids rp join ${stats_db_name}.result r on r.id=rp.id + join transformative_dois td on td.doi=rp.pid) tmp on r.id=tmp.id; /*EOS*/ \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDB.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDB.sql index cc8348f26..4abb6bdbc 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDB.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDB.sql @@ -180,4 +180,12 @@ create view TARGET.indi_funder_openess as select * from SOURCE.indi_funder_opene create view TARGET.indi_funder_findable as select * from SOURCE.indi_funder_findable; create view TARGET.indi_ris_fairness as select * from SOURCE.indi_ris_fairness; create view TARGET.indi_ris_openess as select * from SOURCE.indi_ris_openess; -create view TARGET.indi_ris_findable as select * from SOURCE.indi_ris_findable; \ No newline at end of file +create view TARGET.indi_ris_findable as select * from SOURCE.indi_ris_findable; + +create table TARGET.indi_pub_green_with_license stored as parquet as select * from SOURCE.indi_pub_green_with_license orig where exists (select 1 from TARGET.result r where r.id=orig.id); +create table TARGET.result_country stored as parquet as select * from SOURCE.result_country orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_result_oa_with_license stored as parquet as select * from SOURCE.indi_result_oa_with_license orig where exists (select 1 from TARGET.result r where r.id=orig.id); +create table TARGET.indi_result_oa_without_license stored as parquet as select * from SOURCE.indi_result_oa_without_license orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_result_under_transformative stored as parquet as select * from SOURCE.indi_result_under_transformative orig where exists (select 1 from TARGET.result r where r.id=orig.id); diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDBAll.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDBAll.sql index 42812d159..a8392b226 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDBAll.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDBAll.sql @@ -80,8 +80,12 @@ create table TARGET.result stored as parquet as 'openorgs____::0fccc7640f0cb44d5cd1b06b312a06b9', -- Cardiff University 'openorgs____::8839b55dae0c84d56fd533f52d5d483a', -- Leibniz Institute of Ecological Urban and Regional Development 'openorgs____::526468206bca24c1c90da6a312295cf4', -- Cyprus University of Technology - 'openorgs____::b5ca9d4340e26454e367e2908ef3872f' -- Alma Mater Studiorum University of Bologna - + 'openorgs____::b5ca9d4340e26454e367e2908ef3872f', -- Alma Mater Studiorum University of Bologna + 'openorgs____::a6340e6ecf60f6bba163659df985b0f2', -- TU Dresden + 'openorgs____::64badd35233ba2cd4946368ef2f4cf57', -- University of Vienna + 
'openorgs____::7501d66d2297a963ebfb075c43fff88e', -- Royal Institute of Technology + 'openorgs____::d5eb679abdd31f70fcd4c8ba711148bf', -- Sorbonne University + 'openorgs____::b316f25380d106aac402f5ae8653910d' -- Centre for Research on Ecology and Forestry Applications ) )) foo; create view if not exists TARGET.category as select * from SOURCE.category; @@ -264,4 +268,11 @@ create view TARGET.indi_ris_fairness as select * from SOURCE.indi_ris_fairness; create view TARGET.indi_ris_openess as select * from SOURCE.indi_ris_openess; create view TARGET.indi_ris_findable as select * from SOURCE.indi_ris_findable; +create table TARGET.indi_pub_green_with_license stored as parquet as select * from SOURCE.indi_pub_green_with_license orig where exists (select 1 from TARGET.result r where r.id=orig.id); +create table TARGET.result_country stored as parquet as select * from SOURCE.result_country orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_result_oa_with_license stored as parquet as select * from SOURCE.indi_result_oa_with_license orig where exists (select 1 from TARGET.result r where r.id=orig.id); +create table TARGET.indi_result_oa_without_license stored as parquet as select * from SOURCE.indi_result_oa_without_license orig where exists (select 1 from TARGET.result r where r.id=orig.id); + +create table TARGET.indi_result_under_transformative stored as parquet as select * from SOURCE.indi_result_under_transformative orig where exists (select 1 from TARGET.result r where r.id=orig.id); diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDB_institutions.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDB_institutions.sql index 2c0ac337c..62c68c625 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDB_institutions.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step20-createMonitorDB_institutions.sql @@ -60,5 +60,10 @@ create table TARGET.result stored as parquet as 'openorgs____::0fccc7640f0cb44d5cd1b06b312a06b9', -- Cardiff University 'openorgs____::8839b55dae0c84d56fd533f52d5d483a', -- Leibniz Institute of Ecological Urban and Regional Development 'openorgs____::526468206bca24c1c90da6a312295cf4', -- Cyprus University of Technology - 'openorgs____::b5ca9d4340e26454e367e2908ef3872f' -- Alma Mater Studiorum University of Bologna + 'openorgs____::b5ca9d4340e26454e367e2908ef3872f', -- Alma Mater Studiorum University of Bologna + 'openorgs____::a6340e6ecf60f6bba163659df985b0f2', -- TU Dresden + 'openorgs____::64badd35233ba2cd4946368ef2f4cf57', -- University of Vienna + 'openorgs____::7501d66d2297a963ebfb075c43fff88e', -- Royal Institute of Technology + 'openorgs____::d5eb679abdd31f70fcd4c8ba711148bf', -- Sorbonne University + 'openorgs____::b316f25380d106aac402f5ae8653910d' -- Centre for Research on Ecology and Forestry Applications ))) foo; \ No newline at end of file diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step21-createObservatoryDB.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step21-createObservatoryDB.sql index 2e6f0711c..66620ac38 100644 --- 
a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step21-createObservatoryDB.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step21-createObservatoryDB.sql @@ -2,9 +2,8 @@ create table ${observatory_db_name}.result_cc_licence stored as parquet as select r.id, coalesce(rln.count, 0) > 0 as cc_licence from ${stats_db_name}.result r left outer join ( - select rl.id, sum(case when lower(rln.normalized) like 'cc-%' then 1 else 0 end) as count + select rl.id, sum(case when rl.type like 'CC%' then 1 else 0 end) as count from ${stats_db_name}.result_licenses rl - left outer join ${stats_db_name}.licenses_normalized rln on rl.type=rln.license group by rl.id ) rln on rln.id=r.id; diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step6.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step6.sql index e5b3f504e..165f77946 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step6.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step6.sql @@ -95,7 +95,8 @@ DROP TABLE IF EXISTS ${stats_db_name}.funder purge; create table ${stats_db_name}.funder STORED AS PARQUET as select distinct xpath_string(fund, '//funder/id') as id, xpath_string(fund, '//funder/name') as name, - xpath_string(fund, '//funder/shortname') as shortname + xpath_string(fund, '//funder/shortname') as shortname, + xpath_string(fundingtree[0].value, '//funder/jurisdiction') as country from ${openaire_db_name}.project p lateral view explode(p.fundingtree.value) fundingtree as fund; DROP TABLE IF EXISTS ${stats_db_name}.project_organization_contribution purge; diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml index 709de6595..813fffcf9 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml @@ -345,6 +345,17 @@ + + + + + + + + + + + yarn diff --git a/dhp-workflows/pom.xml b/dhp-workflows/pom.xml index 369c71b5b..1c331d126 100644 --- a/dhp-workflows/pom.xml +++ b/dhp-workflows/pom.xml @@ -31,6 +31,10 @@ dhp-enrichment dhp-graph-provision dhp-blacklist + dhp-stats-actionsets + dhp-stats-hist-snaps + dhp-stats-monitor-irish + dhp-stats-monitor-update dhp-stats-update dhp-stats-promote dhp-usage-stats-build