From 1f5aba12faefdfa5d56d38b58deea15b46b60ea9 Mon Sep 17 00:00:00 2001 From: antleb Date: Wed, 17 Apr 2024 23:54:23 +0300 Subject: [PATCH 01/29] slight optimization in indi_pub_gold_oa definition --- .../stats/oozie_app/scripts/step16-createIndicatorsTables.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index 70cde6481..0845387d3 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -242,7 +242,7 @@ create table if not exists ${stats_db_name}.indi_pub_gold_oa stored as parquet a select id, issn_online as issn from ${stats_db_name}.datasource d join gold_oa on gold_oa.issn=d.issn_online) foo ) SELECT DISTINCT pd.id, coalesce(is_gold, 0) as is_gold - FROM ${stats_db_name}.publication_datasources pd + FROM ${stats_db_name}.publication pd left outer join ( select pd.id, 1 as is_gold FROM ${stats_db_name}.publication_datasources pd From 27d22bd8f945db559392fc1eabcfe185d4183aac Mon Sep 17 00:00:00 2001 From: antleb Date: Wed, 17 Apr 2024 23:59:52 +0300 Subject: [PATCH 02/29] slight optimization in indi_pub_gold_oa definition --- .../stats/oozie_app/scripts/step16-createIndicatorsTables.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index 0845387d3..455c173ef 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -246,7 +246,7 @@ create table if not exists ${stats_db_name}.indi_pub_gold_oa stored as parquet a left outer join ( select pd.id, 1 as is_gold FROM ${stats_db_name}.publication_datasources pd - join dd on dd.id=pd.datasource + left semi join dd on dd.id=pd.datasource left outer join ${stats_db_name}.result_accessroute ra on ra.id = pd.id where ra.accessroute = 'gold') tmp on tmp.id=pd.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc purge; /*EOS*/ From 308ae580a97afba2cf19bf79d8022dbac33fc1e1 Mon Sep 17 00:00:00 2001 From: antleb Date: Thu, 18 Apr 2024 10:57:52 +0300 Subject: [PATCH 03/29] slight optimization in indi_pub_gold_oa definition --- .../scripts/step16-createIndicatorsTables.sql | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index 455c173ef..18d66c6db 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ 
b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -282,14 +282,17 @@ create table if not exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc stored as drop table if exists ${stats_db_name}.indi_pub_hybrid purge; /*EOS*/ create table if not exists ${stats_db_name}.indi_pub_hybrid stored as parquet as -select distinct pd.id,coalesce(is_hybrid,0) is_hybrid from ${stats_db_name}.publication pd +select distinct p.id, coalesce(is_hybrid, 0) is_hybrid +from ${stats_db_name}.publication p left outer join ( - select pd.id, 1 as is_hybrid from ${stats_db_name}.publication pd - join ${stats_db_name}.result_instance ri on ri.id=pd.id - join ${stats_db_name}.indi_pub_gold_oa indi_gold on indi_gold.id=pd.id - join ${stats_db_name}.result_accessroute ra on ra.id=pd.id + select p.id, 1 as is_hybrid + from ${stats_db_name}.publication p + join ${stats_db_name}.result_instance ri on ri.id=p.id join ${stats_db_name}.datasource d on d.id=ri.hostedby - where indi_gold.is_gold=0 and ((d.type like '%Journal%' and ri.accessright!='Closed Access' and ri.accessright!='Restricted' and ri.license is not null) or ra.accessroute='hybrid')) tmp on pd.id=tmp.id; /*EOS*/ + join ${stats_db_name}.indi_pub_gold_oa indi_gold on indi_gold.id=p.id + left outer join ${stats_db_name}.result_accessroute ra on ra.id=p.id + where indi_gold.is_gold=0 and + ((d.type like '%Journal%' and ri.accessright not in ('Closed Access', 'Restricted', 'Not Available') and ri.license is not null) or ra.accessroute='hybrid')) tmp on pd.i=tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_org_fairness purge; /*EOS*/ create table if not exists ${stats_db_name}.indi_org_fairness stored as parquet as From e728a0897c88c4496f4533f713714387b9a1c25f Mon Sep 17 00:00:00 2001 From: antleb Date: Thu, 18 Apr 2024 11:07:55 +0300 Subject: [PATCH 04/29] fixed the definition of indi_pub_bronze_oa --- .../scripts/step16-createIndicatorsTables.sql | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index 18d66c6db..ac14e2904 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -664,17 +664,18 @@ drop view pub_fos_totals; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_bronze_oa purge; /*EOS*/ create table ${stats_db_name}.indi_pub_bronze_oa stored as parquet as -select distinct pd.id,coalesce(is_bronze_oa,0) is_bronze_oa from ${stats_db_name}.publication pd -left outer join (select pd.id, 1 as is_bronze_oa from ${stats_db_name}.publication pd -join ${stats_db_name}.result_instance ri on ri.id=pd.id -join ${stats_db_name}.indi_pub_gold_oa indi_gold on indi_gold.id=pd.id -join ${stats_db_name}.indi_pub_hybrid indi_hybrid on indi_hybrid.id=pd.id -join ${stats_db_name}.result_accessroute ra on ra.id=pd.id -join ${stats_db_name}.datasource d on d.id=ri.hostedby -where indi_gold.is_gold=0 and indi_hybrid.is_hybrid=0 -and ((d.type like '%Journal%' and ri.accessright!='Closed Access' -and ri.accessright!='Restricted' and ri.license is null) or 
ra.accessroute='bronze')) tmp -on pd.id=tmp.id; /*EOS*/ +select distinct p.id,coalesce(is_bronze_oa,0) is_bronze_oa +from ${stats_db_name}.publication p +left outer join ( + select p.id, 1 as is_bronze_oa + from ${stats_db_name}.publication p + join ${stats_db_name}.result_instance ri on ri.id=p.id + join ${stats_db_name}.datasource d on d.id=ri.hostedby + join ${stats_db_name}.indi_pub_gold_oa indi_gold on indi_gold.id=p.id + join ${stats_db_name}.indi_pub_hybrid indi_hybrid on indi_hybrid.id=p.id + left outer join ${stats_db_name}.result_accessroute ra on ra.id=p.id + where indi_gold.is_gold=0 and indi_hybrid.is_hybrid=0 + and ((d.type like '%Journal%' and ri.accessright not in ('Closed Access', 'Restricted', 'Not Available') and ri.license is null) or ra.accessroute='bronze')) tmp on p.id=tmp.id; /*EOS*/ CREATE TEMPORARY VIEW project_year_result_year as select p.id project_id, acronym, r.id result_id, r.year, p.end_year From 43d05dbebb4d0a8760dee242a1da0146b3698689 Mon Sep 17 00:00:00 2001 From: antleb Date: Thu, 18 Apr 2024 11:53:50 +0300 Subject: [PATCH 05/29] fixed the definition of result_country --- .../scripts/step16-createIndicatorsTables.sql | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index ac14e2904..9ea84023a 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -1004,13 +1004,18 @@ left outer join ( drop table if exists ${stats_db_name}.result_country purge; /*EOS*/ create table ${stats_db_name}.result_country stored as parquet as -select distinct ro.id, coalesce(o.country, f.country) -from ${stats_db_name}.result_organization ro -left outer join ${stats_db_name}.organization o on o.id=ro.organization -left outer join ${stats_db_name}.result_projects rp on rp.id=ro.id -left outer join ${stats_db_name}.project p on p.id=rp.project -left outer join ${stats_db_name}.funder f on f.name=p.funder -where coalesce(o.country, f.country) IS NOT NULL; +select distinct * +from ( + select ro.id, o.country + from ${stats_db_name}.result_organization ro + left outer join ${stats_db_name}.organization o on o.id=ro.organization + union all + select rp.id, f.country + from ${stats_db_name}.result_projects + left outer join ${stats_db_name}.project p on p.id=rp.project + left outer join ${stats_db_name}.funder f on f.name=p.funder + ) rc +where rc.country is not null; /*EOS*/ drop table if exists ${stats_db_name}.indi_result_oa_with_license purge; /*EOS*/ create table ${stats_db_name}.indi_result_oa_with_license stored as parquet as From 0c71c58df69a23968b942fcc62d7d63e4cd3d551 Mon Sep 17 00:00:00 2001 From: antleb Date: Thu, 18 Apr 2024 12:01:27 +0300 Subject: [PATCH 06/29] fixed the definition of gold_oa --- .../stats/oozie_app/scripts/step16-createIndicatorsTables.sql | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql 
b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index 9ea84023a..65193a50c 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -247,7 +247,9 @@ create table if not exists ${stats_db_name}.indi_pub_gold_oa stored as parquet a select pd.id, 1 as is_gold FROM ${stats_db_name}.publication_datasources pd left semi join dd on dd.id=pd.datasource - left outer join ${stats_db_name}.result_accessroute ra on ra.id = pd.id where ra.accessroute = 'gold') tmp on tmp.id=pd.id; /*EOS*/ + union all + select ra.id, 1 as is_gold + from ${stats_db_name}.result_accessroute ra on ra.id = pd.id where ra.accessroute = 'gold') tmp on tmp.id=pd.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc purge; /*EOS*/ create table if not exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc stored as parquet as From c3fe9662b22e1a80ccb56479639338f79f8d1832 Mon Sep 17 00:00:00 2001 From: Antonis Lempesis Date: Fri, 19 Apr 2024 12:45:36 +0300 Subject: [PATCH 07/29] all indicator tables are now stored as parquet --- .../oozie_app/scripts/step16-createIndicatorsTables.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index 65193a50c..1a4002bcf 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -380,7 +380,7 @@ CREATE TEMPORARY VIEW allresults as drop table if exists ${stats_db_name}.indi_org_fairness_pub purge; /*EOS*/ -create table if not exists ${stats_db_name}.indi_org_fairness_pub as +create table if not exists ${stats_db_name}.indi_org_fairness_pub stored as parquet as select ar.organization, rf.no_result_fair/ar.no_allresults org_fairness from allresults ar join result_fair rf on rf.organization=ar.organization; /*EOS*/ @@ -639,7 +639,7 @@ from ${stats_db_name}.publication p drop table if exists ${stats_db_name}.indi_result_with_pid purge; /*EOS*/ -create table if not exists ${stats_db_name}.indi_result_with_pid as +create table if not exists ${stats_db_name}.indi_result_with_pid stored as parquet as select distinct p.id, coalesce(result_with_pid, 0) as result_with_pid from ${stats_db_name}.result p left outer join ( @@ -653,7 +653,7 @@ group by rf.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_interdisciplinarity purge; /*EOS*/ -create table if not exists ${stats_db_name}.indi_pub_interdisciplinarity as +create table if not exists ${stats_db_name}.indi_pub_interdisciplinarity stored as parquet as select distinct p.id as id, coalesce(is_interdisciplinary, 0) as is_interdisciplinary from pub_fos_totals p From 425c9afc36e2edf3a5a7f7f7c3303f3173431e5d Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Tue, 23 Apr 2024 14:30:04 +0200 Subject: [PATCH 08/29] using version 1.2.5-beta for the release --- dhp-build/dhp-build-assembly-resources/pom.xml | 2 +- 
dhp-build/dhp-build-properties-maven-plugin/pom.xml | 2 +- dhp-build/dhp-code-style/pom.xml | 2 +- dhp-build/pom.xml | 2 +- dhp-common/pom.xml | 2 +- dhp-pace-core/pom.xml | 4 ++-- dhp-workflows/dhp-actionmanager/pom.xml | 2 +- dhp-workflows/dhp-aggregation/pom.xml | 2 +- dhp-workflows/dhp-blacklist/pom.xml | 2 +- dhp-workflows/dhp-broker-events/pom.xml | 2 +- dhp-workflows/dhp-dedup-openaire/pom.xml | 2 +- dhp-workflows/dhp-doiboost/pom.xml | 2 +- dhp-workflows/dhp-enrichment/pom.xml | 4 ++-- dhp-workflows/dhp-graph-mapper/pom.xml | 2 +- dhp-workflows/dhp-graph-provision/pom.xml | 2 +- dhp-workflows/dhp-impact-indicators/pom.xml | 2 +- dhp-workflows/dhp-stats-actionsets/pom.xml | 2 +- dhp-workflows/dhp-stats-hist-snaps/pom.xml | 2 +- dhp-workflows/dhp-stats-monitor-irish/pom.xml | 2 +- dhp-workflows/dhp-stats-monitor-update/pom.xml | 2 +- dhp-workflows/dhp-stats-promote/pom.xml | 2 +- dhp-workflows/dhp-stats-update/pom.xml | 2 +- dhp-workflows/dhp-swh/pom.xml | 2 +- dhp-workflows/dhp-usage-raw-data-update/pom.xml | 2 +- dhp-workflows/dhp-usage-stats-build/pom.xml | 2 +- dhp-workflows/dhp-workflow-profiles/pom.xml | 2 +- dhp-workflows/pom.xml | 2 +- pom.xml | 2 +- 28 files changed, 30 insertions(+), 30 deletions(-) diff --git a/dhp-build/dhp-build-assembly-resources/pom.xml b/dhp-build/dhp-build-assembly-resources/pom.xml index 44165995d..7f5b76fdd 100644 --- a/dhp-build/dhp-build-assembly-resources/pom.xml +++ b/dhp-build/dhp-build-assembly-resources/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-build - 1.2.5-SNAPSHOT + 1.2.5-beta dhp-build-assembly-resources diff --git a/dhp-build/dhp-build-properties-maven-plugin/pom.xml b/dhp-build/dhp-build-properties-maven-plugin/pom.xml index 7579bdf45..e76dcd8fc 100644 --- a/dhp-build/dhp-build-properties-maven-plugin/pom.xml +++ b/dhp-build/dhp-build-properties-maven-plugin/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-build - 1.2.5-SNAPSHOT + 1.2.5-beta dhp-build-properties-maven-plugin diff --git a/dhp-build/dhp-code-style/pom.xml b/dhp-build/dhp-code-style/pom.xml index 5a86efe17..8bbe6fac0 100644 --- a/dhp-build/dhp-code-style/pom.xml +++ b/dhp-build/dhp-code-style/pom.xml @@ -5,7 +5,7 @@ eu.dnetlib.dhp dhp-code-style - 1.2.5-SNAPSHOT + 1.2.5-beta jar diff --git a/dhp-build/pom.xml b/dhp-build/pom.xml index 9040ea94e..74a09a23c 100644 --- a/dhp-build/pom.xml +++ b/dhp-build/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp - 1.2.5-SNAPSHOT + 1.2.5-beta dhp-build pom diff --git a/dhp-common/pom.xml b/dhp-common/pom.xml index 6198bd81e..692d2bdc3 100644 --- a/dhp-common/pom.xml +++ b/dhp-common/pom.xml @@ -5,7 +5,7 @@ eu.dnetlib.dhp dhp - 1.2.5-SNAPSHOT + 1.2.5-beta ../pom.xml diff --git a/dhp-pace-core/pom.xml b/dhp-pace-core/pom.xml index fd7f44fc9..7b384f109 100644 --- a/dhp-pace-core/pom.xml +++ b/dhp-pace-core/pom.xml @@ -6,13 +6,13 @@ eu.dnetlib.dhp dhp - 1.2.5-SNAPSHOT + 1.2.5-beta ../pom.xml eu.dnetlib.dhp dhp-pace-core - 1.2.5-SNAPSHOT + 1.2.5-beta jar diff --git a/dhp-workflows/dhp-actionmanager/pom.xml b/dhp-workflows/dhp-actionmanager/pom.xml index ce13502b6..5a5f156fc 100644 --- a/dhp-workflows/dhp-actionmanager/pom.xml +++ b/dhp-workflows/dhp-actionmanager/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-SNAPSHOT + 1.2.5-beta dhp-actionmanager diff --git a/dhp-workflows/dhp-aggregation/pom.xml b/dhp-workflows/dhp-aggregation/pom.xml index 108d25ba6..d67e880b4 100644 --- a/dhp-workflows/dhp-aggregation/pom.xml +++ b/dhp-workflows/dhp-aggregation/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-SNAPSHOT + 1.2.5-beta 
dhp-aggregation diff --git a/dhp-workflows/dhp-blacklist/pom.xml b/dhp-workflows/dhp-blacklist/pom.xml index 7ecc8b35d..64be812ba 100644 --- a/dhp-workflows/dhp-blacklist/pom.xml +++ b/dhp-workflows/dhp-blacklist/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-broker-events/pom.xml b/dhp-workflows/dhp-broker-events/pom.xml index 322fc7e93..b9f572527 100644 --- a/dhp-workflows/dhp-broker-events/pom.xml +++ b/dhp-workflows/dhp-broker-events/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-dedup-openaire/pom.xml b/dhp-workflows/dhp-dedup-openaire/pom.xml index a271efe8e..96a0ae74c 100644 --- a/dhp-workflows/dhp-dedup-openaire/pom.xml +++ b/dhp-workflows/dhp-dedup-openaire/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-dedup-openaire diff --git a/dhp-workflows/dhp-doiboost/pom.xml b/dhp-workflows/dhp-doiboost/pom.xml index 6e8911fba..cfa5a3fce 100644 --- a/dhp-workflows/dhp-doiboost/pom.xml +++ b/dhp-workflows/dhp-doiboost/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-enrichment/pom.xml b/dhp-workflows/dhp-enrichment/pom.xml index 9698dee03..d7f75de8c 100644 --- a/dhp-workflows/dhp-enrichment/pom.xml +++ b/dhp-workflows/dhp-enrichment/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 @@ -51,7 +51,7 @@ eu.dnetlib.dhp dhp-aggregation - 1.2.5-SNAPSHOT + 1.2.5-beta compile diff --git a/dhp-workflows/dhp-graph-mapper/pom.xml b/dhp-workflows/dhp-graph-mapper/pom.xml index ef35951c0..c7ac55ef6 100644 --- a/dhp-workflows/dhp-graph-mapper/pom.xml +++ b/dhp-workflows/dhp-graph-mapper/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-graph-provision/pom.xml b/dhp-workflows/dhp-graph-provision/pom.xml index e62fcdf19..7b879e074 100644 --- a/dhp-workflows/dhp-graph-provision/pom.xml +++ b/dhp-workflows/dhp-graph-provision/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-impact-indicators/pom.xml b/dhp-workflows/dhp-impact-indicators/pom.xml index a9eb0a4a1..d931c2323 100644 --- a/dhp-workflows/dhp-impact-indicators/pom.xml +++ b/dhp-workflows/dhp-impact-indicators/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-SNAPSHOT + 1.2.5-beta dhp-impact-indicators diff --git a/dhp-workflows/dhp-stats-actionsets/pom.xml b/dhp-workflows/dhp-stats-actionsets/pom.xml index 3daa8f995..5d9b60b87 100644 --- a/dhp-workflows/dhp-stats-actionsets/pom.xml +++ b/dhp-workflows/dhp-stats-actionsets/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-SNAPSHOT + 1.2.5-beta dhp-stats-actionsets diff --git a/dhp-workflows/dhp-stats-hist-snaps/pom.xml b/dhp-workflows/dhp-stats-hist-snaps/pom.xml index b31d909f9..94371dc0b 100644 --- a/dhp-workflows/dhp-stats-hist-snaps/pom.xml +++ b/dhp-workflows/dhp-stats-hist-snaps/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-hist-snaps diff --git a/dhp-workflows/dhp-stats-monitor-irish/pom.xml b/dhp-workflows/dhp-stats-monitor-irish/pom.xml index 6ab19dced..4887005bb 100644 --- a/dhp-workflows/dhp-stats-monitor-irish/pom.xml +++ b/dhp-workflows/dhp-stats-monitor-irish/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-monitor-irish diff --git 
a/dhp-workflows/dhp-stats-monitor-update/pom.xml b/dhp-workflows/dhp-stats-monitor-update/pom.xml index f2bc35f8d..c8a69c078 100644 --- a/dhp-workflows/dhp-stats-monitor-update/pom.xml +++ b/dhp-workflows/dhp-stats-monitor-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-monitor-update diff --git a/dhp-workflows/dhp-stats-promote/pom.xml b/dhp-workflows/dhp-stats-promote/pom.xml index 9e17a78dc..1c711c878 100644 --- a/dhp-workflows/dhp-stats-promote/pom.xml +++ b/dhp-workflows/dhp-stats-promote/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-promote diff --git a/dhp-workflows/dhp-stats-update/pom.xml b/dhp-workflows/dhp-stats-update/pom.xml index cc15b8a15..246aa63cf 100644 --- a/dhp-workflows/dhp-stats-update/pom.xml +++ b/dhp-workflows/dhp-stats-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-update diff --git a/dhp-workflows/dhp-swh/pom.xml b/dhp-workflows/dhp-swh/pom.xml index 80fff4587..4ba5cf868 100644 --- a/dhp-workflows/dhp-swh/pom.xml +++ b/dhp-workflows/dhp-swh/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-SNAPSHOT + 1.2.5-beta dhp-swh diff --git a/dhp-workflows/dhp-usage-raw-data-update/pom.xml b/dhp-workflows/dhp-usage-raw-data-update/pom.xml index a9dbb09ae..ed3616fde 100644 --- a/dhp-workflows/dhp-usage-raw-data-update/pom.xml +++ b/dhp-workflows/dhp-usage-raw-data-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-usage-raw-data-update diff --git a/dhp-workflows/dhp-usage-stats-build/pom.xml b/dhp-workflows/dhp-usage-stats-build/pom.xml index 56aec73b7..52cc3bf44 100644 --- a/dhp-workflows/dhp-usage-stats-build/pom.xml +++ b/dhp-workflows/dhp-usage-stats-build/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-usage-stats-build diff --git a/dhp-workflows/dhp-workflow-profiles/pom.xml b/dhp-workflows/dhp-workflow-profiles/pom.xml index 8c71a5ca1..ef4e0ada6 100644 --- a/dhp-workflows/dhp-workflow-profiles/pom.xml +++ b/dhp-workflows/dhp-workflow-profiles/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/pom.xml b/dhp-workflows/pom.xml index 1c331d126..9b87c7b44 100644 --- a/dhp-workflows/pom.xml +++ b/dhp-workflows/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp - 1.2.5-SNAPSHOT + 1.2.5-beta ../pom.xml diff --git a/pom.xml b/pom.xml index 892382b9d..d015acd9e 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 eu.dnetlib.dhp dhp - 1.2.5-SNAPSHOT + 1.2.5-beta pom From b5bcab13ec088aab05d5b3a3512d2c4ab50e645a Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Tue, 23 Apr 2024 14:36:39 +0200 Subject: [PATCH 09/29] using version 1.2.5-beta for the release --- dhp-build/dhp-build-assembly-resources/pom.xml | 2 +- dhp-build/dhp-build-properties-maven-plugin/pom.xml | 2 +- dhp-build/dhp-code-style/pom.xml | 2 +- dhp-build/pom.xml | 2 +- dhp-common/pom.xml | 2 +- dhp-pace-core/pom.xml | 4 ++-- dhp-workflows/dhp-actionmanager/pom.xml | 2 +- dhp-workflows/dhp-aggregation/pom.xml | 2 +- dhp-workflows/dhp-blacklist/pom.xml | 2 +- dhp-workflows/dhp-broker-events/pom.xml | 2 +- dhp-workflows/dhp-dedup-openaire/pom.xml | 2 +- dhp-workflows/dhp-doiboost/pom.xml | 2 +- dhp-workflows/dhp-enrichment/pom.xml | 4 ++-- dhp-workflows/dhp-graph-mapper/pom.xml | 2 +- dhp-workflows/dhp-graph-provision/pom.xml | 2 +- dhp-workflows/dhp-impact-indicators/pom.xml | 2 +- 
dhp-workflows/dhp-stats-actionsets/pom.xml | 2 +- dhp-workflows/dhp-stats-hist-snaps/pom.xml | 2 +- dhp-workflows/dhp-stats-monitor-irish/pom.xml | 2 +- dhp-workflows/dhp-stats-monitor-update/pom.xml | 2 +- dhp-workflows/dhp-stats-promote/pom.xml | 2 +- dhp-workflows/dhp-stats-update/pom.xml | 2 +- dhp-workflows/dhp-swh/pom.xml | 2 +- dhp-workflows/dhp-usage-raw-data-update/pom.xml | 2 +- dhp-workflows/dhp-usage-stats-build/pom.xml | 2 +- dhp-workflows/dhp-workflow-profiles/pom.xml | 2 +- dhp-workflows/pom.xml | 2 +- pom.xml | 2 +- 28 files changed, 30 insertions(+), 30 deletions(-) diff --git a/dhp-build/dhp-build-assembly-resources/pom.xml b/dhp-build/dhp-build-assembly-resources/pom.xml index 7f5b76fdd..9e0674a43 100644 --- a/dhp-build/dhp-build-assembly-resources/pom.xml +++ b/dhp-build/dhp-build-assembly-resources/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-build - 1.2.5-beta + 1.2.5-beta-SNAPSHOT dhp-build-assembly-resources diff --git a/dhp-build/dhp-build-properties-maven-plugin/pom.xml b/dhp-build/dhp-build-properties-maven-plugin/pom.xml index e76dcd8fc..178cb271a 100644 --- a/dhp-build/dhp-build-properties-maven-plugin/pom.xml +++ b/dhp-build/dhp-build-properties-maven-plugin/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-build - 1.2.5-beta + 1.2.5-beta-SNAPSHOT dhp-build-properties-maven-plugin diff --git a/dhp-build/dhp-code-style/pom.xml b/dhp-build/dhp-code-style/pom.xml index 8bbe6fac0..093f5a9ad 100644 --- a/dhp-build/dhp-code-style/pom.xml +++ b/dhp-build/dhp-code-style/pom.xml @@ -5,7 +5,7 @@ eu.dnetlib.dhp dhp-code-style - 1.2.5-beta + 1.2.5-beta-SNAPSHOT jar diff --git a/dhp-build/pom.xml b/dhp-build/pom.xml index 74a09a23c..f944d787e 100644 --- a/dhp-build/pom.xml +++ b/dhp-build/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT dhp-build pom diff --git a/dhp-common/pom.xml b/dhp-common/pom.xml index 692d2bdc3..b280721b6 100644 --- a/dhp-common/pom.xml +++ b/dhp-common/pom.xml @@ -5,7 +5,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT ../pom.xml diff --git a/dhp-pace-core/pom.xml b/dhp-pace-core/pom.xml index 7b384f109..432da4bfd 100644 --- a/dhp-pace-core/pom.xml +++ b/dhp-pace-core/pom.xml @@ -6,13 +6,13 @@ eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT ../pom.xml eu.dnetlib.dhp dhp-pace-core - 1.2.5-beta + 1.2.5-beta-SNAPSHOT jar diff --git a/dhp-workflows/dhp-actionmanager/pom.xml b/dhp-workflows/dhp-actionmanager/pom.xml index 5a5f156fc..e7e78e774 100644 --- a/dhp-workflows/dhp-actionmanager/pom.xml +++ b/dhp-workflows/dhp-actionmanager/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-beta-SNAPSHOT dhp-actionmanager diff --git a/dhp-workflows/dhp-aggregation/pom.xml b/dhp-workflows/dhp-aggregation/pom.xml index d67e880b4..db2ec2052 100644 --- a/dhp-workflows/dhp-aggregation/pom.xml +++ b/dhp-workflows/dhp-aggregation/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-beta-SNAPSHOT dhp-aggregation diff --git a/dhp-workflows/dhp-blacklist/pom.xml b/dhp-workflows/dhp-blacklist/pom.xml index 64be812ba..2636ac6ec 100644 --- a/dhp-workflows/dhp-blacklist/pom.xml +++ b/dhp-workflows/dhp-blacklist/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-broker-events/pom.xml b/dhp-workflows/dhp-broker-events/pom.xml index b9f572527..84d353908 100644 --- a/dhp-workflows/dhp-broker-events/pom.xml +++ b/dhp-workflows/dhp-broker-events/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 
1.2.5-beta-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-dedup-openaire/pom.xml b/dhp-workflows/dhp-dedup-openaire/pom.xml index 96a0ae74c..4e7e4d741 100644 --- a/dhp-workflows/dhp-dedup-openaire/pom.xml +++ b/dhp-workflows/dhp-dedup-openaire/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 dhp-dedup-openaire diff --git a/dhp-workflows/dhp-doiboost/pom.xml b/dhp-workflows/dhp-doiboost/pom.xml index cfa5a3fce..a2b238e55 100644 --- a/dhp-workflows/dhp-doiboost/pom.xml +++ b/dhp-workflows/dhp-doiboost/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-enrichment/pom.xml b/dhp-workflows/dhp-enrichment/pom.xml index d7f75de8c..7297651d4 100644 --- a/dhp-workflows/dhp-enrichment/pom.xml +++ b/dhp-workflows/dhp-enrichment/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 @@ -51,7 +51,7 @@ eu.dnetlib.dhp dhp-aggregation - 1.2.5-beta + 1.2.5-beta-SNAPSHOT compile diff --git a/dhp-workflows/dhp-graph-mapper/pom.xml b/dhp-workflows/dhp-graph-mapper/pom.xml index c7ac55ef6..9f25f33a6 100644 --- a/dhp-workflows/dhp-graph-mapper/pom.xml +++ b/dhp-workflows/dhp-graph-mapper/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-graph-provision/pom.xml b/dhp-workflows/dhp-graph-provision/pom.xml index 7b879e074..8fb84255f 100644 --- a/dhp-workflows/dhp-graph-provision/pom.xml +++ b/dhp-workflows/dhp-graph-provision/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-impact-indicators/pom.xml b/dhp-workflows/dhp-impact-indicators/pom.xml index d931c2323..327c067c8 100644 --- a/dhp-workflows/dhp-impact-indicators/pom.xml +++ b/dhp-workflows/dhp-impact-indicators/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-beta-SNAPSHOT dhp-impact-indicators diff --git a/dhp-workflows/dhp-stats-actionsets/pom.xml b/dhp-workflows/dhp-stats-actionsets/pom.xml index 5d9b60b87..aed43cd2b 100644 --- a/dhp-workflows/dhp-stats-actionsets/pom.xml +++ b/dhp-workflows/dhp-stats-actionsets/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-beta-SNAPSHOT dhp-stats-actionsets diff --git a/dhp-workflows/dhp-stats-hist-snaps/pom.xml b/dhp-workflows/dhp-stats-hist-snaps/pom.xml index 94371dc0b..132875425 100644 --- a/dhp-workflows/dhp-stats-hist-snaps/pom.xml +++ b/dhp-workflows/dhp-stats-hist-snaps/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 dhp-stats-hist-snaps diff --git a/dhp-workflows/dhp-stats-monitor-irish/pom.xml b/dhp-workflows/dhp-stats-monitor-irish/pom.xml index 4887005bb..0e687b2cf 100644 --- a/dhp-workflows/dhp-stats-monitor-irish/pom.xml +++ b/dhp-workflows/dhp-stats-monitor-irish/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 dhp-stats-monitor-irish diff --git a/dhp-workflows/dhp-stats-monitor-update/pom.xml b/dhp-workflows/dhp-stats-monitor-update/pom.xml index c8a69c078..2010c0a81 100644 --- a/dhp-workflows/dhp-stats-monitor-update/pom.xml +++ b/dhp-workflows/dhp-stats-monitor-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 dhp-stats-monitor-update diff --git a/dhp-workflows/dhp-stats-promote/pom.xml b/dhp-workflows/dhp-stats-promote/pom.xml index 1c711c878..e34eb0881 100644 --- a/dhp-workflows/dhp-stats-promote/pom.xml +++ 
b/dhp-workflows/dhp-stats-promote/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 dhp-stats-promote diff --git a/dhp-workflows/dhp-stats-update/pom.xml b/dhp-workflows/dhp-stats-update/pom.xml index 246aa63cf..c1f1ac7ca 100644 --- a/dhp-workflows/dhp-stats-update/pom.xml +++ b/dhp-workflows/dhp-stats-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 dhp-stats-update diff --git a/dhp-workflows/dhp-swh/pom.xml b/dhp-workflows/dhp-swh/pom.xml index 4ba5cf868..54dda262e 100644 --- a/dhp-workflows/dhp-swh/pom.xml +++ b/dhp-workflows/dhp-swh/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-beta-SNAPSHOT dhp-swh diff --git a/dhp-workflows/dhp-usage-raw-data-update/pom.xml b/dhp-workflows/dhp-usage-raw-data-update/pom.xml index ed3616fde..ee238b78b 100644 --- a/dhp-workflows/dhp-usage-raw-data-update/pom.xml +++ b/dhp-workflows/dhp-usage-raw-data-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 dhp-usage-raw-data-update diff --git a/dhp-workflows/dhp-usage-stats-build/pom.xml b/dhp-workflows/dhp-usage-stats-build/pom.xml index 52cc3bf44..f7ef774b8 100644 --- a/dhp-workflows/dhp-usage-stats-build/pom.xml +++ b/dhp-workflows/dhp-usage-stats-build/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 dhp-usage-stats-build diff --git a/dhp-workflows/dhp-workflow-profiles/pom.xml b/dhp-workflows/dhp-workflow-profiles/pom.xml index ef4e0ada6..c0f246172 100644 --- a/dhp-workflows/dhp-workflow-profiles/pom.xml +++ b/dhp-workflows/dhp-workflow-profiles/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/pom.xml b/dhp-workflows/pom.xml index 9b87c7b44..4e6076377 100644 --- a/dhp-workflows/pom.xml +++ b/dhp-workflows/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT ../pom.xml diff --git a/pom.xml b/pom.xml index d015acd9e..09e02a8c2 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-beta-SNAPSHOT pom From c3053ef34df15a198b90b3a1aa9e4305dfb14a5d Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Tue, 23 Apr 2024 14:52:32 +0200 Subject: [PATCH 10/29] using version 1.2.5-beta for the release --- dhp-build/dhp-build-assembly-resources/pom.xml | 2 +- dhp-build/dhp-build-properties-maven-plugin/pom.xml | 2 +- dhp-build/dhp-code-style/pom.xml | 2 +- dhp-build/pom.xml | 2 +- dhp-common/pom.xml | 2 +- dhp-pace-core/pom.xml | 4 ++-- dhp-workflows/dhp-actionmanager/pom.xml | 2 +- dhp-workflows/dhp-aggregation/pom.xml | 2 +- dhp-workflows/dhp-blacklist/pom.xml | 2 +- dhp-workflows/dhp-broker-events/pom.xml | 2 +- dhp-workflows/dhp-dedup-openaire/pom.xml | 2 +- dhp-workflows/dhp-doiboost/pom.xml | 2 +- dhp-workflows/dhp-enrichment/pom.xml | 4 ++-- dhp-workflows/dhp-graph-mapper/pom.xml | 2 +- dhp-workflows/dhp-graph-provision/pom.xml | 2 +- dhp-workflows/dhp-impact-indicators/pom.xml | 2 +- dhp-workflows/dhp-stats-actionsets/pom.xml | 2 +- dhp-workflows/dhp-stats-hist-snaps/pom.xml | 2 +- dhp-workflows/dhp-stats-monitor-irish/pom.xml | 2 +- dhp-workflows/dhp-stats-monitor-update/pom.xml | 2 +- dhp-workflows/dhp-stats-promote/pom.xml | 2 +- dhp-workflows/dhp-stats-update/pom.xml | 2 +- dhp-workflows/dhp-swh/pom.xml | 2 +- dhp-workflows/dhp-usage-raw-data-update/pom.xml | 2 +- dhp-workflows/dhp-usage-stats-build/pom.xml | 2 +- dhp-workflows/dhp-workflow-profiles/pom.xml 
| 2 +- dhp-workflows/pom.xml | 2 +- pom.xml | 2 +- 28 files changed, 30 insertions(+), 30 deletions(-) diff --git a/dhp-build/dhp-build-assembly-resources/pom.xml b/dhp-build/dhp-build-assembly-resources/pom.xml index 9e0674a43..7f5b76fdd 100644 --- a/dhp-build/dhp-build-assembly-resources/pom.xml +++ b/dhp-build/dhp-build-assembly-resources/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-build - 1.2.5-beta-SNAPSHOT + 1.2.5-beta dhp-build-assembly-resources diff --git a/dhp-build/dhp-build-properties-maven-plugin/pom.xml b/dhp-build/dhp-build-properties-maven-plugin/pom.xml index 178cb271a..e76dcd8fc 100644 --- a/dhp-build/dhp-build-properties-maven-plugin/pom.xml +++ b/dhp-build/dhp-build-properties-maven-plugin/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-build - 1.2.5-beta-SNAPSHOT + 1.2.5-beta dhp-build-properties-maven-plugin diff --git a/dhp-build/dhp-code-style/pom.xml b/dhp-build/dhp-code-style/pom.xml index 093f5a9ad..8bbe6fac0 100644 --- a/dhp-build/dhp-code-style/pom.xml +++ b/dhp-build/dhp-code-style/pom.xml @@ -5,7 +5,7 @@ eu.dnetlib.dhp dhp-code-style - 1.2.5-beta-SNAPSHOT + 1.2.5-beta jar diff --git a/dhp-build/pom.xml b/dhp-build/pom.xml index f944d787e..74a09a23c 100644 --- a/dhp-build/pom.xml +++ b/dhp-build/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta dhp-build pom diff --git a/dhp-common/pom.xml b/dhp-common/pom.xml index b280721b6..692d2bdc3 100644 --- a/dhp-common/pom.xml +++ b/dhp-common/pom.xml @@ -5,7 +5,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta ../pom.xml diff --git a/dhp-pace-core/pom.xml b/dhp-pace-core/pom.xml index 432da4bfd..7b384f109 100644 --- a/dhp-pace-core/pom.xml +++ b/dhp-pace-core/pom.xml @@ -6,13 +6,13 @@ eu.dnetlib.dhp dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta ../pom.xml eu.dnetlib.dhp dhp-pace-core - 1.2.5-beta-SNAPSHOT + 1.2.5-beta jar diff --git a/dhp-workflows/dhp-actionmanager/pom.xml b/dhp-workflows/dhp-actionmanager/pom.xml index e7e78e774..5a5f156fc 100644 --- a/dhp-workflows/dhp-actionmanager/pom.xml +++ b/dhp-workflows/dhp-actionmanager/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta-SNAPSHOT + 1.2.5-beta dhp-actionmanager diff --git a/dhp-workflows/dhp-aggregation/pom.xml b/dhp-workflows/dhp-aggregation/pom.xml index db2ec2052..d67e880b4 100644 --- a/dhp-workflows/dhp-aggregation/pom.xml +++ b/dhp-workflows/dhp-aggregation/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta-SNAPSHOT + 1.2.5-beta dhp-aggregation diff --git a/dhp-workflows/dhp-blacklist/pom.xml b/dhp-workflows/dhp-blacklist/pom.xml index 2636ac6ec..64be812ba 100644 --- a/dhp-workflows/dhp-blacklist/pom.xml +++ b/dhp-workflows/dhp-blacklist/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-broker-events/pom.xml b/dhp-workflows/dhp-broker-events/pom.xml index 84d353908..b9f572527 100644 --- a/dhp-workflows/dhp-broker-events/pom.xml +++ b/dhp-workflows/dhp-broker-events/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-dedup-openaire/pom.xml b/dhp-workflows/dhp-dedup-openaire/pom.xml index 4e7e4d741..96a0ae74c 100644 --- a/dhp-workflows/dhp-dedup-openaire/pom.xml +++ b/dhp-workflows/dhp-dedup-openaire/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-dedup-openaire diff --git a/dhp-workflows/dhp-doiboost/pom.xml b/dhp-workflows/dhp-doiboost/pom.xml index a2b238e55..cfa5a3fce 100644 --- 
a/dhp-workflows/dhp-doiboost/pom.xml +++ b/dhp-workflows/dhp-doiboost/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-enrichment/pom.xml b/dhp-workflows/dhp-enrichment/pom.xml index 7297651d4..d7f75de8c 100644 --- a/dhp-workflows/dhp-enrichment/pom.xml +++ b/dhp-workflows/dhp-enrichment/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 @@ -51,7 +51,7 @@ eu.dnetlib.dhp dhp-aggregation - 1.2.5-beta-SNAPSHOT + 1.2.5-beta compile diff --git a/dhp-workflows/dhp-graph-mapper/pom.xml b/dhp-workflows/dhp-graph-mapper/pom.xml index 9f25f33a6..c7ac55ef6 100644 --- a/dhp-workflows/dhp-graph-mapper/pom.xml +++ b/dhp-workflows/dhp-graph-mapper/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-graph-provision/pom.xml b/dhp-workflows/dhp-graph-provision/pom.xml index 8fb84255f..7b879e074 100644 --- a/dhp-workflows/dhp-graph-provision/pom.xml +++ b/dhp-workflows/dhp-graph-provision/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/dhp-impact-indicators/pom.xml b/dhp-workflows/dhp-impact-indicators/pom.xml index 327c067c8..d931c2323 100644 --- a/dhp-workflows/dhp-impact-indicators/pom.xml +++ b/dhp-workflows/dhp-impact-indicators/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta-SNAPSHOT + 1.2.5-beta dhp-impact-indicators diff --git a/dhp-workflows/dhp-stats-actionsets/pom.xml b/dhp-workflows/dhp-stats-actionsets/pom.xml index aed43cd2b..5d9b60b87 100644 --- a/dhp-workflows/dhp-stats-actionsets/pom.xml +++ b/dhp-workflows/dhp-stats-actionsets/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta-SNAPSHOT + 1.2.5-beta dhp-stats-actionsets diff --git a/dhp-workflows/dhp-stats-hist-snaps/pom.xml b/dhp-workflows/dhp-stats-hist-snaps/pom.xml index 132875425..94371dc0b 100644 --- a/dhp-workflows/dhp-stats-hist-snaps/pom.xml +++ b/dhp-workflows/dhp-stats-hist-snaps/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-hist-snaps diff --git a/dhp-workflows/dhp-stats-monitor-irish/pom.xml b/dhp-workflows/dhp-stats-monitor-irish/pom.xml index 0e687b2cf..4887005bb 100644 --- a/dhp-workflows/dhp-stats-monitor-irish/pom.xml +++ b/dhp-workflows/dhp-stats-monitor-irish/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-monitor-irish diff --git a/dhp-workflows/dhp-stats-monitor-update/pom.xml b/dhp-workflows/dhp-stats-monitor-update/pom.xml index 2010c0a81..c8a69c078 100644 --- a/dhp-workflows/dhp-stats-monitor-update/pom.xml +++ b/dhp-workflows/dhp-stats-monitor-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-monitor-update diff --git a/dhp-workflows/dhp-stats-promote/pom.xml b/dhp-workflows/dhp-stats-promote/pom.xml index e34eb0881..1c711c878 100644 --- a/dhp-workflows/dhp-stats-promote/pom.xml +++ b/dhp-workflows/dhp-stats-promote/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-promote diff --git a/dhp-workflows/dhp-stats-update/pom.xml b/dhp-workflows/dhp-stats-update/pom.xml index c1f1ac7ca..246aa63cf 100644 --- a/dhp-workflows/dhp-stats-update/pom.xml +++ b/dhp-workflows/dhp-stats-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-stats-update diff --git 
a/dhp-workflows/dhp-swh/pom.xml b/dhp-workflows/dhp-swh/pom.xml index 54dda262e..4ba5cf868 100644 --- a/dhp-workflows/dhp-swh/pom.xml +++ b/dhp-workflows/dhp-swh/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta-SNAPSHOT + 1.2.5-beta dhp-swh diff --git a/dhp-workflows/dhp-usage-raw-data-update/pom.xml b/dhp-workflows/dhp-usage-raw-data-update/pom.xml index ee238b78b..ed3616fde 100644 --- a/dhp-workflows/dhp-usage-raw-data-update/pom.xml +++ b/dhp-workflows/dhp-usage-raw-data-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-usage-raw-data-update diff --git a/dhp-workflows/dhp-usage-stats-build/pom.xml b/dhp-workflows/dhp-usage-stats-build/pom.xml index f7ef774b8..52cc3bf44 100644 --- a/dhp-workflows/dhp-usage-stats-build/pom.xml +++ b/dhp-workflows/dhp-usage-stats-build/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 dhp-usage-stats-build diff --git a/dhp-workflows/dhp-workflow-profiles/pom.xml b/dhp-workflows/dhp-workflow-profiles/pom.xml index c0f246172..ef4e0ada6 100644 --- a/dhp-workflows/dhp-workflow-profiles/pom.xml +++ b/dhp-workflows/dhp-workflow-profiles/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta 4.0.0 diff --git a/dhp-workflows/pom.xml b/dhp-workflows/pom.xml index 4e6076377..9b87c7b44 100644 --- a/dhp-workflows/pom.xml +++ b/dhp-workflows/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta ../pom.xml diff --git a/pom.xml b/pom.xml index 09e02a8c2..d015acd9e 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 eu.dnetlib.dhp dhp - 1.2.5-beta-SNAPSHOT + 1.2.5-beta pom From d2649a1429ffcb7355d41696bd7abd2744d0a81b Mon Sep 17 00:00:00 2001 From: antleb Date: Tue, 23 Apr 2024 16:03:16 +0300 Subject: [PATCH 11/29] increased the jvm ram --- .../dnetlib/dhp/oa/graph/usagerawdata/oozie_app/workflow.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dhp-workflows/dhp-usage-raw-data-update/src/main/resources/eu/dnetlib/dhp/oa/graph/usagerawdata/oozie_app/workflow.xml b/dhp-workflows/dhp-usage-raw-data-update/src/main/resources/eu/dnetlib/dhp/oa/graph/usagerawdata/oozie_app/workflow.xml index 022a107ab..b684b5e24 100644 --- a/dhp-workflows/dhp-usage-raw-data-update/src/main/resources/eu/dnetlib/dhp/oa/graph/usagerawdata/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-usage-raw-data-update/src/main/resources/eu/dnetlib/dhp/oa/graph/usagerawdata/oozie_app/workflow.xml @@ -30,6 +30,10 @@ oozie.launcher.mapred.job.queue.name ${oozieLauncherQueueName} + + mapred.child.java.opts + -Xmx16g + From 49af2e574088d24a799d4c22741a8f0e03455826 Mon Sep 17 00:00:00 2001 From: LSmyrnaios Date: Tue, 23 Apr 2024 17:15:04 +0300 Subject: [PATCH 12/29] Miscellaneous updates to the copying operation to Impala Cluster: - Update the algorithm for creating views that depend on other views; overcome some bash-instabilities. - Upon any error, fail the whole process, not just the current DB-creation, as those errors usually indicate a bug in the initial DB-creation, that should be fixed immediately. - Enhance parallel-copy of large files by "hadoop distcp" command. - Reduce the "invalidate metadata" commands to just the current DB's tables, in order to eliminate the general overhead on Impala. - Show the number of tables and views in the logs. - Fix some log-messages. 
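
The view-creation change described in the list above boils down to a retry loop: each pass re-runs only the "create view" statements that failed (typically because they reference a view that has not been created yet), and the whole run aborts if a pass makes no progress. The following is a minimal illustrative sketch of that strategy, not the committed script; the DB name, the statements and the run_statement stub are placeholders standing in for the impala-shell calls.

#!/usr/bin/env bash
set -u

db="stats_db"                                         # placeholder DB name
statements=(                                          # placeholder "create view" statements
  "CREATE VIEW ${db}.v_b AS SELECT x FROM ${db}.v_a"
  "CREATE VIEW ${db}.v_a AS SELECT 1 AS x"
)

run_statement() {                                     # stand-in for: impala-shell -i <host> -q "$1"
  echo "running: $1"
}

while (( ${#statements[@]} > 0 )); do
  failed=()
  for stmt in "${statements[@]}"; do
    run_statement "$stmt" || failed+=("$stmt")        # collect only the statements that must be retried
  done
  if (( ${#failed[@]} == ${#statements[@]} )); then
    echo "no progress while creating views, aborting" >&2
    exit 5                                            # mirrors the script's exit 5 on a likely infinite loop
  fi
  (( ${#failed[@]} == 0 )) && break                   # everything has been created
  statements=("${failed[@]}")                         # next pass retries only the failures
done
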
--- .../oozie_app/copyDataToImpalaCluster.sh | 71 +++++++++---------- .../oozie_app/copyDataToImpalaCluster.sh | 71 +++++++++---------- .../oozie_app/copyDataToImpalaCluster.sh | 71 +++++++++---------- .../oozie_app/copyDataToImpalaCluster.sh | 71 +++++++++---------- 4 files changed, 132 insertions(+), 152 deletions(-) diff --git a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/copyDataToImpalaCluster.sh b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/copyDataToImpalaCluster.sh index 3d9986b64..059fb9089 100644 --- a/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/copyDataToImpalaCluster.sh +++ b/dhp-workflows/dhp-stats-hist-snaps/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-hist-snaps/oozie_app/copyDataToImpalaCluster.sh @@ -67,24 +67,21 @@ function copydb() { if [ -n "$log_errors" ]; then echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN DROPPING THE OLD DATABASE! EXITING...\n\n" rm -f error.log - return 1 + exit 2 fi - # Make Impala aware of the deletion of the old DB immediately. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - echo -e "\n\nCopying files of '${db}', from Ocean to Impala cluster..\n" - # Using max-bandwidth of: 50 * 100 Mb/s = 5 Gb/s - # Using max memory of: 50 * 6144 = 300 Gb + # Using max-bandwidth of: 70 * 150 Mb/s = 10.5 Gb/s + # Using max memory of: 70 * 6144 = 430 Gb # Using 1MB as a buffer-size. - # The " -Ddistcp.dynamic.recordsPerChunk=50" arg is not available in our version of hadoop + # The " -Ddistcp.dynamic.recordsPerChunk=N" arg is not available in our version of hadoop # The "ug" args cannot be used as we get a "User does not belong to hive" error. # The "p" argument cannot be used, as it blocks the files from being used, giving a "sticky bit"-error, even after applying chmod and chown onm the files. hadoop distcp -Dmapreduce.map.memory.mb=6144 -m 70 -bandwidth 150 \ -numListstatusThreads 40 \ -copybuffersize 1048576 \ -strategy dynamic \ + -blocksperchunk 8 \ -pb \ ${OCEAN_HDFS_NODE}/user/hive/warehouse/${db}.db ${IMPALA_HDFS_DB_BASE_PATH} @@ -92,9 +89,9 @@ function copydb() { if [ $? -eq 0 ]; then echo -e "\nSuccessfully copied the files of '${db}'.\n" else - echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT WITH EXIT STATUS: $?\n\n" + echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT EXIT STATUS: $?\n\n" rm -f error.log - return 2 + exit 3 fi # In case we ever use this script for a writable DB (using inserts/updates), we should perform the following costly operation as well.. @@ -105,14 +102,11 @@ function copydb() { # create the new database (with the same name) impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create database ${db}" - # Make Impala aware of the creation of the new DB immediately. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 # Because "Hive" and "Impala" do not have compatible schemas, we cannot use the "show create table " output from hive to create the exact same table in impala. # So, we have to find at least one parquet file (check if it's there) from the table in the ocean cluster for impala to use it to extract the table-schema itself from that file. 
all_create_view_statements=() + num_tables=0 entities_on_ocean=`hive -e "show tables in ${db};" | sed 's/WARN:.*//g'` # Get the tables and views without any potential the "WARN" logs. for i in ${entities_on_ocean[@]}; do # Use un-quoted values, as the elemetns are single-words. @@ -129,9 +123,11 @@ function copydb() { all_create_view_statements+=("$create_view_statement") else echo -e "\n'${i}' is a table, so we will check for its parquet files and create the table on Impala cluster.\n" + ((num_tables++)) CURRENT_PRQ_FILE=`hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/${i}/" | grep -v 'Found' | grep -v '_impala_insert_staging' | head -1` if [ -z "$CURRENT_PRQ_FILE" ]; then # If there is not parquet-file inside. echo -e "\nERROR: THE TABLE \"${i}\" HAD NO FILES TO GET THE SCHEMA FROM! IT'S EMPTY!\n\n" + exit 4 # Comment out when testing a DB which has such a table, just for performing this exact test-check. else impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.${i} like parquet '${CURRENT_PRQ_FILE}' stored as parquet;" |& tee error.log log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"` @@ -142,74 +138,73 @@ function copydb() { fi done - echo -e "\nAll tables have been created, going to create the views..\n" + previous_num_of_views_to_retry=${#all_create_view_statements[@]} + if [[ $num_tables -gt 0 ]]; then + echo -e "\nAll ${num_tables} tables have been created, for db '${db}', going to create the ${previous_num_of_views_to_retry} views..\n" + else + echo -e "\nDB '${db}' does not have any tables, moving on to create the ${previous_num_of_views_to_retry} views..\n" + fi - # Time to loop through the views and create them. - # At this point all table-schemas should have been created. - - previous_num_of_views_to_retry=${#all_create_view_statements} if [[ $previous_num_of_views_to_retry -gt 0 ]]; then - echo -e "\nAll_create_view_statements:\n\n${all_create_view_statements[@]}\n" # DEBUG - # Make Impala aware of the new tables, so it knows them when creating the views. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 + echo -e "\nAll_create_view_statements (${previous_num_of_views_to_retry}):\n\n${all_create_view_statements[@]}\n" # DEBUG else echo -e "\nDB '${db}' does not contain any views.\n" fi level_counter=0 - while [[ ${#all_create_view_statements[@]} -gt 0 ]]; do + while [[ $previous_num_of_views_to_retry -gt 0 ]]; do ((level_counter++)) # The only accepted reason for a view to not be created, is if it depends on another view, which has not been created yet. # In this case, we should retry creating this particular view again. - should_retry_create_view_statements=() + new_num_of_views_to_retry=0 for create_view_statement in "${all_create_view_statements[@]}"; do # Here we use double quotes, as the elements are phrases, instead of single-words. 
impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "${create_view_statement}" |& tee error.log # impala-shell prints all logs in stderr, so wee need to capture them and put them in a file, in order to perform "grep" on them later specific_errors=`cat error.log | grep -E "FAILED: ParseException line 1:13 missing TABLE at 'view'|ERROR: AnalysisException: Could not resolve table reference:"` if [ -n "$specific_errors" ]; then echo -e "\nspecific_errors: ${specific_errors}\n" - echo -e "\nView '$(cat error.log | grep "CREATE VIEW " | sed 's/CREATE VIEW //g' | sed 's/ as select .*//g')' failed to be created, possibly because it depends on another view.\n" - should_retry_create_view_statements+=("$create_view_statement") + echo -e "\nView '$(cat error.log | grep -Eo "Query: CREATE VIEW ([^\s]+)" | sed 's/Query: CREATE VIEW //g')' failed to be created, possibly because it depends on another view.\n" + ((new_num_of_views_to_retry++)) # Increment it here, instead of acquiring the array's size in the end, as that doesn't work for some reason. else + all_create_view_statements=("${all_create_view_statements[@]/$create_view_statement}") # Remove the current successful statement from the list. sleep 1 # Wait a bit for Impala to register that the view was created, before possibly referencing it by another view. fi done - new_num_of_views_to_retry=${#should_retry_create_view_statements} + all_create_view_statements=("$(echo "${all_create_view_statements[@]}" | grep -v '^[\s]*$')") # Re-index the array, filtering-out any empty elements. + # Although the above command reduces the "active" elements to just the few to-be-retried, it does not manage to make the array return the its true size through the "${#all_create_view_statements[@]}" statement. So we use counters. + if [[ $new_num_of_views_to_retry -eq $previous_num_of_views_to_retry ]]; then echo -e "\n\nERROR: THE NUMBER OF VIEWS TO RETRY HAS NOT BEEN REDUCED! THE SCRIPT IS LIKELY GOING TO AN INFINITE-LOOP! EXITING..\n\n" - return 3 + exit 5 elif [[ $new_num_of_views_to_retry -gt 0 ]]; then - echo -e "\nTo be retried \"create_view_statements\":\n\n${should_retry_create_view_statements[@]}\n" - previous_num_of_views_to_retry=$new_num_of_views_to_retry + echo -e "\nTo be retried \"create_view_statements\" (${new_num_of_views_to_retry}):\n\n${all_create_view_statements[@]}\n" else echo -e "\nFinished creating views, for db: '${db}', in level-${level_counter}.\n" fi - all_create_view_statements=("${should_retry_create_view_statement[@]}") # This is needed in any case to either move forward with the rest of the views or stop at 0 remaining views. + previous_num_of_views_to_retry=$new_num_of_views_to_retry done - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 - echo -e "\nComputing stats for tables..\n" entities_on_impala=`impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"` for i in ${entities_on_impala[@]}; do # Use un-quoted values, as the elemetns are single-words. # Taking the create table statement from the Ocean cluster, just to check if its a view, as the output is easier than using impala-shell from Impala cluster. create_view_statement=`hive -e "show create table ${db}.${i};" | grep "CREATE VIEW"` # This grep works here, as we do not want to match multiple-lines. if [ -z "$create_view_statement" ]; then # If it's a table, then go load the data to it. 
+ # Invalidate metadata of this DB's tables, in order for Impala to be aware of all parquet files put inside the tables' directories, previously, by "hadoop distcp". + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA ${db}.${i}" + sleep 1 impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${i}"; fi done + # Check if the entities in both clusters are the same, down to the exact names, not just the counts. (they are sorted in the same way both in hive and impala) if [ "${entities_on_impala[@]}" == "${entities_on_ocean[@]}" ]; then echo -e "\nAll entities have been copied to Impala cluster.\n" else echo -e "\n\nERROR: 1 OR MORE ENTITIES OF DB '${db}' FAILED TO BE COPIED TO IMPALA CLUSTER!\n\n" rm -f error.log - return 4 + exit 6 fi rm -f error.log diff --git a/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/copyDataToImpalaCluster.sh b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/copyDataToImpalaCluster.sh index 2711d6e12..1130a684d 100644 --- a/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/copyDataToImpalaCluster.sh +++ b/dhp-workflows/dhp-stats-monitor-irish/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor-irish/oozie_app/copyDataToImpalaCluster.sh @@ -66,24 +66,21 @@ function copydb() { if [ -n "$log_errors" ]; then echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN DROPPING THE OLD DATABASE! EXITING...\n\n" rm -f error.log - return 1 + exit 2 fi - # Make Impala aware of the deletion of the old DB immediately. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - echo -e "\n\nCopying files of '${db}', from Ocean to Impala cluster..\n" - # Using max-bandwidth of: 50 * 100 Mb/s = 5 Gb/s - # Using max memory of: 50 * 6144 = 300 Gb + # Using max-bandwidth of: 70 * 150 Mb/s = 10.5 Gb/s + # Using max memory of: 70 * 6144 = 430 Gb # Using 1MB as a buffer-size. - # The " -Ddistcp.dynamic.recordsPerChunk=50" arg is not available in our version of hadoop + # The " -Ddistcp.dynamic.recordsPerChunk=N" arg is not available in our version of hadoop # The "ug" args cannot be used as we get a "User does not belong to hive" error. # The "p" argument cannot be used, as it blocks the files from being used, giving a "sticky bit"-error, even after applying chmod and chown onm the files. hadoop distcp -Dmapreduce.map.memory.mb=6144 -m 70 -bandwidth 150 \ -numListstatusThreads 40 \ -copybuffersize 1048576 \ -strategy dynamic \ + -blocksperchunk 8 \ -pb \ ${OCEAN_HDFS_NODE}/user/hive/warehouse/${db}.db ${IMPALA_HDFS_DB_BASE_PATH} @@ -91,9 +88,9 @@ function copydb() { if [ $? -eq 0 ]; then echo -e "\nSuccessfully copied the files of '${db}'.\n" else - echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT WITH EXIT STATUS: $?\n\n" + echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT EXIT STATUS: $?\n\n" rm -f error.log - return 2 + exit 3 fi # In case we ever use this script for a writable DB (using inserts/updates), we should perform the following costly operation as well.. @@ -104,14 +101,11 @@ function copydb() { # create the new database (with the same name) impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create database ${db}" - # Make Impala aware of the creation of the new DB immediately. 
- sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 # Because "Hive" and "Impala" do not have compatible schemas, we cannot use the "show create table " output from hive to create the exact same table in impala. # So, we have to find at least one parquet file (check if it's there) from the table in the ocean cluster for impala to use it to extract the table-schema itself from that file. all_create_view_statements=() + num_tables=0 entities_on_ocean=`hive -e "show tables in ${db};" | sed 's/WARN:.*//g'` # Get the tables and views without any potential the "WARN" logs. for i in ${entities_on_ocean[@]}; do # Use un-quoted values, as the elemetns are single-words. @@ -128,9 +122,11 @@ function copydb() { all_create_view_statements+=("$create_view_statement") else echo -e "\n'${i}' is a table, so we will check for its parquet files and create the table on Impala cluster.\n" + ((num_tables++)) CURRENT_PRQ_FILE=`hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/${i}/" | grep -v 'Found' | grep -v '_impala_insert_staging' | head -1` if [ -z "$CURRENT_PRQ_FILE" ]; then # If there is not parquet-file inside. echo -e "\nERROR: THE TABLE \"${i}\" HAD NO FILES TO GET THE SCHEMA FROM! IT'S EMPTY!\n\n" + exit 4 # Comment out when testing a DB which has such a table, just for performing this exact test-check. else impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.${i} like parquet '${CURRENT_PRQ_FILE}' stored as parquet;" |& tee error.log log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"` @@ -141,74 +137,73 @@ function copydb() { fi done - echo -e "\nAll tables have been created, going to create the views..\n" + previous_num_of_views_to_retry=${#all_create_view_statements[@]} + if [[ $num_tables -gt 0 ]]; then + echo -e "\nAll ${num_tables} tables have been created, for db '${db}', going to create the ${previous_num_of_views_to_retry} views..\n" + else + echo -e "\nDB '${db}' does not have any tables, moving on to create the ${previous_num_of_views_to_retry} views..\n" + fi - # Time to loop through the views and create them. - # At this point all table-schemas should have been created. - - previous_num_of_views_to_retry=${#all_create_view_statements} if [[ $previous_num_of_views_to_retry -gt 0 ]]; then - echo -e "\nAll_create_view_statements:\n\n${all_create_view_statements[@]}\n" # DEBUG - # Make Impala aware of the new tables, so it knows them when creating the views. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 + echo -e "\nAll_create_view_statements (${previous_num_of_views_to_retry}):\n\n${all_create_view_statements[@]}\n" # DEBUG else echo -e "\nDB '${db}' does not contain any views.\n" fi level_counter=0 - while [[ ${#all_create_view_statements[@]} -gt 0 ]]; do + while [[ $previous_num_of_views_to_retry -gt 0 ]]; do ((level_counter++)) # The only accepted reason for a view to not be created, is if it depends on another view, which has not been created yet. # In this case, we should retry creating this particular view again. - should_retry_create_view_statements=() + new_num_of_views_to_retry=0 for create_view_statement in "${all_create_view_statements[@]}"; do # Here we use double quotes, as the elements are phrases, instead of single-words. 
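# The schema-inference step above, isolated: the "show create table" DDL produced by Hive is not usable on Impala,
# so the table is created from one of its own parquet data files instead. Sketch with a hypothetical table name,
# reusing the same variables as the script:
prq_file=$(hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/some_table/" | grep -v '_impala_insert_staging' | head -1)
impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.some_table like parquet '${prq_file}' stored as parquet"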
impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "${create_view_statement}" |& tee error.log # impala-shell prints all logs in stderr, so wee need to capture them and put them in a file, in order to perform "grep" on them later specific_errors=`cat error.log | grep -E "FAILED: ParseException line 1:13 missing TABLE at 'view'|ERROR: AnalysisException: Could not resolve table reference:"` if [ -n "$specific_errors" ]; then echo -e "\nspecific_errors: ${specific_errors}\n" - echo -e "\nView '$(cat error.log | grep "CREATE VIEW " | sed 's/CREATE VIEW //g' | sed 's/ as select .*//g')' failed to be created, possibly because it depends on another view.\n" - should_retry_create_view_statements+=("$create_view_statement") + echo -e "\nView '$(cat error.log | grep -Eo "Query: CREATE VIEW ([^\s]+)" | sed 's/Query: CREATE VIEW //g')' failed to be created, possibly because it depends on another view.\n" + ((new_num_of_views_to_retry++)) # Increment it here, instead of acquiring the array's size in the end, as that doesn't work for some reason. else + all_create_view_statements=("${all_create_view_statements[@]/$create_view_statement}") # Remove the current successful statement from the list. sleep 1 # Wait a bit for Impala to register that the view was created, before possibly referencing it by another view. fi done - new_num_of_views_to_retry=${#should_retry_create_view_statements} + all_create_view_statements=("$(echo "${all_create_view_statements[@]}" | grep -v '^[\s]*$')") # Re-index the array, filtering-out any empty elements. + # Although the above command reduces the "active" elements to just the few to-be-retried, it does not manage to make the array return the its true size through the "${#all_create_view_statements[@]}" statement. So we use counters. + if [[ $new_num_of_views_to_retry -eq $previous_num_of_views_to_retry ]]; then echo -e "\n\nERROR: THE NUMBER OF VIEWS TO RETRY HAS NOT BEEN REDUCED! THE SCRIPT IS LIKELY GOING TO AN INFINITE-LOOP! EXITING..\n\n" - return 3 + exit 5 elif [[ $new_num_of_views_to_retry -gt 0 ]]; then - echo -e "\nTo be retried \"create_view_statements\":\n\n${should_retry_create_view_statements[@]}\n" - previous_num_of_views_to_retry=$new_num_of_views_to_retry + echo -e "\nTo be retried \"create_view_statements\" (${new_num_of_views_to_retry}):\n\n${all_create_view_statements[@]}\n" else echo -e "\nFinished creating views, for db: '${db}', in level-${level_counter}.\n" fi - all_create_view_statements=("${should_retry_create_view_statement[@]}") # This is needed in any case to either move forward with the rest of the views or stop at 0 remaining views. + previous_num_of_views_to_retry=$new_num_of_views_to_retry done - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 - echo -e "\nComputing stats for tables..\n" entities_on_impala=`impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"` for i in ${entities_on_impala[@]}; do # Use un-quoted values, as the elemetns are single-words. # Taking the create table statement from the Ocean cluster, just to check if its a view, as the output is easier than using impala-shell from Impala cluster. create_view_statement=`hive -e "show create table ${db}.${i};" | grep "CREATE VIEW"` # This grep works here, as we do not want to match multiple-lines. if [ -z "$create_view_statement" ]; then # If it's a table, then go load the data to it. 
+ # Invalidate metadata of this DB's tables, in order for Impala to be aware of all parquet files put inside the tables' directories, previously, by "hadoop distcp". + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA ${db}.${i}" + sleep 1 impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${i}"; fi done + # Check if the entities in both clusters are the same, down to the exact names, not just the counts. (they are sorted in the same way both in hive and impala) if [ "${entities_on_impala[@]}" == "${entities_on_ocean[@]}" ]; then echo -e "\nAll entities have been copied to Impala cluster.\n" else echo -e "\n\nERROR: 1 OR MORE ENTITIES OF DB '${db}' FAILED TO BE COPIED TO IMPALA CLUSTER!\n\n" rm -f error.log - return 4 + exit 6 fi rm -f error.log diff --git a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/copyDataToImpalaCluster.sh b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/copyDataToImpalaCluster.sh index 5ad9df762..de275145b 100644 --- a/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/copyDataToImpalaCluster.sh +++ b/dhp-workflows/dhp-stats-monitor-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats-monitor/oozie_app/copyDataToImpalaCluster.sh @@ -66,24 +66,21 @@ function copydb() { if [ -n "$log_errors" ]; then echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN DROPPING THE OLD DATABASE! EXITING...\n\n" rm -f error.log - return 1 + exit 2 fi - # Make Impala aware of the deletion of the old DB immediately. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - echo -e "\n\nCopying files of '${db}', from Ocean to Impala cluster..\n" - # Using max-bandwidth of: 50 * 100 Mb/s = 5 Gb/s - # Using max memory of: 50 * 6144 = 300 Gb + # Using max-bandwidth of: 70 * 150 Mb/s = 10.5 Gb/s + # Using max memory of: 70 * 6144 = 430 Gb # Using 1MB as a buffer-size. - # The " -Ddistcp.dynamic.recordsPerChunk=50" arg is not available in our version of hadoop + # The " -Ddistcp.dynamic.recordsPerChunk=N" arg is not available in our version of hadoop # The "ug" args cannot be used as we get a "User does not belong to hive" error. # The "p" argument cannot be used, as it blocks the files from being used, giving a "sticky bit"-error, even after applying chmod and chown onm the files. hadoop distcp -Dmapreduce.map.memory.mb=6144 -m 70 -bandwidth 150 \ -numListstatusThreads 40 \ -copybuffersize 1048576 \ -strategy dynamic \ + -blocksperchunk 8 \ -pb \ ${OCEAN_HDFS_NODE}/user/hive/warehouse/${db}.db ${IMPALA_HDFS_DB_BASE_PATH} @@ -91,9 +88,9 @@ function copydb() { if [ $? -eq 0 ]; then echo -e "\nSuccessfully copied the files of '${db}'.\n" else - echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT WITH EXIT STATUS: $?\n\n" + echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT EXIT STATUS: $?\n\n" rm -f error.log - return 2 + exit 3 fi # In case we ever use this script for a writable DB (using inserts/updates), we should perform the following costly operation as well.. @@ -104,14 +101,11 @@ function copydb() { # create the new database (with the same name) impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create database ${db}" - # Make Impala aware of the creation of the new DB immediately. 
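# The per-table "INVALIDATE METADATA ${db}.${i}" added above replaces the broad "INVALIDATE METADATA" calls removed
# elsewhere in this patch: only the copied DB's tables are refreshed, so Impala sees the parquet files placed by
# "hadoop distcp" before "compute stats" runs. A standalone sketch of the same pattern (the real script additionally
# skips views before computing stats):
for t in $(impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"); do
  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA ${db}.${t}"
  impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${t}"
done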
- sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 # Because "Hive" and "Impala" do not have compatible schemas, we cannot use the "show create table " output from hive to create the exact same table in impala. # So, we have to find at least one parquet file (check if it's there) from the table in the ocean cluster for impala to use it to extract the table-schema itself from that file. all_create_view_statements=() + num_tables=0 entities_on_ocean=`hive -e "show tables in ${db};" | sed 's/WARN:.*//g'` # Get the tables and views without any potential the "WARN" logs. for i in ${entities_on_ocean[@]}; do # Use un-quoted values, as the elemetns are single-words. @@ -128,9 +122,11 @@ function copydb() { all_create_view_statements+=("$create_view_statement") else echo -e "\n'${i}' is a table, so we will check for its parquet files and create the table on Impala cluster.\n" + ((num_tables++)) CURRENT_PRQ_FILE=`hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/${i}/" | grep -v 'Found' | grep -v '_impala_insert_staging' | head -1` if [ -z "$CURRENT_PRQ_FILE" ]; then # If there is not parquet-file inside. echo -e "\nERROR: THE TABLE \"${i}\" HAD NO FILES TO GET THE SCHEMA FROM! IT'S EMPTY!\n\n" + exit 4 # Comment out when testing a DB which has such a table, just for performing this exact test-check. else impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.${i} like parquet '${CURRENT_PRQ_FILE}' stored as parquet;" |& tee error.log log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"` @@ -141,74 +137,73 @@ function copydb() { fi done - echo -e "\nAll tables have been created, going to create the views..\n" + previous_num_of_views_to_retry=${#all_create_view_statements[@]} + if [[ $num_tables -gt 0 ]]; then + echo -e "\nAll ${num_tables} tables have been created, for db '${db}', going to create the ${previous_num_of_views_to_retry} views..\n" + else + echo -e "\nDB '${db}' does not have any tables, moving on to create the ${previous_num_of_views_to_retry} views..\n" + fi - # Time to loop through the views and create them. - # At this point all table-schemas should have been created. - - previous_num_of_views_to_retry=${#all_create_view_statements} if [[ $previous_num_of_views_to_retry -gt 0 ]]; then - echo -e "\nAll_create_view_statements:\n\n${all_create_view_statements[@]}\n" # DEBUG - # Make Impala aware of the new tables, so it knows them when creating the views. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 + echo -e "\nAll_create_view_statements (${previous_num_of_views_to_retry}):\n\n${all_create_view_statements[@]}\n" # DEBUG else echo -e "\nDB '${db}' does not contain any views.\n" fi level_counter=0 - while [[ ${#all_create_view_statements[@]} -gt 0 ]]; do + while [[ $previous_num_of_views_to_retry -gt 0 ]]; do ((level_counter++)) # The only accepted reason for a view to not be created, is if it depends on another view, which has not been created yet. # In this case, we should retry creating this particular view again. - should_retry_create_view_statements=() + new_num_of_views_to_retry=0 for create_view_statement in "${all_create_view_statements[@]}"; do # Here we use double quotes, as the elements are phrases, instead of single-words. 
impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "${create_view_statement}" |& tee error.log # impala-shell prints all logs in stderr, so wee need to capture them and put them in a file, in order to perform "grep" on them later specific_errors=`cat error.log | grep -E "FAILED: ParseException line 1:13 missing TABLE at 'view'|ERROR: AnalysisException: Could not resolve table reference:"` if [ -n "$specific_errors" ]; then echo -e "\nspecific_errors: ${specific_errors}\n" - echo -e "\nView '$(cat error.log | grep "CREATE VIEW " | sed 's/CREATE VIEW //g' | sed 's/ as select .*//g')' failed to be created, possibly because it depends on another view.\n" - should_retry_create_view_statements+=("$create_view_statement") + echo -e "\nView '$(cat error.log | grep -Eo "Query: CREATE VIEW ([^\s]+)" | sed 's/Query: CREATE VIEW //g')' failed to be created, possibly because it depends on another view.\n" + ((new_num_of_views_to_retry++)) # Increment it here, instead of acquiring the array's size in the end, as that doesn't work for some reason. else + all_create_view_statements=("${all_create_view_statements[@]/$create_view_statement}") # Remove the current successful statement from the list. sleep 1 # Wait a bit for Impala to register that the view was created, before possibly referencing it by another view. fi done - new_num_of_views_to_retry=${#should_retry_create_view_statements} + all_create_view_statements=("$(echo "${all_create_view_statements[@]}" | grep -v '^[\s]*$')") # Re-index the array, filtering-out any empty elements. + # Although the above command reduces the "active" elements to just the few to-be-retried, it does not manage to make the array return the its true size through the "${#all_create_view_statements[@]}" statement. So we use counters. + if [[ $new_num_of_views_to_retry -eq $previous_num_of_views_to_retry ]]; then echo -e "\n\nERROR: THE NUMBER OF VIEWS TO RETRY HAS NOT BEEN REDUCED! THE SCRIPT IS LIKELY GOING TO AN INFINITE-LOOP! EXITING..\n\n" - return 3 + exit 5 elif [[ $new_num_of_views_to_retry -gt 0 ]]; then - echo -e "\nTo be retried \"create_view_statements\":\n\n${should_retry_create_view_statements[@]}\n" - previous_num_of_views_to_retry=$new_num_of_views_to_retry + echo -e "\nTo be retried \"create_view_statements\" (${new_num_of_views_to_retry}):\n\n${all_create_view_statements[@]}\n" else echo -e "\nFinished creating views, for db: '${db}', in level-${level_counter}.\n" fi - all_create_view_statements=("${should_retry_create_view_statement[@]}") # This is needed in any case to either move forward with the rest of the views or stop at 0 remaining views. + previous_num_of_views_to_retry=$new_num_of_views_to_retry done - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 - echo -e "\nComputing stats for tables..\n" entities_on_impala=`impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"` for i in ${entities_on_impala[@]}; do # Use un-quoted values, as the elemetns are single-words. # Taking the create table statement from the Ocean cluster, just to check if its a view, as the output is easier than using impala-shell from Impala cluster. create_view_statement=`hive -e "show create table ${db}.${i};" | grep "CREATE VIEW"` # This grep works here, as we do not want to match multiple-lines. if [ -z "$create_view_statement" ]; then # If it's a table, then go load the data to it. 
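# Why the retry loop above can need more than one level: a view can only be created once every view it selects from
# already exists on Impala. Hypothetical two-view example, assuming the same connection variables:
impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create view ${db}.v_outer as select x from ${db}.v_inner"   # level-1: fails, "Could not resolve table reference"
impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create view ${db}.v_inner as select 1 as x"                 # level-1: succeeds
impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create view ${db}.v_outer as select x from ${db}.v_inner"   # level-2 retry: succeeds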
+ # Invalidate metadata of this DB's tables, in order for Impala to be aware of all parquet files put inside the tables' directories, previously, by "hadoop distcp". + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA ${db}.${i}" + sleep 1 impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${i}"; fi done + # Check if the entities in both clusters are the same, down to the exact names, not just the counts. (they are sorted in the same way both in hive and impala) if [ "${entities_on_impala[@]}" == "${entities_on_ocean[@]}" ]; then echo -e "\nAll entities have been copied to Impala cluster.\n" else echo -e "\n\nERROR: 1 OR MORE ENTITIES OF DB '${db}' FAILED TO BE COPIED TO IMPALA CLUSTER!\n\n" rm -f error.log - return 4 + exit 6 fi rm -f error.log diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/copyDataToImpalaCluster.sh b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/copyDataToImpalaCluster.sh index c2324b912..6fc0aa745 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/copyDataToImpalaCluster.sh +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/copyDataToImpalaCluster.sh @@ -68,24 +68,21 @@ function copydb() { if [ -n "$log_errors" ]; then echo -e "\n\nERROR: THERE WAS A PROBLEM WHEN DROPPING THE OLD DATABASE! EXITING...\n\n" rm -f error.log - return 1 + exit 2 fi - # Make Impala aware of the deletion of the old DB immediately. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - echo -e "\n\nCopying files of '${db}', from Ocean to Impala cluster..\n" - # Using max-bandwidth of: 50 * 100 Mb/s = 5 Gb/s - # Using max memory of: 50 * 6144 = 300 Gb + # Using max-bandwidth of: 70 * 150 Mb/s = 10.5 Gb/s + # Using max memory of: 70 * 6144 = 430 Gb # Using 1MB as a buffer-size. - # The " -Ddistcp.dynamic.recordsPerChunk=50" arg is not available in our version of hadoop + # The " -Ddistcp.dynamic.recordsPerChunk=N" arg is not available in our version of hadoop # The "ug" args cannot be used as we get a "User does not belong to hive" error. # The "p" argument cannot be used, as it blocks the files from being used, giving a "sticky bit"-error, even after applying chmod and chown onm the files. hadoop distcp -Dmapreduce.map.memory.mb=6144 -m 70 -bandwidth 150 \ -numListstatusThreads 40 \ -copybuffersize 1048576 \ -strategy dynamic \ + -blocksperchunk 8 \ -pb \ ${OCEAN_HDFS_NODE}/user/hive/warehouse/${db}.db ${IMPALA_HDFS_DB_BASE_PATH} @@ -93,9 +90,9 @@ function copydb() { if [ $? -eq 0 ]; then echo -e "\nSuccessfully copied the files of '${db}'.\n" else - echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT WITH EXIT STATUS: $?\n\n" + echo -e "\n\nERROR: FAILED TO TRANSFER THE FILES OF '${db}', WITH 'hadoop distcp'. GOT EXIT STATUS: $?\n\n" rm -f error.log - return 2 + exit 3 fi # In case we ever use this script for a writable DB (using inserts/updates), we should perform the following costly operation as well.. @@ -106,14 +103,11 @@ function copydb() { # create the new database (with the same name) impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create database ${db}" - # Make Impala aware of the creation of the new DB immediately. 
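# The entity check above compares the two newline-separated lists verbatim; an equivalent standalone check, assuming
# both clusters list entities in the same order (as the comment notes), could rely on diff:
diff <(hive -e "show tables in ${db};" | sed 's/WARN:.*//g') \
     <(impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}") \
  && echo "All entities of '${db}' exist on the Impala cluster" \
  || echo "ERROR: entity lists differ for '${db}'"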
- sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 # Because "Hive" and "Impala" do not have compatible schemas, we cannot use the "show create table " output from hive to create the exact same table in impala. # So, we have to find at least one parquet file (check if it's there) from the table in the ocean cluster for impala to use it to extract the table-schema itself from that file. all_create_view_statements=() + num_tables=0 entities_on_ocean=`hive -e "show tables in ${db};" | sed 's/WARN:.*//g'` # Get the tables and views without any potential the "WARN" logs. for i in ${entities_on_ocean[@]}; do # Use un-quoted values, as the elemetns are single-words. @@ -130,9 +124,11 @@ function copydb() { all_create_view_statements+=("$create_view_statement") else echo -e "\n'${i}' is a table, so we will check for its parquet files and create the table on Impala cluster.\n" + ((num_tables++)) CURRENT_PRQ_FILE=`hdfs dfs -conf ${IMPALA_CONFIG_FILE} -ls -C "${IMPALA_HDFS_DB_BASE_PATH}/${db}.db/${i}/" | grep -v 'Found' | grep -v '_impala_insert_staging' | head -1` if [ -z "$CURRENT_PRQ_FILE" ]; then # If there is not parquet-file inside. echo -e "\nERROR: THE TABLE \"${i}\" HAD NO FILES TO GET THE SCHEMA FROM! IT'S EMPTY!\n\n" + exit 4 # Comment out when testing a DB which has such a table, just for performing this exact test-check. else impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "create table ${db}.${i} like parquet '${CURRENT_PRQ_FILE}' stored as parquet;" |& tee error.log log_errors=`cat error.log | grep -E "WARN|ERROR|FAILED"` @@ -143,74 +139,73 @@ function copydb() { fi done - echo -e "\nAll tables have been created, going to create the views..\n" + previous_num_of_views_to_retry=${#all_create_view_statements[@]} + if [[ $num_tables -gt 0 ]]; then + echo -e "\nAll ${num_tables} tables have been created, for db '${db}', going to create the ${previous_num_of_views_to_retry} views..\n" + else + echo -e "\nDB '${db}' does not have any tables, moving on to create the ${previous_num_of_views_to_retry} views..\n" + fi - # Time to loop through the views and create them. - # At this point all table-schemas should have been created. - - previous_num_of_views_to_retry=${#all_create_view_statements} if [[ $previous_num_of_views_to_retry -gt 0 ]]; then - echo -e "\nAll_create_view_statements:\n\n${all_create_view_statements[@]}\n" # DEBUG - # Make Impala aware of the new tables, so it knows them when creating the views. - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 + echo -e "\nAll_create_view_statements (${previous_num_of_views_to_retry}):\n\n${all_create_view_statements[@]}\n" # DEBUG else echo -e "\nDB '${db}' does not contain any views.\n" fi level_counter=0 - while [[ ${#all_create_view_statements[@]} -gt 0 ]]; do + while [[ $previous_num_of_views_to_retry -gt 0 ]]; do ((level_counter++)) # The only accepted reason for a view to not be created, is if it depends on another view, which has not been created yet. # In this case, we should retry creating this particular view again. - should_retry_create_view_statements=() + new_num_of_views_to_retry=0 for create_view_statement in "${all_create_view_statements[@]}"; do # Here we use double quotes, as the elements are phrases, instead of single-words. 
impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "${create_view_statement}" |& tee error.log # impala-shell prints all logs in stderr, so wee need to capture them and put them in a file, in order to perform "grep" on them later specific_errors=`cat error.log | grep -E "FAILED: ParseException line 1:13 missing TABLE at 'view'|ERROR: AnalysisException: Could not resolve table reference:"` if [ -n "$specific_errors" ]; then echo -e "\nspecific_errors: ${specific_errors}\n" - echo -e "\nView '$(cat error.log | grep "CREATE VIEW " | sed 's/CREATE VIEW //g' | sed 's/ as select .*//g')' failed to be created, possibly because it depends on another view.\n" - should_retry_create_view_statements+=("$create_view_statement") + echo -e "\nView '$(cat error.log | grep -Eo "Query: CREATE VIEW ([^\s]+)" | sed 's/Query: CREATE VIEW //g')' failed to be created, possibly because it depends on another view.\n" + ((new_num_of_views_to_retry++)) # Increment it here, instead of acquiring the array's size in the end, as that doesn't work for some reason. else + all_create_view_statements=("${all_create_view_statements[@]/$create_view_statement}") # Remove the current successful statement from the list. sleep 1 # Wait a bit for Impala to register that the view was created, before possibly referencing it by another view. fi done - new_num_of_views_to_retry=${#should_retry_create_view_statements} + all_create_view_statements=("$(echo "${all_create_view_statements[@]}" | grep -v '^[\s]*$')") # Re-index the array, filtering-out any empty elements. + # Although the above command reduces the "active" elements to just the few to-be-retried, it does not manage to make the array return the its true size through the "${#all_create_view_statements[@]}" statement. So we use counters. + if [[ $new_num_of_views_to_retry -eq $previous_num_of_views_to_retry ]]; then echo -e "\n\nERROR: THE NUMBER OF VIEWS TO RETRY HAS NOT BEEN REDUCED! THE SCRIPT IS LIKELY GOING TO AN INFINITE-LOOP! EXITING..\n\n" - return 3 + exit 5 elif [[ $new_num_of_views_to_retry -gt 0 ]]; then - echo -e "\nTo be retried \"create_view_statements\":\n\n${should_retry_create_view_statements[@]}\n" - previous_num_of_views_to_retry=$new_num_of_views_to_retry + echo -e "\nTo be retried \"create_view_statements\" (${new_num_of_views_to_retry}):\n\n${all_create_view_statements[@]}\n" else echo -e "\nFinished creating views, for db: '${db}', in level-${level_counter}.\n" fi - all_create_view_statements=("${should_retry_create_view_statement[@]}") # This is needed in any case to either move forward with the rest of the views or stop at 0 remaining views. + previous_num_of_views_to_retry=$new_num_of_views_to_retry done - sleep 1 - impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA" - sleep 1 - echo -e "\nComputing stats for tables..\n" entities_on_impala=`impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} --delimited -q "show tables in ${db}"` for i in ${entities_on_impala[@]}; do # Use un-quoted values, as the elemetns are single-words. # Taking the create table statement from the Ocean cluster, just to check if its a view, as the output is easier than using impala-shell from Impala cluster. create_view_statement=`hive -e "show create table ${db}.${i};" | grep "CREATE VIEW"` # This grep works here, as we do not want to match multiple-lines. if [ -z "$create_view_statement" ]; then # If it's a table, then go load the data to it. 
+ # Invalidate metadata of this DB's tables, in order for Impala to be aware of all parquet files put inside the tables' directories, previously, by "hadoop distcp". + impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "INVALIDATE METADATA ${db}.${i}" + sleep 1 impala-shell --user ${HADOOP_USER_NAME} -i ${IMPALA_HOSTNAME} -q "compute stats ${db}.${i}"; fi done + # Check if the entities in both clusters are the same, down to the exact names, not just the counts. (they are sorted in the same way both in hive and impala) if [ "${entities_on_impala[@]}" == "${entities_on_ocean[@]}" ]; then echo -e "\nAll entities have been copied to Impala cluster.\n" else echo -e "\n\nERROR: 1 OR MORE ENTITIES OF DB '${db}' FAILED TO BE COPIED TO IMPALA CLUSTER!\n\n" rm -f error.log - return 4 + exit 6 fi rm -f error.log From 50c18f7a0b05940a476ed2ef900e15c329b7a398 Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Tue, 30 Apr 2024 12:34:16 +0200 Subject: [PATCH 13/29] [dedup wf] revised memory settings to address the increased volume of input contents --- .../dedup/consistency/oozie_app/workflow.xml | 2 + .../dhp/oa/dedup/scan/oozie_app/workflow.xml | 46 ++++++------------- 2 files changed, 16 insertions(+), 32 deletions(-) diff --git a/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/consistency/oozie_app/workflow.xml b/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/consistency/oozie_app/workflow.xml index 306229e79..46dc71c2c 100644 --- a/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/consistency/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/consistency/oozie_app/workflow.xml @@ -102,6 +102,8 @@ --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} --conf spark.sql.shuffle.partitions=15000 + --conf spark.network.timeout=300s + --conf spark.shuffle.registration.timeout=50000 --graphBasePath${graphBasePath} --graphOutputPath${graphOutputPath} diff --git a/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/scan/oozie_app/workflow.xml b/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/scan/oozie_app/workflow.xml index 49a331def..ff37c5074 100644 --- a/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/scan/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-dedup-openaire/src/main/resources/eu/dnetlib/dhp/oa/dedup/scan/oozie_app/workflow.xml @@ -33,16 +33,14 @@ max number of elements in a connected component - sparkDriverMemory - memory for driver process + sparkResourceOpts + --executor-memory=6G --conf spark.executor.memoryOverhead=4G --executor-cores=6 --driver-memory=8G --driver-cores=4 + spark resource options - sparkExecutorMemory - memory for individual executor - - - sparkExecutorCores - number of cores used by single executor + sparkResourceOptsCreateMergeRel + --executor-memory=6G --conf spark.executor.memoryOverhead=4G --executor-cores=6 --driver-memory=8G --driver-cores=4 + spark resource options oozieActionShareLibForSpark2 @@ -119,9 +117,7 @@ eu.dnetlib.dhp.oa.dedup.SparkCreateSimRels dhp-dedup-openaire-${projectVersion}.jar - --executor-memory=${sparkExecutorMemory} - --executor-cores=${sparkExecutorCores} - --driver-memory=${sparkDriverMemory} + ${sparkResourceOpts} --conf spark.extraListeners=${spark2ExtraListeners} --conf 
spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -146,9 +142,7 @@ eu.dnetlib.dhp.oa.dedup.SparkWhitelistSimRels dhp-dedup-openaire-${projectVersion}.jar - --executor-memory=${sparkExecutorMemory} - --executor-cores=${sparkExecutorCores} - --driver-memory=${sparkDriverMemory} + ${sparkResourceOpts} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -174,9 +168,7 @@ eu.dnetlib.dhp.oa.dedup.SparkCreateMergeRels dhp-dedup-openaire-${projectVersion}.jar - --executor-memory=${sparkExecutorMemory} - --executor-cores=${sparkExecutorCores} - --driver-memory=${sparkDriverMemory} + ${sparkResourceOptsCreateMergeRel} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -203,9 +195,7 @@ eu.dnetlib.dhp.oa.dedup.SparkCreateDedupRecord dhp-dedup-openaire-${projectVersion}.jar - --executor-memory=${sparkExecutorMemory} - --executor-cores=${sparkExecutorCores} - --driver-memory=${sparkDriverMemory} + ${sparkResourceOpts} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -230,9 +220,7 @@ eu.dnetlib.dhp.oa.dedup.SparkCopyOpenorgsMergeRels dhp-dedup-openaire-${projectVersion}.jar - --executor-memory=${sparkExecutorMemory} - --executor-cores=${sparkExecutorCores} - --driver-memory=${sparkDriverMemory} + ${sparkResourceOpts} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -257,9 +245,7 @@ eu.dnetlib.dhp.oa.dedup.SparkCreateOrgsDedupRecord dhp-dedup-openaire-${projectVersion}.jar - --executor-memory=${sparkExecutorMemory} - --executor-cores=${sparkExecutorCores} - --driver-memory=${sparkDriverMemory} + ${sparkResourceOpts} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -283,9 +269,7 @@ eu.dnetlib.dhp.oa.dedup.SparkUpdateEntity dhp-dedup-openaire-${projectVersion}.jar - --executor-memory=${sparkExecutorMemory} - --executor-cores=${sparkExecutorCores} - --driver-memory=${sparkDriverMemory} + ${sparkResourceOpts} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -309,9 +293,7 @@ eu.dnetlib.dhp.oa.dedup.SparkCopyRelationsNoOpenorgs dhp-dedup-openaire-${projectVersion}.jar - --executor-memory=${sparkExecutorMemory} - --executor-cores=${sparkExecutorCores} - --driver-memory=${sparkDriverMemory} + ${sparkResourceOpts} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} From e96c2c1606d2ddf4b1f6c0c3f18af7b7de4f57db Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Tue, 30 Apr 2024 16:23:25 +0200 Subject: [PATCH 14/29] [ranking wf] set 
spark.executor.memoryOverhead to fine tune the resource consumption --- .../graph/impact_indicators/oozie_app/workflow.xml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/dhp-workflows/dhp-impact-indicators/src/main/resources/eu/dnetlib/dhp/oa/graph/impact_indicators/oozie_app/workflow.xml b/dhp-workflows/dhp-impact-indicators/src/main/resources/eu/dnetlib/dhp/oa/graph/impact_indicators/oozie_app/workflow.xml index e43e7cf14..70f5f8d2a 100644 --- a/dhp-workflows/dhp-impact-indicators/src/main/resources/eu/dnetlib/dhp/oa/graph/impact_indicators/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-impact-indicators/src/main/resources/eu/dnetlib/dhp/oa/graph/impact_indicators/oozie_app/workflow.xml @@ -71,6 +71,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkHighDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -108,6 +109,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -141,6 +143,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -176,6 +179,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -209,6 +213,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -245,6 +250,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -315,6 +321,7 @@ --executor-memory=${sparkNormalExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkNormalExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -361,6 
+368,7 @@ --executor-memory=${sparkNormalExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkNormalExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -409,6 +417,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkHighDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -444,6 +453,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkHighDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -482,6 +492,7 @@ --executor-memory=${sparkHighExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkHighExecutorMemory} --conf spark.sql.shuffle.partitions=${sparkShufflePartitions} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} @@ -533,6 +544,7 @@ --executor-memory=${sparkNormalExecutorMemory} --executor-cores=${sparkExecutorCores} --driver-memory=${sparkNormalDriverMemory} + --conf spark.executor.memoryOverhead=${sparkNormalExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} From 11bd89e1325ad4f4abbac118322a6f25aafb3419 Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Wed, 1 May 2024 08:32:59 +0200 Subject: [PATCH 15/29] [enrichment] use sparkExecutorMemory to define also the memoryOverhead --- .../oozie_app/workflow.xml | 61 +++++-------------- 1 file changed, 15 insertions(+), 46 deletions(-) diff --git a/dhp-workflows/dhp-enrichment/src/main/resources/eu/dnetlib/dhp/wf/subworkflows/orcidtoresultfromsemrel/oozie_app/workflow.xml b/dhp-workflows/dhp-enrichment/src/main/resources/eu/dnetlib/dhp/wf/subworkflows/orcidtoresultfromsemrel/oozie_app/workflow.xml index a9642d637..ba3633e07 100644 --- a/dhp-workflows/dhp-enrichment/src/main/resources/eu/dnetlib/dhp/wf/subworkflows/orcidtoresultfromsemrel/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-enrichment/src/main/resources/eu/dnetlib/dhp/wf/subworkflows/orcidtoresultfromsemrel/oozie_app/workflow.xml @@ -100,16 +100,12 @@ --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} - --conf spark.sql.shuffle.partitions=3840 - --conf 
spark.speculation=false - --conf spark.hadoop.mapreduce.map.speculative=false - --conf spark.hadoop.mapreduce.reduce.speculative=false + --conf spark.sql.shuffle.partitions=8000 --sourcePath${sourcePath} --hive_metastore_uris${hive_metastore_uris} @@ -132,12 +128,11 @@ --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} --sourcePath${sourcePath} --hive_metastore_uris${hive_metastore_uris} @@ -160,12 +155,11 @@ --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} --sourcePath${sourcePath} --hive_metastore_uris${hive_metastore_uris} @@ -188,12 +182,11 @@ --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} --sourcePath${sourcePath} --hive_metastore_uris${hive_metastore_uris} @@ -218,12 +211,11 @@ --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} --sourcePath${workingDir}/orcid/targetOrcidAssoc --outputPath${workingDir}/orcid/mergedOrcidAssoc @@ -247,19 +239,14 @@ eu.dnetlib.dhp.orcidtoresultfromsemrel.SparkOrcidToResultFromSemRelJob dhp-enrichment-${projectVersion}.jar - --executor-cores=4 - --executor-memory=4G + --executor-cores=${sparkExecutorCores} + --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} - --conf spark.executor.memoryOverhead=5G + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf 
spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} - --conf spark.speculation=false - --conf spark.hadoop.mapreduce.map.speculative=false - --conf spark.hadoop.mapreduce.reduce.speculative=false --conf spark.sql.shuffle.partitions=15000 --possibleUpdatesPath${workingDir}/orcid/mergedOrcidAssoc @@ -282,15 +269,12 @@ --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} - --conf spark.speculation=false - --conf spark.hadoop.mapreduce.map.speculative=false - --conf spark.hadoop.mapreduce.reduce.speculative=false + --conf spark.sql.shuffle.partitions=8000 --possibleUpdatesPath${workingDir}/orcid/mergedOrcidAssoc --sourcePath${sourcePath}/dataset @@ -312,15 +296,12 @@ --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} - --conf spark.speculation=false - --conf spark.hadoop.mapreduce.map.speculative=false - --conf spark.hadoop.mapreduce.reduce.speculative=false + --conf spark.sql.shuffle.partitions=8000 --possibleUpdatesPath${workingDir}/orcid/mergedOrcidAssoc --sourcePath${sourcePath}/otherresearchproduct @@ -342,15 +323,12 @@ --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.dynamicAllocation.enabled=true - --conf spark.dynamicAllocation.maxExecutors=${spark2MaxExecutors} - --conf spark.speculation=false - --conf spark.hadoop.mapreduce.map.speculative=false - --conf spark.hadoop.mapreduce.reduce.speculative=false + --conf spark.sql.shuffle.partitions=4000 --possibleUpdatesPath${workingDir}/orcid/mergedOrcidAssoc --sourcePath${sourcePath}/software @@ -362,15 +340,6 @@ - - - - - - - - - From 66680b8b9a69a2801016ee4a9b34f872ce6a766f Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Thu, 2 May 2024 11:16:58 +0200 Subject: [PATCH 16/29] refactoring of common utilities --- dhp-common/pom.xml | 10 +- .../dnetlib/pace/common/PaceCommonUtils.java | 100 ++++++++++++++++++ .../java/eu/dnetlib/pace/model/Person.java | 15 ++- .../java/eu/dnetlib/pace/util/Capitalise.java | 17 +++ .../dnetlib/pace/util/DotAbbreviations.java | 11 ++ .../eu/dnetlib/pace/config/name_particles.txt | 0 dhp-pace-core/pom.xml | 6 ++ .../pace/common/AbstractPaceFunctions.java | 81 ++------------ 
dhp-workflows/dhp-graph-mapper/pom.xml | 6 ++ 9 files changed, 160 insertions(+), 86 deletions(-) create mode 100644 dhp-common/src/main/java/eu/dnetlib/pace/common/PaceCommonUtils.java rename {dhp-pace-core => dhp-common}/src/main/java/eu/dnetlib/pace/model/Person.java (96%) create mode 100644 dhp-common/src/main/java/eu/dnetlib/pace/util/Capitalise.java create mode 100644 dhp-common/src/main/java/eu/dnetlib/pace/util/DotAbbreviations.java rename {dhp-pace-core => dhp-common}/src/main/resources/eu/dnetlib/pace/config/name_particles.txt (100%) diff --git a/dhp-common/pom.xml b/dhp-common/pom.xml index 692d2bdc3..04735876d 100644 --- a/dhp-common/pom.xml +++ b/dhp-common/pom.xml @@ -63,11 +63,13 @@ - eu.dnetlib.dhp - dhp-pace-core - ${project.version} + edu.cmu + secondstring + + + com.ibm.icu + icu4j - org.apache.hadoop hadoop-common diff --git a/dhp-common/src/main/java/eu/dnetlib/pace/common/PaceCommonUtils.java b/dhp-common/src/main/java/eu/dnetlib/pace/common/PaceCommonUtils.java new file mode 100644 index 000000000..a279271b5 --- /dev/null +++ b/dhp-common/src/main/java/eu/dnetlib/pace/common/PaceCommonUtils.java @@ -0,0 +1,100 @@ + +package eu.dnetlib.pace.common; + +import com.google.common.base.Splitter; +import com.google.common.collect.Iterables; +import com.google.common.collect.Sets; +import com.ibm.icu.text.Transliterator; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; + +import java.nio.charset.StandardCharsets; +import java.text.Normalizer; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Set of common functions for the framework + * + * @author claudio + */ +public class PaceCommonUtils { + + // transliterator + protected static Transliterator transliterator = Transliterator.getInstance("Any-Eng"); + + protected static final String aliases_from = "⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾ⁿ₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎àáâäæãåāèéêëēėęəîïíīįìôöòóœøōõûüùúūßśšłžźżçćčñń"; + protected static final String aliases_to = "0123456789+-=()n0123456789+-=()aaaaaaaaeeeeeeeeiiiiiioooooooouuuuussslzzzcccnn"; + + protected static Pattern hexUnicodePattern = Pattern.compile("\\\\u(\\p{XDigit}{4})"); + + protected static String fixAliases(final String s) { + final StringBuilder sb = new StringBuilder(); + + s.chars().forEach(ch -> { + final int i = StringUtils.indexOf(aliases_from, ch); + sb.append(i >= 0 ? 
aliases_to.charAt(i) : (char) ch); + }); + + return sb.toString(); + } + + protected static String transliterate(final String s) { + try { + return transliterator.transliterate(s); + } catch (Exception e) { + return s; + } + } + + public static String normalize(final String s) { + return fixAliases(transliterate(nfd(unicodeNormalization(s)))) + .toLowerCase() + // do not compact the regexes in a single expression, would cause StackOverflowError in case of large input + // strings + .replaceAll("[^ \\w]+", "") + .replaceAll("(\\p{InCombiningDiacriticalMarks})+", "") + .replaceAll("(\\p{Punct})+", " ") + .replaceAll("(\\d)+", " ") + .replaceAll("(\\n)+", " ") + .trim(); + } + + public static String nfd(final String s) { + return Normalizer.normalize(s, Normalizer.Form.NFD); + } + + public static String unicodeNormalization(final String s) { + + Matcher m = hexUnicodePattern.matcher(s); + StringBuffer buf = new StringBuffer(s.length()); + while (m.find()) { + String ch = String.valueOf((char) Integer.parseInt(m.group(1), 16)); + m.appendReplacement(buf, Matcher.quoteReplacement(ch)); + } + m.appendTail(buf); + return buf.toString(); + } + + public static Set loadFromClasspath(final String classpath) { + + Transliterator transliterator = Transliterator.getInstance("Any-Eng"); + + final Set h = Sets.newHashSet(); + try { + for (final String s : IOUtils + .readLines(PaceCommonUtils.class.getResourceAsStream(classpath), StandardCharsets.UTF_8)) { + h.add(fixAliases(transliterator.transliterate(s))); // transliteration of the stopwords + } + } catch (final Throwable e) { + return Sets.newHashSet(); + } + return h; + } + + protected static Iterable tokens(final String s, final int maxTokens) { + return Iterables.limit(Splitter.on(" ").omitEmptyStrings().trimResults().split(s), maxTokens); + } + +} diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/Person.java b/dhp-common/src/main/java/eu/dnetlib/pace/model/Person.java similarity index 96% rename from dhp-pace-core/src/main/java/eu/dnetlib/pace/model/Person.java rename to dhp-common/src/main/java/eu/dnetlib/pace/model/Person.java index 96120cf4d..c95c9d823 100644 --- a/dhp-pace-core/src/main/java/eu/dnetlib/pace/model/Person.java +++ b/dhp-common/src/main/java/eu/dnetlib/pace/model/Person.java @@ -1,21 +1,20 @@ package eu.dnetlib.pace.model; -import java.nio.charset.Charset; -import java.text.Normalizer; -import java.util.List; -import java.util.Set; - import com.google.common.base.Joiner; import com.google.common.base.Splitter; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.hash.Hashing; - -import eu.dnetlib.pace.common.AbstractPaceFunctions; +import eu.dnetlib.pace.common.PaceCommonUtils; import eu.dnetlib.pace.util.Capitalise; import eu.dnetlib.pace.util.DotAbbreviations; +import java.nio.charset.Charset; +import java.text.Normalizer; +import java.util.List; +import java.util.Set; + public class Person { private static final String UTF8 = "UTF-8"; @@ -86,7 +85,7 @@ public class Person { private List splitTerms(final String s) { if (particles == null) { - particles = AbstractPaceFunctions.loadFromClasspath("/eu/dnetlib/pace/config/name_particles.txt"); + particles = PaceCommonUtils.loadFromClasspath("/eu/dnetlib/pace/config/name_particles.txt"); } final List list = Lists.newArrayList(); diff --git a/dhp-common/src/main/java/eu/dnetlib/pace/util/Capitalise.java b/dhp-common/src/main/java/eu/dnetlib/pace/util/Capitalise.java new file mode 100644 index 000000000..015386423 --- 
/dev/null +++ b/dhp-common/src/main/java/eu/dnetlib/pace/util/Capitalise.java @@ -0,0 +1,17 @@ + +package eu.dnetlib.pace.util; + +import com.google.common.base.Function; +import org.apache.commons.lang3.text.WordUtils; + +public class Capitalise implements Function { + + private final char[] DELIM = { + ' ', '-' + }; + + @Override + public String apply(final String s) { + return WordUtils.capitalize(s.toLowerCase(), DELIM); + } +} diff --git a/dhp-common/src/main/java/eu/dnetlib/pace/util/DotAbbreviations.java b/dhp-common/src/main/java/eu/dnetlib/pace/util/DotAbbreviations.java new file mode 100644 index 000000000..2c89da4db --- /dev/null +++ b/dhp-common/src/main/java/eu/dnetlib/pace/util/DotAbbreviations.java @@ -0,0 +1,11 @@ + +package eu.dnetlib.pace.util; + +import com.google.common.base.Function; + +public class DotAbbreviations implements Function { + @Override + public String apply(String s) { + return s.length() == 1 ? s + "." : s; + } +} diff --git a/dhp-pace-core/src/main/resources/eu/dnetlib/pace/config/name_particles.txt b/dhp-common/src/main/resources/eu/dnetlib/pace/config/name_particles.txt similarity index 100% rename from dhp-pace-core/src/main/resources/eu/dnetlib/pace/config/name_particles.txt rename to dhp-common/src/main/resources/eu/dnetlib/pace/config/name_particles.txt diff --git a/dhp-pace-core/pom.xml b/dhp-pace-core/pom.xml index 7b384f109..1593575d2 100644 --- a/dhp-pace-core/pom.xml +++ b/dhp-pace-core/pom.xml @@ -49,6 +49,12 @@ + + eu.dnetlib.dhp + dhp-common + ${project.version} + + edu.cmu secondstring diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/common/AbstractPaceFunctions.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/common/AbstractPaceFunctions.java index ba7639ada..6bfb8b3f4 100644 --- a/dhp-pace-core/src/main/java/eu/dnetlib/pace/common/AbstractPaceFunctions.java +++ b/dhp-pace-core/src/main/java/eu/dnetlib/pace/common/AbstractPaceFunctions.java @@ -1,32 +1,26 @@ package eu.dnetlib.pace.common; +import com.google.common.base.Joiner; +import com.google.common.collect.Sets; +import com.ibm.icu.text.Transliterator; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; + import java.io.IOException; import java.io.StringWriter; import java.nio.charset.StandardCharsets; -import java.text.Normalizer; import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; - -import com.google.common.base.Joiner; -import com.google.common.base.Splitter; -import com.google.common.collect.Iterables; -import com.google.common.collect.Sets; -import com.ibm.icu.text.Transliterator; - -import eu.dnetlib.pace.clustering.NGramUtils; - /** * Set of common functions for the framework * * @author claudio */ -public class AbstractPaceFunctions { +public class AbstractPaceFunctions extends PaceCommonUtils { // city map to be used when translating the city names into codes private static Map cityMap = AbstractPaceFunctions @@ -41,9 +35,6 @@ public class AbstractPaceFunctions { protected static Set stopwords_it = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_it.txt"); protected static Set stopwords_pt = loadFromClasspath("/eu/dnetlib/pace/config/stopwords_pt.txt"); - // transliterator - protected static Transliterator transliterator = Transliterator.getInstance("Any-Eng"); - // blacklist of ngrams: to avoid generic keys protected static Set ngramBlacklist = 
loadFromClasspath("/eu/dnetlib/pace/config/ngram_blacklist.txt"); @@ -51,8 +42,6 @@ public class AbstractPaceFunctions { public static final Pattern HTML_REGEX = Pattern.compile("<[^>]*>"); private static final String alpha = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "; - private static final String aliases_from = "⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾ⁿ₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎àáâäæãåāèéêëēėęəîïíīįìôöòóœøōõûüùúūßśšłžźżçćčñń"; - private static final String aliases_to = "0123456789+-=()n0123456789+-=()aaaaaaaaeeeeeeeeiiiiiioooooooouuuuussslzzzcccnn"; // doi prefix for normalization public static final Pattern DOI_PREFIX = Pattern.compile("(https?:\\/\\/dx\\.doi\\.org\\/)|(doi:)"); @@ -129,25 +118,6 @@ public class AbstractPaceFunctions { return numberPattern.matcher(strNum).matches(); } - protected static String fixAliases(final String s) { - final StringBuilder sb = new StringBuilder(); - - s.chars().forEach(ch -> { - final int i = StringUtils.indexOf(aliases_from, ch); - sb.append(i >= 0 ? aliases_to.charAt(i) : (char) ch); - }); - - return sb.toString(); - } - - protected static String transliterate(final String s) { - try { - return transliterator.transliterate(s); - } catch (Exception e) { - return s; - } - } - protected static String removeSymbols(final String s) { final StringBuilder sb = new StringBuilder(); @@ -162,23 +132,6 @@ public class AbstractPaceFunctions { return s != null; } - public static String normalize(final String s) { - return fixAliases(transliterate(nfd(unicodeNormalization(s)))) - .toLowerCase() - // do not compact the regexes in a single expression, would cause StackOverflowError in case of large input - // strings - .replaceAll("[^ \\w]+", "") - .replaceAll("(\\p{InCombiningDiacriticalMarks})+", "") - .replaceAll("(\\p{Punct})+", " ") - .replaceAll("(\\d)+", " ") - .replaceAll("(\\n)+", " ") - .trim(); - } - - public static String nfd(final String s) { - return Normalizer.normalize(s, Normalizer.Form.NFD); - } - public static String utf8(final String s) { byte[] bytes = s.getBytes(StandardCharsets.UTF_8); return new String(bytes, StandardCharsets.UTF_8); @@ -233,22 +186,6 @@ public class AbstractPaceFunctions { return newset; } - public static Set loadFromClasspath(final String classpath) { - - Transliterator transliterator = Transliterator.getInstance("Any-Eng"); - - final Set h = Sets.newHashSet(); - try { - for (final String s : IOUtils - .readLines(NGramUtils.class.getResourceAsStream(classpath), StandardCharsets.UTF_8)) { - h.add(fixAliases(transliterator.transliterate(s))); // transliteration of the stopwords - } - } catch (final Throwable e) { - return Sets.newHashSet(); - } - return h; - } - public static Map loadMapFromClasspath(final String classpath) { Transliterator transliterator = Transliterator.getInstance("Any-Eng"); @@ -303,10 +240,6 @@ public class AbstractPaceFunctions { return StringUtils.substring(s, 0, 1).toLowerCase(); } - protected static Iterable tokens(final String s, final int maxTokens) { - return Iterables.limit(Splitter.on(" ").omitEmptyStrings().trimResults().split(s), maxTokens); - } - public static String normalizePid(String pid) { return DOI_PREFIX.matcher(pid.toLowerCase()).replaceAll(""); } diff --git a/dhp-workflows/dhp-graph-mapper/pom.xml b/dhp-workflows/dhp-graph-mapper/pom.xml index c7ac55ef6..2c93bab83 100644 --- a/dhp-workflows/dhp-graph-mapper/pom.xml +++ b/dhp-workflows/dhp-graph-mapper/pom.xml @@ -90,6 +90,12 @@ ${project.version} + + eu.dnetlib.dhp + dhp-pace-core + ${project.version} + + com.jayway.jsonpath json-path From 
4355f648106b1180f2946de9346961b0db2286a4 Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Thu, 2 May 2024 11:23:53 +0200 Subject: [PATCH 17/29] reverted to version 1.2.5-SNAPSHOT --- .../dhp-build-assembly-resources/pom.xml | 2 +- .../dhp-build-properties-maven-plugin/pom.xml | 2 +- dhp-build/dhp-code-style/pom.xml | 2 +- dhp-build/pom.xml | 2 +- dhp-common/pom.xml | 2 +- dhp-pace-core/pom.xml | 4 +- .../java/eu/dnetlib/pace/util/Capitalise.java | 18 - .../eu/dnetlib/pace/util/DiffPatchMatch.java | 2553 ----------------- .../dnetlib/pace/util/DotAbbreviations.java | 11 - dhp-workflows/dhp-actionmanager/pom.xml | 2 +- dhp-workflows/dhp-aggregation/pom.xml | 2 +- dhp-workflows/dhp-blacklist/pom.xml | 2 +- dhp-workflows/dhp-broker-events/pom.xml | 2 +- dhp-workflows/dhp-dedup-openaire/pom.xml | 2 +- dhp-workflows/dhp-doiboost/pom.xml | 2 +- dhp-workflows/dhp-enrichment/pom.xml | 4 +- dhp-workflows/dhp-graph-mapper/pom.xml | 2 +- dhp-workflows/dhp-graph-provision/pom.xml | 2 +- dhp-workflows/dhp-impact-indicators/pom.xml | 2 +- dhp-workflows/dhp-stats-actionsets/pom.xml | 2 +- dhp-workflows/dhp-stats-hist-snaps/pom.xml | 2 +- dhp-workflows/dhp-stats-monitor-irish/pom.xml | 2 +- .../dhp-stats-monitor-update/pom.xml | 2 +- dhp-workflows/dhp-stats-promote/pom.xml | 2 +- dhp-workflows/dhp-stats-update/pom.xml | 2 +- dhp-workflows/dhp-swh/pom.xml | 2 +- .../dhp-usage-raw-data-update/pom.xml | 2 +- dhp-workflows/dhp-usage-stats-build/pom.xml | 2 +- dhp-workflows/dhp-workflow-profiles/pom.xml | 2 +- dhp-workflows/pom.xml | 2 +- pom.xml | 2 +- 31 files changed, 30 insertions(+), 2612 deletions(-) delete mode 100644 dhp-pace-core/src/main/java/eu/dnetlib/pace/util/Capitalise.java delete mode 100644 dhp-pace-core/src/main/java/eu/dnetlib/pace/util/DiffPatchMatch.java delete mode 100644 dhp-pace-core/src/main/java/eu/dnetlib/pace/util/DotAbbreviations.java diff --git a/dhp-build/dhp-build-assembly-resources/pom.xml b/dhp-build/dhp-build-assembly-resources/pom.xml index 7f5b76fdd..44165995d 100644 --- a/dhp-build/dhp-build-assembly-resources/pom.xml +++ b/dhp-build/dhp-build-assembly-resources/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-build - 1.2.5-beta + 1.2.5-SNAPSHOT dhp-build-assembly-resources diff --git a/dhp-build/dhp-build-properties-maven-plugin/pom.xml b/dhp-build/dhp-build-properties-maven-plugin/pom.xml index e76dcd8fc..7579bdf45 100644 --- a/dhp-build/dhp-build-properties-maven-plugin/pom.xml +++ b/dhp-build/dhp-build-properties-maven-plugin/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-build - 1.2.5-beta + 1.2.5-SNAPSHOT dhp-build-properties-maven-plugin diff --git a/dhp-build/dhp-code-style/pom.xml b/dhp-build/dhp-code-style/pom.xml index 8bbe6fac0..5a86efe17 100644 --- a/dhp-build/dhp-code-style/pom.xml +++ b/dhp-build/dhp-code-style/pom.xml @@ -5,7 +5,7 @@ eu.dnetlib.dhp dhp-code-style - 1.2.5-beta + 1.2.5-SNAPSHOT jar diff --git a/dhp-build/pom.xml b/dhp-build/pom.xml index 74a09a23c..9040ea94e 100644 --- a/dhp-build/pom.xml +++ b/dhp-build/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-SNAPSHOT dhp-build pom diff --git a/dhp-common/pom.xml b/dhp-common/pom.xml index 04735876d..c2f76cff7 100644 --- a/dhp-common/pom.xml +++ b/dhp-common/pom.xml @@ -5,7 +5,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-SNAPSHOT ../pom.xml diff --git a/dhp-pace-core/pom.xml b/dhp-pace-core/pom.xml index 1593575d2..6c706b692 100644 --- a/dhp-pace-core/pom.xml +++ b/dhp-pace-core/pom.xml @@ -6,13 +6,13 @@ eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-SNAPSHOT ../pom.xml eu.dnetlib.dhp dhp-pace-core 
- 1.2.5-beta + 1.2.5-SNAPSHOT jar diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/Capitalise.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/Capitalise.java deleted file mode 100644 index 403d91dd9..000000000 --- a/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/Capitalise.java +++ /dev/null @@ -1,18 +0,0 @@ - -package eu.dnetlib.pace.util; - -import org.apache.commons.lang3.text.WordUtils; - -import com.google.common.base.Function; - -public class Capitalise implements Function { - - private final char[] DELIM = { - ' ', '-' - }; - - @Override - public String apply(final String s) { - return WordUtils.capitalize(s.toLowerCase(), DELIM); - } -}; diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/DiffPatchMatch.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/DiffPatchMatch.java deleted file mode 100644 index cfd9acd70..000000000 --- a/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/DiffPatchMatch.java +++ /dev/null @@ -1,2553 +0,0 @@ - -package eu.dnetlib.pace.util; - -/* - * Diff Match and Patch - * Copyright 2018 The diff-match-patch Authors. - * https://github.com/google/diff-match-patch - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* - * Diff Match and Patch - * Copyright 2018 The diff-match-patch Authors. - * https://github.com/google/diff-match-patch - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; -import java.net.URLEncoder; -import java.util.*; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/* - * Functions for diff, match and patch. - * Computes the difference between two texts to create a patch. - * Applies the patch onto another text, allowing for errors. - * - * @author fraser@google.com (Neil Fraser) - */ - -/** - * Class containing the diff, match and patch methods. - * Also contains the behaviour settings. - */ -public class DiffPatchMatch { - - // Defaults. - // Set these on your diff_match_patch instance to override the defaults. - - /** - * Number of seconds to map a diff before giving up (0 for infinity). - */ - public float Diff_Timeout = 1.0f; - /** - * Cost of an empty edit operation in terms of edit characters. - */ - public short Diff_EditCost = 4; - /** - * At what point is no match declared (0.0 = perfection, 1.0 = very loose). - */ - public float Match_Threshold = 0.5f; - /** - * How far to search for a match (0 = exact location, 1000+ = broad match). 
- * A match this many characters away from the expected location will add - * 1.0 to the score (0.0 is a perfect match). - */ - public int Match_Distance = 1000; - /** - * When deleting a large block of text (over ~64 characters), how close do - * the contents have to be to match the expected contents. (0.0 = perfection, - * 1.0 = very loose). Note that Match_Threshold controls how closely the - * end points of a delete need to match. - */ - public float Patch_DeleteThreshold = 0.5f; - /** - * Chunk size for context length. - */ - public short Patch_Margin = 4; - - /** - * The number of bits in an int. - */ - private short Match_MaxBits = 32; - - /** - * Internal class for returning results from diff_linesToChars(). - * Other less paranoid languages just use a three-element array. - */ - protected static class LinesToCharsResult { - protected String chars1; - protected String chars2; - protected List lineArray; - - protected LinesToCharsResult(String chars1, String chars2, - List lineArray) { - this.chars1 = chars1; - this.chars2 = chars2; - this.lineArray = lineArray; - } - } - - // DIFF FUNCTIONS - - /** - * The data structure representing a diff is a Linked list of Diff objects: - * {Diff(Operation.DELETE, "Hello"), Diff(Operation.INSERT, "Goodbye"), - * Diff(Operation.EQUAL, " world.")} - * which means: delete "Hello", add "Goodbye" and keep " world." - */ - public enum Operation { - DELETE, INSERT, EQUAL - } - - /** - * Find the differences between two texts. - * Run a faster, slightly less optimal diff. - * This method allows the 'checklines' of diff_main() to be optional. - * Most of the time checklines is wanted, so default to true. - * @param text1 Old string to be diffed. - * @param text2 New string to be diffed. - * @return Linked List of Diff objects. - */ - public LinkedList diff_main(String text1, String text2) { - return diff_main(text1, text2, true); - } - - /** - * Find the differences between two texts. - * @param text1 Old string to be diffed. - * @param text2 New string to be diffed. - * @param checklines Speedup flag. If false, then don't run a - * line-level diff first to identify the changed areas. - * If true, then run a faster slightly less optimal diff. - * @return Linked List of Diff objects. - */ - public LinkedList diff_main(String text1, String text2, - boolean checklines) { - // Set a deadline by which time the diff must be complete. - long deadline; - if (Diff_Timeout <= 0) { - deadline = Long.MAX_VALUE; - } else { - deadline = System.currentTimeMillis() + (long) (Diff_Timeout * 1000); - } - return diff_main(text1, text2, checklines, deadline); - } - - /** - * Find the differences between two texts. Simplifies the problem by - * stripping any common prefix or suffix off the texts before diffing. - * @param text1 Old string to be diffed. - * @param text2 New string to be diffed. - * @param checklines Speedup flag. If false, then don't run a - * line-level diff first to identify the changed areas. - * If true, then run a faster slightly less optimal diff. - * @param deadline Time when the diff should be complete by. Used - * internally for recursive calls. Users should set DiffTimeout instead. - * @return Linked List of Diff objects. - */ - private LinkedList diff_main(String text1, String text2, - boolean checklines, long deadline) { - // Check for null inputs. - if (text1 == null || text2 == null) { - throw new IllegalArgumentException("Null inputs. (diff_main)"); - } - - // Check for equality (speedup). 
- LinkedList diffs; - if (text1.equals(text2)) { - diffs = new LinkedList(); - if (text1.length() != 0) { - diffs.add(new Diff(Operation.EQUAL, text1)); - } - return diffs; - } - - // Trim off common prefix (speedup). - int commonlength = diff_commonPrefix(text1, text2); - String commonprefix = text1.substring(0, commonlength); - text1 = text1.substring(commonlength); - text2 = text2.substring(commonlength); - - // Trim off common suffix (speedup). - commonlength = diff_commonSuffix(text1, text2); - String commonsuffix = text1.substring(text1.length() - commonlength); - text1 = text1.substring(0, text1.length() - commonlength); - text2 = text2.substring(0, text2.length() - commonlength); - - // Compute the diff on the middle block. - diffs = diff_compute(text1, text2, checklines, deadline); - - // Restore the prefix and suffix. - if (commonprefix.length() != 0) { - diffs.addFirst(new Diff(Operation.EQUAL, commonprefix)); - } - if (commonsuffix.length() != 0) { - diffs.addLast(new Diff(Operation.EQUAL, commonsuffix)); - } - - diff_cleanupMerge(diffs); - return diffs; - } - - /** - * Find the differences between two texts. Assumes that the texts do not - * have any common prefix or suffix. - * @param text1 Old string to be diffed. - * @param text2 New string to be diffed. - * @param checklines Speedup flag. If false, then don't run a - * line-level diff first to identify the changed areas. - * If true, then run a faster slightly less optimal diff. - * @param deadline Time when the diff should be complete by. - * @return Linked List of Diff objects. - */ - private LinkedList diff_compute(String text1, String text2, - boolean checklines, long deadline) { - LinkedList diffs = new LinkedList(); - - if (text1.length() == 0) { - // Just add some text (speedup). - diffs.add(new Diff(Operation.INSERT, text2)); - return diffs; - } - - if (text2.length() == 0) { - // Just delete some text (speedup). - diffs.add(new Diff(Operation.DELETE, text1)); - return diffs; - } - - String longtext = text1.length() > text2.length() ? text1 : text2; - String shorttext = text1.length() > text2.length() ? text2 : text1; - int i = longtext.indexOf(shorttext); - if (i != -1) { - // Shorter text is inside the longer text (speedup). - Operation op = (text1.length() > text2.length()) ? Operation.DELETE : Operation.INSERT; - diffs.add(new Diff(op, longtext.substring(0, i))); - diffs.add(new Diff(Operation.EQUAL, shorttext)); - diffs.add(new Diff(op, longtext.substring(i + shorttext.length()))); - return diffs; - } - - if (shorttext.length() == 1) { - // Single character string. - // After the previous speedup, the character can't be an equality. - diffs.add(new Diff(Operation.DELETE, text1)); - diffs.add(new Diff(Operation.INSERT, text2)); - return diffs; - } - - // Check to see if the problem can be split in two. - String[] hm = diff_halfMatch(text1, text2); - if (hm != null) { - // A half-match was found, sort out the return data. - String text1_a = hm[0]; - String text1_b = hm[1]; - String text2_a = hm[2]; - String text2_b = hm[3]; - String mid_common = hm[4]; - // Send both pairs off for separate processing. - LinkedList diffs_a = diff_main( - text1_a, text2_a, - checklines, deadline); - LinkedList diffs_b = diff_main( - text1_b, text2_b, - checklines, deadline); - // Merge the results. 
- diffs = diffs_a; - diffs.add(new Diff(Operation.EQUAL, mid_common)); - diffs.addAll(diffs_b); - return diffs; - } - - if (checklines && text1.length() > 100 && text2.length() > 100) { - return diff_lineMode(text1, text2, deadline); - } - - return diff_bisect(text1, text2, deadline); - } - - /** - * Do a quick line-level diff on both strings, then rediff the parts for - * greater accuracy. - * This speedup can produce non-minimal diffs. - * @param text1 Old string to be diffed. - * @param text2 New string to be diffed. - * @param deadline Time when the diff should be complete by. - * @return Linked List of Diff objects. - */ - private LinkedList diff_lineMode(String text1, String text2, - long deadline) { - // Scan the text on a line-by-line basis first. - LinesToCharsResult a = diff_linesToChars(text1, text2); - text1 = a.chars1; - text2 = a.chars2; - List linearray = a.lineArray; - - LinkedList diffs = diff_main(text1, text2, false, deadline); - - // Convert the diff back to original text. - diff_charsToLines(diffs, linearray); - // Eliminate freak matches (e.g. blank lines) - diff_cleanupSemantic(diffs); - - // Rediff any replacement blocks, this time character-by-character. - // Add a dummy entry at the end. - diffs.add(new Diff(Operation.EQUAL, "")); - int count_delete = 0; - int count_insert = 0; - String text_delete = ""; - String text_insert = ""; - ListIterator pointer = diffs.listIterator(); - Diff thisDiff = pointer.next(); - while (thisDiff != null) { - switch (thisDiff.operation) { - case INSERT: - count_insert++; - text_insert += thisDiff.text; - break; - case DELETE: - count_delete++; - text_delete += thisDiff.text; - break; - case EQUAL: - // Upon reaching an equality, check for prior redundancies. - if (count_delete >= 1 && count_insert >= 1) { - // Delete the offending records and add the merged ones. - pointer.previous(); - for (int j = 0; j < count_delete + count_insert; j++) { - pointer.previous(); - pointer.remove(); - } - for (Diff subDiff : diff_main( - text_delete, text_insert, false, - deadline)) { - pointer.add(subDiff); - } - } - count_insert = 0; - count_delete = 0; - text_delete = ""; - text_insert = ""; - break; - } - thisDiff = pointer.hasNext() ? pointer.next() : null; - } - diffs.removeLast(); // Remove the dummy entry at the end. - - return diffs; - } - - /** - * Find the 'middle snake' of a diff, split the problem in two - * and return the recursively constructed diff. - * See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. - * @param text1 Old string to be diffed. - * @param text2 New string to be diffed. - * @param deadline Time at which to bail if not yet complete. - * @return LinkedList of Diff objects. - */ - protected LinkedList diff_bisect(String text1, String text2, - long deadline) { - // Cache the text lengths to prevent multiple calls. - int text1_length = text1.length(); - int text2_length = text2.length(); - int max_d = (text1_length + text2_length + 1) / 2; - int v_offset = max_d; - int v_length = 2 * max_d; - int[] v1 = new int[v_length]; - int[] v2 = new int[v_length]; - for (int x = 0; x < v_length; x++) { - v1[x] = -1; - v2[x] = -1; - } - v1[v_offset + 1] = 0; - v2[v_offset + 1] = 0; - int delta = text1_length - text2_length; - // If the total number of characters is odd, then the front path will - // collide with the reverse path. - boolean front = (delta % 2 != 0); - // Offsets for start and end of k loop. - // Prevents mapping of space beyond the grid. 
- int k1start = 0; - int k1end = 0; - int k2start = 0; - int k2end = 0; - for (int d = 0; d < max_d; d++) { - // Bail out if deadline is reached. - if (System.currentTimeMillis() > deadline) { - break; - } - - // Walk the front path one step. - for (int k1 = -d + k1start; k1 <= d - k1end; k1 += 2) { - int k1_offset = v_offset + k1; - int x1; - if (k1 == -d || (k1 != d && v1[k1_offset - 1] < v1[k1_offset + 1])) { - x1 = v1[k1_offset + 1]; - } else { - x1 = v1[k1_offset - 1] + 1; - } - int y1 = x1 - k1; - while (x1 < text1_length && y1 < text2_length - && text1.charAt(x1) == text2.charAt(y1)) { - x1++; - y1++; - } - v1[k1_offset] = x1; - if (x1 > text1_length) { - // Ran off the right of the graph. - k1end += 2; - } else if (y1 > text2_length) { - // Ran off the bottom of the graph. - k1start += 2; - } else if (front) { - int k2_offset = v_offset + delta - k1; - if (k2_offset >= 0 && k2_offset < v_length && v2[k2_offset] != -1) { - // Mirror x2 onto top-left coordinate system. - int x2 = text1_length - v2[k2_offset]; - if (x1 >= x2) { - // Overlap detected. - return diff_bisectSplit(text1, text2, x1, y1, deadline); - } - } - } - } - - // Walk the reverse path one step. - for (int k2 = -d + k2start; k2 <= d - k2end; k2 += 2) { - int k2_offset = v_offset + k2; - int x2; - if (k2 == -d || (k2 != d && v2[k2_offset - 1] < v2[k2_offset + 1])) { - x2 = v2[k2_offset + 1]; - } else { - x2 = v2[k2_offset - 1] + 1; - } - int y2 = x2 - k2; - while (x2 < text1_length && y2 < text2_length - && text1.charAt(text1_length - x2 - 1) == text2.charAt(text2_length - y2 - 1)) { - x2++; - y2++; - } - v2[k2_offset] = x2; - if (x2 > text1_length) { - // Ran off the left of the graph. - k2end += 2; - } else if (y2 > text2_length) { - // Ran off the top of the graph. - k2start += 2; - } else if (!front) { - int k1_offset = v_offset + delta - k2; - if (k1_offset >= 0 && k1_offset < v_length && v1[k1_offset] != -1) { - int x1 = v1[k1_offset]; - int y1 = v_offset + x1 - k1_offset; - // Mirror x2 onto top-left coordinate system. - x2 = text1_length - x2; - if (x1 >= x2) { - // Overlap detected. - return diff_bisectSplit(text1, text2, x1, y1, deadline); - } - } - } - } - } - // Diff took too long and hit the deadline or - // number of diffs equals number of characters, no commonality at all. - LinkedList diffs = new LinkedList(); - diffs.add(new Diff(Operation.DELETE, text1)); - diffs.add(new Diff(Operation.INSERT, text2)); - return diffs; - } - - /** - * Given the location of the 'middle snake', split the diff in two parts - * and recurse. - * @param text1 Old string to be diffed. - * @param text2 New string to be diffed. - * @param x Index of split point in text1. - * @param y Index of split point in text2. - * @param deadline Time at which to bail if not yet complete. - * @return LinkedList of Diff objects. - */ - private LinkedList diff_bisectSplit(String text1, String text2, - int x, int y, long deadline) { - String text1a = text1.substring(0, x); - String text2a = text2.substring(0, y); - String text1b = text1.substring(x); - String text2b = text2.substring(y); - - // Compute both diffs serially. - LinkedList diffs = diff_main(text1a, text2a, false, deadline); - LinkedList diffsb = diff_main(text1b, text2b, false, deadline); - - diffs.addAll(diffsb); - return diffs; - } - - /** - * Split two texts into a list of strings. Reduce the texts to a string of - * hashes where each Unicode character represents one line. - * @param text1 First string. - * @param text2 Second string. 
- * @return An object containing the encoded text1, the encoded text2 and - * the List of unique strings. The zeroth element of the List of - * unique strings is intentionally blank. - */ - protected LinesToCharsResult diff_linesToChars(String text1, String text2) { - List lineArray = new ArrayList(); - Map lineHash = new HashMap(); - // e.g. linearray[4] == "Hello\n" - // e.g. linehash.get("Hello\n") == 4 - - // "\x00" is a valid character, but various debuggers don't like it. - // So we'll insert a junk entry to avoid generating a null character. - lineArray.add(""); - - // Allocate 2/3rds of the space for text1, the rest for text2. - String chars1 = diff_linesToCharsMunge(text1, lineArray, lineHash, 40000); - String chars2 = diff_linesToCharsMunge(text2, lineArray, lineHash, 65535); - return new LinesToCharsResult(chars1, chars2, lineArray); - } - - /** - * Split a text into a list of strings. Reduce the texts to a string of - * hashes where each Unicode character represents one line. - * @param text String to encode. - * @param lineArray List of unique strings. - * @param lineHash Map of strings to indices. - * @param maxLines Maximum length of lineArray. - * @return Encoded string. - */ - private String diff_linesToCharsMunge(String text, List lineArray, - Map lineHash, int maxLines) { - int lineStart = 0; - int lineEnd = -1; - String line; - StringBuilder chars = new StringBuilder(); - // Walk the text, pulling out a substring for each line. - // text.split('\n') would would temporarily double our memory footprint. - // Modifying text would create many large strings to garbage collect. - while (lineEnd < text.length() - 1) { - lineEnd = text.indexOf('\n', lineStart); - if (lineEnd == -1) { - lineEnd = text.length() - 1; - } - line = text.substring(lineStart, lineEnd + 1); - - if (lineHash.containsKey(line)) { - chars.append(String.valueOf((char) (int) lineHash.get(line))); - } else { - if (lineArray.size() == maxLines) { - // Bail out at 65535 because - // String.valueOf((char) 65536).equals(String.valueOf(((char) 0))) - line = text.substring(lineStart); - lineEnd = text.length(); - } - lineArray.add(line); - lineHash.put(line, lineArray.size() - 1); - chars.append(String.valueOf((char) (lineArray.size() - 1))); - } - lineStart = lineEnd + 1; - } - return chars.toString(); - } - - /** - * Rehydrate the text in a diff from a string of line hashes to real lines of - * text. - * @param diffs List of Diff objects. - * @param lineArray List of unique strings. - */ - protected void diff_charsToLines(List diffs, - List lineArray) { - StringBuilder text; - for (Diff diff : diffs) { - text = new StringBuilder(); - for (int j = 0; j < diff.text.length(); j++) { - text.append(lineArray.get(diff.text.charAt(j))); - } - diff.text = text.toString(); - } - } - - /** - * Determine the common prefix of two strings - * @param text1 First string. - * @param text2 Second string. - * @return The number of characters common to the start of each string. - */ - public int diff_commonPrefix(String text1, String text2) { - // Performance analysis: https://neil.fraser.name/news/2007/10/09/ - int n = Math.min(text1.length(), text2.length()); - for (int i = 0; i < n; i++) { - if (text1.charAt(i) != text2.charAt(i)) { - return i; - } - } - return n; - } - - /** - * Determine the common suffix of two strings - * @param text1 First string. - * @param text2 Second string. - * @return The number of characters common to the end of each string. 
- */ - public int diff_commonSuffix(String text1, String text2) { - // Performance analysis: https://neil.fraser.name/news/2007/10/09/ - int text1_length = text1.length(); - int text2_length = text2.length(); - int n = Math.min(text1_length, text2_length); - for (int i = 1; i <= n; i++) { - if (text1.charAt(text1_length - i) != text2.charAt(text2_length - i)) { - return i - 1; - } - } - return n; - } - - /** - * Determine if the suffix of one string is the prefix of another. - * @param text1 First string. - * @param text2 Second string. - * @return The number of characters common to the end of the first - * string and the start of the second string. - */ - protected int diff_commonOverlap(String text1, String text2) { - // Cache the text lengths to prevent multiple calls. - int text1_length = text1.length(); - int text2_length = text2.length(); - // Eliminate the null case. - if (text1_length == 0 || text2_length == 0) { - return 0; - } - // Truncate the longer string. - if (text1_length > text2_length) { - text1 = text1.substring(text1_length - text2_length); - } else if (text1_length < text2_length) { - text2 = text2.substring(0, text1_length); - } - int text_length = Math.min(text1_length, text2_length); - // Quick check for the worst case. - if (text1.equals(text2)) { - return text_length; - } - - // Start by looking for a single character match - // and increase length until no match is found. - // Performance analysis: https://neil.fraser.name/news/2010/11/04/ - int best = 0; - int length = 1; - while (true) { - String pattern = text1.substring(text_length - length); - int found = text2.indexOf(pattern); - if (found == -1) { - return best; - } - length += found; - if (found == 0 || text1 - .substring(text_length - length) - .equals( - text2.substring(0, length))) { - best = length; - length++; - } - } - } - - /** - * Do the two texts share a substring which is at least half the length of - * the longer text? - * This speedup can produce non-minimal diffs. - * @param text1 First string. - * @param text2 Second string. - * @return Five element String array, containing the prefix of text1, the - * suffix of text1, the prefix of text2, the suffix of text2 and the - * common middle. Or null if there was no match. - */ - protected String[] diff_halfMatch(String text1, String text2) { - if (Diff_Timeout <= 0) { - // Don't risk returning a non-optimal diff if we have unlimited time. - return null; - } - String longtext = text1.length() > text2.length() ? text1 : text2; - String shorttext = text1.length() > text2.length() ? text2 : text1; - if (longtext.length() < 4 || shorttext.length() * 2 < longtext.length()) { - return null; // Pointless. - } - - // First check if the second quarter is the seed for a half-match. - String[] hm1 = diff_halfMatchI( - longtext, shorttext, - (longtext.length() + 3) / 4); - // Check again based on the third quarter. - String[] hm2 = diff_halfMatchI( - longtext, shorttext, - (longtext.length() + 1) / 2); - String[] hm; - if (hm1 == null && hm2 == null) { - return null; - } else if (hm2 == null) { - hm = hm1; - } else if (hm1 == null) { - hm = hm2; - } else { - // Both matched. Select the longest. - hm = hm1[4].length() > hm2[4].length() ? hm1 : hm2; - } - - // A half-match was found, sort out the return data. 
- if (text1.length() > text2.length()) { - return hm; - // return new String[]{hm[0], hm[1], hm[2], hm[3], hm[4]}; - } else { - return new String[] { - hm[2], hm[3], hm[0], hm[1], hm[4] - }; - } - } - - /** - * Does a substring of shorttext exist within longtext such that the - * substring is at least half the length of longtext? - * @param longtext Longer string. - * @param shorttext Shorter string. - * @param i Start index of quarter length substring within longtext. - * @return Five element String array, containing the prefix of longtext, the - * suffix of longtext, the prefix of shorttext, the suffix of shorttext - * and the common middle. Or null if there was no match. - */ - private String[] diff_halfMatchI(String longtext, String shorttext, int i) { - // Start with a 1/4 length substring at position i as a seed. - String seed = longtext.substring(i, i + longtext.length() / 4); - int j = -1; - String best_common = ""; - String best_longtext_a = "", best_longtext_b = ""; - String best_shorttext_a = "", best_shorttext_b = ""; - while ((j = shorttext.indexOf(seed, j + 1)) != -1) { - int prefixLength = diff_commonPrefix( - longtext.substring(i), - shorttext.substring(j)); - int suffixLength = diff_commonSuffix( - longtext.substring(0, i), - shorttext.substring(0, j)); - if (best_common.length() < suffixLength + prefixLength) { - best_common = shorttext.substring(j - suffixLength, j) - + shorttext.substring(j, j + prefixLength); - best_longtext_a = longtext.substring(0, i - suffixLength); - best_longtext_b = longtext.substring(i + prefixLength); - best_shorttext_a = shorttext.substring(0, j - suffixLength); - best_shorttext_b = shorttext.substring(j + prefixLength); - } - } - if (best_common.length() * 2 >= longtext.length()) { - return new String[] { - best_longtext_a, best_longtext_b, - best_shorttext_a, best_shorttext_b, best_common - }; - } else { - return null; - } - } - - /** - * Reduce the number of edits by eliminating semantically trivial equalities. - * @param diffs LinkedList of Diff objects. - */ - public void diff_cleanupSemantic(LinkedList diffs) { - if (diffs.isEmpty()) { - return; - } - boolean changes = false; - Deque equalities = new ArrayDeque(); // Double-ended queue of qualities. - String lastEquality = null; // Always equal to equalities.peek().text - ListIterator pointer = diffs.listIterator(); - // Number of characters that changed prior to the equality. - int length_insertions1 = 0; - int length_deletions1 = 0; - // Number of characters that changed after the equality. - int length_insertions2 = 0; - int length_deletions2 = 0; - Diff thisDiff = pointer.next(); - while (thisDiff != null) { - if (thisDiff.operation == Operation.EQUAL) { - // Equality found. - equalities.push(thisDiff); - length_insertions1 = length_insertions2; - length_deletions1 = length_deletions2; - length_insertions2 = 0; - length_deletions2 = 0; - lastEquality = thisDiff.text; - } else { - // An insertion or deletion. - if (thisDiff.operation == Operation.INSERT) { - length_insertions2 += thisDiff.text.length(); - } else { - length_deletions2 += thisDiff.text.length(); - } - // Eliminate an equality that is smaller or equal to the edits on both - // sides of it. - if (lastEquality != null && (lastEquality.length() <= Math.max(length_insertions1, length_deletions1)) - && (lastEquality.length() <= Math.max(length_insertions2, length_deletions2))) { - // System.out.println("Splitting: '" + lastEquality + "'"); - // Walk back to offending equality. 
- while (thisDiff != equalities.peek()) { - thisDiff = pointer.previous(); - } - pointer.next(); - - // Replace equality with a delete. - pointer.set(new Diff(Operation.DELETE, lastEquality)); - // Insert a corresponding an insert. - pointer.add(new Diff(Operation.INSERT, lastEquality)); - - equalities.pop(); // Throw away the equality we just deleted. - if (!equalities.isEmpty()) { - // Throw away the previous equality (it needs to be reevaluated). - equalities.pop(); - } - if (equalities.isEmpty()) { - // There are no previous equalities, walk back to the start. - while (pointer.hasPrevious()) { - pointer.previous(); - } - } else { - // There is a safe equality we can fall back to. - thisDiff = equalities.peek(); - while (thisDiff != pointer.previous()) { - // Intentionally empty loop. - } - } - - length_insertions1 = 0; // Reset the counters. - length_insertions2 = 0; - length_deletions1 = 0; - length_deletions2 = 0; - lastEquality = null; - changes = true; - } - } - thisDiff = pointer.hasNext() ? pointer.next() : null; - } - - // Normalize the diff. - if (changes) { - diff_cleanupMerge(diffs); - } - diff_cleanupSemanticLossless(diffs); - - // Find any overlaps between deletions and insertions. - // e.g: abcxxxxxxdef - // -> abcxxxdef - // e.g: xxxabcdefxxx - // -> defxxxabc - // Only extract an overlap if it is as big as the edit ahead or behind it. - pointer = diffs.listIterator(); - Diff prevDiff = null; - thisDiff = null; - if (pointer.hasNext()) { - prevDiff = pointer.next(); - if (pointer.hasNext()) { - thisDiff = pointer.next(); - } - } - while (thisDiff != null) { - if (prevDiff.operation == Operation.DELETE && - thisDiff.operation == Operation.INSERT) { - String deletion = prevDiff.text; - String insertion = thisDiff.text; - int overlap_length1 = this.diff_commonOverlap(deletion, insertion); - int overlap_length2 = this.diff_commonOverlap(insertion, deletion); - if (overlap_length1 >= overlap_length2) { - if (overlap_length1 >= deletion.length() / 2.0 || - overlap_length1 >= insertion.length() / 2.0) { - // Overlap found. Insert an equality and trim the surrounding edits. - pointer.previous(); - pointer - .add( - new Diff(Operation.EQUAL, - insertion.substring(0, overlap_length1))); - prevDiff.text = deletion.substring(0, deletion.length() - overlap_length1); - thisDiff.text = insertion.substring(overlap_length1); - // pointer.add inserts the element before the cursor, so there is - // no need to step past the new element. - } - } else { - if (overlap_length2 >= deletion.length() / 2.0 || - overlap_length2 >= insertion.length() / 2.0) { - // Reverse overlap found. - // Insert an equality and swap and trim the surrounding edits. - pointer.previous(); - pointer - .add( - new Diff(Operation.EQUAL, - deletion.substring(0, overlap_length2))); - prevDiff.operation = Operation.INSERT; - prevDiff.text = insertion.substring(0, insertion.length() - overlap_length2); - thisDiff.operation = Operation.DELETE; - thisDiff.text = deletion.substring(overlap_length2); - // pointer.add inserts the element before the cursor, so there is - // no need to step past the new element. - } - } - thisDiff = pointer.hasNext() ? pointer.next() : null; - } - prevDiff = thisDiff; - thisDiff = pointer.hasNext() ? pointer.next() : null; - } - } - - /** - * Look for single edits surrounded on both sides by equalities - * which can be shifted sideways to align the edit to a word boundary. - * e.g: The cat came. -> The cat came. - * @param diffs LinkedList of Diff objects. 
- */ - public void diff_cleanupSemanticLossless(LinkedList diffs) { - String equality1, edit, equality2; - String commonString; - int commonOffset; - int score, bestScore; - String bestEquality1, bestEdit, bestEquality2; - // Create a new iterator at the start. - ListIterator pointer = diffs.listIterator(); - Diff prevDiff = pointer.hasNext() ? pointer.next() : null; - Diff thisDiff = pointer.hasNext() ? pointer.next() : null; - Diff nextDiff = pointer.hasNext() ? pointer.next() : null; - // Intentionally ignore the first and last element (don't need checking). - while (nextDiff != null) { - if (prevDiff.operation == Operation.EQUAL && - nextDiff.operation == Operation.EQUAL) { - // This is a single edit surrounded by equalities. - equality1 = prevDiff.text; - edit = thisDiff.text; - equality2 = nextDiff.text; - - // First, shift the edit as far left as possible. - commonOffset = diff_commonSuffix(equality1, edit); - if (commonOffset != 0) { - commonString = edit.substring(edit.length() - commonOffset); - equality1 = equality1.substring(0, equality1.length() - commonOffset); - edit = commonString + edit.substring(0, edit.length() - commonOffset); - equality2 = commonString + equality2; - } - - // Second, step character by character right, looking for the best fit. - bestEquality1 = equality1; - bestEdit = edit; - bestEquality2 = equality2; - bestScore = diff_cleanupSemanticScore(equality1, edit) - + diff_cleanupSemanticScore(edit, equality2); - while (edit.length() != 0 && equality2.length() != 0 - && edit.charAt(0) == equality2.charAt(0)) { - equality1 += edit.charAt(0); - edit = edit.substring(1) + equality2.charAt(0); - equality2 = equality2.substring(1); - score = diff_cleanupSemanticScore(equality1, edit) - + diff_cleanupSemanticScore(edit, equality2); - // The >= encourages trailing rather than leading whitespace on edits. - if (score >= bestScore) { - bestScore = score; - bestEquality1 = equality1; - bestEdit = edit; - bestEquality2 = equality2; - } - } - - if (!prevDiff.text.equals(bestEquality1)) { - // We have an improvement, save it back to the diff. - if (bestEquality1.length() != 0) { - prevDiff.text = bestEquality1; - } else { - pointer.previous(); // Walk past nextDiff. - pointer.previous(); // Walk past thisDiff. - pointer.previous(); // Walk past prevDiff. - pointer.remove(); // Delete prevDiff. - pointer.next(); // Walk past thisDiff. - pointer.next(); // Walk past nextDiff. - } - thisDiff.text = bestEdit; - if (bestEquality2.length() != 0) { - nextDiff.text = bestEquality2; - } else { - pointer.remove(); // Delete nextDiff. - nextDiff = thisDiff; - thisDiff = prevDiff; - } - } - } - prevDiff = thisDiff; - thisDiff = nextDiff; - nextDiff = pointer.hasNext() ? pointer.next() : null; - } - } - - /** - * Given two strings, compute a score representing whether the internal - * boundary falls on logical boundaries. - * Scores range from 6 (best) to 0 (worst). - * @param one First string. - * @param two Second string. - * @return The score. - */ - private int diff_cleanupSemanticScore(String one, String two) { - if (one.length() == 0 || two.length() == 0) { - // Edges are the best. - return 6; - } - - // Each port of this function behaves slightly differently due to - // subtle differences in each language's definition of things like - // 'whitespace'. Since this function's purpose is largely cosmetic, - // the choice has been made to use each language's native features - // rather than force total conformity. 
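// Worked example (illustrative inputs; the scores follow from the rules implemented in this
// scoring method, they are not part of the original file):
//   diff_cleanupSemanticScore("The cat.\n\n", "Next")  -> 5   blank-line boundary
//   diff_cleanupSemanticScore("The cat.\n",   "Next")  -> 4   line-break boundary
//   diff_cleanupSemanticScore("The cat.",     " Next") -> 3   end of sentence ('.' followed by a space)
//   diff_cleanupSemanticScore("The cat ",     "came")  -> 2   whitespace boundary
//   diff_cleanupSemanticScore("The cat",      "came")  -> 0   boundary splits a word
//   diff_cleanupSemanticScore("",             "came")  -> 6   edges score best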
- char char1 = one.charAt(one.length() - 1); - char char2 = two.charAt(0); - boolean nonAlphaNumeric1 = !Character.isLetterOrDigit(char1); - boolean nonAlphaNumeric2 = !Character.isLetterOrDigit(char2); - boolean whitespace1 = nonAlphaNumeric1 && Character.isWhitespace(char1); - boolean whitespace2 = nonAlphaNumeric2 && Character.isWhitespace(char2); - boolean lineBreak1 = whitespace1 - && Character.getType(char1) == Character.CONTROL; - boolean lineBreak2 = whitespace2 - && Character.getType(char2) == Character.CONTROL; - boolean blankLine1 = lineBreak1 && BLANKLINEEND.matcher(one).find(); - boolean blankLine2 = lineBreak2 && BLANKLINESTART.matcher(two).find(); - - if (blankLine1 || blankLine2) { - // Five points for blank lines. - return 5; - } else if (lineBreak1 || lineBreak2) { - // Four points for line breaks. - return 4; - } else if (nonAlphaNumeric1 && !whitespace1 && whitespace2) { - // Three points for end of sentences. - return 3; - } else if (whitespace1 || whitespace2) { - // Two points for whitespace. - return 2; - } else if (nonAlphaNumeric1 || nonAlphaNumeric2) { - // One point for non-alphanumeric. - return 1; - } - return 0; - } - - // Define some regex patterns for matching boundaries. - private Pattern BLANKLINEEND = Pattern.compile("\\n\\r?\\n\\Z", Pattern.DOTALL); - private Pattern BLANKLINESTART = Pattern.compile("\\A\\r?\\n\\r?\\n", Pattern.DOTALL); - - /** - * Reduce the number of edits by eliminating operationally trivial equalities. - * @param diffs LinkedList of Diff objects. - */ - public void diff_cleanupEfficiency(LinkedList diffs) { - if (diffs.isEmpty()) { - return; - } - boolean changes = false; - Deque equalities = new ArrayDeque(); // Double-ended queue of equalities. - String lastEquality = null; // Always equal to equalities.peek().text - ListIterator pointer = diffs.listIterator(); - // Is there an insertion operation before the last equality. - boolean pre_ins = false; - // Is there a deletion operation before the last equality. - boolean pre_del = false; - // Is there an insertion operation after the last equality. - boolean post_ins = false; - // Is there a deletion operation after the last equality. - boolean post_del = false; - Diff thisDiff = pointer.next(); - Diff safeDiff = thisDiff; // The last Diff that is known to be unsplittable. - while (thisDiff != null) { - if (thisDiff.operation == Operation.EQUAL) { - // Equality found. - if (thisDiff.text.length() < Diff_EditCost && (post_ins || post_del)) { - // Candidate found. - equalities.push(thisDiff); - pre_ins = post_ins; - pre_del = post_del; - lastEquality = thisDiff.text; - } else { - // Not a candidate, and can never become one. - equalities.clear(); - lastEquality = null; - safeDiff = thisDiff; - } - post_ins = post_del = false; - } else { - // An insertion or deletion. - if (thisDiff.operation == Operation.DELETE) { - post_del = true; - } else { - post_ins = true; - } - /* - * Five types to be split: ABXYCD - * AXCD ABXC - * AXCD ABXC - */ - if (lastEquality != null - && ((pre_ins && pre_del && post_ins && post_del) - || ((lastEquality.length() < Diff_EditCost / 2) - && ((pre_ins ? 1 : 0) + (pre_del ? 1 : 0) - + (post_ins ? 1 : 0) + (post_del ? 1 : 0)) == 3))) { - // System.out.println("Splitting: '" + lastEquality + "'"); - // Walk back to offending equality. - while (thisDiff != equalities.peek()) { - thisDiff = pointer.previous(); - } - pointer.next(); - - // Replace equality with a delete. 
- pointer.set(new Diff(Operation.DELETE, lastEquality)); - // Insert a corresponding an insert. - pointer.add(thisDiff = new Diff(Operation.INSERT, lastEquality)); - - equalities.pop(); // Throw away the equality we just deleted. - lastEquality = null; - if (pre_ins && pre_del) { - // No changes made which could affect previous entry, keep going. - post_ins = post_del = true; - equalities.clear(); - safeDiff = thisDiff; - } else { - if (!equalities.isEmpty()) { - // Throw away the previous equality (it needs to be reevaluated). - equalities.pop(); - } - if (equalities.isEmpty()) { - // There are no previous questionable equalities, - // walk back to the last known safe diff. - thisDiff = safeDiff; - } else { - // There is an equality we can fall back to. - thisDiff = equalities.peek(); - } - while (thisDiff != pointer.previous()) { - // Intentionally empty loop. - } - post_ins = post_del = false; - } - - changes = true; - } - } - thisDiff = pointer.hasNext() ? pointer.next() : null; - } - - if (changes) { - diff_cleanupMerge(diffs); - } - } - - /** - * Reorder and merge like edit sections. Merge equalities. - * Any edit section can move as long as it doesn't cross an equality. - * @param diffs LinkedList of Diff objects. - */ - public void diff_cleanupMerge(LinkedList diffs) { - diffs.add(new Diff(Operation.EQUAL, "")); // Add a dummy entry at the end. - ListIterator pointer = diffs.listIterator(); - int count_delete = 0; - int count_insert = 0; - String text_delete = ""; - String text_insert = ""; - Diff thisDiff = pointer.next(); - Diff prevEqual = null; - int commonlength; - while (thisDiff != null) { - switch (thisDiff.operation) { - case INSERT: - count_insert++; - text_insert += thisDiff.text; - prevEqual = null; - break; - case DELETE: - count_delete++; - text_delete += thisDiff.text; - prevEqual = null; - break; - case EQUAL: - if (count_delete + count_insert > 1) { - boolean both_types = count_delete != 0 && count_insert != 0; - // Delete the offending records. - pointer.previous(); // Reverse direction. - while (count_delete-- > 0) { - pointer.previous(); - pointer.remove(); - } - while (count_insert-- > 0) { - pointer.previous(); - pointer.remove(); - } - if (both_types) { - // Factor out any common prefixies. - commonlength = diff_commonPrefix(text_insert, text_delete); - if (commonlength != 0) { - if (pointer.hasPrevious()) { - thisDiff = pointer.previous(); - assert thisDiff.operation == Operation.EQUAL : "Previous diff should have been an equality."; - thisDiff.text += text_insert.substring(0, commonlength); - pointer.next(); - } else { - pointer - .add( - new Diff(Operation.EQUAL, - text_insert.substring(0, commonlength))); - } - text_insert = text_insert.substring(commonlength); - text_delete = text_delete.substring(commonlength); - } - // Factor out any common suffixies. - commonlength = diff_commonSuffix(text_insert, text_delete); - if (commonlength != 0) { - thisDiff = pointer.next(); - thisDiff.text = text_insert - .substring( - text_insert.length() - - commonlength) - + thisDiff.text; - text_insert = text_insert - .substring( - 0, text_insert.length() - - commonlength); - text_delete = text_delete - .substring( - 0, text_delete.length() - - commonlength); - pointer.previous(); - } - } - // Insert the merged records. - if (text_delete.length() != 0) { - pointer.add(new Diff(Operation.DELETE, text_delete)); - } - if (text_insert.length() != 0) { - pointer.add(new Diff(Operation.INSERT, text_insert)); - } - // Step forward to the equality. 
- thisDiff = pointer.hasNext() ? pointer.next() : null; - } else if (prevEqual != null) { - // Merge this equality with the previous one. - prevEqual.text += thisDiff.text; - pointer.remove(); - thisDiff = pointer.previous(); - pointer.next(); // Forward direction - } - count_insert = 0; - count_delete = 0; - text_delete = ""; - text_insert = ""; - prevEqual = thisDiff; - break; - } - thisDiff = pointer.hasNext() ? pointer.next() : null; - } - if (diffs.getLast().text.length() == 0) { - diffs.removeLast(); // Remove the dummy entry at the end. - } - - /* - * Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to - * eliminate an equality. e.g: ABAC -> ABAC - */ - boolean changes = false; - // Create a new iterator at the start. - // (As opposed to walking the current one back.) - pointer = diffs.listIterator(); - Diff prevDiff = pointer.hasNext() ? pointer.next() : null; - thisDiff = pointer.hasNext() ? pointer.next() : null; - Diff nextDiff = pointer.hasNext() ? pointer.next() : null; - // Intentionally ignore the first and last element (don't need checking). - while (nextDiff != null) { - if (prevDiff.operation == Operation.EQUAL && - nextDiff.operation == Operation.EQUAL) { - // This is a single edit surrounded by equalities. - if (thisDiff.text.endsWith(prevDiff.text)) { - // Shift the edit over the previous equality. - thisDiff.text = prevDiff.text - + thisDiff.text - .substring( - 0, thisDiff.text.length() - - prevDiff.text.length()); - nextDiff.text = prevDiff.text + nextDiff.text; - pointer.previous(); // Walk past nextDiff. - pointer.previous(); // Walk past thisDiff. - pointer.previous(); // Walk past prevDiff. - pointer.remove(); // Delete prevDiff. - pointer.next(); // Walk past thisDiff. - thisDiff = pointer.next(); // Walk past nextDiff. - nextDiff = pointer.hasNext() ? pointer.next() : null; - changes = true; - } else if (thisDiff.text.startsWith(nextDiff.text)) { - // Shift the edit over the next equality. - prevDiff.text += nextDiff.text; - thisDiff.text = thisDiff.text.substring(nextDiff.text.length()) - + nextDiff.text; - pointer.remove(); // Delete nextDiff. - nextDiff = pointer.hasNext() ? pointer.next() : null; - changes = true; - } - } - prevDiff = thisDiff; - thisDiff = nextDiff; - nextDiff = pointer.hasNext() ? pointer.next() : null; - } - // If shifts were made, the diff needs reordering and another shift sweep. - if (changes) { - diff_cleanupMerge(diffs); - } - } - - /** - * loc is a location in text1, compute and return the equivalent location in - * text2. - * e.g. "The cat" vs "The big cat", 1->1, 5->8 - * @param diffs List of Diff objects. - * @param loc Location within text1. - * @return Location within text2. - */ - public int diff_xIndex(List diffs, int loc) { - int chars1 = 0; - int chars2 = 0; - int last_chars1 = 0; - int last_chars2 = 0; - Diff lastDiff = null; - for (Diff aDiff : diffs) { - if (aDiff.operation != Operation.INSERT) { - // Equality or deletion. - chars1 += aDiff.text.length(); - } - if (aDiff.operation != Operation.DELETE) { - // Equality or insertion. - chars2 += aDiff.text.length(); - } - if (chars1 > loc) { - // Overshot the location. - lastDiff = aDiff; - break; - } - last_chars1 = chars1; - last_chars2 = chars2; - } - if (lastDiff != null && lastDiff.operation == Operation.DELETE) { - // The location was deleted. - return last_chars2; - } - // Add the remaining character length. 
- return last_chars2 + (loc - last_chars1); - } - - /** - * Convert a Diff list into a pretty HTML report. - * @param diffs List of Diff objects. - * @return HTML representation. - */ - public String diff_prettyHtml(List diffs) { - StringBuilder html = new StringBuilder(); - for (Diff aDiff : diffs) { - String text = aDiff.text - .replace("&", "&") - .replace("<", "<") - .replace(">", ">") - .replace("\n", "¶
"); - switch (aDiff.operation) { - case INSERT: - html - .append("") - .append(text) - .append(""); - break; - case DELETE: - html - .append("") - .append(text) - .append(""); - break; - case EQUAL: - html.append("").append(text).append(""); - break; - } - } - return html.toString(); - } - - /** - * Compute and return the source text (all equalities and deletions). - * @param diffs List of Diff objects. - * @return Source text. - */ - public String diff_text1(List diffs) { - StringBuilder text = new StringBuilder(); - for (Diff aDiff : diffs) { - if (aDiff.operation != Operation.INSERT) { - text.append(aDiff.text); - } - } - return text.toString(); - } - - /** - * Compute and return the destination text (all equalities and insertions). - * @param diffs List of Diff objects. - * @return Destination text. - */ - public String diff_text2(List diffs) { - StringBuilder text = new StringBuilder(); - for (Diff aDiff : diffs) { - if (aDiff.operation != Operation.DELETE) { - text.append(aDiff.text); - } - } - return text.toString(); - } - - /** - * Compute the Levenshtein compare; the number of inserted, deleted or - * substituted characters. - * @param diffs List of Diff objects. - * @return Number of changes. - */ - public int diff_levenshtein(List diffs) { - int levenshtein = 0; - int insertions = 0; - int deletions = 0; - for (Diff aDiff : diffs) { - switch (aDiff.operation) { - case INSERT: - insertions += aDiff.text.length(); - break; - case DELETE: - deletions += aDiff.text.length(); - break; - case EQUAL: - // A deletion and an insertion is one substitution. - levenshtein += Math.max(insertions, deletions); - insertions = 0; - deletions = 0; - break; - } - } - levenshtein += Math.max(insertions, deletions); - return levenshtein; - } - - /** - * Crush the diff into an encoded string which describes the operations - * required to transform text1 into text2. - * E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. - * Operations are tab-separated. Inserted text is escaped using %xx notation. - * @param diffs List of Diff objects. - * @return Delta text. - */ - public String diff_toDelta(List diffs) { - StringBuilder text = new StringBuilder(); - for (Diff aDiff : diffs) { - switch (aDiff.operation) { - case INSERT: - try { - text - .append("+") - .append( - URLEncoder - .encode(aDiff.text, "UTF-8") - .replace('+', ' ')) - .append("\t"); - } catch (UnsupportedEncodingException e) { - // Not likely on modern system. - throw new Error("This system does not support UTF-8.", e); - } - break; - case DELETE: - text.append("-").append(aDiff.text.length()).append("\t"); - break; - case EQUAL: - text.append("=").append(aDiff.text.length()).append("\t"); - break; - } - } - String delta = text.toString(); - if (delta.length() != 0) { - // Strip off trailing tab character. - delta = delta.substring(0, delta.length() - 1); - delta = unescapeForEncodeUriCompatability(delta); - } - return delta; - } - - /** - * Given the original text1, and an encoded string which describes the - * operations required to transform text1 into text2, compute the full diff. - * @param text1 Source string for the diff. - * @param delta Delta text. - * @return Array of Diff objects or null if invalid. - * @throws IllegalArgumentException If invalid input. 
- */ - public LinkedList diff_fromDelta(String text1, String delta) - throws IllegalArgumentException { - LinkedList diffs = new LinkedList(); - int pointer = 0; // Cursor in text1 - String[] tokens = delta.split("\t"); - for (String token : tokens) { - if (token.length() == 0) { - // Blank tokens are ok (from a trailing \t). - continue; - } - // Each token begins with a one character parameter which specifies the - // operation of this token (delete, insert, equality). - String param = token.substring(1); - switch (token.charAt(0)) { - case '+': - // decode would change all "+" to " " - param = param.replace("+", "%2B"); - try { - param = URLDecoder.decode(param, "UTF-8"); - } catch (UnsupportedEncodingException e) { - // Not likely on modern system. - throw new Error("This system does not support UTF-8.", e); - } catch (IllegalArgumentException e) { - // Malformed URI sequence. - throw new IllegalArgumentException( - "Illegal escape in diff_fromDelta: " + param, e); - } - diffs.add(new Diff(Operation.INSERT, param)); - break; - case '-': - // Fall through. - case '=': - int n; - try { - n = Integer.parseInt(param); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - "Invalid number in diff_fromDelta: " + param, e); - } - if (n < 0) { - throw new IllegalArgumentException( - "Negative number in diff_fromDelta: " + param); - } - String text; - try { - text = text1.substring(pointer, pointer += n); - } catch (StringIndexOutOfBoundsException e) { - throw new IllegalArgumentException("Delta length (" + pointer - + ") larger than source text length (" + text1.length() - + ").", e); - } - if (token.charAt(0) == '=') { - diffs.add(new Diff(Operation.EQUAL, text)); - } else { - diffs.add(new Diff(Operation.DELETE, text)); - } - break; - default: - // Anything else is an error. - throw new IllegalArgumentException( - "Invalid diff operation in diff_fromDelta: " + token.charAt(0)); - } - } - if (pointer != text1.length()) { - throw new IllegalArgumentException("Delta length (" + pointer - + ") smaller than source text length (" + text1.length() + ")."); - } - return diffs; - } - - // MATCH FUNCTIONS - - /** - * Locate the best instance of 'pattern' in 'text' near 'loc'. - * Returns -1 if no match found. - * @param text The text to search. - * @param pattern The pattern to search for. - * @param loc The location to search around. - * @return Best match index or -1. - */ - public int match_main(String text, String pattern, int loc) { - // Check for null inputs. - if (text == null || pattern == null) { - throw new IllegalArgumentException("Null inputs. (match_main)"); - } - - loc = Math.max(0, Math.min(loc, text.length())); - if (text.equals(pattern)) { - // Shortcut (potentially not guaranteed by the algorithm) - return 0; - } else if (text.length() == 0) { - // Nothing to match. - return -1; - } else if (loc + pattern.length() <= text.length() - && text.substring(loc, loc + pattern.length()).equals(pattern)) { - // Perfect match at the perfect spot! (Includes case of null pattern) - return loc; - } else { - // Do a fuzzy compare. - return match_bitap(text, pattern, loc); - } - } - - /** - * Locate the best instance of 'pattern' in 'text' near 'loc' using the - * Bitap algorithm. Returns -1 if no match found. - * @param text The text to search. - * @param pattern The pattern to search for. - * @param loc The location to search around. - * @return Best match index or -1. 
- */ - protected int match_bitap(String text, String pattern, int loc) { - assert (Match_MaxBits == 0 || pattern.length() <= Match_MaxBits) : "Pattern too long for this application."; - - // Initialise the alphabet. - Map s = match_alphabet(pattern); - - // Highest score beyond which we give up. - double score_threshold = Match_Threshold; - // Is there a nearby exact match? (speedup) - int best_loc = text.indexOf(pattern, loc); - if (best_loc != -1) { - score_threshold = Math - .min( - match_bitapScore(0, best_loc, loc, pattern), - score_threshold); - // What about in the other direction? (speedup) - best_loc = text.lastIndexOf(pattern, loc + pattern.length()); - if (best_loc != -1) { - score_threshold = Math - .min( - match_bitapScore(0, best_loc, loc, pattern), - score_threshold); - } - } - - // Initialise the bit arrays. - int matchmask = 1 << (pattern.length() - 1); - best_loc = -1; - - int bin_min, bin_mid; - int bin_max = pattern.length() + text.length(); - // Empty initialization added to appease Java compiler. - int[] last_rd = new int[0]; - for (int d = 0; d < pattern.length(); d++) { - // Scan for the best match; each iteration allows for one more error. - // Run a binary search to determine how far from 'loc' we can stray at - // this error level. - bin_min = 0; - bin_mid = bin_max; - while (bin_min < bin_mid) { - if (match_bitapScore(d, loc + bin_mid, loc, pattern) <= score_threshold) { - bin_min = bin_mid; - } else { - bin_max = bin_mid; - } - bin_mid = (bin_max - bin_min) / 2 + bin_min; - } - // Use the result from this iteration as the maximum for the next. - bin_max = bin_mid; - int start = Math.max(1, loc - bin_mid + 1); - int finish = Math.min(loc + bin_mid, text.length()) + pattern.length(); - - int[] rd = new int[finish + 2]; - rd[finish + 1] = (1 << d) - 1; - for (int j = finish; j >= start; j--) { - int charMatch; - if (text.length() <= j - 1 || !s.containsKey(text.charAt(j - 1))) { - // Out of range. - charMatch = 0; - } else { - charMatch = s.get(text.charAt(j - 1)); - } - if (d == 0) { - // First pass: exact match. - rd[j] = ((rd[j + 1] << 1) | 1) & charMatch; - } else { - // Subsequent passes: fuzzy match. - rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) - | (((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]; - } - if ((rd[j] & matchmask) != 0) { - double score = match_bitapScore(d, j - 1, loc, pattern); - // This match will almost certainly be better than any existing - // match. But check anyway. - if (score <= score_threshold) { - // Told you so. - score_threshold = score; - best_loc = j - 1; - if (best_loc > loc) { - // When passing loc, don't exceed our current compare from loc. - start = Math.max(1, 2 * loc - best_loc); - } else { - // Already passed loc, downhill from here on in. - break; - } - } - } - } - if (match_bitapScore(d + 1, loc, loc, pattern) > score_threshold) { - // No hope for a (better) match at greater error levels. - break; - } - last_rd = rd; - } - return best_loc; - } - - /** - * Compute and return the score for a match with e errors and x location. - * @param e Number of errors in match. - * @param x Location of match. - * @param loc Expected location of match. - * @param pattern Pattern being sought. - * @return Overall score for match (0.0 = good, 1.0 = bad). - */ - private double match_bitapScore(int e, int x, int loc, String pattern) { - float accuracy = (float) e / pattern.length(); - int proximity = Math.abs(loc - x); - if (Match_Distance == 0) { - // Dodge divide by zero error. - return proximity == 0 ? 
accuracy : 1.0; - } - return accuracy + (proximity / (float) Match_Distance); - } - - /** - * Initialise the alphabet for the Bitap algorithm. - * @param pattern The text to encode. - * @return Hash of character locations. - */ - protected Map match_alphabet(String pattern) { - Map s = new HashMap(); - char[] char_pattern = pattern.toCharArray(); - for (char c : char_pattern) { - s.put(c, 0); - } - int i = 0; - for (char c : char_pattern) { - s.put(c, s.get(c) | (1 << (pattern.length() - i - 1))); - i++; - } - return s; - } - - // PATCH FUNCTIONS - - /** - * Increase the context until it is unique, - * but don't let the pattern expand beyond Match_MaxBits. - * @param patch The patch to grow. - * @param text Source text. - */ - protected void patch_addContext(Patch patch, String text) { - if (text.length() == 0) { - return; - } - String pattern = text.substring(patch.start2, patch.start2 + patch.length1); - int padding = 0; - - // Look for the first and last matches of pattern in text. If two different - // matches are found, increase the pattern length. - while (text.indexOf(pattern) != text.lastIndexOf(pattern) - && pattern.length() < Match_MaxBits - Patch_Margin - Patch_Margin) { - padding += Patch_Margin; - pattern = text - .substring( - Math.max(0, patch.start2 - padding), - Math.min(text.length(), patch.start2 + patch.length1 + padding)); - } - // Add one chunk for good luck. - padding += Patch_Margin; - - // Add the prefix. - String prefix = text - .substring( - Math.max(0, patch.start2 - padding), - patch.start2); - if (prefix.length() != 0) { - patch.diffs.addFirst(new Diff(Operation.EQUAL, prefix)); - } - // Add the suffix. - String suffix = text - .substring( - patch.start2 + patch.length1, - Math.min(text.length(), patch.start2 + patch.length1 + padding)); - if (suffix.length() != 0) { - patch.diffs.addLast(new Diff(Operation.EQUAL, suffix)); - } - - // Roll back the start points. - patch.start1 -= prefix.length(); - patch.start2 -= prefix.length(); - // Extend the lengths. - patch.length1 += prefix.length() + suffix.length(); - patch.length2 += prefix.length() + suffix.length(); - } - - /** - * Compute a list of patches to turn text1 into text2. - * A set of diffs will be computed. - * @param text1 Old text. - * @param text2 New text. - * @return LinkedList of Patch objects. - */ - public LinkedList patch_make(String text1, String text2) { - if (text1 == null || text2 == null) { - throw new IllegalArgumentException("Null inputs. (patch_make)"); - } - // No diffs provided, compute our own. - LinkedList diffs = diff_main(text1, text2, true); - if (diffs.size() > 2) { - diff_cleanupSemantic(diffs); - diff_cleanupEfficiency(diffs); - } - return patch_make(text1, diffs); - } - - /** - * Compute a list of patches to turn text1 into text2. - * text1 will be derived from the provided diffs. - * @param diffs Array of Diff objects for text1 to text2. - * @return LinkedList of Patch objects. - */ - public LinkedList patch_make(LinkedList diffs) { - if (diffs == null) { - throw new IllegalArgumentException("Null inputs. (patch_make)"); - } - // No origin string provided, compute our own. - String text1 = diff_text1(diffs); - return patch_make(text1, diffs); - } - - /** - * Compute a list of patches to turn text1 into text2. - * text2 is ignored, diffs are the delta between text1 and text2. - * @param text1 Old text - * @param text2 Ignored. - * @param diffs Array of Diff objects for text1 to text2. - * @return LinkedList of Patch objects. 
- * @deprecated Prefer patch_make(String text1, LinkedList diffs). - */ - @Deprecated - public LinkedList patch_make(String text1, String text2, - LinkedList diffs) { - return patch_make(text1, diffs); - } - - /** - * Compute a list of patches to turn text1 into text2. - * text2 is not provided, diffs are the delta between text1 and text2. - * @param text1 Old text. - * @param diffs Array of Diff objects for text1 to text2. - * @return LinkedList of Patch objects. - */ - public LinkedList patch_make(String text1, LinkedList diffs) { - if (text1 == null || diffs == null) { - throw new IllegalArgumentException("Null inputs. (patch_make)"); - } - - LinkedList patches = new LinkedList(); - if (diffs.isEmpty()) { - return patches; // Get rid of the null case. - } - Patch patch = new Patch(); - int char_count1 = 0; // Number of characters into the text1 string. - int char_count2 = 0; // Number of characters into the text2 string. - // Start with text1 (prepatch_text) and apply the diffs until we arrive at - // text2 (postpatch_text). We recreate the patches one by one to determine - // context info. - String prepatch_text = text1; - String postpatch_text = text1; - for (Diff aDiff : diffs) { - if (patch.diffs.isEmpty() && aDiff.operation != Operation.EQUAL) { - // A new patch starts here. - patch.start1 = char_count1; - patch.start2 = char_count2; - } - - switch (aDiff.operation) { - case INSERT: - patch.diffs.add(aDiff); - patch.length2 += aDiff.text.length(); - postpatch_text = postpatch_text.substring(0, char_count2) - + aDiff.text + postpatch_text.substring(char_count2); - break; - case DELETE: - patch.length1 += aDiff.text.length(); - patch.diffs.add(aDiff); - postpatch_text = postpatch_text.substring(0, char_count2) - + postpatch_text.substring(char_count2 + aDiff.text.length()); - break; - case EQUAL: - if (aDiff.text.length() <= 2 * Patch_Margin - && !patch.diffs.isEmpty() && aDiff != diffs.getLast()) { - // Small equality inside a patch. - patch.diffs.add(aDiff); - patch.length1 += aDiff.text.length(); - patch.length2 += aDiff.text.length(); - } - - if (aDiff.text.length() >= 2 * Patch_Margin && !patch.diffs.isEmpty()) { - // Time for a new patch. - if (!patch.diffs.isEmpty()) { - patch_addContext(patch, prepatch_text); - patches.add(patch); - patch = new Patch(); - // Unlike Unidiff, our patch lists have a rolling context. - // https://github.com/google/diff-match-patch/wiki/Unidiff - // Update prepatch text & pos to reflect the application of the - // just completed patch. - prepatch_text = postpatch_text; - char_count1 = char_count2; - } - } - break; - } - - // Update the current character count. - if (aDiff.operation != Operation.INSERT) { - char_count1 += aDiff.text.length(); - } - if (aDiff.operation != Operation.DELETE) { - char_count2 += aDiff.text.length(); - } - } - // Pick up the leftover patch if not empty. - if (!patch.diffs.isEmpty()) { - patch_addContext(patch, prepatch_text); - patches.add(patch); - } - - return patches; - } - - /** - * Given an array of patches, return another array that is identical. - * @param patches Array of Patch objects. - * @return Array of Patch objects. 
- */ - public LinkedList patch_deepCopy(LinkedList patches) { - LinkedList patchesCopy = new LinkedList(); - for (Patch aPatch : patches) { - Patch patchCopy = new Patch(); - for (Diff aDiff : aPatch.diffs) { - Diff diffCopy = new Diff(aDiff.operation, aDiff.text); - patchCopy.diffs.add(diffCopy); - } - patchCopy.start1 = aPatch.start1; - patchCopy.start2 = aPatch.start2; - patchCopy.length1 = aPatch.length1; - patchCopy.length2 = aPatch.length2; - patchesCopy.add(patchCopy); - } - return patchesCopy; - } - - /** - * Merge a set of patches onto the text. Return a patched text, as well - * as an array of true/false values indicating which patches were applied. - * @param patches Array of Patch objects - * @param text Old text. - * @return Two element Object array, containing the new text and an array of - * boolean values. - */ - public Object[] patch_apply(LinkedList patches, String text) { - if (patches.isEmpty()) { - return new Object[] { - text, new boolean[0] - }; - } - - // Deep copy the patches so that no changes are made to originals. - patches = patch_deepCopy(patches); - - String nullPadding = patch_addPadding(patches); - text = nullPadding + text + nullPadding; - patch_splitMax(patches); - - int x = 0; - // delta keeps track of the offset between the expected and actual location - // of the previous patch. If there are patches expected at positions 10 and - // 20, but the first patch was found at 12, delta is 2 and the second patch - // has an effective expected position of 22. - int delta = 0; - boolean[] results = new boolean[patches.size()]; - for (Patch aPatch : patches) { - int expected_loc = aPatch.start2 + delta; - String text1 = diff_text1(aPatch.diffs); - int start_loc; - int end_loc = -1; - if (text1.length() > this.Match_MaxBits) { - // patch_splitMax will only provide an oversized pattern in the case of - // a monster delete. - start_loc = match_main( - text, - text1.substring(0, this.Match_MaxBits), expected_loc); - if (start_loc != -1) { - end_loc = match_main( - text, - text1.substring(text1.length() - this.Match_MaxBits), - expected_loc + text1.length() - this.Match_MaxBits); - if (end_loc == -1 || start_loc >= end_loc) { - // Can't find valid trailing context. Drop this patch. - start_loc = -1; - } - } - } else { - start_loc = match_main(text, text1, expected_loc); - } - if (start_loc == -1) { - // No match found. :( - results[x] = false; - // Subtract the delta for this failed patch from subsequent patches. - delta -= aPatch.length2 - aPatch.length1; - } else { - // Found a match. :) - results[x] = true; - delta = start_loc - expected_loc; - String text2; - if (end_loc == -1) { - text2 = text - .substring( - start_loc, - Math.min(start_loc + text1.length(), text.length())); - } else { - text2 = text - .substring( - start_loc, - Math.min(end_loc + this.Match_MaxBits, text.length())); - } - if (text1.equals(text2)) { - // Perfect match, just shove the replacement text in. - text = text.substring(0, start_loc) + diff_text2(aPatch.diffs) - + text.substring(start_loc + text1.length()); - } else { - // Imperfect match. Run a diff to get a framework of equivalent - // indices. - LinkedList diffs = diff_main(text1, text2, false); - if (text1.length() > this.Match_MaxBits - && diff_levenshtein(diffs) / (float) text1.length() > this.Patch_DeleteThreshold) { - // The end points match, but the content is unacceptably bad. 
- results[x] = false; - } else { - diff_cleanupSemanticLossless(diffs); - int index1 = 0; - for (Diff aDiff : aPatch.diffs) { - if (aDiff.operation != Operation.EQUAL) { - int index2 = diff_xIndex(diffs, index1); - if (aDiff.operation == Operation.INSERT) { - // Insertion - text = text.substring(0, start_loc + index2) + aDiff.text - + text.substring(start_loc + index2); - } else if (aDiff.operation == Operation.DELETE) { - // Deletion - text = text.substring(0, start_loc + index2) - + text - .substring( - start_loc + diff_xIndex( - diffs, - index1 + aDiff.text.length())); - } - } - if (aDiff.operation != Operation.DELETE) { - index1 += aDiff.text.length(); - } - } - } - } - } - x++; - } - // Strip the padding off. - text = text - .substring( - nullPadding.length(), text.length() - - nullPadding.length()); - return new Object[] { - text, results - }; - } - - /** - * Add some padding on text start and end so that edges can match something. - * Intended to be called only from within patch_apply. - * @param patches Array of Patch objects. - * @return The padding string added to each side. - */ - public String patch_addPadding(LinkedList patches) { - short paddingLength = this.Patch_Margin; - String nullPadding = ""; - for (short x = 1; x <= paddingLength; x++) { - nullPadding += String.valueOf((char) x); - } - - // Bump all the patches forward. - for (Patch aPatch : patches) { - aPatch.start1 += paddingLength; - aPatch.start2 += paddingLength; - } - - // Add some padding on start of first diff. - Patch patch = patches.getFirst(); - LinkedList diffs = patch.diffs; - if (diffs.isEmpty() || diffs.getFirst().operation != Operation.EQUAL) { - // Add nullPadding equality. - diffs.addFirst(new Diff(Operation.EQUAL, nullPadding)); - patch.start1 -= paddingLength; // Should be 0. - patch.start2 -= paddingLength; // Should be 0. - patch.length1 += paddingLength; - patch.length2 += paddingLength; - } else if (paddingLength > diffs.getFirst().text.length()) { - // Grow first equality. - Diff firstDiff = diffs.getFirst(); - int extraLength = paddingLength - firstDiff.text.length(); - firstDiff.text = nullPadding.substring(firstDiff.text.length()) - + firstDiff.text; - patch.start1 -= extraLength; - patch.start2 -= extraLength; - patch.length1 += extraLength; - patch.length2 += extraLength; - } - - // Add some padding on end of last diff. - patch = patches.getLast(); - diffs = patch.diffs; - if (diffs.isEmpty() || diffs.getLast().operation != Operation.EQUAL) { - // Add nullPadding equality. - diffs.addLast(new Diff(Operation.EQUAL, nullPadding)); - patch.length1 += paddingLength; - patch.length2 += paddingLength; - } else if (paddingLength > diffs.getLast().text.length()) { - // Grow last equality. - Diff lastDiff = diffs.getLast(); - int extraLength = paddingLength - lastDiff.text.length(); - lastDiff.text += nullPadding.substring(0, extraLength); - patch.length1 += extraLength; - patch.length2 += extraLength; - } - - return nullPadding; - } - - /** - * Look through the patches and break up any which are longer than the - * maximum limit of the match algorithm. - * Intended to be called only from within patch_apply. - * @param patches LinkedList of Patch objects. - */ - public void patch_splitMax(LinkedList patches) { - short patch_size = Match_MaxBits; - String precontext, postcontext; - Patch patch; - int start1, start2; - boolean empty; - Operation diff_type; - String diff_text; - ListIterator pointer = patches.listIterator(); - Patch bigpatch = pointer.hasNext() ? 
pointer.next() : null; - while (bigpatch != null) { - if (bigpatch.length1 <= Match_MaxBits) { - bigpatch = pointer.hasNext() ? pointer.next() : null; - continue; - } - // Remove the big old patch. - pointer.remove(); - start1 = bigpatch.start1; - start2 = bigpatch.start2; - precontext = ""; - while (!bigpatch.diffs.isEmpty()) { - // Create one of several smaller patches. - patch = new Patch(); - empty = true; - patch.start1 = start1 - precontext.length(); - patch.start2 = start2 - precontext.length(); - if (precontext.length() != 0) { - patch.length1 = patch.length2 = precontext.length(); - patch.diffs.add(new Diff(Operation.EQUAL, precontext)); - } - while (!bigpatch.diffs.isEmpty() - && patch.length1 < patch_size - Patch_Margin) { - diff_type = bigpatch.diffs.getFirst().operation; - diff_text = bigpatch.diffs.getFirst().text; - if (diff_type == Operation.INSERT) { - // Insertions are harmless. - patch.length2 += diff_text.length(); - start2 += diff_text.length(); - patch.diffs.addLast(bigpatch.diffs.removeFirst()); - empty = false; - } else if (diff_type == Operation.DELETE && patch.diffs.size() == 1 - && patch.diffs.getFirst().operation == Operation.EQUAL - && diff_text.length() > 2 * patch_size) { - // This is a large deletion. Let it pass in one chunk. - patch.length1 += diff_text.length(); - start1 += diff_text.length(); - empty = false; - patch.diffs.add(new Diff(diff_type, diff_text)); - bigpatch.diffs.removeFirst(); - } else { - // Deletion or equality. Only take as much as we can stomach. - diff_text = diff_text - .substring( - 0, Math - .min( - diff_text.length(), - patch_size - patch.length1 - Patch_Margin)); - patch.length1 += diff_text.length(); - start1 += diff_text.length(); - if (diff_type == Operation.EQUAL) { - patch.length2 += diff_text.length(); - start2 += diff_text.length(); - } else { - empty = false; - } - patch.diffs.add(new Diff(diff_type, diff_text)); - if (diff_text.equals(bigpatch.diffs.getFirst().text)) { - bigpatch.diffs.removeFirst(); - } else { - bigpatch.diffs.getFirst().text = bigpatch.diffs.getFirst().text - .substring(diff_text.length()); - } - } - } - // Compute the head context for the next patch. - precontext = diff_text2(patch.diffs); - precontext = precontext - .substring( - Math - .max( - 0, precontext.length() - - Patch_Margin)); - // Append the end context for this patch. - if (diff_text1(bigpatch.diffs).length() > Patch_Margin) { - postcontext = diff_text1(bigpatch.diffs).substring(0, Patch_Margin); - } else { - postcontext = diff_text1(bigpatch.diffs); - } - if (postcontext.length() != 0) { - patch.length1 += postcontext.length(); - patch.length2 += postcontext.length(); - if (!patch.diffs.isEmpty() - && patch.diffs.getLast().operation == Operation.EQUAL) { - patch.diffs.getLast().text += postcontext; - } else { - patch.diffs.add(new Diff(Operation.EQUAL, postcontext)); - } - } - if (!empty) { - pointer.add(patch); - } - } - bigpatch = pointer.hasNext() ? pointer.next() : null; - } - } - - /** - * Take a list of patches and return a textual representation. - * @param patches List of Patch objects. - * @return Text representation of patches. - */ - public String patch_toText(List patches) { - StringBuilder text = new StringBuilder(); - for (Patch aPatch : patches) { - text.append(aPatch); - } - return text.toString(); - } - - /** - * Parse a textual representation of patches and return a List of Patch - * objects. - * @param textline Text representation of patches. - * @return List of Patch objects. 
- * @throws IllegalArgumentException If invalid input. - */ - public List patch_fromText(String textline) - throws IllegalArgumentException { - List patches = new LinkedList(); - if (textline.length() == 0) { - return patches; - } - List textList = Arrays.asList(textline.split("\n")); - LinkedList text = new LinkedList(textList); - Patch patch; - Pattern patchHeader = Pattern.compile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$"); - Matcher m; - char sign; - String line; - while (!text.isEmpty()) { - m = patchHeader.matcher(text.getFirst()); - if (!m.matches()) { - throw new IllegalArgumentException( - "Invalid patch string: " + text.getFirst()); - } - patch = new Patch(); - patches.add(patch); - patch.start1 = Integer.parseInt(m.group(1)); - if (m.group(2).length() == 0) { - patch.start1--; - patch.length1 = 1; - } else if (m.group(2).equals("0")) { - patch.length1 = 0; - } else { - patch.start1--; - patch.length1 = Integer.parseInt(m.group(2)); - } - - patch.start2 = Integer.parseInt(m.group(3)); - if (m.group(4).length() == 0) { - patch.start2--; - patch.length2 = 1; - } else if (m.group(4).equals("0")) { - patch.length2 = 0; - } else { - patch.start2--; - patch.length2 = Integer.parseInt(m.group(4)); - } - text.removeFirst(); - - while (!text.isEmpty()) { - try { - sign = text.getFirst().charAt(0); - } catch (IndexOutOfBoundsException e) { - // Blank line? Whatever. - text.removeFirst(); - continue; - } - line = text.getFirst().substring(1); - line = line.replace("+", "%2B"); // decode would change all "+" to " " - try { - line = URLDecoder.decode(line, "UTF-8"); - } catch (UnsupportedEncodingException e) { - // Not likely on modern system. - throw new Error("This system does not support UTF-8.", e); - } catch (IllegalArgumentException e) { - // Malformed URI sequence. - throw new IllegalArgumentException( - "Illegal escape in patch_fromText: " + line, e); - } - if (sign == '-') { - // Deletion. - patch.diffs.add(new Diff(Operation.DELETE, line)); - } else if (sign == '+') { - // Insertion. - patch.diffs.add(new Diff(Operation.INSERT, line)); - } else if (sign == ' ') { - // Minor equality. - patch.diffs.add(new Diff(Operation.EQUAL, line)); - } else if (sign == '@') { - // Start of next patch. - break; - } else { - // WTF? - throw new IllegalArgumentException( - "Invalid patch mode '" + sign + "' in: " + line); - } - text.removeFirst(); - } - } - return patches; - } - - /** - * Class representing one diff operation. - */ - public static class Diff { - /** - * One of: INSERT, DELETE or EQUAL. - */ - public Operation operation; - /** - * The text associated with this diff operation. - */ - public String text; - - /** - * Constructor. Initializes the diff with the provided values. - * @param operation One of INSERT, DELETE or EQUAL. - * @param text The text being applied. - */ - public Diff(Operation operation, String text) { - // Construct a diff with the specified operation and text. - this.operation = operation; - this.text = text; - } - - /** - * Display a human-readable version of this Diff. - * @return text version. - */ - public String toString() { - String prettyText = this.text.replace('\n', '\u00b6'); - return "Diff(" + this.operation + ",\"" + prettyText + "\")"; - } - - /** - * Create a numeric hash value for a Diff. - * This function is not used by DMP. - * @return Hash value. - */ - @Override - public int hashCode() { - final int prime = 31; - int result = (operation == null) ? 0 : operation.hashCode(); - result += prime * ((text == null) ? 
0 : text.hashCode()); - return result; - } - - /** - * Is this Diff equivalent to another Diff? - * @param obj Another Diff to compare against. - * @return true or false. - */ - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Diff other = (Diff) obj; - if (operation != other.operation) { - return false; - } - if (text == null) { - if (other.text != null) { - return false; - } - } else if (!text.equals(other.text)) { - return false; - } - return true; - } - } - - /** - * Class representing one patch operation. - */ - public static class Patch { - public LinkedList diffs; - public int start1; - public int start2; - public int length1; - public int length2; - - /** - * Constructor. Initializes with an empty list of diffs. - */ - public Patch() { - this.diffs = new LinkedList(); - } - - /** - * Emulate GNU diff's format. - * Header: @@ -382,8 +481,9 @@ - * Indices are printed as 1-based, not 0-based. - * @return The GNU diff string. - */ - public String toString() { - String coords1, coords2; - if (this.length1 == 0) { - coords1 = this.start1 + ",0"; - } else if (this.length1 == 1) { - coords1 = Integer.toString(this.start1 + 1); - } else { - coords1 = (this.start1 + 1) + "," + this.length1; - } - if (this.length2 == 0) { - coords2 = this.start2 + ",0"; - } else if (this.length2 == 1) { - coords2 = Integer.toString(this.start2 + 1); - } else { - coords2 = (this.start2 + 1) + "," + this.length2; - } - StringBuilder text = new StringBuilder(); - text - .append("@@ -") - .append(coords1) - .append(" +") - .append(coords2) - .append(" @@\n"); - // Escape the body of the patch with %xx notation. - for (Diff aDiff : this.diffs) { - switch (aDiff.operation) { - case INSERT: - text.append('+'); - break; - case DELETE: - text.append('-'); - break; - case EQUAL: - text.append(' '); - break; - } - try { - text - .append(URLEncoder.encode(aDiff.text, "UTF-8").replace('+', ' ')) - .append("\n"); - } catch (UnsupportedEncodingException e) { - // Not likely on modern system. - throw new Error("This system does not support UTF-8.", e); - } - } - return unescapeForEncodeUriCompatability(text.toString()); - } - } - - /** - * Unescape selected chars for compatability with JavaScript's encodeURI. - * In speed critical applications this could be dropped since the - * receiving application will certainly decode these fine. - * Note that this function is case-sensitive. Thus "%3f" would not be - * unescaped. But this is ok because it is only called with the output of - * URLEncoder.encode which returns uppercase hex. - * - * Example: "%3F" -> "?", "%24" -> "$", etc. - * - * @param str The string to escape. - * @return The escaped string. 
- */ - private static String unescapeForEncodeUriCompatability(String str) { - return str - .replace("%21", "!") - .replace("%7E", "~") - .replace("%27", "'") - .replace("%28", "(") - .replace("%29", ")") - .replace("%3B", ";") - .replace("%2F", "/") - .replace("%3F", "?") - .replace("%3A", ":") - .replace("%40", "@") - .replace("%26", "&") - .replace("%3D", "=") - .replace("%2B", "+") - .replace("%24", "$") - .replace("%2C", ",") - .replace("%23", "#"); - } -} diff --git a/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/DotAbbreviations.java b/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/DotAbbreviations.java deleted file mode 100644 index 33183b0f6..000000000 --- a/dhp-pace-core/src/main/java/eu/dnetlib/pace/util/DotAbbreviations.java +++ /dev/null @@ -1,11 +0,0 @@ - -package eu.dnetlib.pace.util; - -import com.google.common.base.Function; - -public class DotAbbreviations implements Function { - @Override - public String apply(String s) { - return s.length() == 1 ? s + "." : s; - } -}; diff --git a/dhp-workflows/dhp-actionmanager/pom.xml b/dhp-workflows/dhp-actionmanager/pom.xml index 5a5f156fc..ce13502b6 100644 --- a/dhp-workflows/dhp-actionmanager/pom.xml +++ b/dhp-workflows/dhp-actionmanager/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-SNAPSHOT dhp-actionmanager diff --git a/dhp-workflows/dhp-aggregation/pom.xml b/dhp-workflows/dhp-aggregation/pom.xml index d67e880b4..108d25ba6 100644 --- a/dhp-workflows/dhp-aggregation/pom.xml +++ b/dhp-workflows/dhp-aggregation/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-SNAPSHOT dhp-aggregation diff --git a/dhp-workflows/dhp-blacklist/pom.xml b/dhp-workflows/dhp-blacklist/pom.xml index 64be812ba..7ecc8b35d 100644 --- a/dhp-workflows/dhp-blacklist/pom.xml +++ b/dhp-workflows/dhp-blacklist/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-broker-events/pom.xml b/dhp-workflows/dhp-broker-events/pom.xml index b9f572527..322fc7e93 100644 --- a/dhp-workflows/dhp-broker-events/pom.xml +++ b/dhp-workflows/dhp-broker-events/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-dedup-openaire/pom.xml b/dhp-workflows/dhp-dedup-openaire/pom.xml index bc1538e17..8665ebd05 100644 --- a/dhp-workflows/dhp-dedup-openaire/pom.xml +++ b/dhp-workflows/dhp-dedup-openaire/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 dhp-dedup-openaire diff --git a/dhp-workflows/dhp-doiboost/pom.xml b/dhp-workflows/dhp-doiboost/pom.xml index cfa5a3fce..6e8911fba 100644 --- a/dhp-workflows/dhp-doiboost/pom.xml +++ b/dhp-workflows/dhp-doiboost/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-enrichment/pom.xml b/dhp-workflows/dhp-enrichment/pom.xml index d7f75de8c..9698dee03 100644 --- a/dhp-workflows/dhp-enrichment/pom.xml +++ b/dhp-workflows/dhp-enrichment/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 @@ -51,7 +51,7 @@ eu.dnetlib.dhp dhp-aggregation - 1.2.5-beta + 1.2.5-SNAPSHOT compile diff --git a/dhp-workflows/dhp-graph-mapper/pom.xml b/dhp-workflows/dhp-graph-mapper/pom.xml index 2c93bab83..d7ae60a91 100644 --- a/dhp-workflows/dhp-graph-mapper/pom.xml +++ b/dhp-workflows/dhp-graph-mapper/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-graph-provision/pom.xml 
b/dhp-workflows/dhp-graph-provision/pom.xml index 7b879e074..e62fcdf19 100644 --- a/dhp-workflows/dhp-graph-provision/pom.xml +++ b/dhp-workflows/dhp-graph-provision/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/dhp-impact-indicators/pom.xml b/dhp-workflows/dhp-impact-indicators/pom.xml index d931c2323..a9eb0a4a1 100644 --- a/dhp-workflows/dhp-impact-indicators/pom.xml +++ b/dhp-workflows/dhp-impact-indicators/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-SNAPSHOT dhp-impact-indicators diff --git a/dhp-workflows/dhp-stats-actionsets/pom.xml b/dhp-workflows/dhp-stats-actionsets/pom.xml index 5d9b60b87..3daa8f995 100644 --- a/dhp-workflows/dhp-stats-actionsets/pom.xml +++ b/dhp-workflows/dhp-stats-actionsets/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-SNAPSHOT dhp-stats-actionsets diff --git a/dhp-workflows/dhp-stats-hist-snaps/pom.xml b/dhp-workflows/dhp-stats-hist-snaps/pom.xml index 94371dc0b..b31d909f9 100644 --- a/dhp-workflows/dhp-stats-hist-snaps/pom.xml +++ b/dhp-workflows/dhp-stats-hist-snaps/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 dhp-stats-hist-snaps diff --git a/dhp-workflows/dhp-stats-monitor-irish/pom.xml b/dhp-workflows/dhp-stats-monitor-irish/pom.xml index 4887005bb..6ab19dced 100644 --- a/dhp-workflows/dhp-stats-monitor-irish/pom.xml +++ b/dhp-workflows/dhp-stats-monitor-irish/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 dhp-stats-monitor-irish diff --git a/dhp-workflows/dhp-stats-monitor-update/pom.xml b/dhp-workflows/dhp-stats-monitor-update/pom.xml index c8a69c078..f2bc35f8d 100644 --- a/dhp-workflows/dhp-stats-monitor-update/pom.xml +++ b/dhp-workflows/dhp-stats-monitor-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 dhp-stats-monitor-update diff --git a/dhp-workflows/dhp-stats-promote/pom.xml b/dhp-workflows/dhp-stats-promote/pom.xml index 1c711c878..9e17a78dc 100644 --- a/dhp-workflows/dhp-stats-promote/pom.xml +++ b/dhp-workflows/dhp-stats-promote/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 dhp-stats-promote diff --git a/dhp-workflows/dhp-stats-update/pom.xml b/dhp-workflows/dhp-stats-update/pom.xml index 246aa63cf..cc15b8a15 100644 --- a/dhp-workflows/dhp-stats-update/pom.xml +++ b/dhp-workflows/dhp-stats-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 dhp-stats-update diff --git a/dhp-workflows/dhp-swh/pom.xml b/dhp-workflows/dhp-swh/pom.xml index 4ba5cf868..80fff4587 100644 --- a/dhp-workflows/dhp-swh/pom.xml +++ b/dhp-workflows/dhp-swh/pom.xml @@ -4,7 +4,7 @@ eu.dnetlib.dhp dhp-workflows - 1.2.5-beta + 1.2.5-SNAPSHOT dhp-swh diff --git a/dhp-workflows/dhp-usage-raw-data-update/pom.xml b/dhp-workflows/dhp-usage-raw-data-update/pom.xml index ed3616fde..a9dbb09ae 100644 --- a/dhp-workflows/dhp-usage-raw-data-update/pom.xml +++ b/dhp-workflows/dhp-usage-raw-data-update/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 dhp-usage-raw-data-update diff --git a/dhp-workflows/dhp-usage-stats-build/pom.xml b/dhp-workflows/dhp-usage-stats-build/pom.xml index 52cc3bf44..56aec73b7 100644 --- a/dhp-workflows/dhp-usage-stats-build/pom.xml +++ b/dhp-workflows/dhp-usage-stats-build/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 
dhp-usage-stats-build diff --git a/dhp-workflows/dhp-workflow-profiles/pom.xml b/dhp-workflows/dhp-workflow-profiles/pom.xml index ef4e0ada6..8c71a5ca1 100644 --- a/dhp-workflows/dhp-workflow-profiles/pom.xml +++ b/dhp-workflows/dhp-workflow-profiles/pom.xml @@ -3,7 +3,7 @@ dhp-workflows eu.dnetlib.dhp - 1.2.5-beta + 1.2.5-SNAPSHOT 4.0.0 diff --git a/dhp-workflows/pom.xml b/dhp-workflows/pom.xml index 9b87c7b44..1c331d126 100644 --- a/dhp-workflows/pom.xml +++ b/dhp-workflows/pom.xml @@ -6,7 +6,7 @@ eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-SNAPSHOT ../pom.xml diff --git a/pom.xml b/pom.xml index d015acd9e..892382b9d 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 eu.dnetlib.dhp dhp - 1.2.5-beta + 1.2.5-SNAPSHOT pom From 69c5efbd8b2015f993a04205e117cbb4b204f0e2 Mon Sep 17 00:00:00 2001 From: Giambattista Bloisi Date: Fri, 3 May 2024 13:57:56 +0200 Subject: [PATCH 18/29] Fix: when applying enrichments with no instance information the resulting merge entity was generated with no instance instead of keeping the original information --- .../java/eu/dnetlib/dhp/schema/oaf/utils/MergeUtils.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/MergeUtils.java b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/MergeUtils.java index 9eb1ec01d..28db94766 100644 --- a/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/MergeUtils.java +++ b/dhp-common/src/main/java/eu/dnetlib/dhp/schema/oaf/utils/MergeUtils.java @@ -874,9 +874,11 @@ public class MergeUtils { if (toEnrichInstances == null) { return enrichmentResult; } - if (enrichmentInstances == null) { - return enrichmentResult; + + if (enrichmentInstances == null || enrichmentInstances.isEmpty()) { + return toEnrichInstances; } + Map ri = toInstanceMap(enrichmentInstances); toEnrichInstances.forEach(i -> { From e1a0fb89334da1f6f8944c1138f3f9ba841e6493 Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Fri, 3 May 2024 14:14:18 +0200 Subject: [PATCH 19/29] fixed id prefix creation for the fosnodoi records --- .../createunresolvedentities/PrepareFOSSparkJob.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/actionmanager/createunresolvedentities/PrepareFOSSparkJob.java b/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/actionmanager/createunresolvedentities/PrepareFOSSparkJob.java index ffcaedda7..dd85f6a4e 100644 --- a/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/actionmanager/createunresolvedentities/PrepareFOSSparkJob.java +++ b/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/actionmanager/createunresolvedentities/PrepareFOSSparkJob.java @@ -80,9 +80,10 @@ public class PrepareFOSSparkJob implements Serializable { fosDataset .groupByKey((MapFunction) v -> v.getOaid().toLowerCase(), Encoders.STRING()) - .mapGroups((MapGroupsFunction) (k, it) -> { - return getResult(ModelSupport.getIdPrefix(Result.class) + "|" + k, it); - }, Encoders.bean(Result.class)) + .mapGroups( + (MapGroupsFunction) (k, + it) -> getResult(ModelSupport.entityIdPrefix.get(Result.class.getSimpleName()) + "|" + k, it), + Encoders.bean(Result.class)) .write() .mode(SaveMode.Overwrite) .option("compression", "gzip") From a5d13d5d2777f36124a86a563a18052d3b41c2a2 Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Fri, 3 May 2024 14:14:34 +0200 Subject: [PATCH 20/29] code formatting --- .../eu/dnetlib/pace/common/PaceCommonUtils.java | 15 ++++++++------- 
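[Editorial aside on the MergeUtils fix in PATCH 18/29 above: the point of the change is that an enrichment record carrying no instance information must not wipe out the instances already present on the entity being enriched. The sketch below illustrates only that guard, on plain Java lists; the helper name mergeInstances and the use of generic lists are assumptions made for this example, while the real MergeUtils operates on eu.dnetlib.dhp.schema.oaf.Instance objects and enriches matching instances through an instance map rather than concatenating lists.]

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class EnrichmentGuardSketch {

    // Hypothetical stand-in for the instance-merge step: an enrichment that
    // brings no instances must leave the original instances untouched instead
    // of producing an entity with no instances at all.
    static <T> List<T> mergeInstances(List<T> toEnrich, List<T> enrichment) {
        if (toEnrich == null) {
            return enrichment == null ? Collections.<T> emptyList() : enrichment;
        }
        if (enrichment == null || enrichment.isEmpty()) {
            // the fixed behaviour: keep the original instances as they are
            return toEnrich;
        }
        // naive union, for illustration only; the real code matches instances by key
        List<T> merged = new ArrayList<>(toEnrich);
        merged.addAll(enrichment);
        return merged;
    }

    public static void main(String[] args) {
        List<String> original = Arrays.asList("instance-1");
        System.out.println(mergeInstances(original, Collections.emptyList())); // [instance-1]
        System.out.println(mergeInstances(original, Arrays.asList("instance-2"))); // [instance-1, instance-2]
    }
}
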
.../main/java/eu/dnetlib/pace/model/Person.java | 11 ++++++----- .../java/eu/dnetlib/pace/util/Capitalise.java | 3 ++- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/dhp-common/src/main/java/eu/dnetlib/pace/common/PaceCommonUtils.java b/dhp-common/src/main/java/eu/dnetlib/pace/common/PaceCommonUtils.java index a279271b5..61fbc2470 100644 --- a/dhp-common/src/main/java/eu/dnetlib/pace/common/PaceCommonUtils.java +++ b/dhp-common/src/main/java/eu/dnetlib/pace/common/PaceCommonUtils.java @@ -1,19 +1,20 @@ package eu.dnetlib.pace.common; -import com.google.common.base.Splitter; -import com.google.common.collect.Iterables; -import com.google.common.collect.Sets; -import com.ibm.icu.text.Transliterator; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; - import java.nio.charset.StandardCharsets; import java.text.Normalizer; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; + +import com.google.common.base.Splitter; +import com.google.common.collect.Iterables; +import com.google.common.collect.Sets; +import com.ibm.icu.text.Transliterator; + /** * Set of common functions for the framework * diff --git a/dhp-common/src/main/java/eu/dnetlib/pace/model/Person.java b/dhp-common/src/main/java/eu/dnetlib/pace/model/Person.java index c95c9d823..6a1957183 100644 --- a/dhp-common/src/main/java/eu/dnetlib/pace/model/Person.java +++ b/dhp-common/src/main/java/eu/dnetlib/pace/model/Person.java @@ -1,20 +1,21 @@ package eu.dnetlib.pace.model; +import java.nio.charset.Charset; +import java.text.Normalizer; +import java.util.List; +import java.util.Set; + import com.google.common.base.Joiner; import com.google.common.base.Splitter; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.hash.Hashing; + import eu.dnetlib.pace.common.PaceCommonUtils; import eu.dnetlib.pace.util.Capitalise; import eu.dnetlib.pace.util.DotAbbreviations; -import java.nio.charset.Charset; -import java.text.Normalizer; -import java.util.List; -import java.util.Set; - public class Person { private static final String UTF8 = "UTF-8"; diff --git a/dhp-common/src/main/java/eu/dnetlib/pace/util/Capitalise.java b/dhp-common/src/main/java/eu/dnetlib/pace/util/Capitalise.java index 015386423..671320c71 100644 --- a/dhp-common/src/main/java/eu/dnetlib/pace/util/Capitalise.java +++ b/dhp-common/src/main/java/eu/dnetlib/pace/util/Capitalise.java @@ -1,9 +1,10 @@ package eu.dnetlib.pace.util; -import com.google.common.base.Function; import org.apache.commons.lang3.text.WordUtils; +import com.google.common.base.Function; + public class Capitalise implements Function { private final char[] DELIM = { From 04862271850f22c92145e878005b62217af8d1d2 Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Fri, 3 May 2024 14:31:12 +0200 Subject: [PATCH 21/29] [cleaning] deactivating the cleaning of FOS subjects found in the metadata provided by repositories --- .../dhp/oa/graph/clean/CleaningRuleMap.java | 38 ++++++++++++++----- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/dhp-workflows/dhp-graph-mapper/src/main/java/eu/dnetlib/dhp/oa/graph/clean/CleaningRuleMap.java b/dhp-workflows/dhp-graph-mapper/src/main/java/eu/dnetlib/dhp/oa/graph/clean/CleaningRuleMap.java index 807055adb..732471f99 100644 --- a/dhp-workflows/dhp-graph-mapper/src/main/java/eu/dnetlib/dhp/oa/graph/clean/CleaningRuleMap.java +++ 
b/dhp-workflows/dhp-graph-mapper/src/main/java/eu/dnetlib/dhp/oa/graph/clean/CleaningRuleMap.java @@ -4,6 +4,7 @@ package eu.dnetlib.dhp.oa.graph.clean; import java.io.Serializable; import java.util.HashMap; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.lang3.SerializationUtils; @@ -29,7 +30,10 @@ public class CleaningRuleMap extends HashMap, SerializableConsumer cleanQualifier(vocabularies, (AccessRight) o)); mapping.put(Country.class, o -> cleanCountry(vocabularies, (Country) o)); mapping.put(Relation.class, o -> cleanRelation(vocabularies, (Relation) o)); - mapping.put(Subject.class, o -> cleanSubject(vocabularies, (Subject) o)); + + // commenting out the subject cleaning until we decide if we want to it or not and the implementation will + // be completed. At the moment it is not capable of expanding the whole hierarchy. + // mapping.put(Subject.class, o -> cleanSubject(vocabularies, (Subject) o)); return mapping; } @@ -38,8 +42,15 @@ public class CleaningRuleMap extends HashMap, SerializableConsumer { if (ModelConstants.DNET_SUBJECT_KEYWORD.equalsIgnoreCase(subject.getQualifier().getClassid())) { @@ -49,14 +60,21 @@ public class CleaningRuleMap extends HashMap, SerializableConsumer Date: Fri, 3 May 2024 15:53:52 +0200 Subject: [PATCH 22/29] fixed id prefix creation for the fosnodoi records, again --- .../createunresolvedentities/PrepareFOSSparkJob.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/actionmanager/createunresolvedentities/PrepareFOSSparkJob.java b/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/actionmanager/createunresolvedentities/PrepareFOSSparkJob.java index dd85f6a4e..c248423d4 100644 --- a/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/actionmanager/createunresolvedentities/PrepareFOSSparkJob.java +++ b/dhp-workflows/dhp-aggregation/src/main/java/eu/dnetlib/dhp/actionmanager/createunresolvedentities/PrepareFOSSparkJob.java @@ -82,7 +82,8 @@ public class PrepareFOSSparkJob implements Serializable { .groupByKey((MapFunction) v -> v.getOaid().toLowerCase(), Encoders.STRING()) .mapGroups( (MapGroupsFunction) (k, - it) -> getResult(ModelSupport.entityIdPrefix.get(Result.class.getSimpleName()) + "|" + k, it), + it) -> getResult( + ModelSupport.entityIdPrefix.get(Result.class.getSimpleName().toLowerCase()) + "|" + k, it), Encoders.bean(Result.class)) .write() .mode(SaveMode.Overwrite) From 711048ceedc99383c291bc532373e09294fe0815 Mon Sep 17 00:00:00 2001 From: Giambattista Bloisi Date: Tue, 7 May 2024 15:44:33 +0200 Subject: [PATCH 23/29] PrepareRelationsJob rewritten to use Spark Dataframe API and Windowing functions --- .../dhp/oa/provision/PrepareRelationsJob.java | 190 ++++-------------- 1 file changed, 38 insertions(+), 152 deletions(-) diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PrepareRelationsJob.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PrepareRelationsJob.java index fdf397ad7..c2eb8c408 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PrepareRelationsJob.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PrepareRelationsJob.java @@ -1,43 +1,31 @@ package eu.dnetlib.dhp.oa.provision; -import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; - -import java.util.HashSet; -import java.util.Optional; 
-import java.util.PriorityQueue; -import java.util.Set; -import java.util.stream.Collectors; - -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.spark.SparkConf; -import org.apache.spark.api.java.JavaRDD; -import org.apache.spark.api.java.JavaSparkContext; -import org.apache.spark.api.java.function.FilterFunction; -import org.apache.spark.api.java.function.FlatMapFunction; -import org.apache.spark.api.java.function.Function; -import org.apache.spark.api.java.function.MapFunction; -import org.apache.spark.sql.Encoder; -import org.apache.spark.sql.Encoders; -import org.apache.spark.sql.SaveMode; -import org.apache.spark.sql.SparkSession; -import org.apache.spark.sql.expressions.Aggregator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Joiner; import com.google.common.base.Splitter; -import com.google.common.collect.Iterables; import com.google.common.collect.Sets; - import eu.dnetlib.dhp.application.ArgumentApplicationParser; import eu.dnetlib.dhp.common.HdfsSupport; import eu.dnetlib.dhp.oa.provision.model.ProvisionModelSupport; -import eu.dnetlib.dhp.oa.provision.model.SortableRelationKey; -import eu.dnetlib.dhp.oa.provision.utils.RelationPartitioner; import eu.dnetlib.dhp.schema.oaf.Relation; -import scala.Tuple2; +import org.apache.commons.io.IOUtils; +import org.apache.spark.SparkConf; +import org.apache.spark.sql.Encoders; +import org.apache.spark.sql.SaveMode; +import org.apache.spark.sql.SparkSession; +import org.apache.spark.sql.expressions.Window; +import org.apache.spark.sql.expressions.WindowSpec; +import org.apache.spark.sql.functions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; + +import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; +import static org.apache.spark.sql.functions.col; /** * PrepareRelationsJob prunes the relationships: only consider relationships that are not virtually deleted @@ -130,130 +118,28 @@ public class PrepareRelationsJob { private static void prepareRelationsRDD(SparkSession spark, String inputRelationsPath, String outputPath, Set relationFilter, int sourceMaxRelations, int targetMaxRelations, int relPartitions) { - JavaRDD rels = readPathRelationRDD(spark, inputRelationsPath) - .filter(rel -> !(rel.getSource().startsWith("unresolved") || rel.getTarget().startsWith("unresolved"))) - .filter(rel -> !rel.getDataInfo().getDeletedbyinference()) - .filter(rel -> !relationFilter.contains(StringUtils.lowerCase(rel.getRelClass()))); + WindowSpec source_w = Window + .partitionBy("source", "subRelType") + .orderBy(col("target").desc_nulls_last()); - JavaRDD pruned = pruneRels( - pruneRels( - rels, - sourceMaxRelations, relPartitions, (Function) Relation::getSource), - targetMaxRelations, relPartitions, (Function) Relation::getTarget); - spark - .createDataset(pruned.rdd(), Encoders.bean(Relation.class)) - .repartition(relPartitions) - .write() - .mode(SaveMode.Overwrite) - .parquet(outputPath); - } + WindowSpec target_w = Window + .partitionBy("target", "subRelType") + .orderBy(col("source").desc_nulls_last()); - private static JavaRDD pruneRels(JavaRDD rels, int maxRelations, - int relPartitions, Function idFn) { - return rels - .mapToPair(r -> new Tuple2<>(SortableRelationKey.create(r, idFn.call(r)), r)) - .repartitionAndSortWithinPartitions(new RelationPartitioner(relPartitions)) - 
.groupBy(Tuple2::_1) - .map(Tuple2::_2) - .map(t -> Iterables.limit(t, maxRelations)) - .flatMap(Iterable::iterator) - .map(Tuple2::_2); - } - - // experimental - private static void prepareRelationsDataset( - SparkSession spark, String inputRelationsPath, String outputPath, Set relationFilter, int maxRelations, - int relPartitions) { - spark - .read() - .textFile(inputRelationsPath) - .repartition(relPartitions) - .map( - (MapFunction) s -> OBJECT_MAPPER.readValue(s, Relation.class), - Encoders.kryo(Relation.class)) - .filter((FilterFunction) rel -> !rel.getDataInfo().getDeletedbyinference()) - .filter((FilterFunction) rel -> !relationFilter.contains(rel.getRelClass())) - .groupByKey( - (MapFunction) Relation::getSource, - Encoders.STRING()) - .agg(new RelationAggregator(maxRelations).toColumn()) - .flatMap( - (FlatMapFunction, Relation>) t -> Iterables - .limit(t._2().getRelations(), maxRelations) - .iterator(), - Encoders.bean(Relation.class)) - .repartition(relPartitions) - .write() - .mode(SaveMode.Overwrite) - .parquet(outputPath); - } - - public static class RelationAggregator - extends Aggregator { - - private final int maxRelations; - - public RelationAggregator(int maxRelations) { - this.maxRelations = maxRelations; - } - - @Override - public RelationList zero() { - return new RelationList(); - } - - @Override - public RelationList reduce(RelationList b, Relation a) { - b.getRelations().add(a); - return getSortableRelationList(b); - } - - @Override - public RelationList merge(RelationList b1, RelationList b2) { - b1.getRelations().addAll(b2.getRelations()); - return getSortableRelationList(b1); - } - - @Override - public RelationList finish(RelationList r) { - return getSortableRelationList(r); - } - - private RelationList getSortableRelationList(RelationList b1) { - RelationList sr = new RelationList(); - sr - .setRelations( - b1 - .getRelations() - .stream() - .limit(maxRelations) - .collect(Collectors.toCollection(() -> new PriorityQueue<>(new RelationComparator())))); - return sr; - } - - @Override - public Encoder bufferEncoder() { - return Encoders.kryo(RelationList.class); - } - - @Override - public Encoder outputEncoder() { - return Encoders.kryo(RelationList.class); - } - } - - /** - * Reads a JavaRDD of eu.dnetlib.dhp.oa.provision.model.SortableRelation objects from a newline delimited json text - * file, - * - * @param spark - * @param inputPath - * @return the JavaRDD containing all the relationships - */ - private static JavaRDD readPathRelationRDD( - SparkSession spark, final String inputPath) { - JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext()); - return sc.textFile(inputPath).map(s -> OBJECT_MAPPER.readValue(s, Relation.class)); + spark.read().schema(Encoders.bean(Relation.class).schema()).json(inputRelationsPath) + .where("source NOT LIKE 'unresolved%' AND target NOT LIKE 'unresolved%'") + .where("datainfo.deletedbyinference != true") + .where(relationFilter.isEmpty() ? 
"" : "lower(relClass) NOT IN ("+ Joiner.on(',').join(relationFilter) +")") + .withColumn("source_w_pos", functions.row_number().over(source_w)) + .where("source_w_pos < " + sourceMaxRelations ) + .drop("source_w_pos") + .withColumn("target_w_pos", functions.row_number().over(target_w)) + .where("target_w_pos < " + targetMaxRelations) + .drop( "target_w_pos") + .coalesce(relPartitions) + .write() + .mode(SaveMode.Overwrite) + .parquet(outputPath); } private static void removeOutputDir(SparkSession spark, String path) { From b4e33894322d1693460be2cfcf0afb23d3b9135f Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Tue, 7 May 2024 16:25:17 +0200 Subject: [PATCH 24/29] fixed property mapping creating the RelatedEntity transient objects. spark cores & memory adjustments. Code formatting --- .../CreateRelatedEntitiesJob_phase1.java | 9 ++- .../dhp/oa/provision/PrepareRelationsJob.java | 72 +++++++++++-------- .../dhp/oa/provision/oozie_app/workflow.xml | 10 +-- 3 files changed, 54 insertions(+), 37 deletions(-) diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/CreateRelatedEntitiesJob_phase1.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/CreateRelatedEntitiesJob_phase1.java index da80deee0..63f3c2ead 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/CreateRelatedEntitiesJob_phase1.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/CreateRelatedEntitiesJob_phase1.java @@ -153,10 +153,15 @@ public class CreateRelatedEntitiesJob_phase1 { result .getTitle() .stream() + .filter(t -> StringUtils.isNotBlank(t.getValue())) .findFirst() - .map(StructuredProperty::getValue) .ifPresent( - title -> re.getTitle().setValue(StringUtils.left(title, ModelHardLimits.MAX_TITLE_LENGTH))); + title -> { + re.setTitle(title); + re + .getTitle() + .setValue(StringUtils.left(title.getValue(), ModelHardLimits.MAX_TITLE_LENGTH)); + }); } if (Objects.nonNull(result.getDescription()) && !result.getDescription().isEmpty()) { result diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PrepareRelationsJob.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PrepareRelationsJob.java index c2eb8c408..f50c7774b 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PrepareRelationsJob.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PrepareRelationsJob.java @@ -1,14 +1,15 @@ package eu.dnetlib.dhp.oa.provision; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Joiner; -import com.google.common.base.Splitter; -import com.google.common.collect.Sets; -import eu.dnetlib.dhp.application.ArgumentApplicationParser; -import eu.dnetlib.dhp.common.HdfsSupport; -import eu.dnetlib.dhp.oa.provision.model.ProvisionModelSupport; -import eu.dnetlib.dhp.schema.oaf.Relation; +import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; +import static org.apache.spark.sql.functions.col; + +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + import org.apache.commons.io.IOUtils; import org.apache.spark.SparkConf; import org.apache.spark.sql.Encoders; @@ -20,12 +21,15 @@ import org.apache.spark.sql.functions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.HashSet; -import java.util.Optional; 
-import java.util.Set; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; +import com.google.common.collect.Sets; -import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; -import static org.apache.spark.sql.functions.col; +import eu.dnetlib.dhp.application.ArgumentApplicationParser; +import eu.dnetlib.dhp.common.HdfsSupport; +import eu.dnetlib.dhp.oa.provision.model.ProvisionModelSupport; +import eu.dnetlib.dhp.schema.oaf.Relation; /** * PrepareRelationsJob prunes the relationships: only consider relationships that are not virtually deleted @@ -119,27 +123,33 @@ public class PrepareRelationsJob { Set relationFilter, int sourceMaxRelations, int targetMaxRelations, int relPartitions) { WindowSpec source_w = Window - .partitionBy("source", "subRelType") - .orderBy(col("target").desc_nulls_last()); + .partitionBy("source", "subRelType") + .orderBy(col("target").desc_nulls_last()); WindowSpec target_w = Window - .partitionBy("target", "subRelType") - .orderBy(col("source").desc_nulls_last()); + .partitionBy("target", "subRelType") + .orderBy(col("source").desc_nulls_last()); - spark.read().schema(Encoders.bean(Relation.class).schema()).json(inputRelationsPath) - .where("source NOT LIKE 'unresolved%' AND target NOT LIKE 'unresolved%'") - .where("datainfo.deletedbyinference != true") - .where(relationFilter.isEmpty() ? "" : "lower(relClass) NOT IN ("+ Joiner.on(',').join(relationFilter) +")") - .withColumn("source_w_pos", functions.row_number().over(source_w)) - .where("source_w_pos < " + sourceMaxRelations ) - .drop("source_w_pos") - .withColumn("target_w_pos", functions.row_number().over(target_w)) - .where("target_w_pos < " + targetMaxRelations) - .drop( "target_w_pos") - .coalesce(relPartitions) - .write() - .mode(SaveMode.Overwrite) - .parquet(outputPath); + spark + .read() + .schema(Encoders.bean(Relation.class).schema()) + .json(inputRelationsPath) + .where("source NOT LIKE 'unresolved%' AND target NOT LIKE 'unresolved%'") + .where("datainfo.deletedbyinference != true") + .where( + relationFilter.isEmpty() ? 
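[Editorial aside on PATCH 23/29 above, where PrepareRelationsJob is rewritten with the Dataframe API: the pruning that previously relied on a custom partitioner and priority queues is expressed as row_number() over windows partitioned by (source, subRelType) and (target, subRelType), filtering on the resulting position. Below is a minimal, self-contained sketch of that windowing technique on a toy dataset; the column names src and tgt, the cap of two rows per source and the local[*] master are assumptions made for the example, not the values used by the workflow.]

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.row_number;

import java.util.Arrays;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class WindowCapSketch {

    public static void main(String[] args) {
        SparkSession spark = SparkSession
            .builder()
            .appName("window-cap-sketch")
            .master("local[*]")
            .getOrCreate();

        StructType schema = new StructType()
            .add("src", DataTypes.StringType)
            .add("tgt", DataTypes.StringType);

        Dataset<Row> rels = spark
            .createDataFrame(
                Arrays.asList(
                    RowFactory.create("s1", "t1"),
                    RowFactory.create("s1", "t2"),
                    RowFactory.create("s1", "t3"),
                    RowFactory.create("s2", "t1")),
                schema);

        // number the rows within each source partition, then keep at most
        // maxPerSource of them; this is the same pattern the patch applies
        // with its (source, subRelType) and (target, subRelType) windows
        int maxPerSource = 2;
        WindowSpec bySource = Window.partitionBy("src").orderBy(col("tgt").desc_nulls_last());

        rels
            .withColumn("pos", row_number().over(bySource))
            .where(col("pos").leq(maxPerSource))
            .drop("pos")
            .show();

        spark.stop();
    }
}
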
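[A further aside, on the CreateRelatedEntitiesJob_phase1 change in PATCH 24/29 above: before the fix the first title's value was written onto whatever title object the freshly built RelatedEntity already had, which is not guaranteed to exist, so the related entity could end up without its title; the fix filters out blank titles, copies the first usable title onto the related entity and only then truncates its value. The following small sketch shows that pattern with hypothetical Title and RelatedEntity stand-ins and an arbitrary 20-character limit instead of ModelHardLimits.MAX_TITLE_LENGTH.]

import java.util.Arrays;
import java.util.List;

public class TitleMappingSketch {

    // Hypothetical, simplified stand-ins for StructuredProperty and RelatedEntity
    static class Title {
        String value;

        Title(String value) {
            this.value = value;
        }
    }

    static class RelatedEntity {
        Title title; // starts out null on a freshly created transient object
    }

    static final int MAX_TITLE_LENGTH = 20;

    static void mapTitle(List<Title> titles, RelatedEntity re) {
        if (titles == null) {
            return;
        }
        titles
            .stream()
            .filter(t -> t != null && t.value != null && !t.value.trim().isEmpty())
            .findFirst()
            .ifPresent(t -> {
                // copy the whole title first, then truncate its value; writing the
                // truncated value into re.title directly would dereference null here
                re.title = new Title(t.value.length() > MAX_TITLE_LENGTH
                    ? t.value.substring(0, MAX_TITLE_LENGTH)
                    : t.value);
            });
    }

    public static void main(String[] args) {
        RelatedEntity re = new RelatedEntity();
        mapTitle(Arrays.asList(new Title("   "), new Title("A very long related entity title")), re);
        System.out.println(re.title.value); // prints the first 20 characters of the second title
    }
}
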
"" + : "lower(relClass) NOT IN (" + + relationFilter.stream().map(s -> "'" + s + "'").collect(Collectors.joining(",")) + ")") + .withColumn("source_w_pos", functions.row_number().over(source_w)) + .where("source_w_pos < " + sourceMaxRelations) + .drop("source_w_pos") + .withColumn("target_w_pos", functions.row_number().over(target_w)) + .where("target_w_pos < " + targetMaxRelations) + .drop("target_w_pos") + .coalesce(relPartitions) + .write() + .mode(SaveMode.Overwrite) + .parquet(outputPath); } private static void removeOutputDir(SparkSession spark, String path) { diff --git a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml index eb446ddd8..434b4c9af 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml @@ -144,21 +144,23 @@ eu.dnetlib.dhp.oa.provision.PrepareRelationsJob dhp-graph-provision-${projectVersion}.jar - --executor-cores=${sparkExecutorCoresForJoining} - --executor-memory=${sparkExecutorMemoryForJoining} + --executor-cores=4 + --executor-memory=6G --driver-memory=${sparkDriverMemoryForJoining} + --conf spark.executor.memoryOverhead=6G --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir} - --conf spark.sql.shuffle.partitions=3840 + --conf spark.sql.shuffle.partitions=15000 + --conf spark.network.timeout=${sparkNetworkTimeout} --inputRelationsPath${inputGraphRootPath}/relation --outputPath${workingDir}/relation --sourceMaxRelations${sourceMaxRelations} --targetMaxRelations${targetMaxRelations} --relationFilter${relationFilter} - --relPartitions5000 + --relPartitions15000 From 18aa323ee972c8b0565273ada553892f0568f83e Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Wed, 8 May 2024 11:36:46 +0200 Subject: [PATCH 25/29] cleanup unused classes, adjustments in the oozie wf definition --- .../dhp/oa/provision/RelationComparator.java | 44 ---------- .../dhp/oa/provision/RelationList.java | 25 ------ .../dhp/oa/provision/SortableRelation.java | 81 ------------------- .../model/ProvisionModelSupport.java | 10 +-- .../dhp/oa/provision/oozie_app/workflow.xml | 11 +-- 5 files changed, 7 insertions(+), 164 deletions(-) delete mode 100644 dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/RelationComparator.java delete mode 100644 dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/RelationList.java delete mode 100644 dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/SortableRelation.java diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/RelationComparator.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/RelationComparator.java deleted file mode 100644 index e13bc60eb..000000000 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/RelationComparator.java +++ /dev/null @@ -1,44 +0,0 @@ - -package eu.dnetlib.dhp.oa.provision; - -import java.util.Comparator; -import java.util.Map; -import java.util.Optional; - -import com.google.common.collect.ComparisonChain; -import 
com.google.common.collect.Maps; - -import eu.dnetlib.dhp.schema.common.ModelConstants; -import eu.dnetlib.dhp.schema.oaf.Relation; - -public class RelationComparator implements Comparator { - - private static final Map weights = Maps.newHashMap(); - - static { - weights.put(ModelConstants.OUTCOME, 0); - weights.put(ModelConstants.SUPPLEMENT, 1); - weights.put(ModelConstants.REVIEW, 2); - weights.put(ModelConstants.CITATION, 3); - weights.put(ModelConstants.AFFILIATION, 4); - weights.put(ModelConstants.RELATIONSHIP, 5); - weights.put(ModelConstants.PUBLICATION_DATASET, 6); - weights.put(ModelConstants.SIMILARITY, 7); - - weights.put(ModelConstants.PROVISION, 8); - weights.put(ModelConstants.PARTICIPATION, 9); - weights.put(ModelConstants.DEDUP, 10); - } - - private Integer getWeight(Relation o) { - return Optional.ofNullable(weights.get(o.getSubRelType())).orElse(Integer.MAX_VALUE); - } - - @Override - public int compare(Relation o1, Relation o2) { - return ComparisonChain - .start() - .compare(getWeight(o1), getWeight(o2)) - .result(); - } -} diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/RelationList.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/RelationList.java deleted file mode 100644 index 6e5fd7dba..000000000 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/RelationList.java +++ /dev/null @@ -1,25 +0,0 @@ - -package eu.dnetlib.dhp.oa.provision; - -import java.io.Serializable; -import java.util.PriorityQueue; -import java.util.Queue; - -import eu.dnetlib.dhp.schema.oaf.Relation; - -public class RelationList implements Serializable { - - private Queue relations; - - public RelationList() { - this.relations = new PriorityQueue<>(new RelationComparator()); - } - - public Queue getRelations() { - return relations; - } - - public void setRelations(Queue relations) { - this.relations = relations; - } -} diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/SortableRelation.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/SortableRelation.java deleted file mode 100644 index 8740b47fc..000000000 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/SortableRelation.java +++ /dev/null @@ -1,81 +0,0 @@ - -package eu.dnetlib.dhp.oa.provision; - -import java.io.Serializable; -import java.util.Map; -import java.util.Optional; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.google.common.collect.ComparisonChain; -import com.google.common.collect.Maps; - -import eu.dnetlib.dhp.schema.common.ModelConstants; -import eu.dnetlib.dhp.schema.oaf.Relation; - -public class SortableRelation extends Relation implements Comparable, Serializable { - - private static final Map weights = Maps.newHashMap(); - - static { - weights.put(ModelConstants.OUTCOME, 0); - weights.put(ModelConstants.SUPPLEMENT, 1); - weights.put(ModelConstants.REVIEW, 2); - weights.put(ModelConstants.CITATION, 3); - weights.put(ModelConstants.AFFILIATION, 4); - weights.put(ModelConstants.RELATIONSHIP, 5); - weights.put(ModelConstants.PUBLICATION_RESULTTYPE_CLASSID, 6); - weights.put(ModelConstants.SIMILARITY, 7); - - weights.put(ModelConstants.PROVISION, 8); - weights.put(ModelConstants.PARTICIPATION, 9); - weights.put(ModelConstants.DEDUP, 10); - } - - private static final long serialVersionUID = 34753984579L; - - private String groupingKey; - - public static SortableRelation create(Relation r, String groupingKey) { 
- SortableRelation sr = new SortableRelation(); - sr.setGroupingKey(groupingKey); - sr.setSource(r.getSource()); - sr.setTarget(r.getTarget()); - sr.setRelType(r.getRelType()); - sr.setSubRelType(r.getSubRelType()); - sr.setRelClass(r.getRelClass()); - sr.setDataInfo(r.getDataInfo()); - sr.setCollectedfrom(r.getCollectedfrom()); - sr.setLastupdatetimestamp(r.getLastupdatetimestamp()); - sr.setProperties(r.getProperties()); - sr.setValidated(r.getValidated()); - sr.setValidationDate(r.getValidationDate()); - - return sr; - } - - @JsonIgnore - public Relation asRelation() { - return this; - } - - @Override - public int compareTo(SortableRelation o) { - return ComparisonChain - .start() - .compare(getGroupingKey(), o.getGroupingKey()) - .compare(getWeight(this), getWeight(o)) - .result(); - } - - private Integer getWeight(SortableRelation o) { - return Optional.ofNullable(weights.get(o.getSubRelType())).orElse(Integer.MAX_VALUE); - } - - public String getGroupingKey() { - return groupingKey; - } - - public void setGroupingKey(String groupingKey) { - this.groupingKey = groupingKey; - } -} diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/model/ProvisionModelSupport.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/model/ProvisionModelSupport.java index 0e6e95de5..10a99704c 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/model/ProvisionModelSupport.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/model/ProvisionModelSupport.java @@ -1,8 +1,6 @@ package eu.dnetlib.dhp.oa.provision.model; -import static org.apache.commons.lang3.StringUtils.substringBefore; - import java.io.StringReader; import java.util.*; import java.util.stream.Collectors; @@ -16,12 +14,9 @@ import org.jetbrains.annotations.Nullable; import com.google.common.base.Splitter; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.google.common.collect.Sets; import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup; import eu.dnetlib.dhp.common.vocabulary.VocabularyTerm; -import eu.dnetlib.dhp.oa.provision.RelationList; -import eu.dnetlib.dhp.oa.provision.SortableRelation; import eu.dnetlib.dhp.oa.provision.utils.ContextDef; import eu.dnetlib.dhp.oa.provision.utils.ContextMapper; import eu.dnetlib.dhp.schema.common.ModelSupport; @@ -55,10 +50,7 @@ public class ProvisionModelSupport { .newArrayList( RelatedEntityWrapper.class, JoinedEntity.class, - RelatedEntity.class, - SortableRelationKey.class, - SortableRelation.class, - RelationList.class)); + RelatedEntity.class)); return modelClasses.toArray(new Class[] {}); } diff --git a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml index 434b4c9af..1fc28e7ca 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml @@ -125,7 +125,7 @@ ${wf:conf('resumeFrom') eq 'prepare_relations'} ${wf:conf('resumeFrom') eq 'fork_join_related_entities'} ${wf:conf('resumeFrom') eq 'fork_join_all_entities'} - ${wf:conf('resumeFrom') eq 'convert_to_xml'} + ${wf:conf('resumeFrom') eq 'create_payloads'} ${wf:conf('resumeFrom') eq 'drop_solr_collection'} ${wf:conf('resumeFrom') eq 'to_solr_index'} @@ 
-587,19 +587,20 @@ - + - + yarn cluster - convert_to_xml + create_payloads eu.dnetlib.dhp.oa.provision.XmlConverterJob dhp-graph-provision-${projectVersion}.jar --executor-cores=${sparkExecutorCores} --executor-memory=${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} + --conf spark.executor.memoryOverhead=${sparkExecutorMemory} --conf spark.extraListeners=${spark2ExtraListeners} --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners} --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress} @@ -607,7 +608,7 @@ --conf spark.sql.shuffle.partitions=3840 --conf spark.network.timeout=${sparkNetworkTimeout} - --inputPath${workingDir}/join_entities + --inputPath/user/claudio.atzori/data/beta_provision/join_entities --outputPath${workingDir}/xml_json --contextApiBaseUrl${contextApiBaseUrl} --isLookupUrl${isLookupUrl} From 90a4fb3547af243dd2960127d23ac28dd32bcfb9 Mon Sep 17 00:00:00 2001 From: Antonis Lempesis Date: Wed, 8 May 2024 13:17:58 +0300 Subject: [PATCH 26/29] fixed typos --- .../oozie_app/scripts/step16-createIndicatorsTables.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql index 78bea9126..f5b950fe8 100755 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -249,7 +249,7 @@ create table if not exists ${stats_db_name}.indi_pub_gold_oa stored as parquet a left semi join dd on dd.id=pd.datasource union all select ra.id, 1 as is_gold - from ${stats_db_name}.result_accessroute ra on ra.id = pd.id where ra.accessroute = 'gold') tmp on tmp.id=pd.id; /*EOS*/ + from ${stats_db_name}.result_accessroute ra where ra.accessroute = 'gold') tmp on tmp.id=pd.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc purge; /*EOS*/ create table if not exists ${stats_db_name}.indi_pub_hybrid_oa_with_cc stored as parquet as @@ -294,7 +294,7 @@ left outer join ( join ${stats_db_name}.indi_pub_gold_oa indi_gold on indi_gold.id=p.id left outer join ${stats_db_name}.result_accessroute ra on ra.id=p.id where indi_gold.is_gold=0 and - ((d.type like '%Journal%' and ri.accessright not in ('Closed Access', 'Restricted', 'Not Available') and ri.license is not null) or ra.accessroute='hybrid')) tmp on pd.i=tmp.id; /*EOS*/ + ((d.type like '%Journal%' and ri.accessright not in ('Closed Access', 'Restricted', 'Not Available') and ri.license is not null) or ra.accessroute='hybrid')) tmp on p.id=tmp.id; /*EOS*/ drop table if exists ${stats_db_name}.indi_org_fairness purge; /*EOS*/ create table if not exists ${stats_db_name}.indi_org_fairness stored as parquet as @@ -1006,14 +1006,14 @@ left outer join ( drop table if exists ${stats_db_name}.result_country purge; /*EOS*/ create table ${stats_db_name}.result_country stored as parquet as -select distinct * +select distinct id, country from ( select ro.id, o.country from ${stats_db_name}.result_organization ro left outer join ${stats_db_name}.organization o on o.id=ro.organization union all select rp.id, f.country - from ${stats_db_name}.result_projects + from ${stats_db_name}.result_projects rp left 
outer join ${stats_db_name}.project p on p.id=rp.project left outer join ${stats_db_name}.funder f on f.name=p.funder ) rc From 0cada3cc8f502b4528bb6fdef06e7a5c032dbc68 Mon Sep 17 00:00:00 2001 From: antleb Date: Wed, 8 May 2024 13:42:53 +0300 Subject: [PATCH 27/29] every step is run in the analytics queue. Hardcoded for now, will make a parameter later --- .../dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step14.sql | 1 + .../dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql | 4 +++- .../dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql | 4 +++- .../oa/graph/stats/oozie_app/scripts/step16_1-definitions.sql | 4 +++- .../dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_5.sql | 4 +++- .../eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step2.sql | 4 +++- .../eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml | 3 ++- 7 files changed, 18 insertions(+), 6 deletions(-) diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step14.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step14.sql index f50c13521..7bad34e86 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step14.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step14.sql @@ -1,3 +1,4 @@ +set mapred.job.queue.name=analytics; ------------------------------------------------------ ------------------------------------------------------ -- Additional relations diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql index ce6b6cc2f..65a5d789f 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15.sql @@ -1,3 +1,5 @@ +set mapred.job.queue.name=analytics; + ------------------------------------------------------ ------------------------------------------------------ -- Additional relations @@ -104,4 +106,4 @@ rel.properties[1].value apc_currency from ${openaire_db_name}.relation rel join ${openaire_db_name}.organization o on o.id=rel.source join ${openaire_db_name}.result r on r.id=rel.target -where rel.subreltype = 'affiliation' and rel.datainfo.deletedbyinference = false and size(rel.properties)>0; \ No newline at end of file +where rel.subreltype = 'affiliation' and rel.datainfo.deletedbyinference = false and size(rel.properties)>0; diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql index 6ed686a05..e3d910454 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step15_5.sql @@ -1,3 +1,5 @@ +set mapred.job.queue.name=analytics; + ------------------------------------------- --- Extra tables, mostly used by indicators @@ -63,4 +65,4 @@ from ( join ${stats_db_name}.result res on res.id=r.id where r.amount is not null; -create or replace view ${stats_db_name}.issn_gold_oa_dataset as select * from 
${external_stats_db_name}.issn_gold_oa_dataset; \ No newline at end of file +create or replace view ${stats_db_name}.issn_gold_oa_dataset as select * from ${external_stats_db_name}.issn_gold_oa_dataset; diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_1-definitions.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_1-definitions.sql index b55af13d4..c837ea579 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_1-definitions.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_1-definitions.sql @@ -1,3 +1,5 @@ +set mapred.job.queue.name=analytics; + ---------------------------------------------------- -- Shortcuts for various definitions in stats db --- ---------------------------------------------------- @@ -25,4 +27,4 @@ drop table if exists ${stats_db_name}.result_gold purge; create table IF NOT EXISTS ${stats_db_name}.result_gold STORED AS PARQUET as select r.id, case when gold.is_gold=1 then true else false end as gold from ${stats_db_name}.result r - left outer join ${stats_db_name}.indi_pub_gold_oa gold on gold.id=r.id; \ No newline at end of file + left outer join ${stats_db_name}.indi_pub_gold_oa gold on gold.id=r.id; diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_5.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_5.sql index 7faa91697..fe3bb6799 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_5.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16_5.sql @@ -1,3 +1,5 @@ +set mapred.job.queue.name=analytics; + -- replace the creation of the result view to include the boolean fields from the previous tables (green, gold, -- peer reviewed) drop table if exists ${stats_db_name}.result_tmp; @@ -53,4 +55,4 @@ LEFT OUTER JOIN ${stats_db_name}.result_gold gold on gold.id=r.id; drop table if exists ${stats_db_name}.result; drop view if exists ${stats_db_name}.result; create table ${stats_db_name}.result stored as parquet as select * from ${stats_db_name}.result_tmp; -drop table ${stats_db_name}.result_tmp; \ No newline at end of file +drop table ${stats_db_name}.result_tmp; diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step2.sql b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step2.sql index 8e56f98fc..4f7247e14 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step2.sql +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step2.sql @@ -1,3 +1,5 @@ +set mapred.job.queue.name=analytics; + -------------------------------------------------------------- -------------------------------------------------------------- -- Publication table/view and Publication related tables/views @@ -111,4 +113,4 @@ SELECT substr(p.id, 4) AS id, xpath_string(citation.value, "//citation/id[@type= FROM ${openaire_db_name}.publication p lateral view explode(p.extrainfo) citations AS citation WHERE xpath_string(citation.value, "//citation/id[@type='openaire']/@value") != "" - and 
p.datainfo.deletedbyinference = false and p.datainfo.invisible=false; \ No newline at end of file + and p.datainfo.deletedbyinference = false and p.datainfo.invisible=false; diff --git a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml index 813fffcf9..d5f9ae886 100644 --- a/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-stats-update/src/main/resources/eu/dnetlib/dhp/oa/graph/stats/oozie_app/workflow.xml @@ -368,6 +368,7 @@ ${sparkClusterOpts} ${sparkResourceOpts} ${sparkApplicationOpts} + --queue analytics --hiveMetastoreUris${hive_metastore_uris} --sqleu/dnetlib/dhp/oa/graph/stats/oozie_app/scripts/step16-createIndicatorsTables.sql @@ -551,4 +552,4 @@ - \ No newline at end of file + From 39a2afe8b538c45b1e4d20ed31d3eee1c9dbdd7b Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Thu, 9 May 2024 13:54:42 +0200 Subject: [PATCH 28/29] [graph provision] fixed XML serialization of the usage counts measures, renamed workflow actions to better reflect their role --- ...erterJob.java => PayloadConverterJob.java} | 16 +-- .../model/ProvisionModelSupport.java | 11 +- .../oa/provision/utils/XmlRecordFactory.java | 110 ++++++++++-------- .../utils/XmlSerializationUtils.java | 33 ++++++ ...on => input_params_payload_converter.json} | 0 .../dhp/oa/provision/oozie_app/workflow.xml | 2 +- .../dhp/oa/provision/EOSCFuture_Test.java | 2 +- .../provision/IndexRecordTransformerTest.java | 6 +- .../oa/provision/XmlRecordFactoryTest.java | 14 +-- 9 files changed, 120 insertions(+), 74 deletions(-) rename dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/{XmlConverterJob.java => PayloadConverterJob.java} (92%) rename dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/{input_params_xml_converter.json => input_params_payload_converter.json} (100%) diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/XmlConverterJob.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PayloadConverterJob.java similarity index 92% rename from dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/XmlConverterJob.java rename to dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PayloadConverterJob.java index 4353e863f..f34caad75 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/XmlConverterJob.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PayloadConverterJob.java @@ -3,24 +3,16 @@ package eu.dnetlib.dhp.oa.provision; import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession; import static eu.dnetlib.dhp.utils.DHPUtils.toSeq; -import static org.apache.spark.sql.functions.*; import java.util.List; import java.util.Map; import java.util.Optional; import org.apache.commons.io.IOUtils; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.compress.GzipCodec; -import org.apache.hadoop.mapred.SequenceFileOutputFormat; import org.apache.spark.SparkConf; import org.apache.spark.SparkContext; -import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.function.MapFunction; -import org.apache.spark.api.java.function.PairFunction; import org.apache.spark.sql.*; -import org.apache.spark.sql.expressions.UserDefinedFunction; 
-import org.apache.spark.sql.types.DataTypes; import org.apache.spark.util.LongAccumulator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,9 +37,9 @@ import scala.Tuple2; /** * XmlConverterJob converts the JoinedEntities as XML records */ -public class XmlConverterJob { +public class PayloadConverterJob { - private static final Logger log = LoggerFactory.getLogger(XmlConverterJob.class); + private static final Logger log = LoggerFactory.getLogger(PayloadConverterJob.class); public static final String schemaLocation = "https://www.openaire.eu/schema/1.0/oaf-1.0.xsd"; @@ -56,8 +48,8 @@ public class XmlConverterJob { final ArgumentApplicationParser parser = new ArgumentApplicationParser( IOUtils .toString( - XmlConverterJob.class - .getResourceAsStream("/eu/dnetlib/dhp/oa/provision/input_params_xml_converter.json"))); + PayloadConverterJob.class + .getResourceAsStream("/eu/dnetlib/dhp/oa/provision/input_params_payload_converter.json"))); parser.parseArgument(args); final Boolean isSparkSessionManaged = Optional diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/model/ProvisionModelSupport.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/model/ProvisionModelSupport.java index 10a99704c..a085a72e0 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/model/ProvisionModelSupport.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/model/ProvisionModelSupport.java @@ -19,8 +19,10 @@ import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup; import eu.dnetlib.dhp.common.vocabulary.VocabularyTerm; import eu.dnetlib.dhp.oa.provision.utils.ContextDef; import eu.dnetlib.dhp.oa.provision.utils.ContextMapper; +import eu.dnetlib.dhp.schema.common.ModelConstants; import eu.dnetlib.dhp.schema.common.ModelSupport; import eu.dnetlib.dhp.schema.oaf.*; +import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory; import eu.dnetlib.dhp.schema.solr.*; import eu.dnetlib.dhp.schema.solr.AccessRight; import eu.dnetlib.dhp.schema.solr.Author; @@ -66,7 +68,11 @@ public class ProvisionModelSupport { .setHeader( SolrRecordHeader .newInstance( - e.getId(), e.getOriginalId(), type, deletedbyinference)); + StringUtils + .substringAfter( + e.getId(), + IdentifierFactory.ID_PREFIX_SEPARATOR), + e.getOriginalId(), type, deletedbyinference)); r.setCollectedfrom(asProvenance(e.getCollectedfrom())); r.setContext(asContext(e.getContext(), contextMapper)); r.setPid(asPid(e.getPid())); @@ -106,7 +112,8 @@ public class ProvisionModelSupport { .newInstance( relation.getRelType(), relation.getRelClass(), - relation.getTarget(), relatedRecordType)); + StringUtils.substringAfter(relation.getTarget(), IdentifierFactory.ID_PREFIX_SEPARATOR), + relatedRecordType)); rr.setAcronym(re.getAcronym()); rr.setCode(re.getCode()); diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/utils/XmlRecordFactory.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/utils/XmlRecordFactory.java index 63597c61e..65fa122c8 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/utils/XmlRecordFactory.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/utils/XmlRecordFactory.java @@ -1,25 +1,23 @@ package eu.dnetlib.dhp.oa.provision.utils; -import static eu.dnetlib.dhp.oa.provision.utils.GraphMappingUtils.authorPidTypes; -import static 
eu.dnetlib.dhp.oa.provision.utils.GraphMappingUtils.getRelDescriptor; -import static org.apache.commons.lang3.StringUtils.isNotBlank; -import static org.apache.commons.lang3.StringUtils.substringBefore; - -import java.io.IOException; -import java.io.Serializable; -import java.io.StringReader; -import java.io.StringWriter; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.*; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import javax.xml.transform.*; -import javax.xml.transform.dom.DOMSource; -import javax.xml.transform.stream.StreamResult; - +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.mycila.xmltool.XMLDoc; +import com.mycila.xmltool.XMLTag; +import eu.dnetlib.dhp.oa.provision.model.JoinedEntity; +import eu.dnetlib.dhp.oa.provision.model.RelatedEntity; +import eu.dnetlib.dhp.oa.provision.model.RelatedEntityWrapper; +import eu.dnetlib.dhp.oa.provision.model.XmlInstance; +import eu.dnetlib.dhp.schema.common.*; +import eu.dnetlib.dhp.schema.oaf.Result; +import eu.dnetlib.dhp.schema.oaf.*; +import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory; +import eu.dnetlib.dhp.schema.oaf.utils.ModelHardLimits; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; @@ -31,27 +29,26 @@ import org.dom4j.Node; import org.dom4j.io.OutputFormat; import org.dom4j.io.SAXReader; import org.dom4j.io.XMLWriter; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Joiner; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import com.mycila.xmltool.XMLDoc; -import com.mycila.xmltool.XMLTag; - -import eu.dnetlib.dhp.oa.provision.model.JoinedEntity; -import eu.dnetlib.dhp.oa.provision.model.RelatedEntity; -import eu.dnetlib.dhp.oa.provision.model.RelatedEntityWrapper; -import eu.dnetlib.dhp.oa.provision.model.XmlInstance; -import eu.dnetlib.dhp.schema.common.*; -import eu.dnetlib.dhp.schema.oaf.*; -import eu.dnetlib.dhp.schema.oaf.Result; -import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory; -import eu.dnetlib.dhp.schema.oaf.utils.ModelHardLimits; import scala.Tuple2; +import javax.xml.transform.*; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; +import java.io.IOException; +import java.io.Serializable; +import java.io.StringReader; +import java.io.StringWriter; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.*; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static eu.dnetlib.dhp.oa.provision.utils.GraphMappingUtils.authorPidTypes; +import static eu.dnetlib.dhp.oa.provision.utils.GraphMappingUtils.getRelDescriptor; +import static org.apache.commons.lang3.StringUtils.isNotBlank; +import static org.apache.commons.lang3.StringUtils.substringBefore; + public class XmlRecordFactory implements Serializable { /** @@ -93,10 +90,13 @@ public class XmlRecordFactory implements Serializable { } public String build(final JoinedEntity je) { + return build(je, false); + } + + public String build(final JoinedEntity je, final Boolean validate) { final Set contexts = Sets.newHashSet(); - // final OafEntity entity = 
toOafEntity(je.getEntity()); final OafEntity entity = je.getEntity(); final TemplateFactory templateFactory = new TemplateFactory(); try { @@ -122,8 +122,14 @@ public class XmlRecordFactory implements Serializable { .buildBody( mainType, metadata, relations, listChildren(entity, je, templateFactory), listExtraInfo(entity)); - return templateFactory.buildRecord(entity, schemaLocation, body); - // return printXML(templateFactory.buildRecord(entity, schemaLocation, body), indent); + String xmlRecord = templateFactory.buildRecord(entity, schemaLocation, body); + + if (Boolean.TRUE.equals(validate)) { + // rise an exception when an invalid record was built + new SAXReader().read(new StringReader(xmlRecord)); + } + return xmlRecord; + // return printXML(templateFactory.buildRecord(entity, schemaLocation, body), indent); } catch (final Throwable e) { throw new RuntimeException(String.format("error building record '%s'", entity.getId()), e); } @@ -1038,13 +1044,21 @@ public class XmlRecordFactory implements Serializable { } private List measuresAsXml(List measures) { - return measures - .stream() - .map(m -> { - List> l = Lists.newArrayList(new Tuple2<>("id", m.getId())); - m.getUnit().forEach(kv -> l.add(new Tuple2<>(kv.getKey(), kv.getValue()))); - return XmlSerializationUtils.asXmlElement("measure", l); - }) + return Stream + .concat( + measures + .stream() + .filter(m -> !"downloads".equals(m.getId()) && !"views".equals(m.getId())) + .map(m -> { + List> l = Lists.newArrayList(new Tuple2<>("id", m.getId())); + m.getUnit().forEach(kv -> l.add(new Tuple2<>(kv.getKey(), kv.getValue()))); + return XmlSerializationUtils.asXmlElement("measure", l); + }), + measures + .stream() + .filter(m -> "downloads".equals(m.getId()) || "views".equals(m.getId())) + .filter(m -> m.getUnit().stream().anyMatch(u -> Integer.parseInt(u.getValue()) > 0)) + .map(m -> XmlSerializationUtils.usageMeasureAsXmlElement("measure", m))) .collect(Collectors.toList()); } diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/utils/XmlSerializationUtils.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/utils/XmlSerializationUtils.java index deacac3ad..31763ace3 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/utils/XmlSerializationUtils.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/utils/XmlSerializationUtils.java @@ -5,7 +5,11 @@ import static eu.dnetlib.dhp.oa.provision.utils.GraphMappingUtils.removePrefix; import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.isNotBlank; +import java.util.HashSet; import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; @@ -166,6 +170,35 @@ public class XmlSerializationUtils { return sb.toString(); } + // infrastruct_::f66f1bd369679b5b077dcdf006089556||OpenAIRE + public static String usageMeasureAsXmlElement(String name, Measure measure) { + HashSet dsIds = Optional + .ofNullable(measure.getUnit()) + .map( + m -> m + .stream() + .map(KeyValue::getKey) + .collect(Collectors.toCollection(HashSet::new))) + .orElse(new HashSet<>()); + + StringBuilder sb = new StringBuilder(); + dsIds.forEach(dsId -> { + sb + .append("<") + .append(name); + for (KeyValue kv : measure.getUnit()) { + sb.append(" ").append(attr(measure.getId(), kv.getValue())); + } + sb + .append(">") + .append(dsId) + .append(""); + }); 
+ return sb.toString(); + } + public static String mapEoscIf(EoscIfGuidelines e) { return asXmlElement( "eoscifguidelines", Lists diff --git a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/input_params_xml_converter.json b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/input_params_payload_converter.json similarity index 100% rename from dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/input_params_xml_converter.json rename to dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/input_params_payload_converter.json diff --git a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml index 1fc28e7ca..59058d467 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml @@ -594,7 +594,7 @@ yarn cluster create_payloads - eu.dnetlib.dhp.oa.provision.XmlConverterJob + eu.dnetlib.dhp.oa.provision.PayloadConverterJob dhp-graph-provision-${projectVersion}.jar --executor-cores=${sparkExecutorCores} diff --git a/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/EOSCFuture_Test.java b/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/EOSCFuture_Test.java index 1a982ca39..4c43de25c 100644 --- a/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/EOSCFuture_Test.java +++ b/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/EOSCFuture_Test.java @@ -50,7 +50,7 @@ public class EOSCFuture_Test { final ContextMapper contextMapper = new ContextMapper(); final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final OtherResearchProduct p = OBJECT_MAPPER .readValue( diff --git a/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/IndexRecordTransformerTest.java b/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/IndexRecordTransformerTest.java index 8d5aa3f3a..718b43f03 100644 --- a/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/IndexRecordTransformerTest.java +++ b/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/IndexRecordTransformerTest.java @@ -57,7 +57,7 @@ public class IndexRecordTransformerTest { public void testPublicationRecordTransformation() throws IOException, TransformerException { final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final Publication p = load("publication.json", Publication.class); final Project pj = load("project.json", Project.class); @@ -82,7 +82,7 @@ public class IndexRecordTransformerTest { void testPeerReviewed() throws IOException, TransformerException { final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final Publication p = load("publication.json", Publication.class); @@ -98,7 +98,7 @@ public class IndexRecordTransformerTest { public void testRiunet() throws IOException, TransformerException { final XmlRecordFactory 
xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final Publication p = load("riunet.json", Publication.class); diff --git a/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/XmlRecordFactoryTest.java b/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/XmlRecordFactoryTest.java index f26c384d2..d617991a1 100644 --- a/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/XmlRecordFactoryTest.java +++ b/dhp-workflows/dhp-graph-provision/src/test/java/eu/dnetlib/dhp/oa/provision/XmlRecordFactoryTest.java @@ -37,7 +37,7 @@ public class XmlRecordFactoryTest { final ContextMapper contextMapper = new ContextMapper(); final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final Publication p = OBJECT_MAPPER .readValue(IOUtils.toString(getClass().getResourceAsStream("publication.json")), Publication.class); @@ -105,7 +105,7 @@ public class XmlRecordFactoryTest { final ContextMapper contextMapper = new ContextMapper(); final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final Publication p = OBJECT_MAPPER .readValue(IOUtils.toString(getClass().getResourceAsStream("publication.json")), Publication.class); @@ -136,7 +136,7 @@ public class XmlRecordFactoryTest { final ContextMapper contextMapper = new ContextMapper(); final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final Publication p = OBJECT_MAPPER .readValue(IOUtils.toString(getClass().getResourceAsStream("publication.json")), Publication.class); @@ -166,7 +166,7 @@ public class XmlRecordFactoryTest { final ContextMapper contextMapper = new ContextMapper(); final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final Datasource d = OBJECT_MAPPER .readValue(IOUtils.toString(getClass().getResourceAsStream("datasource.json")), Datasource.class); @@ -203,7 +203,7 @@ public class XmlRecordFactoryTest { final ContextMapper contextMapper = new ContextMapper(); final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final OtherResearchProduct p = OBJECT_MAPPER .readValue( @@ -226,7 +226,7 @@ public class XmlRecordFactoryTest { final ContextMapper contextMapper = new ContextMapper(); final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final OtherResearchProduct p = OBJECT_MAPPER .readValue( @@ -249,7 +249,7 @@ public class XmlRecordFactoryTest { final ContextMapper contextMapper = new ContextMapper(); final XmlRecordFactory xmlRecordFactory = new XmlRecordFactory(contextMapper, false, - XmlConverterJob.schemaLocation); + PayloadConverterJob.schemaLocation); final Publication p = OBJECT_MAPPER .readValue( From 55f39f785094f6500171d06945b3e5fcfc479a4c Mon Sep 17 00:00:00 2001 From: Claudio Atzori Date: Thu, 9 May 2024 14:06:04 +0200 Subject: [PATCH 29/29] [graph provision] adds the possibility to validate the XML records before storing them via the validateXML parameter --- 
.../dhp/oa/provision/PayloadConverterJob.java | 17 ++++++++++++----- .../input_params_payload_converter.json | 6 ++++++ .../dhp/oa/provision/oozie_app/workflow.xml | 6 ++++++ 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PayloadConverterJob.java b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PayloadConverterJob.java index f34caad75..d7e22e557 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PayloadConverterJob.java +++ b/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/PayloadConverterJob.java @@ -64,6 +64,12 @@ public class PayloadConverterJob { final String outputPath = parser.get("outputPath"); log.info("outputPath: {}", outputPath); + final Boolean validateXML = Optional + .ofNullable(parser.get("validateXML")) + .map(Boolean::valueOf) + .orElse(Boolean.FALSE); + log.info("validateXML: {}", validateXML); + final String contextApiBaseUrl = parser.get("contextApiBaseUrl"); log.info("contextApiBaseUrl: {}", contextApiBaseUrl); @@ -78,18 +84,19 @@ public class PayloadConverterJob { runWithSparkSession(conf, isSparkSessionManaged, spark -> { removeOutputDir(spark, outputPath); - convertToXml( + createPayloads( spark, inputPath, outputPath, ContextMapper.fromAPI(contextApiBaseUrl), - VocabularyGroup.loadVocsFromIS(isLookup)); + VocabularyGroup.loadVocsFromIS(isLookup), validateXML); }); } - private static void convertToXml( + private static void createPayloads( final SparkSession spark, final String inputPath, final String outputPath, final ContextMapper contextMapper, - final VocabularyGroup vocabularies) { + final VocabularyGroup vocabularies, + final Boolean validateXML) { final XmlRecordFactory recordFactory = new XmlRecordFactory( prepareAccumulators(spark.sparkContext()), @@ -110,7 +117,7 @@ public class PayloadConverterJob { .as(Encoders.kryo(JoinedEntity.class)) .map( (MapFunction>) je -> new Tuple2<>( - recordFactory.build(je), + recordFactory.build(je, validateXML), ProvisionModelSupport.transform(je, contextMapper, vocabularies)), Encoders.tuple(Encoders.STRING(), Encoders.bean(SolrRecord.class))) .map( diff --git a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/input_params_payload_converter.json b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/input_params_payload_converter.json index 4509eb9de..1b43ca5fd 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/input_params_payload_converter.json +++ b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/input_params_payload_converter.json @@ -22,5 +22,11 @@ "paramLongName": "isLookupUrl", "paramDescription": "URL of the context ISLookup Service", "paramRequired": true + }, + { + "paramName": "val", + "paramLongName": "validateXML", + "paramDescription": "should the process check the XML validity", + "paramRequired": false } ] diff --git a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml index 59058d467..1682f2ed5 100644 --- a/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml +++ b/dhp-workflows/dhp-graph-provision/src/main/resources/eu/dnetlib/dhp/oa/provision/oozie_app/workflow.xml @@ -13,6 +13,11 
@@ contextApiBaseUrl context API URL + + validateXML + should the payload converter validate the XMLs + false + relPartitions number or partitions for the relations Dataset @@ -610,6 +615,7 @@ --inputPath/user/claudio.atzori/data/beta_provision/join_entities --outputPath${workingDir}/xml_json + --validateXML${validateXML} --contextApiBaseUrl${contextApiBaseUrl} --isLookupUrl${isLookupUrl}
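Note on the relation pruning reworked in PrepareRelationsJob (PATCH 24/29): the change boils down to ranking relations inside each (source, subRelType) and (target, subRelType) group with a window function and keeping only the top-ranked rows. A minimal, self-contained sketch of that technique follows; the input/output paths and the maxRels cap are hypothetical placeholders, not values from the workflow.

import static org.apache.spark.sql.functions.col;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import org.apache.spark.sql.functions;

public class RelationPruningSketch {

    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().master("local[*]").appName("prune-relations").getOrCreate();

        int maxRels = 1000; // hypothetical per-source / per-target cap

        // rank relations within each (source, subRelType) and (target, subRelType) group
        WindowSpec bySource = Window.partitionBy("source", "subRelType").orderBy(col("target").desc_nulls_last());
        WindowSpec byTarget = Window.partitionBy("target", "subRelType").orderBy(col("source").desc_nulls_last());

        Dataset<Row> rels = spark.read().parquet("/tmp/relations"); // hypothetical input with source, target, subRelType columns
        rels
            .withColumn("source_w_pos", functions.row_number().over(bySource))
            .where("source_w_pos < " + maxRels)   // keep at most maxRels-1 relations per source group
            .drop("source_w_pos")
            .withColumn("target_w_pos", functions.row_number().over(byTarget))
            .where("target_w_pos < " + maxRels)   // and per target group
            .drop("target_w_pos")
            .write()
            .mode(SaveMode.Overwrite)
            .parquet("/tmp/relations_pruned");    // hypothetical output path

        spark.stop();
    }
}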
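Note on the validateXML flag introduced in PATCH 29/29: validation is a parse-to-validate check, i.e. the serialized record is re-read with dom4j's SAXReader and any malformed XML surfaces as an exception before the record is stored. A minimal sketch of that check, assuming only dom4j on the classpath (already a dependency of XmlRecordFactory); the sample record string is illustrative only.

import java.io.StringReader;

import org.dom4j.DocumentException;
import org.dom4j.io.SAXReader;

public class XmlValidationSketch {

    public static void main(String[] args) {
        String xmlRecord = "<record><title>sample</title></record>"; // illustrative payload

        try {
            // parsing the serialized record; malformed XML raises a DocumentException
            new SAXReader().read(new StringReader(xmlRecord));
            System.out.println("record is well-formed");
        } catch (DocumentException e) {
            // mirrors the behaviour of failing the conversion when an invalid record is built
            throw new RuntimeException("invalid XML record", e);
        }
    }
}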