From 1ee0e95090cc24d497e6cd7f53912ba5bf14255f Mon Sep 17 00:00:00 2001 From: Mauro Mugnaini Date: Fri, 6 Nov 2020 14:30:06 +0100 Subject: [PATCH] Added role for mysql DB (mariadb) and init scripts (still TBT). Changes to conductor setup generalizing vars to JDBC. Also added cluster-replacement version. --- ansible/roles/conductor/tasks/main.yaml | 12 +- .../templates/conductor-db-init-mysql.sql | 242 ++++++++++++++++++ .../conductor-swarm-config.properties.j2 | 10 +- ansible/roles/mysql/defaults/main.yml | 10 + ansible/roles/mysql/tasks/main.yaml | 5 + .../roles/mysql/templates/mysql-swarm.yaml.j2 | 31 +++ ansible/roles/postgres/defaults/main.yml | 10 +- .../postgres/templates/postgres-swarm.yaml.j2 | 6 +- .../site-with-mysql-cluster-replacement.yaml | 34 +++ ansible/site-with-mysql.yaml | 25 ++ ...ite-with-postgres-cluster-replacement.yaml | 2 +- ansible/site-with-postgres.yaml | 2 +- 12 files changed, 369 insertions(+), 20 deletions(-) create mode 100644 ansible/roles/conductor/templates/conductor-db-init-mysql.sql create mode 100644 ansible/roles/mysql/defaults/main.yml create mode 100644 ansible/roles/mysql/tasks/main.yaml create mode 100644 ansible/roles/mysql/templates/mysql-swarm.yaml.j2 create mode 100644 ansible/site-with-mysql-cluster-replacement.yaml create mode 100644 ansible/site-with-mysql.yaml diff --git a/ansible/roles/conductor/tasks/main.yaml b/ansible/roles/conductor/tasks/main.yaml index a8e0481..89efe90 100644 --- a/ansible/roles/conductor/tasks/main.yaml +++ b/ansible/roles/conductor/tasks/main.yaml @@ -13,22 +13,22 @@ dest: "{{ target_path }}/conductor-swarm.yaml" - name: Generate conductor config from dynomite seeds - when: not use_postgres + when: not use_jdbc vars: seeds: "{{ lookup('file', '{{ target_path}}/seeds.list').splitlines() }}" template: src: "templates/{{ conductor_config_template }}" dest: "{{ target_path }}/{{ conductor_config }}" -- name: Generate conductor config for postgres DB - when: use_postgres +- name: Generate 
conductor config for JDBC DB + when: use_jdbc template: src: "templates/{{ conductor_config_template }}" dest: "{{ target_path }}/{{ conductor_config }}" -- name: Copy conductor SQL schema init for postgres DB - when: use_postgres and init_db +- name: Copy conductor SQL schema init for JDBC DB + when: use_jdbc and init_db template: - src: "templates/conductor-db-init-postgres.sql" + src: "templates/conductor-db-init-{{ conductor_db }}.sql" dest: "{{ target_path }}/conductor-db-init.sql" diff --git a/ansible/roles/conductor/templates/conductor-db-init-mysql.sql b/ansible/roles/conductor/templates/conductor-db-init-mysql.sql new file mode 100644 index 0000000..9884820 --- /dev/null +++ b/ansible/roles/conductor/templates/conductor-db-init-mysql.sql @@ -0,0 +1,242 @@ +-- V1__initial_schema.sql -- +-- -------------------------------------------------------------------------------------------------------------- +-- SCHEMA FOR METADATA DAO +-- -------------------------------------------------------------------------------------------------------------- + +CREATE TABLE meta_event_handler ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + name varchar(255) NOT NULL, + event varchar(255) NOT NULL, + active boolean NOT NULL, + json_data mediumtext NOT NULL, + PRIMARY KEY (id), + KEY event_handler_name_index (name), + KEY event_handler_event_index (event) +); + +CREATE TABLE meta_task_def ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + name varchar(255) NOT NULL, + json_data mediumtext NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_task_def_name (name) +); + +CREATE TABLE meta_workflow_def ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + name varchar(255) NOT NULL, + 
version int(11) NOT NULL, + latest_version int(11) NOT NULL DEFAULT 0, + json_data mediumtext NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_name_version (name,version), + KEY workflow_def_name_index (name) +); + +-- -------------------------------------------------------------------------------------------------------------- +-- SCHEMA FOR EXECUTION DAO +-- -------------------------------------------------------------------------------------------------------------- + +CREATE TABLE event_execution ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + event_handler_name varchar(255) NOT NULL, + event_name varchar(255) NOT NULL, + message_id varchar(255) NOT NULL, + execution_id varchar(255) NOT NULL, + json_data mediumtext NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_event_execution (event_handler_name,event_name,message_id) +); + +CREATE TABLE poll_data ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + queue_name varchar(255) NOT NULL, + domain varchar(255) NOT NULL, + json_data mediumtext NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_poll_data (queue_name,domain), + KEY (queue_name) +); + +CREATE TABLE task_scheduled ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + workflow_id varchar(255) NOT NULL, + task_key varchar(255) NOT NULL, + task_id varchar(255) NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_workflow_id_task_key (workflow_id,task_key) +); + +CREATE TABLE task_in_progress ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + task_def_name varchar(255) NOT NULL, + task_id varchar(255) NOT NULL, + workflow_id varchar(255) NOT NULL, + 
in_progress_status boolean NOT NULL DEFAULT false, + PRIMARY KEY (id), + UNIQUE KEY unique_task_def_task_id1 (task_def_name,task_id) +); + +CREATE TABLE task ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + task_id varchar(255) NOT NULL, + json_data mediumtext NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_task_id (task_id) +); + +CREATE TABLE workflow ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + workflow_id varchar(255) NOT NULL, + correlation_id varchar(255), + json_data mediumtext NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_workflow_id (workflow_id) +); + +CREATE TABLE workflow_def_to_workflow ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + workflow_def varchar(255) NOT NULL, + date_str integer NOT NULL, + workflow_id varchar(255) NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_workflow_def_date_str (workflow_def,date_str,workflow_id) +); + +CREATE TABLE workflow_pending ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + workflow_type varchar(255) NOT NULL, + workflow_id varchar(255) NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_workflow_type_workflow_id (workflow_type,workflow_id), + KEY workflow_type_index (workflow_type) +); + +CREATE TABLE workflow_to_task ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + workflow_id varchar(255) NOT NULL, + task_id varchar(255) NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_workflow_to_task_id (workflow_id,task_id), + KEY workflow_id_index (workflow_id) +); + +-- 
-------------------------------------------------------------------------------------------------------------- +-- SCHEMA FOR QUEUE DAO +-- -------------------------------------------------------------------------------------------------------------- + +CREATE TABLE queue ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + queue_name varchar(255) NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY unique_queue_name (queue_name) +); + +CREATE TABLE queue_message ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + created_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + deliver_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + queue_name varchar(255) NOT NULL, + message_id varchar(255) NOT NULL, + popped boolean DEFAULT false, + offset_time_seconds long, + payload mediumtext, + PRIMARY KEY (id), + UNIQUE KEY unique_queue_name_message_id (queue_name,message_id), + KEY combo_queue_message (queue_name,popped,deliver_on,created_on) +); + +-- V2__queue_message_timestamps.sql -- +ALTER TABLE `queue_message` CHANGE `created_on` `created_on` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP; +ALTER TABLE `queue_message` CHANGE `deliver_on` `deliver_on` TIMESTAMP DEFAULT CURRENT_TIMESTAMP; + +-- V3__queue_add_priority.sql -- +SET @dbname = DATABASE(); +SET @tablename = "queue_message"; +SET @columnname = "priority"; +SET @preparedStatement = (SELECT IF( + ( + SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS + WHERE + (table_name = @tablename) + AND (table_schema = @dbname) + AND (column_name = @columnname) + ) > 0, + "SELECT 1", + CONCAT("ALTER TABLE ", @tablename, " ADD ", @columnname, " TINYINT DEFAULT 0 AFTER `message_id`") +)); +PREPARE addColumnIfNotExist FROM @preparedStatement; +EXECUTE addColumnIfNotExist; +DEALLOCATE PREPARE addColumnIfNotExist; + +-- V4__1009_Fix_MySQLExecutionDAO_Index.sql -- +# Drop the 'unique_event_execution' index if it exists +SET @exist := (SELECT COUNT(INDEX_NAME) + FROM information_schema.STATISTICS + WHERE 
`TABLE_NAME` = 'event_execution' + AND `INDEX_NAME` = 'unique_event_execution' + AND TABLE_SCHEMA = database()); +SET @sqlstmt := IF(@exist > 0, 'ALTER TABLE `event_execution` DROP INDEX `unique_event_execution`', + 'SELECT ''INFO: Index already exists.'''); +PREPARE stmt FROM @sqlstmt; +EXECUTE stmt; + +# Create the 'unique_event_execution' index with execution_id column instead of 'message_id' so events can be executed multiple times. +ALTER TABLE `event_execution` + ADD CONSTRAINT `unique_event_execution` UNIQUE (event_handler_name, event_name, execution_id); + +-- V5__correlation_id_index.sql -- +# Drop the 'workflow_corr_id_index' index if it exists +SET @exist := (SELECT COUNT(INDEX_NAME) + FROM information_schema.STATISTICS + WHERE `TABLE_NAME` = 'workflow' + AND `INDEX_NAME` = 'workflow_corr_id_index' + AND TABLE_SCHEMA = database()); +SET @sqlstmt := IF(@exist > 0, 'ALTER TABLE `workflow` DROP INDEX `workflow_corr_id_index`', + 'SELECT ''INFO: Index already exists.'''); +PREPARE stmt FROM @sqlstmt; +EXECUTE stmt; + +# Create the 'workflow_corr_id_index' index with correlation_id column because correlation_id queries are slow in large databases. +CREATE INDEX workflow_corr_id_index ON workflow (correlation_id); + +-- V6__new_qm_index_with_priority.sql -- +# Drop the 'combo_queue_message' index if it exists +SET @exist := (SELECT COUNT(INDEX_NAME) + FROM information_schema.STATISTICS + WHERE `TABLE_NAME` = 'queue_message' + AND `INDEX_NAME` = 'combo_queue_message' + AND TABLE_SCHEMA = database()); +SET @sqlstmt := IF(@exist > 0, 'ALTER TABLE `queue_message` DROP INDEX `combo_queue_message`', + 'SELECT ''INFO: Index already exists.'''); +PREPARE stmt FROM @sqlstmt; +EXECUTE stmt; + +# Re-create the 'combo_queue_message' index to add priority column because queries that order by priority are slow in large databases. 
+CREATE INDEX combo_queue_message ON queue_message (queue_name,priority,popped,deliver_on,created_on); + diff --git a/ansible/roles/conductor/templates/conductor-swarm-config.properties.j2 b/ansible/roles/conductor/templates/conductor-swarm-config.properties.j2 index 6f2cd8f..afbcb0b 100644 --- a/ansible/roles/conductor/templates/conductor-swarm-config.properties.j2 +++ b/ansible/roles/conductor/templates/conductor-swarm-config.properties.j2 @@ -8,11 +8,11 @@ conductor.grpc.server.enabled=false # memory : The data is stored in memory and lost when the server dies. Useful for testing or demo # redis : non-Dynomite based redis instance # dynomite : Dynomite cluster. Use this for HA configuration. -{% if use_postgres is defined and use_postgres %} -db=postgres -jdbc.url=jdbc:postgresql://{{ postgres_service_name }}:5432/{{ postgres_db }} -jdbc.username={{ postgres_user }} -jdbc.password={{ postgres_pass }} +{% if use_jdbc is defined and use_jdbc %} +db={{ conductor_db }} +jdbc.url={{ jdbc_url }} +jdbc.username={{ jdbc_user }} +jdbc.password={{ jdbc_pass }} conductor.postgres.connection.pool.size.max=10 conductor.postgres.connection.pool.idle.min=2 flyway.enabled=false diff --git a/ansible/roles/mysql/defaults/main.yml b/ansible/roles/mysql/defaults/main.yml new file mode 100644 index 0000000..fe78fbe --- /dev/null +++ b/ansible/roles/mysql/defaults/main.yml @@ -0,0 +1,10 @@ +--- +use_jdbc: True +mysql_image_name: 'mariadb' +mysql_service_name: 'mysqldb' +mysql_replicas: 1 +conductor_db: mysql +jdbc_user: conductor +jdbc_pass: password +jdbc_db: conductor +jdbc_url: jdbc:mysql://{{ mysql_service_name }}:3306/{{ jdbc_db }} diff --git a/ansible/roles/mysql/tasks/main.yaml b/ansible/roles/mysql/tasks/main.yaml new file mode 100644 index 0000000..780b145 --- /dev/null +++ b/ansible/roles/mysql/tasks/main.yaml @@ -0,0 +1,5 @@ +--- +- name: "Generate mysql swarm, image used: {{ mysql_image_name }}" + template: + src: templates/mysql-swarm.yaml.j2 + dest: "{{ target_path 
}}/mysql-swarm.yaml" diff --git a/ansible/roles/mysql/templates/mysql-swarm.yaml.j2 b/ansible/roles/mysql/templates/mysql-swarm.yaml.j2 new file mode 100644 index 0000000..8cbdddf --- /dev/null +++ b/ansible/roles/mysql/templates/mysql-swarm.yaml.j2 @@ -0,0 +1,31 @@ +version: '3.6' + +services: + + {{ mysql_service_name }}: + image: {{ mysql_image_name }} + ports: + - "3306:3306" + environment: + MYSQL_USER: "{{ jdbc_user }}" + MYSQL_PASSWORD: "{{ jdbc_pass }}" + MYSQL_DATABASE: "{{ jdbc_db }}" +{% if init_db %} + configs: + - source: db-init + target: "/docker-entrypoint-initdb.d/db-init.sql" +{% endif %} + networks: + - {{ conductor_network }} + deploy: + replicas: {{ mysql_replicas }} + placement: + constraints: [node.role == worker] + +networks: + {{ conductor_network }}: +{% if init_db %} +configs: + db-init: + file: {{ target_path }}/conductor-db-init.sql +{% endif %} diff --git a/ansible/roles/postgres/defaults/main.yml b/ansible/roles/postgres/defaults/main.yml index c5cd30b..b215103 100644 --- a/ansible/roles/postgres/defaults/main.yml +++ b/ansible/roles/postgres/defaults/main.yml @@ -1,7 +1,9 @@ --- -use_postgres: True +use_jdbc: True postgres_service_name: 'postgresdb' postgres_replicas: 1 -postgres_user: conductor -postgres_pass: password -postgres_db: conductor +conductor_db: postgres +jdbc_user: conductor +jdbc_pass: password +jdbc_db: conductor +jdbc_url: jdbc:postgresql://{{ postgres_service_name }}:5432/{{ jdbc_db }} diff --git a/ansible/roles/postgres/templates/postgres-swarm.yaml.j2 b/ansible/roles/postgres/templates/postgres-swarm.yaml.j2 index 65b6e4b..0fbfbdd 100644 --- a/ansible/roles/postgres/templates/postgres-swarm.yaml.j2 +++ b/ansible/roles/postgres/templates/postgres-swarm.yaml.j2 @@ -7,9 +7,9 @@ services: ports: - "5432:5432" environment: - POSTGRES_USER: "{{ postgres_user }}" - POSTGRES_PASSWORD: "{{ postgres_pass }}" - POSTGRES_DB: "{{ postgres_db }}" + POSTGRES_USER: "{{ jdbc_user }}" + POSTGRES_PASSWORD: "{{ jdbc_pass }}" + 
POSTGRES_DB: "{{ jdbc_db }}" {% if init_db %} configs: - source: db-init diff --git a/ansible/site-with-mysql-cluster-replacement.yaml b/ansible/site-with-mysql-cluster-replacement.yaml new file mode 100644 index 0000000..a5e37b3 --- /dev/null +++ b/ansible/site-with-mysql-cluster-replacement.yaml @@ -0,0 +1,34 @@ +--- +- hosts: localhost + roles: + - common + - cluster-replacement + - mysql + - elasticsearch + - conductor + tasks: + - name: Start mysql and es + docker_stack: + name: conductor + state: present + compose: + - "{{ target_path }}/mysql-swarm.yaml" + - "{{ target_path }}/elasticsearch-swarm.yaml" + - name: Waiting for mysql and es DBs + pause: + seconds: 10 + + - name: Start conductor + docker_stack: + name: conductor + state: present + compose: + - "{{ target_path }}/conductor-swarm.yaml" + + - name: Start haproxy + docker_stack: + name: haproxy + state: present + compose: + - "{{ target_path }}/haproxy-swarm.yaml" + diff --git a/ansible/site-with-mysql.yaml b/ansible/site-with-mysql.yaml new file mode 100644 index 0000000..f6bfd0d --- /dev/null +++ b/ansible/site-with-mysql.yaml @@ -0,0 +1,25 @@ +--- +- hosts: localhost + roles: + - common + - mysql + - elasticsearch + - conductor + tasks: + - name: Start {{ mysql_image_name }} and es + docker_stack: + name: conductor + state: present + compose: + - "{{ target_path }}/mysql-swarm.yaml" + - "{{ target_path }}/elasticsearch-swarm.yaml" + - name: Waiting for {{ mysql_image_name }} and es DBs + pause: + seconds: 10 + + - name: Start conductor + docker_stack: + name: conductor + state: present + compose: + - "{{ target_path }}/conductor-swarm.yaml" diff --git a/ansible/site-with-postgres-cluster-replacement.yaml b/ansible/site-with-postgres-cluster-replacement.yaml index d5b70fe..f79cce6 100644 --- a/ansible/site-with-postgres-cluster-replacement.yaml +++ b/ansible/site-with-postgres-cluster-replacement.yaml @@ -14,7 +14,7 @@ compose: - "{{ target_path }}/postgres-swarm.yaml" - "{{ target_path 
}}/elasticsearch-swarm.yaml" - - name: Waiting for postgres and ES DBs + - name: Waiting for postgres and es DBs pause: seconds: 10 diff --git a/ansible/site-with-postgres.yaml b/ansible/site-with-postgres.yaml index 3689d11..91775e4 100644 --- a/ansible/site-with-postgres.yaml +++ b/ansible/site-with-postgres.yaml @@ -13,7 +13,7 @@ compose: - "{{ target_path }}/postgres-swarm.yaml" - "{{ target_path }}/elasticsearch-swarm.yaml" - - name: Waiting for postgres and ES DBs + - name: Waiting for postgres and es DBs pause: seconds: 10