Browse Source

conductor 3.0.4 with oauth2 and pep

master
dcore94 8 months ago
parent
commit
288482d5b6
  1. 3
      group_vars/nw_cluster.yaml
  2. 3
      group_vars/pre_cluster.yaml
  3. 3
      group_vars/prod_cluster.yaml
  4. 6
      inventory/hosts.dev
  5. 5
      inventory/hosts.nw_cluster
  6. 14
      local-site/base-config.cfg
  7. 188
      local-site/conductor-db-init.sql
  8. 42
      local-site/conductor-swarm-config.properties
  9. 32
      local-site/conductor-workers-swarm.yaml
  10. 144
      local-site/conductor.yaml
  11. 20
      local-site/keycloak.js
  12. 22
      local-site/nginx.conf
  13. 78
      local-site/nginx.default.conf
  14. 4
      local-site/nginx.env
  15. 24
      local-site/oauth2auth.cfg
  16. 28
      local-site/pep.cfg
  17. 56
      roles/cluster-replacement/defaults/main.yml
  18. 16
      roles/cluster-replacement/tasks/main.yml
  19. 56
      roles/cluster-replacement/templates/haproxy-docker-swarm.yaml.j2
  20. 75
      roles/cluster-replacement/templates/haproxy.cfg.j2
  21. 2
      roles/cluster-replacement/vars/main.yml
  22. 1
      roles/common/defaults/main.yaml
  23. 15
      roles/conductor/defaults/conductor_ui_secrets.yaml
  24. 9
      roles/conductor/defaults/main.yaml
  25. 20
      roles/conductor/tasks/main.yaml
  26. 23
      roles/conductor/templates/auth.cfg.j2
  27. 99
      roles/conductor/templates/conductor-swarm-config.properties.j2
  28. 40
      roles/conductor/templates/conductor-swarm.yaml.j2
  29. 0
      roles/conductor/templates/local_auth.cfg.j2
  30. 24
      roles/conductor/templates/oauth2_auth.cfg.j2
  31. 5
      roles/databases/templates/elasticsearch-swarm.yaml.j2
  32. 12
      roles/databases/templates/mysql-swarm.yaml.j2
  33. 15
      roles/databases/templates/postgres-swarm.yaml.j2
  34. 6
      roles/elasticsearch/templates/elasticsearch-swarm.yaml.j2
  35. 10
      roles/mysql/defaults/main.yml
  36. 5
      roles/mysql/tasks/main.yaml
  37. 30
      roles/mysql/templates/mysql-swarm.yaml.j2
  38. 2
      roles/pep/defaults/main.yaml
  39. 18
      roles/pep/defaults/pep_credentials.yaml
  40. 34
      roles/pep/tasks/main.yaml
  41. 98
      roles/pep/templates/config.js.j2
  42. 18
      roles/pep/templates/nginx.conf.j2
  43. 96
      roles/pep/templates/nginx.default.conf.j2
  44. 40
      roles/pep/templates/nginx.default.conf.nopep.j2
  45. 35
      roles/pep/templates/pep-swarm.yaml.j2
  46. 299
      roles/pep/templates/pep.js.j2
  47. 9
      roles/postgres/defaults/main.yml
  48. 5
      roles/postgres/tasks/main.yaml
  49. 31
      roles/postgres/templates/postgres-swarm.yaml.j2
  50. 3
      roles/workers/defaults/main.yaml
  51. 3
      roles/workers/templates/conductor-workers-swarm.yaml.j2
  52. 4
      roles/workers/templates/config.cfg.j2
  53. 21
      site-dev.yaml
  54. 52
      site-local.yaml
  55. 61
      site-nw-cluster.yaml
  56. 33
      site-pre.yaml
  57. 33
      site-prod.yaml
  58. 56
      site.yaml

3
group_vars/nw_cluster.yaml

@ -1,3 +0,0 @@
---
infrastructure: dev
conductor_workers_server: http://conductor-dev.int.d4science.net/api

3
group_vars/pre_cluster.yaml

@ -1,3 +0,0 @@
---
infrastructure: pre
conductor_workers_server: https://conductor.pre.d4science.org/api

3
group_vars/prod_cluster.yaml

@ -1,3 +0,0 @@
---
infrastructure: prod
conductor_workers_server: https://conductor.d4science.org/api

6
inventory/hosts.dev

@ -1,5 +1,5 @@
[dev_infra:children]
nw_cluster
dev_cluster
[nw_cluster]
nubis1.int.d4science.net
[dev_cluster]
conductor.dev.d4science.org

5
inventory/hosts.nw_cluster

@ -0,0 +1,5 @@
[nw_cluster_infra:children]
nw_cluster
[nw_cluster]
nubis1.int.d4science.net

14
local-site/base-config.cfg

@ -1,14 +0,0 @@
[common]
loglevel = info
server = http://conductor-server:8080/api
threads = 1
pollrate = 1
[pymail]
server=smtp-relay.d4science.org
user=conductor_dev
password=d20d6ea975b01bc
protocol=starttls
port=587

188
local-site/conductor-db-init.sql

@ -1,188 +0,0 @@
-- V1__initial_schema.sql--
-- Consolidated Flyway migrations (V1..V4) for the Netflix Conductor PostgreSQL
-- persistence layer. Pattern used throughout: every table carries a surrogate
-- SERIAL id as PRIMARY KEY, audit timestamps, and a UNIQUE index enforcing the
-- natural key; entity payloads are stored as serialized JSON in a TEXT column.
-- --------------------------------------------------------------------------------------------------------------
-- SCHEMA FOR METADATA DAO
-- --------------------------------------------------------------------------------------------------------------
-- Registered event handler definitions, keyed by handler name.
CREATE TABLE meta_event_handler (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
name varchar(255) NOT NULL,
event varchar(255) NOT NULL,
active boolean NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
-- NOTE(review): name is indexed but not unique — multiple handler rows may share a name.
CREATE INDEX event_handler_name_index ON meta_event_handler (name);
CREATE INDEX event_handler_event_index ON meta_event_handler (event);
-- Task definitions (one row per task type), unique by name.
CREATE TABLE meta_task_def (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
name varchar(255) NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_task_def_name ON meta_task_def (name);
-- Workflow definitions, versioned: unique per (name, version);
-- latest_version tracks the highest registered version for the name.
CREATE TABLE meta_workflow_def (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
name varchar(255) NOT NULL,
version int NOT NULL,
latest_version int NOT NULL DEFAULT 0,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_name_version ON meta_workflow_def (name,version);
CREATE INDEX workflow_def_name_index ON meta_workflow_def (name);
-- --------------------------------------------------------------------------------------------------------------
-- SCHEMA FOR EXECUTION DAO
-- --------------------------------------------------------------------------------------------------------------
-- Record of each event-handler execution.
-- (The V1 uniqueness on message_id below is superseded by V2 at the end of this file.)
CREATE TABLE event_execution (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
event_handler_name varchar(255) NOT NULL,
event_name varchar(255) NOT NULL,
message_id varchar(255) NOT NULL,
execution_id varchar(255) NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_event_execution ON event_execution (event_handler_name,event_name,message_id);
-- Last-poll bookkeeping per (queue, domain) for worker polling.
CREATE TABLE poll_data (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
queue_name varchar(255) NOT NULL,
domain varchar(255) NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_poll_data ON poll_data (queue_name,domain);
-- NOTE(review): unnamed index — Postgres auto-names it (poll_data_queue_name_idx);
-- an explicit name would make migrations/errors greppable like the others here.
CREATE INDEX ON poll_data (queue_name);
-- Tasks scheduled within a workflow, unique per (workflow, task key).
CREATE TABLE task_scheduled (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_id varchar(255) NOT NULL,
task_key varchar(255) NOT NULL,
task_id varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_id_task_key ON task_scheduled (workflow_id,task_key);
-- In-flight task instances per task definition.
CREATE TABLE task_in_progress (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
task_def_name varchar(255) NOT NULL,
task_id varchar(255) NOT NULL,
workflow_id varchar(255) NOT NULL,
in_progress_status boolean NOT NULL DEFAULT false,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_task_def_task_id1 ON task_in_progress (task_def_name,task_id);
-- Task payloads, one row per task_id.
CREATE TABLE task (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
task_id varchar(255) NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_task_id ON task (task_id);
-- Workflow instance payloads, one row per workflow_id; correlation_id is
-- optional and indexed by V3 below.
CREATE TABLE workflow (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_id varchar(255) NOT NULL,
correlation_id varchar(255),
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_id ON workflow (workflow_id);
-- Maps workflow definitions to instances, bucketed by a date string.
CREATE TABLE workflow_def_to_workflow (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_def varchar(255) NOT NULL,
date_str varchar(60),
workflow_id varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_def_date_str ON workflow_def_to_workflow (workflow_def,date_str,workflow_id);
-- Pending (not yet completed) workflow instances by workflow type.
CREATE TABLE workflow_pending (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_type varchar(255) NOT NULL,
workflow_id varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_type_workflow_id ON workflow_pending (workflow_type,workflow_id);
CREATE INDEX workflow_type_index ON workflow_pending (workflow_type);
-- Workflow-to-task membership join table.
CREATE TABLE workflow_to_task (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_id varchar(255) NOT NULL,
task_id varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_to_task_id ON workflow_to_task (workflow_id,task_id);
CREATE INDEX workflow_id_index ON workflow_to_task (workflow_id);
-- --------------------------------------------------------------------------------------------------------------
-- SCHEMA FOR QUEUE DAO
-- --------------------------------------------------------------------------------------------------------------
-- Registered queues, unique by name.
CREATE TABLE queue (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
queue_name varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_queue_name ON queue (queue_name);
-- Queue messages: deliver_on is the earliest delivery time, popped marks
-- in-flight consumption, offset_time_seconds carries the requeue delay.
CREATE TABLE queue_message (
id SERIAL,
created_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deliver_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
queue_name varchar(255) NOT NULL,
message_id varchar(255) NOT NULL,
priority integer DEFAULT 0,
popped boolean DEFAULT false,
offset_time_seconds BIGINT,
payload TEXT,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_queue_name_message_id ON queue_message (queue_name,message_id);
-- Covering index for the dequeue scan (superseded by V4 below, which adds priority).
CREATE INDEX combo_queue_message ON queue_message (queue_name,popped,deliver_on,created_on);
-- V2__1009_Fix_PostgresExecutionDAO_Index.sql --
-- Rebuild event-execution uniqueness on execution_id instead of message_id.
DROP INDEX IF EXISTS unique_event_execution;
CREATE UNIQUE INDEX unique_event_execution ON event_execution (event_handler_name,event_name,execution_id);
-- V3__correlation_id_index.sql --
-- Support lookups of workflows by correlation_id.
DROP INDEX IF EXISTS workflow_corr_id_index;
CREATE INDEX workflow_corr_id_index ON workflow (correlation_id);
-- V4__new_qm_index_with_priority.sql --
-- Rebuild the dequeue index so higher-priority messages sort within the scan.
DROP INDEX IF EXISTS combo_queue_message;
CREATE INDEX combo_queue_message ON queue_message (queue_name,priority,popped,deliver_on,created_on);

42
local-site/conductor-swarm-config.properties

@ -1,42 +0,0 @@
# Servers.
conductor.jetty.server.enabled=true
conductor.grpc.server.enabled=false
# Database persistence model. Possible values are memory, redis, and dynomite.
# If omitted, the persistence used is memory
#
# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo
# redis : non-Dynomite based redis instance
# dynomite : Dynomite cluster. Use this for HA configuration.
db=postgres
jdbc.url=jdbc:postgresql://postgresdb:5432/conductor
jdbc.username=conductor
jdbc.password=password
conductor.postgres.connection.pool.size.max=10
conductor.postgres.connection.pool.idle.min=2
flyway.enabled=false
# Elastic search instance type. Possible values are memory and external.
# If not specified, the instance type will be embedded in memory
#
# memory: The instance is created in memory and lost when the server dies. Useful for development and testing.
# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when
# the server dies. Useful for more stable environments like staging or production.
workflow.elasticsearch.instanceType=external
# Transport address to elasticsearch
workflow.elasticsearch.url=elasticsearch:9300
# Name of the elasticsearch cluster
workflow.elasticsearch.index.name=conductor
# Additional modules (optional)
# conductor.additional.modules=class_extending_com.google.inject.AbstractModule
# Additional modules for metrics collection (optional)
# conductor.additional.modules=com.netflix.conductor.contribs.metrics.MetricsRegistryModule,com.netflix.conductor.contribs.metrics.LoggingMetricsModule
# com.netflix.conductor.contribs.metrics.LoggingMetricsModule.reportPeriodSeconds=15
# Load sample kitchen sink workflow
loadSample=false

32
local-site/conductor-workers-swarm.yaml

@ -1,32 +0,0 @@
version: '3.6'
services:
base:
environment:
CONDUCTOR_SERVER: http://conductor-dev.int.d4science.net/api
configs:
- source: base-config
target: /app/config.cfg
image: 'nubisware/nubisware-conductor-worker-py-base'
networks:
- conductor-network
deploy:
mode: replicated
replicas: 2
placement:
constraints: [node.role == worker]
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
logging:
driver: "journald"
networks:
conductor-network:
configs:
base-config:
file: base-config.cfg

144
local-site/conductor.yaml

@ -1,144 +0,0 @@
version: '3.6'
services:
postgresdb:
image: postgres
environment:
POSTGRES_USER: "conductor"
POSTGRES_PASSWORD: "password"
POSTGRES_DB: "conductor"
configs:
- source: db-init
target: "/docker-entrypoint-initdb.d/db-init.sql"
networks:
- conductor-network
deploy:
replicas: 1
conductor-server:
environment:
- CONFIG_PROP=conductor-swarm-config.properties
image: nubisware/conductor-server
networks:
- conductor-network
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 15s
max_attempts: 10
window: 120s
configs:
- source: swarm-config
target: /app/config/conductor-swarm-config.properties
# logging:
# driver: "journald"
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- transport.host=0.0.0.0
- discovery.type=single-node
- xpack.security.enabled=false
networks:
conductor-network:
aliases:
- es
# logging:
# driver: "journald"
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
conductor-ui:
environment:
- WF_SERVER=http://conductor-server:8080/api/
- AUTH_CONFIG_PATH=/app/config/auth.config
#image: nubisware/conductor-ui
#image: nubisware/conductor-ui_oauth2:2.31
image: conductor-ui_oauth2:2.31
networks:
- conductor-network
configs:
- source: auth-config
target: /app/config/auth.config
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 15s
max_attempts: 10
window: 120s
base:
environment:
CONDUCTOR_SERVER: http://conductor-dev.int.d4science.net/api
configs:
- source: base-config
target: /app/config.cfg
image: 'nubisware/nubisware-conductor-worker-py-base'
networks:
- conductor-network
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
# logging:
# driver: "journald"
pep:
image: nginx:1.19.8-alpine
networks:
- conductor-network
ports:
- "80:80"
env_file:
- nginx.env
volumes:
- "${PWD}/keycloak.js:/etc/nginx/keycloak.js"
# to be uncommented for debug purposes
#command: [nginx-debug, '-g', 'daemon off;']
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
# max_attempts: 3
window: 120s
configs:
- source: nginxconf
target: /etc/nginx/templates/default.conf.template
- source: nginxbaseconf
target: /etc/nginx/nginx.conf
networks:
conductor-network:
configs:
swarm-config:
file: ./conductor-swarm-config.properties
auth-config:
file: ./oauth2auth.cfg
db-init:
file: ./conductor-db-init.sql
base-config:
file: base-config.cfg
nginxconf:
file: ${PWD}/nginx.default.conf
nginxbaseconf:
file: ${PWD}/nginx.conf

20
local-site/keycloak.js

@ -1,20 +0,0 @@
// njs module loaded by nginx (js_import keycloak.js); the handler is invoked
// via `js_content` from an auth_request subrequest location.
export default { introspectAccessToken };
// Validate the caller's access token by issuing an nginx subrequest to the
// internal /jwt_verify_request location (which performs OAuth2 token
// introspection, RFC 7662) and translate the introspection result into a
// status code for nginx's auth_request machinery:
//   204 = introspection returned active:true  (allow)
//   403 = introspection returned active:false (token present but invalid)
//   401 = introspection endpoint did not answer 200 (auth required / error)
// NOTE(review): r.error() is used here as debug tracing — it logs request
// variables and the raw introspection response (token metadata) at error
// level on every request; consider removing or downgrading for production.
function introspectAccessToken(r) {
r.error("Inside introspectAccessToken " + njs.dump(r.variables))
r.subrequest("/jwt_verify_request",
// Callback receives the completed subrequest reply (status + body).
function(reply) {
if (reply.status == 200) {
var response = JSON.parse(reply.responseBody);
r.error("Response is " + reply.responseBody)
if (response.active == true) {
r.return(204); // Token is valid, return success code
} else {
r.return(403); // Token is invalid, return forbidden code
}
} else {
r.return(401); // Unexpected response, return 'auth required'
}
}
);
}

22
local-site/nginx.conf

@ -1,22 +0,0 @@
load_module modules/ngx_http_js_module.so;
worker_processes 1;
events {
worker_connections 1024;
}
http {
js_import keycloak.js;
proxy_cache_path /var/cache/nginx/keycloak keys_zone=token_responses:1m max_size=2m;
# js_import json_log.js;
# js_set $json_debug_log json_log.debugLog;
# log_format access_debug escape=none $json_debug_log; # Offload to njs
# access_log /var/log/nginx/access.log access_debug;
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}

78
local-site/nginx.default.conf

@ -1,78 +0,0 @@
# Reverse-proxy vhosts fronting the Conductor API server and the Conductor UI.
# ip_hash pins each client IP to one backend replica (session affinity).
upstream _conductor-server {
ip_hash;
server conductor-server:8080;
}
upstream _conductor-ui {
ip_hash;
server conductor-ui:5000;
}
# Extract the raw token from "Authorization: Bearer <token>" into $source_token
# (empty when the header is absent or not a Bearer scheme).
map $http_authorization $source_token {
default "";
"~*^Bearer\s+(?<token>[\S]+)$" $token;
}
# Vhost proxying the Conductor REST API.
server {
listen *:80;
listen [::]:80;
server_name conductor-server;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
location / {
# NOTE(review): token enforcement is disabled — the auth_request gate below
# is commented out, so the API is proxied without authentication here.
#auth_request /jwt_verify;
proxy_pass http://_conductor-server;
}
# auth_request target: delegates to the njs handler in keycloak.js.
location = /jwt_verify {
internal;
js_content keycloak.introspectAccessToken;
}
# Subrequest target used by the njs handler: POSTs the token to the IdP's
# OAuth2 token-introspection endpoint, with a short response cache keyed by token.
location /jwt_verify_request {
internal;
proxy_method POST;
proxy_http_version 1.1;
proxy_set_header Host "127.0.0.1";
# NOTE(review): client credentials are hard-coded here as a Basic auth blob
# (decodes to a client id:secret pair) — should come from a secret store/template.
proxy_set_header Authorization "Basic Z2F5YV9wZXA6NWJiN2RjYWItN2NlNy00YTQ3LTlmNTUtZmE4MWFlYmNjM2I4";
proxy_set_header Content-Type "application/x-www-form-urlencoded";
proxy_set_body "token=$source_token&token_type_hint=access_token";
# NOTE(review): introspection goes over plain http:// — access tokens and the
# client secret transit in cleartext; this should be https (realm "master" —
# verify, other configs in this commit use realm "d4science").
proxy_pass http://accounts.dev.d4science.org/auth/realms/master/protocol/openid-connect/token/introspect;
proxy_cache token_responses; # Enable caching
proxy_cache_key $source_token; # Cache for each access token
proxy_cache_lock on; # Duplicate tokens must wait
proxy_cache_valid 200 10s; # How long to use each response
proxy_ignore_headers Cache-Control Expires Set-Cookie;
}
}
# Default vhost proxying the Conductor UI (catches unmatched Host headers too).
server {
listen *:80 default_server;
listen [::]:80 default_server;
server_name conductor-ui;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
location / {
# NOTE(review): auth gate commented out here as well — UI served unauthenticated.
#auth_request /jwt_verify;
proxy_pass http://_conductor-ui;
}
}

4
local-site/nginx.env

@ -1,4 +0,0 @@
NGINX_PORT=80
CAMUNDA_PORT=8080
PGADMIN_PORT=80
KEYCLOAK_PORT=8080

24
local-site/oauth2auth.cfg

@ -1,24 +0,0 @@
{
"strategy": "oauth2",
"strategySettings": {
"authorizationURL": "https://accounts.dev.d4science.org/auth/realms/d4science/protocol/openid-connect/auth",
"tokenURL": "https://accounts.dev.d4science.org/auth/realms/d4science/protocol/openid-connect/token",
"clientID": "conductor-ui",
"clientSecret": "b10d40b3-1f3c-47ce-baf4-d58bf6386eb3",
"callbackURL": "http://localhost/login/callback",
"logoutURL": "https://accounts.dev.d4science.org/auth/realms/d4science/protocol/openid-connect/logout",
"logoutCallbackURL": "http://localhost/logout/callback",
"roles": [ "admin", "viewer" ]
},
"cookieSecret": "b10d40b3-1f3c-47ce-baf4-d58bf6386eb3",
"audit": true,
"acl": [
"POST /(.*) admin",
"PUT /(.*) admin",
"DELETE /(.*) admin",
"GET /api/(.*) *",
"GET /(.*) viewer,admin"
]
}

28
local-site/pep.cfg

@ -1,28 +0,0 @@
frontend http
bind *:80
mode http
option http-keep-alive
use_backend conductor-server_bck if { hdr_dom(host) -i conductor-server.local.net }
use_backend conductor-ui_bck if { hdr_dom(host) -i conductor-ui.local.net }
#
# Backends
#
backend conductor-server_bck
mode http
option httpchk
balance roundrobin
http-check send meth GET uri /api/health ver HTTP/1.1 hdr Host localhost
http-check expect rstatus (2|3)[0-9][0-9]
server-template conductor-server- 2 conductor-local_conductor-server:8080 check resolvers docker init-addr libc,none
backend conductor-ui_bck
mode http
option httpchk
balance roundrobin
http-check send meth GET uri / ver HTTP/1.1 hdr Host localhost
http-check expect rstatus (2|3)[0-9][0-9]
server-template conductor-ui- 2 conductor-local_conductor-ui:5000 check resolvers docker init-addr libc,none

56
roles/cluster-replacement/defaults/main.yml

@ -1,56 +0,0 @@
---
haproxy_latest_release: True
haproxy_version: 2.2
haproxy_repo_key: 'http://haproxy.debian.net/bernat.debian.org.gpg'
haproxy_debian_latest_repo: "deb http://haproxy.debian.net {{ ansible_lsb.codename }}-backports-{{ haproxy_version }} main"
haproxy_ubuntu_latest_repo: "ppa:vbernat/haproxy-{{ haproxy_version }}"
haproxy_pkg_state: present
haproxy_enabled: True
haproxy_loglevel: info
haproxy_k_bind_non_local_ip: True
haproxy_docker_container: False
haproxy_docker_version: '{{ haproxy_version }}.4'
haproxy_docker_image: 'haproxytech/haproxy-debian:{{ haproxy_version }}.4'
haproxy_docker_compose_dir: /srv/haproxy_swarm
haproxy_docker_restart_policy: 'on-failure'
haproxy_ha_with_keepalived: False
haproxy_docker_swarm_networks:
- '{{ docker_swarm_portainer_network }}'
haproxy_docker_swarm_additional_networks: []
haproxy_docker_swarm_haproxy_constraints:
- 'node.role == manager'
haproxy_docker_swarm_additional_services: [{ acl_name: 'conductor-server', acl_rule: 'hdr_dom(host) -i conductor-dev.int.d4science.net', stack_name: 'conductor-{{ infrastructure }}', service_name: 'conductor-server', service_replica_num: '2', service_port: '8080', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri /api/health ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }, { acl_name: 'conductor-ui', acl_rule: 'hdr_dom(host) -i conductorui-dev.int.d4science.net', stack_name: 'conductor-{{ infrastructure }}', service_name: 'conductor-ui', service_replica_num: '2', service_port: '5000', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }]
# - { acl_name: 'service', acl_rule: 'hdr_dom(host) -i service.example.com', stack_name: 'stack', service_name: 'service', service_replica_num: '1', service_port: '9999', service_overlay_network: 'service-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth HEAD uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]', allowed_networks: '192.168.1.0/24 192.168.2.0/24' }
haproxy_default_port: 80
haproxy_terminate_tls: False
haproxy_ssl_port: 443
haproxy_admin_port: 8880
haproxy_admin_socket: /run/haproxy/admin.sock
haproxy_install_additional_pkgs: False
haproxy_additional_pkgs:
- haproxyctl
- haproxy-log-analysis
haproxy_nagios_check: False
# It's a percentage
haproxy_nagios_check_w: 70
haproxy_nagios_check_c: 90
# Used by some other role as defaults, eg docker-swarm
haproxy_spread_checks: 5
haproxy_connect_timeout: 10s
haproxy_client_timeout: 120s
haproxy_server_timeout: 480s
haproxy_global_keepalive_timeout: 10s
haproxy_client_keepalive_timeout: 5184000s
haproxy_backend_maxconn: 2048
haproxy_check_interval: 3s
haproxy_check_timeout: 2s
haproxy_maxconns: 4096
haproxy_sysctl_conntrack_max: 131072

16
roles/cluster-replacement/tasks/main.yml

@ -1,16 +0,0 @@
---
- name: Generate haproxy config
template:
src: templates/haproxy.cfg.j2
dest: "{{ target_path }}/haproxy.cfg"
- name: Generate haproxy-docker-swarm
template:
src: templates/haproxy-docker-swarm.yaml.j2
dest: "{{ target_path }}/haproxy-swarm.yaml"
- name: Create the overlay network that will be joined by the proxied services
docker_network:
name: '{{ haproxy_docker_overlay_network }}'
driver: overlay
scope: swarm

56
roles/cluster-replacement/templates/haproxy-docker-swarm.yaml.j2

@ -1,56 +0,0 @@
version: '3.6'
services:
haproxy:
image: {{ haproxy_docker_image }}
configs:
- source: haproxy-config
target: /usr/local/etc/haproxy/haproxy.cfg
networks:
- {{ haproxy_docker_overlay_network }}
volumes:
#- /etc/haproxy:/usr/local/etc/haproxy:ro
- /var/run/docker.sock:/var/run/docker.sock
ports:
- target: {{ haproxy_default_port }}
published: {{ haproxy_default_port }}
protocol: tcp
mode: host
- target: {{ haproxy_ssl_port }}
published: {{ haproxy_ssl_port }}
protocol: tcp
mode: host
- target: {{ haproxy_admin_port }}
published: {{ haproxy_admin_port }}
protocol: tcp
mode: host
dns: [127.0.0.11]
deploy:
mode: replicated
replicas: 1
update_config:
parallelism: 1
delay: 20s
placement:
constraints:
- "node.role==manager"
restart_policy:
condition: {{ haproxy_docker_restart_policy}}
delay: 20s
max_attempts: 5
window: 120s
resources:
limits:
cpus: '2.0'
memory: 768M
reservations:
cpus: '1.0'
memory: 384M
logging:
driver: 'journald'
configs:
haproxy-config:
file: ./haproxy.cfg
networks:
{{ haproxy_docker_overlay_network }}:
external: true

75
roles/cluster-replacement/templates/haproxy.cfg.j2

@ -1,75 +0,0 @@
global
log fd@2 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
stats socket /var/lib/haproxy/stats expose-fd listeners
master-worker
resolvers docker
nameserver dns1 127.0.0.11:53
resolve_retries 3
timeout resolve 1s
timeout retry 1s
hold other 10s
hold refused 10s
hold nx 10s
hold timeout 10s
hold valid 10s
hold obsolete 10s
defaults
timeout connect 10s
timeout client 30s
timeout server 30s
log global
monitor-uri /_haproxy_health_check
timeout http-keep-alive {{ haproxy_global_keepalive_timeout }}
timeout connect {{ haproxy_connect_timeout }}
timeout client {{ haproxy_client_timeout }}
timeout server {{ haproxy_server_timeout }}
timeout check {{ haproxy_check_timeout }}
timeout http-request 10s # slowloris protection
default-server inter 3s fall 2 rise 2 slowstart 60s
# Needed to preserve the stick tables
peers mypeers
peer local_haproxy 127.0.0.1:1024
frontend http
bind *:{{ haproxy_default_port }}
mode http
option http-keep-alive
{% for srv in haproxy_docker_swarm_additional_services %}
use_backend {{ srv.acl_name }}_bck if { {{ srv.acl_rule }} }
{% endfor %}
#
# Backends
#
{% for srv in haproxy_docker_swarm_additional_services %}
backend {{ srv.acl_name }}_bck
mode http
option httpchk
balance {{ srv.balance_type | default('roundrobin') }}
{% if srv.http_check_enabled is defined and srv.http_check_enabled %}
http-check send {{ srv.http_check }}
http-check expect {{ srv.http_check_expect }}
{% endif %}
{% if srv.stick_sessions %}
{% if srv.stick_on_cookie %}
cookie {{ srv.stick_cookie }}
{% else %}
stick on src
stick-table {{ srv.stick_table }}
{% endif %}
{% endif %}
server-template {{ srv.service_name }}- {{ srv.service_replica_num }} {{ srv.stack_name }}_{{ srv.service_name }}:{{ srv.service_port }} {{ srv.backend_options | default('') }} check resolvers docker init-addr libc,none
{% endfor %}

2
roles/cluster-replacement/vars/main.yml

@ -1,2 +0,0 @@
---
haproxy_docker_overlay_network: 'haproxy-public'

1
roles/common/defaults/main.yaml

@ -1,4 +1,5 @@
---
conductor_server: http://conductor-server:8080/api
target_path: "/tmp/conductor_stack"
conductor_network: conductor-network
conductor_db: postgres

15
roles/conductor/defaults/conductor_ui_secrets.yaml

@ -0,0 +1,15 @@
$ANSIBLE_VAULT;1.1;AES256
64626566636365626537356334643266623431393062653538313362663664643538373137383732
3865313539323962666165386336373633303066353634350a633231666363356238326130373561
64336633343066323464343136336333613233396164623537623762323261383537633137363234
3534616537666436370a326331316232613839656436646164343236356233646233623430623665
64663265313964653063333133326636353162353532626364316433373030396434616434333631
32333936666339303963636438616164343063393364666332323831363833323131653666303534
62376162313737303036366532316163383434333130643363613166333433616331393636613635
30613132636261613165613136356638353532663634393431383739363636323961323538383566
62316261373262663335393632376366383031306563343531643632633234346531633164303038
62633039363961613538393832623039383237623663366430313238653030376263613032663437
38386635303332386630386133366232343966393761643635313833316536386634633563326639
38626435393963643866363663353834343333346139363565353161663737393166613938353562
62623661326237353163623138386432376531353864383036613931643164333633646431353162
3666373032663262623438353236626436303132306436326636

9
roles/conductor/defaults/main.yaml

@ -1,5 +1,12 @@
---
conductor_replicas: 2
conductor_replicas: 1
conductor_ui_replicas: 1
conductor_image: nubisware/conductor-server:3.0.4
conductor_ui_image: nubisware/conductor-ui-oauth2:3.0.4
conductor_config: conductor-swarm-config.properties
conductor_config_template: "{{ conductor_config }}.j2"
#nw_cluster_conductor_ui_secret: in vault
#dev_conductor_ui_secret: in vault
#pre_conductor_ui_secret: in vault
#prod_conductor_ui_secret: in vault

20
roles/conductor/tasks/main.yaml

@ -4,28 +4,14 @@
src: templates/conductor-swarm.yaml.j2
dest: "{{ target_path }}/conductor-swarm.yaml"
- name: Generate auth config
- name: Generate local auth config
when: conductor_auth is defined
template:
src: templates/auth.cfg.j2
src: "templates/{{ conductor_auth }}_auth.cfg.j2"
dest: "{{ target_path }}/auth.cfg"
- name: Generate conductor config from dynomite seeds
when: conductor_db is defined and conductor_db == 'dynomite'
vars:
seeds: "{{ lookup('file', '{{ target_path}}/seeds.list').splitlines() }}"
template:
src: "templates/{{ conductor_config_template }}"
dest: "{{ target_path }}/{{ conductor_config }}"
- name: Generate conductor config for JDBC DB
when: conductor_db is not defined or conductor_db != 'dynomite'
template:
src: "templates/{{ conductor_config_template }}"
dest: "{{ target_path }}/{{ conductor_config }}"
- name: Copy conductor SQL schema init for JDBC DB
when: (conductor_db is not defined or conductor_db != 'dynomite') and init_db
template:
src: "templates/conductor-db-init-{{ conductor_db }}.sql.j2"
dest: "{{ target_path }}/conductor-db-init.sql"

23
roles/conductor/templates/auth.cfg.j2

@ -1,23 +0,0 @@
{
"strategy": "local",
"strategySettings":{
"users": {
"admin": {
"hash": "098039dd5e84e486f83eadefc31ce038ccc90d6d62323528181049371c9460b4",
"salt": "salt",
"displayName": "Admin",
"email": "marco.lettere@nubisware.com",
"roles": [ "admin", "viewer" ]
}
}
},
"audit": true,
"acl": [
"POST /(.*) admin",
"PUT /(.*) admin",
"DELETE /(.*) admin",
"GET /api/(.*) viewer",
"GET /(.*) *"
]
}

99
roles/conductor/templates/conductor-swarm-config.properties.j2

@ -1,92 +1,31 @@
# Servers.
conductor.jetty.server.enabled=true
conductor.grpc.server.enabled=false
conductor.grpc-server.enabled=false
# Database persistence model. Possible values are memory, redis, and dynomite.
# If ommitted, the persistence used is memory
#
# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo
# redis : non-Dynomite based redis instance
# dynomite : Dynomite cluster. Use this for HA configuration.
# Database persistence type.
{% if conductor_db is not defined or conductor_db == 'postgres' %}
db=postgres
jdbc.url={{ postgres_jdbc_url }}
jdbc.username={{ postgres_jdbc_user }}
jdbc.password={{ postgres_jdbc_pass }}
conductor.{{ conductor_db }}.connection.pool.size.max=10
conductor.{{ conductor_db }}.connection.pool.idle.min=2
flyway.enabled=false
{% elif conductor_db is defined and conductor_db == 'mysql' %}
db=mysql
jdbc.url={{ mysql_jdbc_url }}
jdbc.username={{ mysql_jdbc_user }}
jdbc.password={{ mysql_jdbc_pass }}
conductor.{{ conductor_db }}.connection.pool.size.max=10
conductor.{{ conductor_db }}.connection.pool.idle.min=2
flyway.enabled=false
{% else %}
db=dynomite
# Dynomite Cluster details.
# format is host:port:rack separated by semicolon
workflow.dynomite.cluster.hosts={% set ns = namespace() %}
{% set ns.availability_zone = "" %}
{% for seed in seeds %}
{% set ns.seed_tokens = seed.split(':') %}
{% if ns.availability_zone == "" %}
{% set ns.availability_zone = ns.seed_tokens[2] %}
{% endif %}
{% if ns.availability_zone == ns.seed_tokens[2] %}
{{ ns.seed_tokens[0] }}:8102:{{ ns.availability_zone }}{%- if not loop.last %};{%- endif %}
{% endif %}
{%- endfor %}
# If you are running using dynomite, also add the following line to the property
# to set the rack/availability zone of the conductor server to be same as dynomite cluster config
EC2_AVAILABILTY_ZONE={{ ns.availability_zone }}
# Dynomite cluster name
workflow.dynomite.cluster.name=dyno1
# Namespace for the keys stored in Dynomite/Redis
workflow.namespace.prefix=conductor
# Namespace prefix for the dyno queues
workflow.namespace.queue.prefix=conductor_queues
# No. of threads allocated to dyno-queues (optional)
queues.dynomite.threads=3
conductor.db.type=postgres
conductor.postgres.jdbcUrl={{ postgres_jdbc_url }}
conductor.postgres.jdbcUsername={{ postgres_jdbc_user }}
conductor.postgres.jdbcPassword={{ postgres_jdbc_pass }}
{% endif %}
# Non-quorum port used to connect to local redis. Used by dyno-queues.
# When using redis directly, set this to the same port as redis server
# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite.
queues.dynomite.nonQuorum.port=22122
{% if conductor_db == 'mysql' %}
conductor.db.type=mysql
conductor.mysql.jdbcUrl={{ mysql_jdbc_url }}
conductor.mysql.jdbcUsername={{ mysql_jdbc_user }}
conductor.mysql.jdbcPassword={{ mysql_jdbc_pass }}
{% endif %}
# Elastic search instance type. Possible values are memory and external.
# If not specified, the instance type will be embedded in memory
#
# memory: The instance is created in memory and lost when the server dies. Useful for development and testing.
# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when
# the server dies. Useful for more stable environments like staging or production.
workflow.elasticsearch.instanceType=external
# Hikari pool sizes are -1 by default and prevent startup
conductor.{{conductor_db}}.connectionPoolMaxSize=10
conductor.{{conductor_db}}.connectionPoolMinIdle=2
# Transport address to elasticsearch
workflow.elasticsearch.url=elasticsearch:9300
# Name of the elasticsearch cluster
# Elastic search instance indexing is enabled.
conductor.indexing.enabled=true
conductor.elasticsearch.url=http://elasticsearch:9200
workflow.elasticsearch.instanceType=EXTERNAL
workflow.elasticsearch.index.name=conductor
# Additional modules (optional)
# conductor.additional.modules=class_extending_com.google.inject.AbstractModule
# Additional modules for metrics collection (optional)
# conductor.additional.modules=com.netflix.conductor.contribs.metrics.MetricsRegistryModule,com.netflix.conductor.contribs.metrics.LoggingMetricsModule
# com.netflix.conductor.contribs.metrics.LoggingMetricsModule.reportPeriodSeconds=15
# Load sample kitchen sink workflow
loadSample=false

40
roles/conductor/templates/conductor-swarm.yaml.j2

@ -6,28 +6,19 @@ services:
conductor-server:
environment:
- CONFIG_PROP={{ conductor_config }}
image: nubisware/conductor-server
image: "{{ conductor_image }}"
networks:
- {{ conductor_network }}
{% if clustered %}
- {{ haproxy_docker_overlay_network }}
{% endif %}
{% if not clustered %}
ports:
- "8080:8080"
{% endif %}
deploy:
mode: replicated
replicas: {{ conductor_replicas }}
{% if clustered %}
endpoint_mode: dnsrr
{% endif %}
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
configs:
- source: swarm-config
@ -40,42 +31,33 @@ services:
environment:
- WF_SERVER=http://conductor-server:8080/api/
- AUTH_CONFIG_PATH=/app/config/auth.config
image: nubisware/conductor-ui
image: "{{ conductor_ui_image }}"
networks:
- {{ conductor_network }}
{% if clustered %}
- {{ haproxy_docker_overlay_network }}
{% endif %}
{% if not clustered %}
ports:
- "5000:5000"
{% endif %}
{% if conductor_auth is defined %}
configs:
- source: auth-config
target: /app/config/auth.config
{% endif %}
deploy:
mode: replicated
replicas: {{ conductor_replicas }}
{% if clustered %}
endpoint_mode: dnsrr
{% endif %}
replicas: {{ conductor_ui_replicas }}
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
networks:
{{ conductor_network }}:
{% if clustered %}
{{ haproxy_docker_overlay_network }}:
external: True
{% endif %}
configs:
swarm-config:
file: ./{{ conductor_config }}
{% if conductor_auth is defined %}
auth-config:
file: ./auth.cfg
{% endif %}

0
local-site/auth.cfg → roles/conductor/templates/local_auth.cfg.j2

24
roles/conductor/templates/oauth2_auth.cfg.j2

@ -0,0 +1,24 @@
{
"strategy": "oauth2",
"strategySettings": {
"authorizationURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/auth",
"tokenURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token",
"clientID": "conductor-ui",
"clientSecret": "{{ conductor_ui_secret }}",
"callbackURL": "http://conductor-ui/login/callback",
"logoutURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/logout",
"logoutCallbackURL": "http://conductor-ui/logout/callback",
"roles": [ "admin", "viewer" ]
},
"cookieSecret": "{{ conductor_ui_secret }}",
"audit": true,
"acl": [
"POST /(.*) admin",
"PUT /(.*) admin",
"DELETE /(.*) admin",
"GET /api/(.*) *",
"GET /(.*) viewer,admin"
]
}

5
roles/databases/templates/elasticsearch-swarm.yaml.j2

@ -3,7 +3,7 @@ version: '3.6'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
image: docker.elastic.co/elasticsearch/elasticsearch:6.8.15
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- transport.host=0.0.0.0
@ -18,9 +18,10 @@ services:
deploy:
mode: replicated
replicas: {{ elasticsearch_replicas }}
#endpoint_mode: dnsrr
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
restart_policy:
condition: on-failure
delay: 5s

12
roles/databases/templates/mysql-swarm.yaml.j2

@ -9,22 +9,14 @@ services:
MYSQL_PASSWORD: {{ mysql_jdbc_pass }}
MYSQL_ROOT_PASSWORD: {{ mysql_jdbc_pass }}
MYSQL_DB: {{ mysql_jdbc_db }}
{% if init_db %}
configs:
- source: db-init
target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
networks:
- {{ conductor_network }}
deploy:
replicas: {{ mysql_replicas }}
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
networks:
{{ conductor_network }}:
{% if init_db %}
configs:
db-init:
file: {{ target_path }}/conductor-db-init.sql
{% endif %}

15
roles/databases/templates/postgres-swarm.yaml.j2

@ -4,28 +4,17 @@ services:
{{ postgres_service_name }}:
image: postgres
ports:
- "5432:5432"
environment:
POSTGRES_USER: "{{ postgres_jdbc_user }}"
POSTGRES_PASSWORD: "{{ postgres_jdbc_pass }}"
POSTGRES_DB: "{{ postgres_jdbc_db }}"
{% if init_db %}
configs:
- source: db-init
target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
networks:
- {{ conductor_network }}
deploy:
replicas: {{ postgres_replicas }}
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
networks:
{{ conductor_network }}:
{% if init_db %}
configs:
db-init:
file: {{ target_path }}/conductor-db-init.sql
{% endif %}

6
roles/elasticsearch/templates/elasticsearch-swarm.yaml.j2

@ -3,7 +3,7 @@ version: '3.6'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
image: docker.elastic.co/elasticsearch/elasticsearch:6.8.15
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- transport.host=0.0.0.0
@ -18,13 +18,13 @@ services:
deploy:
mode: replicated
replicas: {{ elasticsearch_replicas }}
#endpoint_mode: dnsrr
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
networks:

10
roles/mysql/defaults/main.yml

@ -1,10 +0,0 @@
---
use_jdbc: True
mysql_image_name: 'mariadb'
mysql_service_name: 'mysqldb'
mysql_replicas: 1
conductor_db: mysql
jdbc_user: conductor
jdbc_pass: password
jdbc_db: conductor
jdbc_url: jdbc:mysql://{{ mysql_service_name }}:3306/{{ mysql_jdbc_db }}?useSSL=false&allowPublicKeyRetrieval=true

5
roles/mysql/tasks/main.yaml

@ -1,5 +0,0 @@
---
- name: "Generate mysql swarm, image used: {{ mysql_image_name }}"
template:
src: templates/mysql-swarm.yaml.j2
dest: "{{ target_path }}/mysql-swarm.yaml"

30
roles/mysql/templates/mysql-swarm.yaml.j2

@ -1,30 +0,0 @@
version: '3.6'
services:
{{ mysql_service_name }}:
image: {{ mysql_image_name }}
environment:
MYSQL_USER: {{ mysql_jdbc_user }}
MYSQL_PASSWORD: {{ mysql_jdbc_pass }}
MYSQL_ROOT_PASSWORD: {{ mysql_jdbc_pass }}
MYSQL_DB: {{ jdbc_db }}
{% if init_db %}
configs:
- source: db-init
target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
networks:
- {{ conductor_network }}
deploy:
replicas: {{ mysql_replicas }}
placement:
constraints: [node.role == worker]
networks:
{{ conductor_network }}:
{% if init_db %}
configs:
db-init:
file: {{ target_path }}/conductor-db-init.sql
{% endif %}

2
roles/pep/defaults/main.yaml

@ -0,0 +1,2 @@
pep_port: 80
#pep_credentials: in vault

18
roles/pep/defaults/pep_credentials.yaml

@ -0,0 +1,18 @@
$ANSIBLE_VAULT;1.1;AES256
64326266376663626435303764383036326164336561303030633464333131373161336461326162
3630623962383434623834313737616435613966343637390a353562636535376539353538353061
31383933313734646661633661353836386266393565633830353137646431613431663236376137
6362613731386433370a323934373363383565323337373239666434353036333435613061666231
34346134313038366165343861316233326331393732353334303039616535633866366261623764
64653630353830396665363862633730396432633062363932636335643136613237373339613139
32386330396237396363383638653431663864333162303936663563313535343536376139343166
65316137326533306335643833353338376533633733393333623131316662386334653633353332
66363734636237363637303863323638393339373364356433666466643038343930616166396136
61666232356337613431316662353766393335306232616266363933653032656536386562373665
36306234636233313237623364613033313261393431633139343037623732646431663139383062
30396230326432376335303362356534613937306431636361663335376265363139366463656638
31386430393037306233663161333465616236383134623961343732383633386665333231363036
64346630633337643961653464613336623363303737626231326138633736656530653138326537
35386161656461313034343935353863333635376664386565393530633532613965646662363634
65396137646561353534373536616162353631383130363466356637643639323333643964323638
3535

34
roles/pep/tasks/main.yaml

@ -0,0 +1,34 @@
---
- name: Generate PEP config
template:
src: templates/nginx.conf.j2
dest: "{{ target_path }}/nginx.conf"
- name: Generate PEP default config
when: pep is defined and pep == True
template:
src: templates/nginx.default.conf.j2
dest: "{{ target_path }}/nginx.default.conf"
- name: Generate PEP default config
when: pep is not defined or pep == False
template:
src: templates/nginx.default.conf.nopep.j2
dest: "{{ target_path }}/nginx.default.conf"
- name: Generate config.js
when: pep is defined and pep == True
template:
src: templates/config.js.j2
dest: "{{ target_path }}/config.js"
- name: Generate pep.js
when: pep is defined and pep == True
template:
src: templates/pep.js.j2
dest: "{{ target_path }}/pep.js"
- name: Generate pep-docker-swarm
template:
src: templates/pep-swarm.yaml.j2
dest: "{{ target_path }}/pep-swarm.yaml"

98
roles/pep/templates/config.js.j2

@ -0,0 +1,98 @@
export default { config };
var config = {
"pep-credentials" : "{{ pep_credentials }}",
"hosts" : [
{
"host": "conductor-server",
"allow-basic-auth" : true,
"pip" : [ { claim: "context", operator : "get-contexts" } ],
"paths" : [
{
"name" : "metadata",
"path" : "^/api/metadata/(taskdefs|workflow)/?.*$",
"methods" : [
{
"method" : "GET",
"scopes" : ["get","list"]
}
]
},
{
"name" : "metadata.taskdefs",
"path" : "^/api/metadata/taskdefs/?.*$",
"methods" : [
{
"method" : "POST",
"scopes" : ["create"]
},
{
"method" : "DELETE",
"scopes" : ["delete"],
},
{
"method" : "PUT",
"scopes" : ["update"],
}
]
},
{
"name" : "metadata.workflow",
"path" : "^/api/metadata/workflow/?.*$",
"methods" : [
{
"method" : "POST",
"scopes" : ["create"]
},
{
"method" : "DELETE",
"scopes" : ["delete"],
},
{
"method" : "PUT",
"scopes" : ["update"],
}
]
},
{
"name" : "workflow",
"path" : "^/api/workflow/?.*$",
"methods" : [
{
"method" : "GET",
"scopes" : ["get"],
},
{
"method" : "POST",
"scopes" : ["start"],
},
{
"method" : "DELETE",
"scopes" : ["terminate"],
}
]
},
{
"name" : "task",
"path" : "^/api/tasks/poll/.+$",
"methods" : [
{
"method" : "GET",
"scopes" : ["poll"],
}
]
},
{
"name" : "task",
"path" : "^/api/tasks$",
"methods" : [
{
"method" : "POST",
"scopes" : ["update"],
}
]
}
]
}
]
}

18
roles/pep/templates/nginx.conf.j2

@ -0,0 +1,18 @@
load_module modules/ngx_http_js_module.so;
worker_processes 1;
events {
worker_connections 1024;
}
http {
{% if pep is defined and pep == True %}
js_import pep.js;
js_set $authorization pep.enforce;
proxy_cache_path /var/cache/nginx/pep keys_zone=token_responses:1m max_size=2m;
{% endif %}
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}

96
roles/pep/templates/nginx.default.conf.j2

@ -0,0 +1,96 @@
upstream _conductor-server {
ip_hash;
server conductor-server:8080;
}
upstream _conductor-ui {
ip_hash;
server conductor-ui:5000;
}
map $http_authorization $source_auth {
default "";
}
js_var $auth_token;
js_var $pep_credentials;
server {
listen *:80;
listen [::]:80;
server_name conductor-server;
location / {
proxy_pass http://_conductor-server;
}
location /api/ {
js_content pep.enforce;
}
location @backend {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Original-URI $request_uri;
proxy_pass http://_conductor-server;
}
location /jwt_verify_request {
internal;
proxy_method POST;
proxy_http_version 1.1;
proxy_set_header Authorization $pep_credentials;
proxy_set_header Content-Type "application/x-www-form-urlencoded";
proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token/introspect";
proxy_cache token_responses; # Enable caching
proxy_cache_key $source_auth; # Cache for each source authentication
proxy_cache_lock on; # Duplicate tokens must wait
proxy_cache_valid 200 10s; # How long to use each response
proxy_ignore_headers Cache-Control Expires Set-Cookie;
}
location /jwt_request {
internal;
proxy_method POST;
proxy_http_version 1.1;
proxy_set_header Authorization $pep_credentials;
proxy_set_header Content-Type "application/x-www-form-urlencoded";
proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token";
}
location /permission_request {
internal;
proxy_method POST;
proxy_http_version 1.1;
proxy_set_header Content-Type "application/x-www-form-urlencoded";
proxy_set_header Authorization "Bearer $auth_token";
proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token";
}
}
server {
listen *:80 default_server;
listen [::]:80 default_server;
server_name conductor-ui;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://_conductor-ui;
}
}

40
roles/pep/templates/nginx.default.conf.nopep.j2

@ -0,0 +1,40 @@
upstream _conductor-server {
ip_hash;
server conductor-server:8080;
}
upstream _conductor-ui {
ip_hash;
server conductor-ui:5000;
}
server {
listen *:80;
listen [::]:80;
server_name conductor-server;
location / {
proxy_pass http://_conductor-server;
}
}
server {
listen *:80 default_server;
listen [::]:80 default_server;
server_name conductor-ui;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://_conductor-ui;
}
}

35
roles/pep/templates/pep-swarm.yaml.j2

@ -0,0 +1,35 @@
version: '3.6'
services:
pep:
image: nginx:stable-alpine
networks:
- conductor-network
ports:
- "{{ pep_port }}:80"
{% if pep is defined and pep == True %}