Compare commits

...

35 Commits

Author SHA1 Message Date
Marco Lettere e01bffd1dc fixed support for client_credentials when basic auth 5 months ago
Marco Lettere 1cc1f1bb8c support IAM sending gunzipped tokens 7 months ago
Marco Lettere 571a988be9 added more info on token parse error 7 months ago
Marco Lettere 66a86dfe1a retry basic auth also when IAM returns 400 9 months ago
Marco Lettere 63806e6a6b fixed path for tasks in order to match updates 10 months ago
Marco Lettere 1253174c74 added support for client_credentials before password flow which is downgraded to backup 10 months ago
Marco Lettere 11716f0d4d try setting baseline for flyway 11 months ago
Marco Lettere 7c7535f94f try setting baseline for flyway 11 months ago
Marco Lettere bfd86a8697 try setting baseline for flyway 11 months ago
Marco Lettere 7948081d04 added external postgres vault 11 months ago
Marco Lettere 94eb5bd2fb added external postgres vault 11 months ago
Marco Lettere 2c54e97aeb adopted jdbc pass from vault 11 months ago
Marco Lettere eb40300249 removed unnecessary = 11 months ago
Marco Lettere a8b8f41446 changed oauth2 strategy to work around multiple role bug 11 months ago
Marco Lettere f3ec4f6327 scaled down original conductor replicas to two to help startup. 12 months ago
Marco Lettere 3224c53ae5 corrected local site 12 months ago
Marco Lettere 139043faa2 minor fixes 12 months ago
Marco Lettere fafd89a278 fixed typo 12 months ago
Marco Lettere c4bb342b3f changed naming of service to incorporate infrastructure 12 months ago
Marco Lettere c1db229a68 removed authorization constraint from health check and fixed host header for backend calls 12 months ago
Marco Lettere 9cc76a61d5 pep replicas set to two 12 months ago
dcore94 981b8e1ac7 moved volumes to configs 1 year ago
dcore94 33499eb123 all nodes on master for clustered deployment 1 year ago
dcore94 e69fc35258 tuned redirect uris 1 year ago
dcore94 b76b34c624 tuned redirect uris 1 year ago
dcore94 2f6d6e28ee set callback uri to https 1 year ago
dcore94 eeb843341a inventory fix 1 year ago
dcore94 d9467bf520 added support for load balanced network on external node 1 year ago
dcore94 676cac24ec tuned generation for all environments and added local-site 1 year ago
dcore94 288482d5b6 conductor 3.0.4 with oauth2 and pep 1 year ago
Mauro Mugnaini 2d4585d086 Commented out journald logging driver, the default will be used. The docker image for conductor is the local and not the public on the hub (for the moment for dev purposes). Oauth2 strategy is used for the login. 1 year ago
Mauro Mugnaini ab66713941 Config for oauth2 strategy 1 year ago
dcore94 e12f87fd85 use oauth2 enabled image for conductor-ui 1 year ago
dcore94 bf1bf82c0f added local-site with pep 1 year ago
dcore94 ca0b62bcfe added configurations for email workers 1 year ago
  1. README.md (74)
  2. group_vars/nw_cluster.yaml (3)
  3. group_vars/pre_cluster.yaml (3)
  4. group_vars/prod_cluster.yaml (3)
  5. inventory/hosts.dev (6)
  6. inventory/hosts.nw_cluster (5)
  7. local-site/base-config.cfg (13)
  8. local-site/conductor-swarm-config.properties (23)
  9. local-site/conductor-swarm.yaml (44)
  10. local-site/conductor-workers-swarm.yaml (30)
  11. local-site/elasticsearch-swarm.yaml (28)
  12. local-site/nginx.conf (13)
  13. local-site/nginx.default.conf (41)
  14. local-site/pep-swarm.yaml (30)
  15. local-site/postgres-swarm.yaml (16)
  16. roles/cluster-replacement/defaults/main.yml (56)
  17. roles/cluster-replacement/tasks/main.yml (16)
  18. roles/cluster-replacement/templates/haproxy-docker-swarm.yaml.j2 (56)
  19. roles/cluster-replacement/templates/haproxy.cfg.j2 (75)
  20. roles/cluster-replacement/vars/main.yml (2)
  21. roles/common/defaults/main.yaml (6)
  22. roles/conductor/defaults/conductor_ui_secrets.yaml (18)
  23. roles/conductor/defaults/main.yaml (12)
  24. roles/conductor/tasks/main.yaml (20)
  25. roles/conductor/templates/conductor-swarm-config.properties.j2 (104)
  26. roles/conductor/templates/conductor-swarm.yaml.j2 (48)
  27. roles/conductor/templates/local_auth.cfg.j2 (0)
  28. roles/conductor/templates/oauth2_auth.cfg.j2 (24)
  29. roles/databases/templates/elasticsearch-swarm.yaml.j2 (5)
  30. roles/databases/templates/mysql-swarm.yaml.j2 (12)
  31. roles/databases/templates/postgres-swarm.yaml.j2 (15)
  32. roles/elasticsearch/templates/elasticsearch-swarm.yaml.j2 (6)
  33. roles/external-postgres/defaults/main.yml (1)
  34. roles/mysql/defaults/main.yml (10)
  35. roles/mysql/tasks/main.yaml (5)
  36. roles/mysql/templates/mysql-swarm.yaml.j2 (30)
  37. roles/pep/defaults/main.yaml (4)
  38. roles/pep/defaults/pep_credentials.yaml (24)
  39. roles/pep/tasks/main.yaml (41)
  40. roles/pep/templates/config.js.j2 (99)
  41. roles/pep/templates/nginx.conf.j2 (18)
  42. roles/pep/templates/nginx.default.conf.j2 (109)
  43. roles/pep/templates/nginx.default.conf.nopep.j2 (41)
  44. roles/pep/templates/pep-swarm-ha_network.yaml.j2 (46)
  45. roles/pep/templates/pep-swarm.yaml.j2 (46)
  46. roles/pep/templates/pep.js.j2 (325)
  47. roles/postgres/defaults/main.yml (9)
  48. roles/postgres/tasks/main.yaml (5)
  49. roles/postgres/templates/postgres-swarm.yaml.j2 (31)
  50. roles/workers/defaults/main.yaml (11)
  51. roles/workers/defaults/smtp.yaml (10)
  52. roles/workers/templates/conductor-workers-swarm.yaml.j2 (6)
  53. roles/workers/templates/config.cfg.j2 (11)
  54. run.sh (86)
  55. site-dev.yaml (31)
  56. site-local.yaml (52)
  57. site-nw-cluster.yaml (64)
  58. site-pre.yaml (39)
  59. site-prod.yaml (37)
  60. site.yaml (56)

74
README.md

@@ -1,53 +1,55 @@
# Conductor Setup
**Conductor Setup** is composed of a set of ansible roles and a playbook named site.yaml useful for deploying a docker swarm running Conductor microservice orchestrator by [Netflix OSS](https://netflix.github.io/conductor/).
**Conductor Setup** is composed of a set of ansible roles and playbooks named site-*.yaml useful for deploying a docker swarm running the Conductor microservice orchestrator by [Netflix OSS](https://netflix.github.io/conductor/).
The current setup is based on Conductor version 3.0.4, adapted by Nubisware S.r.l.
It uses the following docker images from dockerhub:
- nubisware/conductor-server:3.0.4
- nubisware/conductor-ui-oauth2:3.0.4 (which has been enhanced with OAuth2 login backed by Keycloak)
Besides the basic components, Conductor itself (server and ui) and Elasticsearch 6.8.15, the repository can be configured to launch postgres or mysql persistence plus basic python-based workers for running PyRest, PyMail, PyExec and PyShell in the same swarm.
In addition, an nginx-based PEP can be run to protect the conductor REST API server.
It is also possible to connect to an external postgres for stateful deployments.
## Structure of the project
The AutoDynomite Docker image script file is present in `dynomite` folder.
The Docker Compose Swarm files are present in the `stack` folder.
The folder `roles` contains the ansible roles needed for the different configurations.
There are dedicated site-*.yaml files for deploying to the local, nw-cluster, or D4Science dev, pre and prod environments.
To run a deployment
`ansible-playbook site-X.yaml`
whereas
`ansible-playbook site-X.yaml -e dry=true`
only generates the files for the stack without actually deploying it.
The folder *local-site* contains a ready-to-use version for quickly launching a conductor instance with no replication (except for the workers), no auth in the Conductor UI and no PEP.
`docker stack deploy -c elasticsearch-swarm.yaml -c postgres-swarm.yaml -c conductor-swarm.yaml -c conductor-workers-swarm.yaml -c pep-swarm.yaml conductor`
When you have ensured that Postgres and Elasticsearch are running, execute:
`docker stack deploy -c conductor-swarm.yaml conductor`
This will create a local stack accessible through a permissive PEP at port 80. Please add two mappings for localhost in your /etc/hosts:
`127.0.1.1 conductor-server conductor-ui`
and point your browser to http://conductor-ui.
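For convenience, the two-step bring-up above can be scripted. The following is a minimal sketch (assuming it is run from the *local-site* folder and that the stack is named `conductor`, as in the commands above):

```bash
#!/bin/bash
# Start the stateful services first.
docker stack deploy -c elasticsearch-swarm.yaml -c postgres-swarm.yaml conductor

# Wait until no service of the stack is still at 0 running replicas.
while docker service ls --filter name=conductor --format '{{.Replicas}}' | grep -q '^0/'; do
  sleep 5
done

# Then start Conductor itself, the workers and the PEP.
docker stack deploy -c conductor-swarm.yaml -c conductor-workers-swarm.yaml -c pep-swarm.yaml conductor
```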
## Built With
* [Ansible](https://www.ansible.com)
* [Docker](https://www.docker.com)
## Documentation
The provided Docker stack files define the following configuration:
- 2 Conductor Server nodes with 2 replicas handled by Swarm
- 2 Conductor UI nodes with 2 replicas handled by Swarm
- 1 Elasticsearch node
- 1 Database node that can be postgres (default), mysql or mariadb
- 2 Optional replicated instances of PyExec worker running the tasks Http, Eval and Shell
- 1 Optional cluster-replacement service that sets up a networking environment (including an HAProxy LB) similar to the one available in production. By default it's disabled.
The default configuration is run with the command: `ansible-playbook site.yaml`
Files for swarms and configurations will be generated inside a temporary folder named /tmp/conductor_stack on the local machine.
In order to change the destination folder use the switch: `-e target_path=anotherdir`
If you only want to review the generated files run the command `ansible-playbook site.yaml -e dry=true`
In order to switch between postgres and mysql specify the db on the proper variable: `-e db=mysql`
In order to connect to an external postgres without pulling up a service in the stack use `-e db=remote-postgres`
In order to skip worker creation specify the noworker variable: `-e noworker=true`
In order to enable the cluster replacement use the switch: `-e cluster_replacement=true`
If you run the stack in production behind a load-balanced setup, ensure the variable cluster_check is true: `ansible-playbook site.yaml -e cluster_check=true`
To avoid having to juggle all of these variables, a dedicated site file has been created for the prod environment: running `ansible-playbook site-prod.yaml` deploys a stack with the prod setup.
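The switches can also be combined. For instance, a hypothetical dry run that targets mysql, skips the workers and writes the generated files to a custom folder (all switches as documented above) would look like:

```bash
ansible-playbook site.yaml -e db=mysql -e noworker=true -e target_path=/tmp/mydir -e dry=true
```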
Other settings can be fine-tuned by checking the variables in the proper roles, which are:
- *common*: defaults and common tasks
- *conductor*: defaults, templates and tasks for generating swarm files for the replicated conductor-server and ui.
- *elasticsearch*: defaults, templates and tasks for starting a single instance of elasticsearch in the swarm
- *mysql*: defaults, templates and tasks for starting a single instance of mysql/mariadb in the swarm
- *postgres*: defaults, templates and tasks for starting a single instance of postgres in the swarm
- *workers*: defaults and tasks for starting a replicated instance of the workers for executing HTTP, Shell and Eval operations.
## Examples
The following example, run as user `username` against the remote hosts listed in `hosts`, deploys a swarm with 2 replicas of conductor server and ui, 1 postgres, 1 elasticsearch, 2 replicas of the simple PyExec worker, and an HAProxy that acts as load balancer.
`ansible-playbook -u username -i hosts site.yaml -e target_path=/tmp/conductor -e cluster_replacement=true`
Check out the site-X.yaml files as a reference for different configurations.
## Change log

3
group_vars/nw_cluster.yaml

@@ -1,3 +0,0 @@
---
infrastructure: dev
conductor_workers_server: http://conductor-dev.int.d4science.net/api

3
group_vars/pre_cluster.yaml

@@ -1,3 +0,0 @@
---
infrastructure: pre
conductor_workers_server: https://conductor.pre.d4science.org/api

3
group_vars/prod_cluster.yaml

@@ -1,3 +0,0 @@
---
infrastructure: prod
conductor_workers_server: https://conductor.d4science.org/api

6
inventory/hosts.dev

@@ -1,5 +1,5 @@
[dev_infra:children]
nw_cluster
dev_cluster
[nw_cluster]
nubis1.int.d4science.net
[dev_cluster]
docker-swarm1.int.d4science.net docker_swarm_manager_main_node=True

5
inventory/hosts.nw_cluster

@@ -0,0 +1,5 @@
[nw_cluster_infra:children]
nw_cluster
[nw_cluster]
nubis1.int.d4science.net

13
local-site/base-config.cfg

@@ -0,0 +1,13 @@
[common]
loglevel = info
#server =
threads = 1
pollrate = 1
[pymail]
server = smtp-relay.d4science.org
user = conductor_local
password =
protocol = starttls
port = 587

23
local-site/conductor-swarm-config.properties

@@ -0,0 +1,23 @@
# Servers.
conductor.grpc-server.enabled=false
# Database persistence type.
conductor.db.type=postgres
conductor.postgres.jdbcUrl=jdbc:postgresql://postgresdb:5432/conductor
conductor.postgres.jdbcUsername=conductor
conductor.postgres.jdbcPassword=password
# Hikari pool sizes are -1 by default and prevent startup
conductor.postgres.connectionPoolMaxSize=10
conductor.postgres.connectionPoolMinIdle=2
# Elastic search instance indexing is enabled.
conductor.indexing.enabled=true
conductor.elasticsearch.url=http://elasticsearch:9200
workflow.elasticsearch.instanceType=EXTERNAL
workflow.elasticsearch.index.name=conductor
# Load sample kitchen sink workflow
loadSample=false

44
local-site/conductor-swarm.yaml

@@ -0,0 +1,44 @@
version: '3.6'
services:
conductor-server-local:
environment:
- CONFIG_PROP=conductor-swarm-config.properties
image: "nubisware/conductor-server:3.0.4"
networks:
- conductor-network
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
window: 120s
configs:
- source: swarm-config
target: /app/config/conductor-swarm-config.properties
logging:
driver: "journald"
conductor-ui-local:
environment:
- WF_SERVER=http://conductor-server-local:8080/api/
image: "nubisware/conductor-ui-oauth2:3.0.4"
networks:
- conductor-network
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
window: 120s
networks:
conductor-network:
configs:
swarm-config:
file: ./conductor-swarm-config.properties

30
local-site/conductor-workers-swarm.yaml

@@ -0,0 +1,30 @@
version: '3.6'
services:
base:
environment:
CONDUCTOR_SERVER: http://conductor-server-local:8080/api/
CONDUCTOR_HEALTH: http://conductor-server-local:8080/health
configs:
- source: base-config
target: /app/config.cfg
image: 'nubisware/nubisware-conductor-worker-py-base'
networks:
- conductor-network
deploy:
mode: replicated
replicas: 2
restart_policy:
condition: on-failure
delay: 5s
window: 120s
logging:
driver: "journald"
networks:
conductor-network:
configs:
base-config:
file: base-config.cfg

28
local-site/elasticsearch-swarm.yaml

@@ -0,0 +1,28 @@
version: '3.6'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:6.8.15
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- transport.host=0.0.0.0
- discovery.type=single-node
- xpack.security.enabled=false
networks:
conductor-network:
aliases:
- es
logging:
driver: "journald"
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
networks:
conductor-network:

13
local-site/nginx.conf

@@ -0,0 +1,13 @@
load_module modules/ngx_http_js_module.so;
worker_processes 1;
events {
worker_connections 1024;
}
http {
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}

41
local-site/nginx.default.conf

@@ -0,0 +1,41 @@
upstream _conductor-server {
ip_hash;
server conductor-server-local:8080;
}
upstream _conductor-ui {
ip_hash;
server conductor-ui-local:5000;
}
server {
listen *:80;
listen [::]:80;
server_name conductor-server;
location / {
proxy_set_header Host $host;
proxy_pass http://_conductor-server;
}
}
server {
listen *:80 default_server;
listen [::]:80 default_server;
server_name conductor-ui;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://_conductor-ui;
}
}

30
local-site/pep-swarm.yaml

@@ -0,0 +1,30 @@
version: '3.6'
services:
pep:
image: nginx:stable-alpine
networks:
- conductor-network
ports:
- "80:80"
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
window: 120s
configs:
- source: nginxconf
target: /etc/nginx/templates/default.conf.template
- source: nginxbaseconf
target: /etc/nginx/nginx.conf
networks:
conductor-network:
configs:
nginxconf:
file: ./nginx.default.conf
nginxbaseconf:
file: ./nginx.conf

16
local-site/postgres-swarm.yaml

@@ -0,0 +1,16 @@
version: '3.6'
services:
postgresdb:
image: postgres
environment:
POSTGRES_USER: "conductor"
POSTGRES_PASSWORD: "password"
POSTGRES_DB: "conductor"
networks:
- conductor-network
deploy:
replicas: 1
networks:
conductor-network:

56
roles/cluster-replacement/defaults/main.yml

@@ -1,56 +0,0 @@
---
haproxy_latest_release: True
haproxy_version: 2.2
haproxy_repo_key: 'http://haproxy.debian.net/bernat.debian.org.gpg'
haproxy_debian_latest_repo: "deb http://haproxy.debian.net {{ ansible_lsb.codename }}-backports-{{ haproxy_version }} main"
haproxy_ubuntu_latest_repo: "ppa:vbernat/haproxy-{{ haproxy_version }}"
haproxy_pkg_state: present
haproxy_enabled: True
haproxy_loglevel: info
haproxy_k_bind_non_local_ip: True
haproxy_docker_container: False
haproxy_docker_version: '{{ haproxy_version }}.4'
haproxy_docker_image: 'haproxytech/haproxy-debian:{{ haproxy_version }}.4'
haproxy_docker_compose_dir: /srv/haproxy_swarm
haproxy_docker_restart_policy: 'on-failure'
haproxy_ha_with_keepalived: False
haproxy_docker_swarm_networks:
- '{{ docker_swarm_portainer_network }}'
haproxy_docker_swarm_additional_networks: []
haproxy_docker_swarm_haproxy_constraints:
- 'node.role == manager'
haproxy_docker_swarm_additional_services: [{ acl_name: 'conductor-server', acl_rule: 'hdr_dom(host) -i conductor-dev.int.d4science.net', stack_name: 'conductor-{{ infrastructure }}', service_name: 'conductor-server', service_replica_num: '2', service_port: '8080', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri /api/health ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }, { acl_name: 'conductor-ui', acl_rule: 'hdr_dom(host) -i conductorui-dev.int.d4science.net', stack_name: 'conductor-{{ infrastructure }}', service_name: 'conductor-ui', service_replica_num: '2', service_port: '5000', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }]
# - { acl_name: 'service', acl_rule: 'hdr_dom(host) -i service.example.com', stack_name: 'stack', service_name: 'service', service_replica_num: '1', service_port: '9999', service_overlay_network: 'service-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth HEAD uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]', allowed_networks: '192.168.1.0/24 192.168.2.0/24' }
haproxy_default_port: 80
haproxy_terminate_tls: False
haproxy_ssl_port: 443
haproxy_admin_port: 8880
haproxy_admin_socket: /run/haproxy/admin.sock
haproxy_install_additional_pkgs: False
haproxy_additional_pkgs:
- haproxyctl
- haproxy-log-analysis
haproxy_nagios_check: False
# It's a percentage
haproxy_nagios_check_w: 70
haproxy_nagios_check_c: 90
# Used by some other role as defaults, eg docker-swarm
haproxy_spread_checks: 5
haproxy_connect_timeout: 10s
haproxy_client_timeout: 120s
haproxy_server_timeout: 480s
haproxy_global_keepalive_timeout: 10s
haproxy_client_keepalive_timeout: 5184000s
haproxy_backend_maxconn: 2048
haproxy_check_interval: 3s
haproxy_check_timeout: 2s
haproxy_maxconns: 4096
haproxy_sysctl_conntrack_max: 131072

16
roles/cluster-replacement/tasks/main.yml

@@ -1,16 +0,0 @@
---
- name: Generate haproxy config
template:
src: templates/haproxy.cfg.j2
dest: "{{ target_path }}/haproxy.cfg"
- name: Generate haproxy-docker-swarm
template:
src: templates/haproxy-docker-swarm.yaml.j2
dest: "{{ target_path }}/haproxy-swarm.yaml"
- name: Create the overlay network that will be joined by the proxied services
docker_network:
name: '{{ haproxy_docker_overlay_network }}'
driver: overlay
scope: swarm

56
roles/cluster-replacement/templates/haproxy-docker-swarm.yaml.j2

@@ -1,56 +0,0 @@
version: '3.6'
services:
haproxy:
image: {{ haproxy_docker_image }}
configs:
- source: haproxy-config
target: /usr/local/etc/haproxy/haproxy.cfg
networks:
- {{ haproxy_docker_overlay_network }}
volumes:
#- /etc/haproxy:/usr/local/etc/haproxy:ro
- /var/run/docker.sock:/var/run/docker.sock
ports:
- target: {{ haproxy_default_port }}
published: {{ haproxy_default_port }}
protocol: tcp
mode: host
- target: {{ haproxy_ssl_port }}
published: {{ haproxy_ssl_port }}
protocol: tcp
mode: host
- target: {{ haproxy_admin_port }}
published: {{ haproxy_admin_port }}
protocol: tcp
mode: host
dns: [127.0.0.11]
deploy:
mode: replicated
replicas: 1
update_config:
parallelism: 1
delay: 20s
placement:
constraints:
- "node.role==manager"
restart_policy:
condition: {{ haproxy_docker_restart_policy}}
delay: 20s
max_attempts: 5
window: 120s
resources:
limits:
cpus: '2.0'
memory: 768M
reservations:
cpus: '1.0'
memory: 384M
logging:
driver: 'journald'
configs:
haproxy-config:
file: ./haproxy.cfg
networks:
{{ haproxy_docker_overlay_network }}:
external: true

75
roles/cluster-replacement/templates/haproxy.cfg.j2

@@ -1,75 +0,0 @@
global
log fd@2 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
stats socket /var/lib/haproxy/stats expose-fd listeners
master-worker
resolvers docker
nameserver dns1 127.0.0.11:53
resolve_retries 3
timeout resolve 1s
timeout retry 1s
hold other 10s
hold refused 10s
hold nx 10s
hold timeout 10s
hold valid 10s
hold obsolete 10s
defaults
timeout connect 10s
timeout client 30s
timeout server 30s
log global
monitor-uri /_haproxy_health_check
timeout http-keep-alive {{ haproxy_global_keepalive_timeout }}
timeout connect {{ haproxy_connect_timeout }}
timeout client {{ haproxy_client_timeout }}
timeout server {{ haproxy_server_timeout }}
timeout check {{ haproxy_check_timeout }}
timeout http-request 10s # slowloris protection
default-server inter 3s fall 2 rise 2 slowstart 60s
# Needed to preserve the stick tables
peers mypeers
peer local_haproxy 127.0.0.1:1024
frontend http
bind *:{{ haproxy_default_port }}
mode http
option http-keep-alive
{% for srv in haproxy_docker_swarm_additional_services %}
use_backend {{ srv.acl_name }}_bck if { {{ srv.acl_rule }} }
{% endfor %}
#
# Backends
#
{% for srv in haproxy_docker_swarm_additional_services %}
backend {{ srv.acl_name }}_bck
mode http
option httpchk
balance {{ srv.balance_type | default('roundrobin') }}
{% if srv.http_check_enabled is defined and srv.http_check_enabled %}
http-check send {{ srv.http_check }}
http-check expect {{ srv.http_check_expect }}
{% endif %}
{% if srv.stick_sessions %}
{% if srv.stick_on_cookie %}
cookie {{ srv.stick_cookie }}
{% else %}
stick on src
stick-table {{ srv.stick_table }}
{% endif %}
{% endif %}
server-template {{ srv.service_name }}- {{ srv.service_replica_num }} {{ srv.stack_name }}_{{ srv.service_name }}:{{ srv.service_port }} {{ srv.backend_options | default('') }} check resolvers docker init-addr libc,none
{% endfor %}

2
roles/cluster-replacement/vars/main.yml

@@ -1,2 +0,0 @@
---
haproxy_docker_overlay_network: 'haproxy-public'

6
roles/common/defaults/main.yaml

@@ -1,5 +1,9 @@
---
target_path: "/tmp/conductor_stack"
conductor_service: "conductor-server-{{ infrastructure }}"
conductor_ui_service: "conductor-ui-{{ infrastructure }}"
conductor_service_url: "http://{{ conductor_service }}:8080/api/"
conductor_service_health_url: "http://{{ conductor_service }}:8080/health"
target_path: "/tmp/conductor_stack_{{ infrastructure }}"
conductor_network: conductor-network
conductor_db: postgres
init_db: True

18
roles/conductor/defaults/conductor_ui_secrets.yaml

@@ -0,0 +1,18 @@
$ANSIBLE_VAULT;1.1;AES256
62366130363930353837376531653565316531653234366233663032386266346338356335623537
3765393265633163396330646365393865386130393661650a666264363165656539396365643465
35313238313135363736386661633833333736396236303861383061313366613235623731356336
3634376335626138370a646666343033316165343665633338316432636562323736626466376233
64633738356663666563643465363137636261643639643035633931386631383436353936613334
64333135643036336539313164386264643737636164613462646130393730393334626335333262
30373231353061376565336366353938356338643432633664306632366436383262636333643961
62613562666463633164313235366433616134613831393436303466366236323337323635616337
34383634613736343034626330303661663662633661383734633834373464313137656461356562
37336430633865656330623863396133613636316136613133633965353932333266663532356334
35333138316339353236623963383739663730313737303838396538666338316366636537643663
35366537353736343462383734663762393433666266303963306136626631653539396632326337
39326266316532623232643437323238313765653261343630636339633936356138646262346634
63363763306533363839386364646130396534383437366631343537303165326539393639613735
39393364616361393435643531363462393633343437393936613861353266356230353338616163
37373562393362356563623966313034653138616632336264343533363165313362306330386639
32356363653031656465623463373337643930386361393839613139623530363635

12
roles/conductor/defaults/main.yaml

@@ -1,5 +1,15 @@
---
conductor_replicas: 2
conductor_replicas: 1
conductor_ui_replicas: 1
conductor_image: nubisware/conductor-server:3.0.4
conductor_ui_image: nubisware/conductor-ui-oauth2:3.0.4
conductor_config: conductor-swarm-config.properties
conductor_config_template: "{{ conductor_config }}.j2"
conductor_ui_clientid: "conductor-ui"
conductor_ui_public_url: "http://conductor-ui"
#nw_cluster_conductor_ui_secret: in vault
#dev_conductor_ui_secret: in vault
#pre_conductor_ui_secret: in vault
#prod_conductor_ui_secret: in vault

20
roles/conductor/tasks/main.yaml

@@ -4,28 +4,14 @@
src: templates/conductor-swarm.yaml.j2
dest: "{{ target_path }}/conductor-swarm.yaml"
- name: Generate auth config
- name: Generate local auth config
when: conductor_auth is defined
template:
src: templates/auth.cfg.j2
src: "templates/{{ conductor_auth }}_auth.cfg.j2"
dest: "{{ target_path }}/auth.cfg"
- name: Generate conductor config from dynomite seeds
when: conductor_db is defined and conductor_db == 'dynomite'
vars:
seeds: "{{ lookup('file', '{{ target_path}}/seeds.list').splitlines() }}"
template:
src: "templates/{{ conductor_config_template }}"
dest: "{{ target_path }}/{{ conductor_config }}"
- name: Generate conductor config for JDBC DB
when: conductor_db is not defined or conductor_db != 'dynomite'
template:
src: "templates/{{ conductor_config_template }}"
dest: "{{ target_path }}/{{ conductor_config }}"
- name: Copy conductor SQL schema init for JDBC DB
when: (conductor_db is not defined or conductor_db != 'dynomite') and init_db
template:
src: "templates/conductor-db-init-{{ conductor_db }}.sql.j2"
dest: "{{ target_path }}/conductor-db-init.sql"

104
roles/conductor/templates/conductor-swarm-config.properties.j2

@ -1,92 +1,36 @@
# Servers.
conductor.jetty.server.enabled=true
conductor.grpc.server.enabled=false
conductor.grpc-server.enabled=false
# Database persistence model. Possible values are memory, redis, and dynomite.
# If omitted, the persistence used is memory
#
# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo
# redis : non-Dynomite based redis instance
# dynomite : Dynomite cluster. Use this for HA configuration.
# Database persistence type.
{% if conductor_db is not defined or conductor_db == 'postgres' %}
db=postgres
jdbc.url={{ postgres_jdbc_url }}
jdbc.username={{ postgres_jdbc_user }}
jdbc.password={{ postgres_jdbc_pass }}
conductor.{{ conductor_db }}.connection.pool.size.max=10
conductor.{{ conductor_db }}.connection.pool.idle.min=2
flyway.enabled=false
{% elif conductor_db is defined and conductor_db == 'mysql' %}
db=mysql
jdbc.url={{ mysql_jdbc_url }}
jdbc.username={{ mysql_jdbc_user }}
jdbc.password={{ mysql_jdbc_pass }}
conductor.{{ conductor_db }}.connection.pool.size.max=10
conductor.{{ conductor_db }}.connection.pool.idle.min=2
flyway.enabled=false
{% else %}
db=dynomite
# Dynomite Cluster details.
# format is host:port:rack separated by semicolon
workflow.dynomite.cluster.hosts={% set ns = namespace() %}
{% set ns.availability_zone = "" %}
{% for seed in seeds %}
{% set ns.seed_tokens = seed.split(':') %}
{% if ns.availability_zone == "" %}
{% set ns.availability_zone = ns.seed_tokens[2] %}
{% endif %}
{% if ns.availability_zone == ns.seed_tokens[2] %}
{{ ns.seed_tokens[0] }}:8102:{{ ns.availability_zone }}{%- if not loop.last %};{%- endif %}
{% endif %}
{%- endfor %}
# If you are running using dynomite, also add the following line to the property
# to set the rack/availability zone of the conductor server to be same as dynomite cluster config
EC2_AVAILABILTY_ZONE={{ ns.availability_zone }}
# Dynomite cluster name
workflow.dynomite.cluster.name=dyno1
# Namespace for the keys stored in Dynomite/Redis
workflow.namespace.prefix=conductor
# Namespace prefix for the dyno queues
workflow.namespace.queue.prefix=conductor_queues
# No. of threads allocated to dyno-queues (optional)
queues.dynomite.threads=3
conductor.db.type=postgres
conductor.postgres.jdbcUrl={{ postgres_jdbc_url }}
conductor.postgres.jdbcUsername={{ postgres_jdbc_user }}
conductor.postgres.jdbcPassword={{ postgres_jdbc_pass }}
flyway.baseline-on-migrate=true
conductor.flyway.baseline-on-migrate=true
{% endif %}
# Non-quorum port used to connect to local redis. Used by dyno-queues.
# When using redis directly, set this to the same port as redis server
# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite.
queues.dynomite.nonQuorum.port=22122
{% if conductor_db == 'mysql' %}
conductor.db.type=mysql
conductor.mysql.jdbcUrl={{ mysql_jdbc_url }}
conductor.mysql.jdbcUsername={{ mysql_jdbc_user }}
conductor.mysql.jdbcPassword={{ mysql_jdbc_pass }}
{% endif %}
# Elastic search instance type. Possible values are memory and external.
# If not specified, the instance type will be embedded in memory
#
# memory: The instance is created in memory and lost when the server dies. Useful for development and testing.
# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when
# the server dies. Useful for more stable environments like staging or production.
workflow.elasticsearch.instanceType=external
# Hikari pool sizes are -1 by default and prevent startup
conductor.{{conductor_db}}.connectionPoolMaxSize=10
conductor.{{conductor_db}}.connectionPoolMinIdle=2
# Transport address to elasticsearch
workflow.elasticsearch.url=elasticsearch:9300
# Name of the elasticsearch cluster
# Elastic search instance indexing is enabled.
conductor.indexing.enabled=true
conductor.elasticsearch.url=http://elasticsearch:9200
workflow.elasticsearch.instanceType=EXTERNAL
workflow.elasticsearch.index.name=conductor
# Additional modules (optional)
# conductor.additional.modules=class_extending_com.google.inject.AbstractModule
# Additional modules for metrics collection (optional)
# conductor.additional.modules=com.netflix.conductor.contribs.metrics.MetricsRegistryModule,com.netflix.conductor.contribs.metrics.LoggingMetricsModule
# com.netflix.conductor.contribs.metrics.LoggingMetricsModule.reportPeriodSeconds=15
# Load sample kitchen sink workflow
loadSample=false
#flyway.baseline-on-migrate=true

48
roles/conductor/templates/conductor-swarm.yaml.j2

@@ -3,31 +3,22 @@ version: '3.6'
{% set clustered = (cluster_replacement is defined and cluster_replacement) or (cluster_check is defined and cluster_check) %}
services:
conductor-server:
{{ conductor_service }}:
environment:
- CONFIG_PROP={{ conductor_config }}
image: nubisware/conductor-server
image: "{{ conductor_image }}"
networks:
- {{ conductor_network }}
{% if clustered %}
- {{ haproxy_docker_overlay_network }}
{% endif %}
{% if not clustered %}
ports:
- "8080:8080"
{% endif %}
deploy:
mode: replicated
replicas: {{ conductor_replicas }}
{% if clustered %}
endpoint_mode: dnsrr
{% endif %}
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
configs:
- source: swarm-config
@@ -36,46 +27,39 @@ services:
logging:
driver: "journald"
conductor-ui:
{{ conductor_ui_service }}:
environment:
- WF_SERVER=http://conductor-server:8080/api/
- WF_SERVER={{ conductor_service_url }}
{% if conductor_auth is defined %}
- AUTH_CONFIG_PATH=/app/config/auth.config
image: nubisware/conductor-ui
{% endif %}
image: "{{ conductor_ui_image }}"
networks:
- {{ conductor_network }}
{% if clustered %}
- {{ haproxy_docker_overlay_network }}
{% endif %}
{% if not clustered %}
ports:
- "5000:5000"
{% endif %}
{% if conductor_auth is defined %}
configs:
- source: auth-config
target: /app/config/auth.config
{% endif %}
deploy:
mode: replicated
replicas: {{ conductor_replicas }}
{% if clustered %}
endpoint_mode: dnsrr
{% endif %}
replicas: {{ conductor_ui_replicas }}
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
networks:
{{ conductor_network }}:
{% if clustered %}
{{ haproxy_docker_overlay_network }}:
external: True
{% endif %}
configs:
swarm-config:
file: ./{{ conductor_config }}
{% if conductor_auth is defined %}
auth-config:
file: ./auth.cfg
{% endif %}

0
roles/conductor/templates/auth.cfg.j2 → roles/conductor/templates/local_auth.cfg.j2

24
roles/conductor/templates/oauth2_auth.cfg.j2

@@ -0,0 +1,24 @@
{
"strategy": "oauth2",
"strategySettings": {
"authorizationURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/auth",
"tokenURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token",
"clientID": "{{ conductor_ui_clientid }}",
"clientSecret": "{{ conductor_ui_secret }}",
"callbackURL": "{{ conductor_ui_public_url }}/login/callback",
"logoutURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/logout",
"logoutCallbackURL": "{{ conductor_ui_public_url }}/logout/callback",
"roles": [ "admin", "viewer" ]
},
"cookieSecret": "{{ conductor_ui_secret }}",
"audit": true,
"acl": [
"POST /(.*) admin",
"PUT /(.*) admin",
"DELETE /(.*) admin",
"GET /api/(.*) *",
"GET /(.*) viewer"
]
}

5
roles/databases/templates/elasticsearch-swarm.yaml.j2

@@ -3,7 +3,7 @@ version: '3.6'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
image: docker.elastic.co/elasticsearch/elasticsearch:6.8.15
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- transport.host=0.0.0.0
@@ -18,9 +18,10 @@ services:
deploy:
mode: replicated
replicas: {{ elasticsearch_replicas }}
#endpoint_mode: dnsrr
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
restart_policy:
condition: on-failure
delay: 5s

12
roles/databases/templates/mysql-swarm.yaml.j2

@@ -9,22 +9,14 @@ services:
MYSQL_PASSWORD: {{ mysql_jdbc_pass }}
MYSQL_ROOT_PASSWORD: {{ mysql_jdbc_pass }}
MYSQL_DB: {{ mysql_jdbc_db }}
{% if init_db %}
configs:
- source: db-init
target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
networks:
- {{ conductor_network }}
deploy:
replicas: {{ mysql_replicas }}
{% if infrastructure == 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
networks:
{{ conductor_network }}:
{% if init_db %}
configs:
db-init:
file: {{ target_path }}/conductor-db-init.sql
{% endif %}

15
roles/databases/templates/postgres-swarm.yaml.j2

@@ -4,28 +4,17 @@ services:
{{ postgres_service_name }}:
image: postgres
ports:
- "5432:5432"
environment:
POSTGRES_USER: "{{ postgres_jdbc_user }}"
POSTGRES_PASSWORD: "{{ postgres_jdbc_pass }}"
POSTGRES_DB: "{{ postgres_jdbc_db }}"
{% if init_db %}
configs:
- source: db-init
target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
networks:
- {{ conductor_network }}
deploy:
replicas: {{ postgres_replicas }}
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
networks:
{{ conductor_network }}:
{% if init_db %}
configs:
db-init:
file: {{ target_path }}/conductor-db-init.sql
{% endif %}

6
roles/elasticsearch/templates/elasticsearch-swarm.yaml.j2

@@ -3,7 +3,7 @@ version: '3.6'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
image: docker.elastic.co/elasticsearch/elasticsearch:6.8.15
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- transport.host=0.0.0.0
@@ -18,13 +18,13 @@ services:
deploy:
mode: replicated
replicas: {{ elasticsearch_replicas }}
#endpoint_mode: dnsrr
{% if infrastructure != 'local' %}
placement:
constraints: [node.role == worker]
{% endif %}
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
networks:

1
roles/external-postgres/defaults/main.yml

@@ -3,6 +3,7 @@ use_jdbc: True
postgres_host: "postgresql-srv.d4science.org"
conductor_db: "postgres"
postgres_jdbc_user: "conductor_u"
postgres_jdbc_pass: '{{ jdbc_pass }}'
jdbc_db: "conductor"
postgres_jdbc_url: "jdbc:postgresql://{{ postgres_host }}:5432/{{ jdbc_db }}"

10
roles/mysql/defaults/main.yml

@@ -1,10 +0,0 @@
---
use_jdbc: True
mysql_image_name: 'mariadb'
mysql_service_name: 'mysqldb'
mysql_replicas: 1
conductor_db: mysql
jdbc_user: conductor
jdbc_pass: password
jdbc_db: conductor
jdbc_url: jdbc:mysql://{{ mysql_service_name }}:3306/{{ mysql_jdbc_db }}?useSSL=false&allowPublicKeyRetrieval=true

5
roles/mysql/tasks/main.yaml

@@ -1,5 +0,0 @@
---
- name: "Generate mysql swarm, image used: {{ mysql_image_name }}"
template:
src: templates/mysql-swarm.yaml.j2
dest: "{{ target_path }}/mysql-swarm.yaml"

30
roles/mysql/templates/mysql-swarm.yaml.j2

@@ -1,30 +0,0 @@
version: '3.6'
services:
{{ mysql_service_name }}:
image: {{ mysql_image_name }}
environment:
MYSQL_USER: {{ mysql_jdbc_user }}
MYSQL_PASSWORD: {{ mysql_jdbc_pass }}
MYSQL_ROOT_PASSWORD: {{ mysql_jdbc_pass }}
MYSQL_DB: {{ jdbc_db }}
{% if init_db %}
configs:
- source: db-init
target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
networks:
- {{ conductor_network }}
deploy:
replicas: {{ mysql_replicas }}
placement:
constraints: [node.role == worker]
networks:
{{ conductor_network }}:
{% if init_db %}
configs:
db-init:
file: {{ target_path }}/conductor-db-init.sql
{% endif %}

4
roles/pep/defaults/main.yaml

@@ -0,0 +1,4 @@
pep_port: 80
pep_replicas: 1
# hostnames to be used as vhosts
#pep_credentials: in vault

24
roles/pep/defaults/pep_credentials.yaml

@@ -0,0 +1,24 @@
$ANSIBLE_VAULT;1.1;AES256
63653037396633613264356337303461626364643463616264616333313065336263626665646233
3861663135613138333863343261373464326239303835650a643535633265653339376332663462
35306231383136623339313436343732666332333435383162366135386663363063376466636233
6233353263663839310a623233353138373734356465653965376132643137643738363430333861
63336132646562343639666334616633356631366535343561646434323130633135393535383061
38313337303261396364653663316462376337393837373038623266633831303564646539326665
30303065363335346538643436613030336163336535383665623533303535623064376539363062
33393137376263383335363632633836626137346663613934346136306436353230663934633637
32356234386161393937303563343931373939623737636466363936393438353666326663373038
66343339353430393065346237626434356462653330313064303166366239343636636661633438
38613863386666343638663762303531326531633062343132663462333137373062646339623961
35666164313962356139623839323161303131306132633139303463393661636165353566373561
37333963386332386635616332326239386639636434376232356465366131306366376464366433
33323839326366653261636665623136336564373333313135313661633536333837353163373334
32366532373239303263386565363236383036623333353662303031373335653032646166386262
33656266356164666130343135386263346533393533386166306666366137313231386434343434
31653633303133323031343566663834636565313235323863353963363633346264636339653463
34353834343836306633346638313066316162373239326435313532643764306461663965303236
31386331303334636636623035303236303265633839323963633066633932336335326561623334
34366565393434393131656564646132343964653637393739613837313561646238646631316265
32303865633862386162393161336533313465326632363463653831623961633039393932623633
63613730663131343463316436326437393931343566373533666638366631333264353939343862
306362633430393061666539616565383366

41
roles/pep/tasks/main.yaml

@@ -0,0 +1,41 @@
---
- name: Generate PEP config
template:
src: templates/nginx.conf.j2
dest: "{{ target_path }}/nginx.conf"
- name: Generate PEP default config
when: pep is defined and pep == True
template:
src: templates/nginx.default.conf.j2
dest: "{{ target_path }}/nginx.default.conf"
- name: Generate PEP default config
when: pep is not defined or pep == False
template:
src: templates/nginx.default.conf.nopep.j2
dest: "{{ target_path }}/nginx.default.conf"
- name: Generate config.js
when: pep is defined and pep == True
template:
src: templates/config.js.j2
dest: "{{ target_path }}/config.js"
- name: Generate pep.js
when: pep is defined and pep == True
template:
src: templates/pep.js.j2
dest: "{{ target_path }}/pep.js"
- name: Generate pep-docker-swarm
template:
src: templates/pep-swarm.yaml.j2
dest: "{{ target_path }}/pep-swarm.yaml"
- name: Generate pep-docker-swarm when behind HA proxy
when: ha_network is defined and ha_network == True
template:
src: templates/pep-swarm-ha_network.yaml.j2
dest: "{{ target_path }}/pep-swarm.yaml"

99
roles/pep/templates/config.js.j2

@@ -0,0 +1,99 @@
export default { config };
var config = {
"pep-credentials" : "{{ pep_credentials }}",
"hosts" : [
{
"host": "{{ conductor_server_name }}",
"audience" : "conductor-server",
"allow-basic-auth" : true,
"pip" : [ { claim: "context", operator : "get-contexts" } ],
"paths" : [
{
"name" : "metadata",
"path" : "^/api/metadata/(taskdefs|workflow)/?.*$",
"methods" : [
{
"method" : "GET",
"scopes" : ["get","list"]
}
]
},
{
"name" : "metadata.taskdefs",
"path" : "^/api/metadata/taskdefs/?.*$",
"methods" : [
{
"method" : "POST",
"scopes" : ["create"]
},
{
"method" : "DELETE",
"scopes" : ["delete"],
},
{
"method" : "PUT",
"scopes" : ["update"],
}
]
},
{
"name" : "metadata.workflow",
"path" : "^/api/metadata/workflow/?.*$",
"methods" : [
{
"method" : "POST",
"scopes" : ["create"]
},
{
"method" : "DELETE",
"scopes" : ["delete"],
},
{
"method" : "PUT",
"scopes" : ["update"],
}
]
},
{
"name" : "workflow",
"path" : "^/api/workflow/?.*$",
"methods" : [
{
"method" : "GET",
"scopes" : ["get"],
},
{
"method" : "POST",
"scopes" : ["start"],
},
{
"method" : "DELETE",
"scopes" : ["terminate"],
}
]
},
{
"name" : "task",
"path" : "^/api/tasks/poll/.+$",
"methods" : [
{
"method" : "GET",
"scopes" : ["poll"],
}
]
},
{
"name" : "task",
"path" : "^/api/tasks[/]?$",
"methods" : [
{
"method" : "POST",
"scopes" : ["update"],
}
]
}
]
}
]
}

18
roles/pep/templates/nginx.conf.j2

@@ -0,0 +1,18 @@
load_module modules/ngx_http_js_module.so;
worker_processes 1;
events {
worker_connections 1024;
}
http {
{% if pep is defined and pep == True %}
js_import pep.js;
js_set $authorization pep.enforce;
proxy_cache_path /var/cache/nginx/pep keys_zone=token_responses:1m max_size=2m;
{% endif %}
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}

109
roles/pep/templates/nginx.default.conf.j2

@@ -0,0 +1,109 @@
upstream _conductor-server {
ip_hash;
server {{ conductor_service }}:8080;
}
upstream _conductor-ui {
ip_hash;
server {{ conductor_ui_service }}:5000;
}
map $http_authorization $source_auth {
default "";
}
js_var $auth_token;
js_var $pep_credentials;
server {
listen *:80;
listen [::]:80;
server_name {{ conductor_server_name }};
{% if conductor_server_name != conductor_ui_server_name %}
# When separate vhosts are available for the ui and the apis (as in the local-site deployment), also forward / to the swagger docs
location / {
proxy_set_header Host $host;
proxy_pass http://_conductor-server;
}
{% endif %}
location /health {
proxy_set_header Host $host;
proxy_pass http://_conductor-server;
}
location /api/ {
js_content pep.enforce;
}
location @backend {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Original-URI $request_uri;
proxy_pass http://_conductor-server;
}
location /jwt_verify_request {
internal;
proxy_method POST;
proxy_http_version 1.1;
proxy_set_header Authorization $pep_credentials;
proxy_set_header Content-Type "application/x-www-form-urlencoded";
proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token/introspect";
proxy_ignore_headers Cache-Control Expires Set-Cookie;
gunzip on;
proxy_cache token_responses; # Enable caching
proxy_cache_key $source_auth; # Cache for each source authentication
proxy_cache_lock on; # Duplicate tokens must wait
proxy_cache_valid 200 10s; # How long to use each response
}
location /jwt_request {
internal;
proxy_method POST;
proxy_http_version 1.1;
proxy_set_header Authorization $pep_credentials;
proxy_set_header Content-Type "application/x-www-form-urlencoded";
proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token";
gunzip on;
}
location /permission_request {
internal;
proxy_method POST;
proxy_http_version 1.1;
proxy_set_header Content-Type "application/x-www-form-urlencoded";
proxy_set_header Authorization "Bearer $auth_token";
proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token";
gunzip on;
}