conductor 3.0.4 with oauth2 and pep

parent 2d4585d086
commit 288482d5b6
@@ -1,3 +0,0 @@
----
-infrastructure: dev
-conductor_workers_server: http://conductor-dev.int.d4science.net/api
@@ -1,3 +0,0 @@
----
-infrastructure: pre
-conductor_workers_server: https://conductor.pre.d4science.org/api
@@ -1,3 +0,0 @@
----
-infrastructure: prod
-conductor_workers_server: https://conductor.d4science.org/api
@@ -1,5 +1,5 @@
 [dev_infra:children]
-nw_cluster
+dev_cluster
 
-[nw_cluster]
-nubis1.int.d4science.net
+[dev_cluster]
+conductor.dev.d4science.org
@@ -0,0 +1,5 @@
+[nw_cluster_infra:children]
+nw_cluster
+
+[nw_cluster]
+nubis1.int.d4science.net
@@ -1,14 +0,0 @@
-[common]
-loglevel = info
-server = http://conductor-server:8080/api
-threads = 1
-pollrate = 1
-
-[pymail]
-server=smtp-relay.d4science.org
-user=conductor_dev
-password=d20d6ea975b01bc
-protocol=starttls
-port=587
-
-
@@ -1,188 +0,0 @@
--- V1__initial_schema.sql --
--- --------------------------------------------------------------------------------------------------------------
--- SCHEMA FOR METADATA DAO
--- --------------------------------------------------------------------------------------------------------------
-
-CREATE TABLE meta_event_handler (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  name varchar(255) NOT NULL,
-  event varchar(255) NOT NULL,
-  active boolean NOT NULL,
-  json_data TEXT NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE INDEX event_handler_name_index ON meta_event_handler (name);
-CREATE INDEX event_handler_event_index ON meta_event_handler (event);
-
-CREATE TABLE meta_task_def (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  name varchar(255) NOT NULL,
-  json_data TEXT NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_task_def_name ON meta_task_def (name);
-
-CREATE TABLE meta_workflow_def (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  name varchar(255) NOT NULL,
-  version int NOT NULL,
-  latest_version int NOT NULL DEFAULT 0,
-  json_data TEXT NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_name_version ON meta_workflow_def (name,version);
-CREATE INDEX workflow_def_name_index ON meta_workflow_def (name);
-
--- --------------------------------------------------------------------------------------------------------------
--- SCHEMA FOR EXECUTION DAO
--- --------------------------------------------------------------------------------------------------------------
-
-CREATE TABLE event_execution (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  event_handler_name varchar(255) NOT NULL,
-  event_name varchar(255) NOT NULL,
-  message_id varchar(255) NOT NULL,
-  execution_id varchar(255) NOT NULL,
-  json_data TEXT NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_event_execution ON event_execution (event_handler_name,event_name,message_id);
-
-CREATE TABLE poll_data (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  queue_name varchar(255) NOT NULL,
-  domain varchar(255) NOT NULL,
-  json_data TEXT NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_poll_data ON poll_data (queue_name,domain);
-CREATE INDEX ON poll_data (queue_name);
-
-CREATE TABLE task_scheduled (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  workflow_id varchar(255) NOT NULL,
-  task_key varchar(255) NOT NULL,
-  task_id varchar(255) NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_workflow_id_task_key ON task_scheduled (workflow_id,task_key);
-
-CREATE TABLE task_in_progress (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  task_def_name varchar(255) NOT NULL,
-  task_id varchar(255) NOT NULL,
-  workflow_id varchar(255) NOT NULL,
-  in_progress_status boolean NOT NULL DEFAULT false,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_task_def_task_id1 ON task_in_progress (task_def_name,task_id);
-
-CREATE TABLE task (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  task_id varchar(255) NOT NULL,
-  json_data TEXT NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_task_id ON task (task_id);
-
-CREATE TABLE workflow (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  workflow_id varchar(255) NOT NULL,
-  correlation_id varchar(255),
-  json_data TEXT NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_workflow_id ON workflow (workflow_id);
-
-CREATE TABLE workflow_def_to_workflow (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  workflow_def varchar(255) NOT NULL,
-  date_str varchar(60),
-  workflow_id varchar(255) NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_workflow_def_date_str ON workflow_def_to_workflow (workflow_def,date_str,workflow_id);
-
-CREATE TABLE workflow_pending (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  workflow_type varchar(255) NOT NULL,
-  workflow_id varchar(255) NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_workflow_type_workflow_id ON workflow_pending (workflow_type,workflow_id);
-CREATE INDEX workflow_type_index ON workflow_pending (workflow_type);
-
-CREATE TABLE workflow_to_task (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  workflow_id varchar(255) NOT NULL,
-  task_id varchar(255) NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_workflow_to_task_id ON workflow_to_task (workflow_id,task_id);
-CREATE INDEX workflow_id_index ON workflow_to_task (workflow_id);
-
--- --------------------------------------------------------------------------------------------------------------
--- SCHEMA FOR QUEUE DAO
--- --------------------------------------------------------------------------------------------------------------
-
-CREATE TABLE queue (
-  id SERIAL,
-  created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  queue_name varchar(255) NOT NULL,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_queue_name ON queue (queue_name);
-
-CREATE TABLE queue_message (
-  id SERIAL,
-  created_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-  deliver_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-  queue_name varchar(255) NOT NULL,
-  message_id varchar(255) NOT NULL,
-  priority integer DEFAULT 0,
-  popped boolean DEFAULT false,
-  offset_time_seconds BIGINT,
-  payload TEXT,
-  PRIMARY KEY (id)
-);
-CREATE UNIQUE INDEX unique_queue_name_message_id ON queue_message (queue_name,message_id);
-CREATE INDEX combo_queue_message ON queue_message (queue_name,popped,deliver_on,created_on);
-
--- V2__1009_Fix_PostgresExecutionDAO_Index.sql --
-DROP INDEX IF EXISTS unique_event_execution;
-
-CREATE UNIQUE INDEX unique_event_execution ON event_execution (event_handler_name,event_name,execution_id);
-
--- V3__correlation_id_index.sql --
-DROP INDEX IF EXISTS workflow_corr_id_index;
-
-CREATE INDEX workflow_corr_id_index ON workflow (correlation_id);
-
--- V4__new_qm_index_with_priority.sql --
-DROP INDEX IF EXISTS combo_queue_message;
-
-CREATE INDEX combo_queue_message ON queue_message (queue_name,priority,popped,deliver_on,created_on);
@@ -1,42 +0,0 @@
-# Servers.
-conductor.jetty.server.enabled=true
-conductor.grpc.server.enabled=false
-
-# Database persistence model. Possible values are memory, redis, and dynomite.
-# If omitted, the persistence used is memory
-#
-# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo
-# redis : non-Dynomite based redis instance
-# dynomite : Dynomite cluster. Use this for HA configuration.
-db=postgres
-jdbc.url=jdbc:postgresql://postgresdb:5432/conductor
-jdbc.username=conductor
-jdbc.password=password
-conductor.postgres.connection.pool.size.max=10
-conductor.postgres.connection.pool.idle.min=2
-flyway.enabled=false
-
-
-# Elastic search instance type. Possible values are memory and external.
-# If not specified, the instance type will be embedded in memory
-#
-# memory: The instance is created in memory and lost when the server dies. Useful for development and testing.
-# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when
-#           the server dies. Useful for more stable environments like staging or production.
-workflow.elasticsearch.instanceType=external
-
-# Transport address to elasticsearch
-workflow.elasticsearch.url=elasticsearch:9300
-
-# Name of the elasticsearch cluster
-workflow.elasticsearch.index.name=conductor
-
-# Additional modules (optional)
-# conductor.additional.modules=class_extending_com.google.inject.AbstractModule
-
-# Additional modules for metrics collection (optional)
-# conductor.additional.modules=com.netflix.conductor.contribs.metrics.MetricsRegistryModule,com.netflix.conductor.contribs.metrics.LoggingMetricsModule
-# com.netflix.conductor.contribs.metrics.LoggingMetricsModule.reportPeriodSeconds=15
-
-# Load sample kitchen sink workflow
-loadSample=false
@@ -1,32 +0,0 @@
-version: '3.6'
-
-services:
-  base:
-    environment:
-      CONDUCTOR_SERVER: http://conductor-dev.int.d4science.net/api
-    configs:
-      - source: base-config
-        target: /app/config.cfg
-    image: 'nubisware/nubisware-conductor-worker-py-base'
-    networks:
-      - conductor-network
-    deploy:
-      mode: replicated
-      replicas: 2
-      placement:
-        constraints: [node.role == worker]
-      restart_policy:
-        condition: on-failure
-        delay: 5s
-        max_attempts: 3
-        window: 120s
-    logging:
-      driver: "journald"
-
-networks:
-  conductor-network:
-
-configs:
-  base-config:
-    file: base-config.cfg
@@ -1,144 +0,0 @@
-version: '3.6'
-
-services:
-  postgresdb:
-    image: postgres
-    environment:
-      POSTGRES_USER: "conductor"
-      POSTGRES_PASSWORD: "password"
-      POSTGRES_DB: "conductor"
-    configs:
-      - source: db-init
-        target: "/docker-entrypoint-initdb.d/db-init.sql"
-    networks:
-      - conductor-network
-    deploy:
-      replicas: 1
-
-  conductor-server:
-    environment:
-      - CONFIG_PROP=conductor-swarm-config.properties
-    image: nubisware/conductor-server
-    networks:
-      - conductor-network
-    deploy:
-      mode: replicated
-      replicas: 1
-      restart_policy:
-        condition: on-failure
-        delay: 15s
-        max_attempts: 10
-        window: 120s
-    configs:
-      - source: swarm-config
-        target: /app/config/conductor-swarm-config.properties
-#    logging:
-#      driver: "journald"
-
-  elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
-    environment:
-      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
-      - transport.host=0.0.0.0
-      - discovery.type=single-node
-      - xpack.security.enabled=false
-    networks:
-      conductor-network:
-        aliases:
-          - es
-#    logging:
-#      driver: "journald"
-    deploy:
-      mode: replicated
-      replicas: 1
-      restart_policy:
-        condition: on-failure
-        delay: 5s
-        max_attempts: 3
-        window: 120s
-
-  conductor-ui:
-    environment:
-      - WF_SERVER=http://conductor-server:8080/api/
-      - AUTH_CONFIG_PATH=/app/config/auth.config
-    #image: nubisware/conductor-ui
-    #image: nubisware/conductor-ui_oauth2:2.31
-    image: conductor-ui_oauth2:2.31
-    networks:
-      - conductor-network
-    configs:
-      - source: auth-config
-        target: /app/config/auth.config
-    deploy:
-      mode: replicated
-      replicas: 1
-      restart_policy:
-        condition: on-failure
-        delay: 15s
-        max_attempts: 10
-        window: 120s
-
-  base:
-    environment:
-      CONDUCTOR_SERVER: http://conductor-dev.int.d4science.net/api
-    configs:
-      - source: base-config
-        target: /app/config.cfg
-    image: 'nubisware/nubisware-conductor-worker-py-base'
-    networks:
-      - conductor-network
-    deploy:
-      mode: replicated
-      replicas: 1
-      restart_policy:
-        condition: on-failure
-        delay: 5s
-        max_attempts: 3
-        window: 120s
-#    logging:
-#      driver: "journald"
-
-  pep:
-    image: nginx:1.19.8-alpine
-    networks:
-      - conductor-network
-    ports:
-      - "80:80"
-    env_file:
-      - nginx.env
-    volumes:
-      - "${PWD}/keycloak.js:/etc/nginx/keycloak.js"
-    # to be uncommented for debug purposes
-    #command: [nginx-debug, '-g', 'daemon off;']
-    deploy:
-      replicas: 1
-      restart_policy:
-        condition: on-failure
-        delay: 10s
-#        max_attempts: 3
-        window: 120s
-    configs:
-      - source: nginxconf
-        target: /etc/nginx/templates/default.conf.template
-      - source: nginxbaseconf
-        target: /etc/nginx/nginx.conf
-
-networks:
-  conductor-network:
-
-configs:
-  swarm-config:
-    file: ./conductor-swarm-config.properties
-  auth-config:
-    file: ./oauth2auth.cfg
-  db-init:
-    file: ./conductor-db-init.sql
-  base-config:
-    file: base-config.cfg
-  nginxconf:
-    file: ${PWD}/nginx.default.conf
-  nginxbaseconf:
-    file: ${PWD}/nginx.conf
@@ -1,20 +0,0 @@
-export default { introspectAccessToken };
-
-function introspectAccessToken(r) {
-    r.error("Inside introspectAccessToken " + njs.dump(r.variables))
-    r.subrequest("/jwt_verify_request",
-        function(reply) {
-            if (reply.status == 200) {
-                var response = JSON.parse(reply.responseBody);
-                r.error("Response is " + reply.responseBody)
-                if (response.active == true) {
-                    r.return(204); // Token is valid, return success code
-                } else {
-                    r.return(403); // Token is invalid, return forbidden code
-                }
-            } else {
-                r.return(401); // Unexpected response, return 'auth required'
-            }
-        }
-    );
-}
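Note: the subrequest above delegates to an RFC 7662 token-introspection endpoint; the gateway only checks the "active" flag of the response. A minimal standalone sketch of the same call for debugging from the command line (the realm URL and the client credentials below are placeholders, not the deployment's real values):

    # Hypothetical re-implementation of the introspection check (Python stdlib only).
    import base64
    import json
    import urllib.parse
    import urllib.request

    def introspect(token: str) -> bool:
        # Placeholder endpoint and client credentials: substitute the real ones.
        url = ("https://accounts.dev.d4science.org/auth/realms/master"
               "/protocol/openid-connect/token/introspect")
        creds = base64.b64encode(b"client_id:client_secret").decode()
        body = urllib.parse.urlencode(
            {"token": token, "token_type_hint": "access_token"}).encode()
        req = urllib.request.Request(url, data=body, method="POST")
        req.add_header("Authorization", "Basic " + creds)
        req.add_header("Content-Type", "application/x-www-form-urlencoded")
        with urllib.request.urlopen(req) as resp:
            # Mirrors the response.active check done in introspectAccessToken().
            return json.load(resp).get("active", False)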
@@ -1,22 +0,0 @@
-load_module modules/ngx_http_js_module.so;
-
-worker_processes 1;
-
-events {
-    worker_connections 1024;
-}
-
-http {
-
-    js_import keycloak.js;
-    proxy_cache_path /var/cache/nginx/keycloak keys_zone=token_responses:1m max_size=2m;
-
-#    js_import json_log.js;
-#    js_set $json_debug_log json_log.debugLog;
-#    log_format access_debug escape=none $json_debug_log; # Offload to njs
-#    access_log /var/log/nginx/access.log access_debug;
-
-    include /etc/nginx/conf.d/*.conf;
-    include /etc/nginx/sites-enabled/*;
-}
-
@@ -1,78 +0,0 @@
-upstream _conductor-server {
-    ip_hash;
-    server conductor-server:8080;
-}
-
-upstream _conductor-ui {
-    ip_hash;
-    server conductor-ui:5000;
-}
-
-map $http_authorization $source_token {
-    default "";
-    "~*^Bearer\s+(?<token>[\S]+)$" $token;
-}
-
-server {
-
-    listen *:80;
-    listen [::]:80;
-    server_name conductor-server;
-
-    proxy_set_header Host $host;
-    proxy_set_header X-Real-IP $remote_addr;
-    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-    proxy_set_header X-Forwarded-Host $host;
-    proxy_set_header X-Forwarded-Server $host;
-    proxy_set_header X-Forwarded-Port $server_port;
-    proxy_set_header X-Forwarded-Proto $scheme;
-
-    location / {
-        #auth_request /jwt_verify;
-        proxy_pass http://_conductor-server;
-    }
-
-    location = /jwt_verify {
-        internal;
-        js_content keycloak.introspectAccessToken;
-    }
-
-    location /jwt_verify_request {
-        internal;
-        proxy_method POST;
-        proxy_http_version 1.1;
-        proxy_set_header Host "127.0.0.1";
-        proxy_set_header Authorization "Basic Z2F5YV9wZXA6NWJiN2RjYWItN2NlNy00YTQ3LTlmNTUtZmE4MWFlYmNjM2I4";
-        proxy_set_header Content-Type "application/x-www-form-urlencoded";
-        proxy_set_body "token=$source_token&token_type_hint=access_token";
-        proxy_pass http://accounts.dev.d4science.org/auth/realms/master/protocol/openid-connect/token/introspect;
-
-        proxy_cache token_responses; # Enable caching
-        proxy_cache_key $source_token; # Cache for each access token
-        proxy_cache_lock on; # Duplicate tokens must wait
-        proxy_cache_valid 200 10s; # How long to use each response
-        proxy_ignore_headers Cache-Control Expires Set-Cookie;
-    }
-
-}
-
-server {
-
-    listen *:80 default_server;
-    listen [::]:80 default_server;
-    server_name conductor-ui;
-
-    proxy_set_header Host $host;
-    proxy_set_header X-Real-IP $remote_addr;
-    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-    proxy_set_header X-Forwarded-Host $host;
-    proxy_set_header X-Forwarded-Server $host;
-    proxy_set_header X-Forwarded-Port $server_port;
-    proxy_set_header X-Forwarded-Proto $scheme;
-
-    location / {
-        #auth_request /jwt_verify;
-        proxy_pass http://_conductor-ui;
-    }
-
-}
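Note: the map block above strips the "Bearer " prefix from the Authorization header before the token is forwarded to introspection; non-matching headers collapse to the empty string. The nginx regex can be sanity-checked outside nginx with an equivalent pattern (a sketch; the pattern is copied from the config):

    import re

    # Same pattern as the nginx map, with a named group for the token.
    BEARER = re.compile(r"^Bearer\s+(?P<token>\S+)$", re.IGNORECASE)

    def source_token(authorization_header: str) -> str:
        m = BEARER.match(authorization_header)
        return m.group("token") if m else ""  # nginx maps non-matches to ""

    assert source_token("Bearer abc.def.ghi") == "abc.def.ghi"
    assert source_token("Basic Zm9vOmJhcg==") == ""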
@@ -1,4 +0,0 @@
-NGINX_PORT=80
-CAMUNDA_PORT=8080
-PGADMIN_PORT=80
-KEYCLOAK_PORT=8080
@@ -1,24 +0,0 @@
-{
-  "strategy": "oauth2",
-  "strategySettings": {
-    "authorizationURL": "https://accounts.dev.d4science.org/auth/realms/d4science/protocol/openid-connect/auth",
-    "tokenURL": "https://accounts.dev.d4science.org/auth/realms/d4science/protocol/openid-connect/token",
-    "clientID": "conductor-ui",
-    "clientSecret": "b10d40b3-1f3c-47ce-baf4-d58bf6386eb3",
-    "callbackURL": "http://localhost/login/callback",
-    "logoutURL": "https://accounts.dev.d4science.org/auth/realms/d4science/protocol/openid-connect/logout",
-    "logoutCallbackURL": "http://localhost/logout/callback",
-    "roles": [ "admin", "viewer" ]
-  },
-  "cookieSecret": "b10d40b3-1f3c-47ce-baf4-d58bf6386eb3",
-  "audit": true,
-  "acl": [
-    "POST /(.*) admin",
-    "PUT /(.*) admin",
-    "DELETE /(.*) admin",
-    "GET /api/(.*) *",
-    "GET /(.*) viewer,admin"
-  ]
-}
-
-
@@ -1,28 +0,0 @@
-frontend http
-
-    bind *:80
-
-    mode http
-    option http-keep-alive
-
-    use_backend conductor-server_bck if { hdr_dom(host) -i conductor-server.local.net }
-    use_backend conductor-ui_bck if { hdr_dom(host) -i conductor-ui.local.net }
-
-#
-# Backends
-#
-
-backend conductor-server_bck
-    mode http
-    option httpchk
-    balance roundrobin
-    http-check send meth GET uri /api/health ver HTTP/1.1 hdr Host localhost
-    http-check expect rstatus (2|3)[0-9][0-9]
-    server-template conductor-server- 2 conductor-local_conductor-server:8080 check resolvers docker init-addr libc,none
-
-backend conductor-ui_bck
-    mode http
-    option httpchk
-    balance roundrobin
-    http-check send meth GET uri / ver HTTP/1.1 hdr Host localhost
-    http-check expect rstatus (2|3)[0-9][0-9]
-    server-template conductor-ui- 2 conductor-local_conductor-ui:5000 check resolvers docker init-addr libc,none
@@ -1,56 +0,0 @@
----
-haproxy_latest_release: True
-haproxy_version: 2.2
-haproxy_repo_key: 'http://haproxy.debian.net/bernat.debian.org.gpg'
-haproxy_debian_latest_repo: "deb http://haproxy.debian.net {{ ansible_lsb.codename }}-backports-{{ haproxy_version }} main"
-haproxy_ubuntu_latest_repo: "ppa:vbernat/haproxy-{{ haproxy_version }}"
-haproxy_pkg_state: present
-haproxy_enabled: True
-haproxy_loglevel: info
-haproxy_k_bind_non_local_ip: True
-haproxy_docker_container: False
-haproxy_docker_version: '{{ haproxy_version }}.4'
-haproxy_docker_image: 'haproxytech/haproxy-debian:{{ haproxy_version }}.4'
-haproxy_docker_compose_dir: /srv/haproxy_swarm
-haproxy_docker_restart_policy: 'on-failure'
-
-haproxy_ha_with_keepalived: False
-haproxy_docker_swarm_networks:
-  - '{{ docker_swarm_portainer_network }}'
-haproxy_docker_swarm_additional_networks: []
-
-haproxy_docker_swarm_haproxy_constraints:
-  - 'node.role == manager'
-haproxy_docker_swarm_additional_services: [{ acl_name: 'conductor-server', acl_rule: 'hdr_dom(host) -i conductor-dev.int.d4science.net', stack_name: 'conductor-{{ infrastructure }}', service_name: 'conductor-server', service_replica_num: '2', service_port: '8080', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri /api/health ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }, { acl_name: 'conductor-ui', acl_rule: 'hdr_dom(host) -i conductorui-dev.int.d4science.net', stack_name: 'conductor-{{ infrastructure }}', service_name: 'conductor-ui', service_replica_num: '2', service_port: '5000', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }]
-# - { acl_name: 'service', acl_rule: 'hdr_dom(host) -i service.example.com', stack_name: 'stack', service_name: 'service', service_replica_num: '1', service_port: '9999', service_overlay_network: 'service-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth HEAD uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]', allowed_networks: '192.168.1.0/24 192.168.2.0/24' }
-
-haproxy_default_port: 80
-haproxy_terminate_tls: False
-haproxy_ssl_port: 443
-haproxy_admin_port: 8880
-haproxy_admin_socket: /run/haproxy/admin.sock
-
-haproxy_install_additional_pkgs: False
-haproxy_additional_pkgs:
-  - haproxyctl
-  - haproxy-log-analysis
-
-haproxy_nagios_check: False
-# It's a percentage
-haproxy_nagios_check_w: 70
-haproxy_nagios_check_c: 90
-
-# Used by some other role as defaults, eg docker-swarm
-haproxy_spread_checks: 5
-haproxy_connect_timeout: 10s
-haproxy_client_timeout: 120s
-haproxy_server_timeout: 480s
-haproxy_global_keepalive_timeout: 10s
-haproxy_client_keepalive_timeout: 5184000s
-haproxy_backend_maxconn: 2048
-haproxy_check_interval: 3s
-haproxy_check_timeout: 2s
-haproxy_maxconns: 4096
-
-haproxy_sysctl_conntrack_max: 131072
@@ -1,16 +0,0 @@
----
-- name: Generate haproxy config
-  template:
-    src: templates/haproxy.cfg.j2
-    dest: "{{ target_path }}/haproxy.cfg"
-
-- name: Generate haproxy-docker-swarm
-  template:
-    src: templates/haproxy-docker-swarm.yaml.j2
-    dest: "{{ target_path }}/haproxy-swarm.yaml"
-
-- name: Create the overlay network that will be joined by the proxied services
-  docker_network:
-    name: '{{ haproxy_docker_overlay_network }}'
-    driver: overlay
-    scope: swarm
@@ -1,56 +0,0 @@
-version: '3.6'
-
-services:
-  haproxy:
-    image: {{ haproxy_docker_image }}
-    configs:
-      - source: haproxy-config
-        target: /usr/local/etc/haproxy/haproxy.cfg
-    networks:
-      - {{ haproxy_docker_overlay_network }}
-    volumes:
-      #- /etc/haproxy:/usr/local/etc/haproxy:ro
-      - /var/run/docker.sock:/var/run/docker.sock
-    ports:
-      - target: {{ haproxy_default_port }}
-        published: {{ haproxy_default_port }}
-        protocol: tcp
-        mode: host
-      - target: {{ haproxy_ssl_port }}
-        published: {{ haproxy_ssl_port }}
-        protocol: tcp
-        mode: host
-      - target: {{ haproxy_admin_port }}
-        published: {{ haproxy_admin_port }}
-        protocol: tcp
-        mode: host
-    dns: [127.0.0.11]
-    deploy:
-      mode: replicated
-      replicas: 1
-      update_config:
-        parallelism: 1
-        delay: 20s
-      placement:
-        constraints:
-          - "node.role==manager"
-      restart_policy:
-        condition: {{ haproxy_docker_restart_policy }}
-        delay: 20s
-        max_attempts: 5
-        window: 120s
-      resources:
-        limits:
-          cpus: '2.0'
-          memory: 768M
-        reservations:
-          cpus: '1.0'
-          memory: 384M
-    logging:
-      driver: 'journald'
-
-configs:
-  haproxy-config:
-    file: ./haproxy.cfg
-
-networks:
-  {{ haproxy_docker_overlay_network }}:
-    external: true
@@ -1,75 +0,0 @@
-global
-    log fd@2 local2
-    chroot /var/lib/haproxy
-    pidfile /var/run/haproxy.pid
-    maxconn 4000
-    user haproxy
-    group haproxy
-    stats socket /var/lib/haproxy/stats expose-fd listeners
-    master-worker
-
-resolvers docker
-    nameserver dns1 127.0.0.11:53
-    resolve_retries 3
-    timeout resolve 1s
-    timeout retry 1s
-    hold other 10s
-    hold refused 10s
-    hold nx 10s
-    hold timeout 10s
-    hold valid 10s
-    hold obsolete 10s
-
-defaults
-    timeout connect 10s
-    timeout client 30s
-    timeout server 30s
-    log global
-    monitor-uri /_haproxy_health_check
-    timeout http-keep-alive {{ haproxy_global_keepalive_timeout }}
-    timeout connect {{ haproxy_connect_timeout }}
-    timeout client {{ haproxy_client_timeout }}
-    timeout server {{ haproxy_server_timeout }}
-    timeout check {{ haproxy_check_timeout }}
-    timeout http-request 10s # slowloris protection
-    default-server inter 3s fall 2 rise 2 slowstart 60s
-
-# Needed to preserve the stick tables
-peers mypeers
-    peer local_haproxy 127.0.0.1:1024
-
-frontend http
-
-    bind *:{{ haproxy_default_port }}
-
-    mode http
-    option http-keep-alive
-
-{% for srv in haproxy_docker_swarm_additional_services %}
-    use_backend {{ srv.acl_name }}_bck if { {{ srv.acl_rule }} }
-{% endfor %}
-
-#
-# Backends
-#
-
-{% for srv in haproxy_docker_swarm_additional_services %}
-backend {{ srv.acl_name }}_bck
-    mode http
-    option httpchk
-    balance {{ srv.balance_type | default('roundrobin') }}
-{% if srv.http_check_enabled is defined and srv.http_check_enabled %}
-    http-check send {{ srv.http_check }}
-    http-check expect {{ srv.http_check_expect }}
-{% endif %}
-{% if srv.stick_sessions %}
-{% if srv.stick_on_cookie %}
-    cookie {{ srv.stick_cookie }}
-{% else %}
-    stick on src
-    stick-table {{ srv.stick_table }}
-{% endif %}
-{% endif %}
-    server-template {{ srv.service_name }}- {{ srv.service_replica_num }} {{ srv.stack_name }}_{{ srv.service_name }}:{{ srv.service_port }} {{ srv.backend_options | default('') }} check resolvers docker init-addr libc,none
-{% endfor %}
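Note: since the frontend rules and backend blocks are generated per entry of haproxy_docker_swarm_additional_services, rendering the template locally is the quickest way to verify the loop output before deploying. A minimal sketch with the jinja2 Python package (the variable values are illustrative, taken from the role defaults above):

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("templates"))
    tpl = env.get_template("haproxy.cfg.j2")

    # Illustrative subset of the role defaults shown above.
    print(tpl.render(
        haproxy_default_port=80,
        haproxy_global_keepalive_timeout="10s",
        haproxy_connect_timeout="10s",
        haproxy_client_timeout="120s",
        haproxy_server_timeout="480s",
        haproxy_check_timeout="2s",
        haproxy_docker_swarm_additional_services=[{
            "acl_name": "conductor-server",
            "acl_rule": "hdr_dom(host) -i conductor-dev.int.d4science.net",
            "stack_name": "conductor-dev",
            "service_name": "conductor-server",
            "service_replica_num": "2",
            "service_port": "8080",
            "stick_sessions": False,
            "http_check_enabled": True,
            "http_check": "meth GET uri /api/health ver HTTP/1.1 hdr Host localhost",
            "http_check_expect": "rstatus (2|3)[0-9][0-9]",
        }],
    ))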
@@ -1,2 +0,0 @@
----
-haproxy_docker_overlay_network: 'haproxy-public'
@@ -1,4 +1,5 @@
 ---
+conductor_server: http://conductor-server:8080/api
 target_path: "/tmp/conductor_stack"
 conductor_network: conductor-network
 conductor_db: postgres
@@ -0,0 +1,15 @@
+$ANSIBLE_VAULT;1.1;AES256
+64626566636365626537356334643266623431393062653538313362663664643538373137383732
+3865313539323962666165386336373633303066353634350a633231666363356238326130373561
+64336633343066323464343136336333613233396164623537623762323261383537633137363234
+3534616537666436370a326331316232613839656436646164343236356233646233623430623665
+64663265313964653063333133326636353162353532626364316433373030396434616434333631
+32333936666339303963636438616164343063393364666332323831363833323131653666303534
+62376162313737303036366532316163383434333130643363613166333433616331393636613635
+30613132636261613165613136356638353532663634393431383739363636323961323538383566
+62316261373262663335393632376366383031306563343531643632633234346531633164303038
+62633039363961613538393832623039383237623663366430313238653030376263613032663437
+38386635303332386630386133366232343966393761643635313833316536386634633563326639
+38626435393963643866363663353834343333346139363565353161663737393166613938353562
+62623661326237353163623138386432376531353864383036613931643164333633646431353162
+3666373032663262623438353236626436303132306436326636
@@ -1,5 +1,12 @@
 ---
-conductor_replicas: 2
+conductor_replicas: 1
+conductor_ui_replicas: 1
+conductor_image: nubisware/conductor-server:3.0.4
+conductor_ui_image: nubisware/conductor-ui-oauth2:3.0.4
 conductor_config: conductor-swarm-config.properties
 conductor_config_template: "{{ conductor_config }}.j2"
 
+#nw_cluster_conductor_ui_secret: in vault
+#dev_conductor_ui_secret: in vault
+#pre_conductor_ui_secret: in vault
+#prod_conductor_ui_secret: in vault
@@ -4,28 +4,14 @@
     src: templates/conductor-swarm.yaml.j2
     dest: "{{ target_path }}/conductor-swarm.yaml"
 
-- name: Generate auth config
+- name: Generate local auth config
+  when: conductor_auth is defined
   template:
-    src: templates/auth.cfg.j2
+    src: "templates/{{ conductor_auth }}_auth.cfg.j2"
     dest: "{{ target_path }}/auth.cfg"
 
-- name: Generate conductor config from dynomite seeds
-  when: conductor_db is defined and conductor_db == 'dynomite'
-  vars:
-    seeds: "{{ lookup('file', '{{ target_path}}/seeds.list').splitlines() }}"
-  template:
-    src: "templates/{{ conductor_config_template }}"
-    dest: "{{ target_path }}/{{ conductor_config }}"
-
 - name: Generate conductor config for JDBC DB
   when: conductor_db is not defined or conductor_db != 'dynomite'
   template:
     src: "templates/{{ conductor_config_template }}"
     dest: "{{ target_path }}/{{ conductor_config }}"
-
-- name: Copy conductor SQL schema init for JDBC DB
-  when: (conductor_db is not defined or conductor_db != 'dynomite') and init_db
-  template:
-    src: "templates/conductor-db-init-{{ conductor_db }}.sql.j2"
-    dest: "{{ target_path }}/conductor-db-init.sql"
@@ -1,23 +0,0 @@
-{
-  "strategy": "local",
-  "strategySettings": {
-    "users": {
-      "admin": {
-        "hash": "098039dd5e84e486f83eadefc31ce038ccc90d6d62323528181049371c9460b4",
-        "salt": "salt",
-        "displayName": "Admin",
-        "email": "marco.lettere@nubisware.com",
-        "roles": [ "admin", "viewer" ]
-      }
-    }
-  },
-  "audit": true,
-  "acl": [
-    "POST /(.*) admin",
-    "PUT /(.*) admin",
-    "DELETE /(.*) admin",
-    "GET /api/(.*) viewer",
-    "GET /(.*) *"
-  ]
-}
-
@@ -1,92 +1,31 @@
 # Servers.
-conductor.jetty.server.enabled=true
-conductor.grpc.server.enabled=false
+conductor.grpc-server.enabled=false
 
-# Database persistence model. Possible values are memory, redis, and dynomite.
-# If omitted, the persistence used is memory
-#
-# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo
-# redis : non-Dynomite based redis instance
-# dynomite : Dynomite cluster. Use this for HA configuration.
+# Database persistence type.
 {% if conductor_db is not defined or conductor_db == 'postgres' %}
-db=postgres
-jdbc.url={{ postgres_jdbc_url }}
-jdbc.username={{ postgres_jdbc_user }}
-jdbc.password={{ postgres_jdbc_pass }}
-conductor.{{ conductor_db }}.connection.pool.size.max=10
-conductor.{{ conductor_db }}.connection.pool.idle.min=2
-flyway.enabled=false
-
-{% elif conductor_db is defined and conductor_db == 'mysql' %}
-db=mysql
-jdbc.url={{ mysql_jdbc_url }}
-jdbc.username={{ mysql_jdbc_user }}
-jdbc.password={{ mysql_jdbc_pass }}
-conductor.{{ conductor_db }}.connection.pool.size.max=10
-conductor.{{ conductor_db }}.connection.pool.idle.min=2
-flyway.enabled=false
-
-{% else %}
-db=dynomite
-
-# Dynomite Cluster details.
-# format is host:port:rack separated by semicolon
-workflow.dynomite.cluster.hosts={% set ns = namespace() %}
-{% set ns.availability_zone = "" %}
-{% for seed in seeds %}
-{% set ns.seed_tokens = seed.split(':') %}
-{% if ns.availability_zone == "" %}
-{% set ns.availability_zone = ns.seed_tokens[2] %}
-{% endif %}
-{% if ns.availability_zone == ns.seed_tokens[2] %}
-{{ ns.seed_tokens[0] }}:8102:{{ ns.availability_zone }}{%- if not loop.last %};{%- endif %}
-{% endif %}
-{%- endfor %}
-
-# If you are running using dynomite, also add the following line to the property
-# to set the rack/availability zone of the conductor server to be same as dynomite cluster config
-EC2_AVAILABILTY_ZONE={{ ns.availability_zone }}
-
-# Dynomite cluster name
-workflow.dynomite.cluster.name=dyno1
-
-# Namespace for the keys stored in Dynomite/Redis
-workflow.namespace.prefix=conductor
-
-# Namespace prefix for the dyno queues
-workflow.namespace.queue.prefix=conductor_queues
-
-# No. of threads allocated to dyno-queues (optional)
-queues.dynomite.threads=3
-
-# Non-quorum port used to connect to local redis. Used by dyno-queues.
-# When using redis directly, set this to the same port as redis server
-# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite.
-queues.dynomite.nonQuorum.port=22122
+conductor.db.type=postgres
+conductor.postgres.jdbcUrl={{ postgres_jdbc_url }}
+conductor.postgres.jdbcUsername={{ postgres_jdbc_user }}
+conductor.postgres.jdbcPassword={{ postgres_jdbc_pass }}
 {% endif %}
 
-# Elastic search instance type. Possible values are memory and external.
-# If not specified, the instance type will be embedded in memory
-#
-# memory: The instance is created in memory and lost when the server dies. Useful for development and testing.
-# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when
-#           the server dies. Useful for more stable environments like staging or production.
-workflow.elasticsearch.instanceType=external
-
-# Transport address to elasticsearch
-workflow.elasticsearch.url=elasticsearch:9300
-
-# Name of the elasticsearch cluster
+{% if conductor_db == 'imysql' %}
+conductor.db.type=mysql
+conductor.mysql.jdbcUrl={{ mysql_jdbc_url }}
+conductor.mysql.jdbcUsername={{ mysql_jdbc_user }}
+conductor.mysql.jdbcPassword={{ mysql_jdbc_pass }}
+{% endif %}
+
+# Hikari pool sizes are -1 by default and prevent startup
+conductor.{{ conductor_db }}.connectionPoolMaxSize=10
+conductor.{{ conductor_db }}.connectionPoolMinIdle=2
+
+# Elastic search instance indexing is enabled.
+conductor.indexing.enabled=true
+conductor.elasticsearch.url=http://elasticsearch:9200
+workflow.elasticsearch.instanceType=EXTERNAL
 workflow.elasticsearch.index.name=conductor
 
-# Additional modules (optional)
-# conductor.additional.modules=class_extending_com.google.inject.AbstractModule
-
-# Additional modules for metrics collection (optional)
-# conductor.additional.modules=com.netflix.conductor.contribs.metrics.MetricsRegistryModule,com.netflix.conductor.contribs.metrics.LoggingMetricsModule
-# com.netflix.conductor.contribs.metrics.LoggingMetricsModule.reportPeriodSeconds=15
-
 # Load sample kitchen sink workflow
 loadSample=false
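Note: this hunk tracks the Conductor 2.x → 3.x configuration key renames for the postgres branch (flat db/jdbc.* keys become namespaced conductor.* keys). A small lookup of the renames applied above, as a sketch for migrating any property file left behind:

    # Old 2.x key -> new 3.x key, as applied in the template above.
    RENAMES = {
        "db": "conductor.db.type",
        "jdbc.url": "conductor.postgres.jdbcUrl",
        "jdbc.username": "conductor.postgres.jdbcUsername",
        "jdbc.password": "conductor.postgres.jdbcPassword",
        "conductor.postgres.connection.pool.size.max": "conductor.postgres.connectionPoolMaxSize",
        "conductor.postgres.connection.pool.idle.min": "conductor.postgres.connectionPoolMinIdle",
        "workflow.elasticsearch.url": "conductor.elasticsearch.url",
    }

    def migrate(line: str) -> str:
        key, sep, value = line.partition("=")
        return RENAMES.get(key, key) + sep + value

    print(migrate("jdbc.url=jdbc:postgresql://postgresdb:5432/conductor"))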
@@ -6,28 +6,19 @@ services:
   conductor-server:
     environment:
       - CONFIG_PROP={{ conductor_config }}
-    image: nubisware/conductor-server
+    image: "{{ conductor_image }}"
     networks:
       - {{ conductor_network }}
-{% if clustered %}
-      - {{ haproxy_docker_overlay_network }}
-{% endif %}
-{% if not clustered %}
-    ports:
-      - "8080:8080"
-{% endif %}
     deploy:
       mode: replicated
       replicas: {{ conductor_replicas }}
-{% if clustered %}
-      endpoint_mode: dnsrr
-{% endif %}
+{% if infrastructure != 'local' %}
       placement:
         constraints: [node.role == worker]
+{% endif %}
       restart_policy:
         condition: on-failure
         delay: 5s
-        max_attempts: 3
         window: 120s
     configs:
       - source: swarm-config
@@ -40,42 +31,33 @@ services:
     environment:
       - WF_SERVER=http://conductor-server:8080/api/
      - AUTH_CONFIG_PATH=/app/config/auth.config
-    image: nubisware/conductor-ui
+    image: "{{ conductor_ui_image }}"
     networks:
       - {{ conductor_network }}
-{% if clustered %}
-      - {{ haproxy_docker_overlay_network }}
-{% endif %}
-{% if not clustered %}
-    ports:
-      - "5000:5000"
-{% endif %}
+{% if conductor_auth is defined %}
     configs:
       - source: auth-config
         target: /app/config/auth.config
+{% endif %}
     deploy:
       mode: replicated
-      replicas: {{ conductor_replicas }}
-{% if clustered %}
-      endpoint_mode: dnsrr
-{% endif %}
+      replicas: {{ conductor_ui_replicas }}
+{% if infrastructure != 'local' %}
       placement:
         constraints: [node.role == worker]
+{% endif %}
       restart_policy:
         condition: on-failure
         delay: 5s
-        max_attempts: 3
         window: 120s
 
 networks:
   {{ conductor_network }}:
-{% if clustered %}
-  {{ haproxy_docker_overlay_network }}:
-    external: True
-{% endif %}
 
 configs:
   swarm-config:
     file: ./{{ conductor_config }}
+{% if conductor_auth is defined %}
   auth-config:
     file: ./auth.cfg
+{% endif %}
@@ -0,0 +1,24 @@
+{
+  "strategy": "oauth2",
+  "strategySettings": {
+    "authorizationURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/auth",
+    "tokenURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token",
+    "clientID": "conductor-ui",
+    "clientSecret": "{{ conductor_ui_secret }}",
+    "callbackURL": "http://conductor-ui/login/callback",
+    "logoutURL": "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/logout",
+    "logoutCallbackURL": "http://conductor-ui/logout/callback",
+    "roles": [ "admin", "viewer" ]
+  },
+  "cookieSecret": "{{ conductor_ui_secret }}",
+  "audit": true,
+  "acl": [
+    "POST /(.*) admin",
+    "PUT /(.*) admin",
+    "DELETE /(.*) admin",
+    "GET /api/(.*) *",
+    "GET /(.*) viewer,admin"
+  ]
+}
+
+
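Note: the acl entries follow the conductor-ui auth plugin's "METHOD regex roles" shape, with "*" admitting any authenticated role. A hedged sketch of how such rules can be evaluated, assuming first-match-wins semantics (the real plugin may differ in detail):

    import re

    ACL = [
        "POST /(.*) admin",
        "PUT /(.*) admin",
        "DELETE /(.*) admin",
        "GET /api/(.*) *",
        "GET /(.*) viewer,admin",
    ]

    def allowed(method: str, path: str, user_roles: set) -> bool:
        for rule in ACL:
            rule_method, pattern, roles = rule.split(" ", 2)
            if method == rule_method and re.fullmatch(pattern, path):
                return roles == "*" or bool(user_roles & set(roles.split(",")))
        return False

    assert allowed("GET", "/api/metadata/workflow", {"viewer"})
    assert not allowed("DELETE", "/api/metadata/workflow", {"viewer"})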
@@ -3,7 +3,7 @@ version: '3.6'
 services:
 
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.8.15
     environment:
       - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
       - transport.host=0.0.0.0
@@ -18,9 +18,10 @@ services:
     deploy:
       mode: replicated
       replicas: {{ elasticsearch_replicas }}
-      #endpoint_mode: dnsrr
+{% if infrastructure != 'local' %}
       placement:
         constraints: [node.role == worker]
+{% endif %}
       restart_policy:
         condition: on-failure
         delay: 5s
@@ -9,22 +9,14 @@ services:
       MYSQL_PASSWORD: {{ mysql_jdbc_pass }}
       MYSQL_ROOT_PASSWORD: {{ mysql_jdbc_pass }}
       MYSQL_DB: {{ mysql_jdbc_db }}
-{% if init_db %}
-    configs:
-      - source: db-init
-        target: "/docker-entrypoint-initdb.d/db-init.sql"
-{% endif %}
     networks:
       - {{ conductor_network }}
     deploy:
       replicas: {{ mysql_replicas }}
+{% if infrastructure == 'local' %}
       placement:
         constraints: [node.role == worker]
+{% endif %}
 
 networks:
   {{ conductor_network }}:
-{% if init_db %}
-configs:
-  db-init:
-    file: {{ target_path }}/conductor-db-init.sql
-{% endif %}
@@ -4,28 +4,17 @@ services:
 
   {{ postgres_service_name }}:
     image: postgres
-    ports:
-      - "5432:5432"
     environment:
       POSTGRES_USER: "{{ postgres_jdbc_user }}"
       POSTGRES_PASSWORD: "{{ postgres_jdbc_pass }}"
       POSTGRES_DB: "{{ postgres_jdbc_db }}"
-{% if init_db %}
-    configs:
-      - source: db-init
-        target: "/docker-entrypoint-initdb.d/db-init.sql"
-{% endif %}
     networks:
       - {{ conductor_network }}
     deploy:
       replicas: {{ postgres_replicas }}
+{% if infrastructure != 'local' %}
       placement:
         constraints: [node.role == worker]
+{% endif %}
 
 networks:
   {{ conductor_network }}:
-{% if init_db %}
-configs:
-  db-init:
-    file: {{ target_path }}/conductor-db-init.sql
-{% endif %}
@@ -3,7 +3,7 @@ version: '3.6'
 services:
 
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.8.15
     environment:
       - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
       - transport.host=0.0.0.0
|
@ -18,13 +18,13 @@ services:
|
||||||
deploy:
|
deploy:
|
||||||
mode: replicated
|
mode: replicated
|
||||||
replicas: {{ elasticsearch_replicas }}
|
replicas: {{ elasticsearch_replicas }}
|
||||||
#endpoint_mode: dnsrr
|
{% if infrastructure !== 'local' %}
|
||||||
placement:
|
placement:
|
||||||
constraints: [node.role == worker]
|
constraints: [node.role == worker]
|
||||||
|
{% endif %}
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
delay: 5s
|
delay: 5s
|
||||||
max_attempts: 3
|
|
||||||
window: 120s
|
window: 120s
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
|
|
|
@@ -1,10 +0,0 @@
----
-use_jdbc: True
-mysql_image_name: 'mariadb'
-mysql_service_name: 'mysqldb'
-mysql_replicas: 1
-conductor_db: mysql
-jdbc_user: conductor
-jdbc_pass: password
-jdbc_db: conductor
-jdbc_url: jdbc:mysql://{{ mysql_service_name }}:3306/{{ mysql_jdbc_db }}?useSSL=false&allowPublicKeyRetrieval=true
@@ -1,5 +0,0 @@
----
-- name: "Generate mysql swarm, image used: {{ mysql_image_name }}"
-  template:
-    src: templates/mysql-swarm.yaml.j2
-    dest: "{{ target_path }}/mysql-swarm.yaml"
@@ -1,30 +0,0 @@
-version: '3.6'
-
-services:
-
-  {{ mysql_service_name }}:
-    image: {{ mysql_image_name }}
-    environment:
-      MYSQL_USER: {{ mysql_jdbc_user }}
-      MYSQL_PASSWORD: {{ mysql_jdbc_pass }}
-      MYSQL_ROOT_PASSWORD: {{ mysql_jdbc_pass }}
-      MYSQL_DB: {{ jdbc_db }}
-{% if init_db %}
-    configs:
-      - source: db-init
-        target: "/docker-entrypoint-initdb.d/db-init.sql"
-{% endif %}
-    networks:
-      - {{ conductor_network }}
-    deploy:
-      replicas: {{ mysql_replicas }}
-      placement:
-        constraints: [node.role == worker]
-
-networks:
-  {{ conductor_network }}:
-{% if init_db %}
-configs:
-  db-init:
-    file: {{ target_path }}/conductor-db-init.sql
-{% endif %}
@@ -0,0 +1,2 @@
+pep_port: 80
+#pep_credentials: in vault
@@ -0,0 +1,18 @@
+$ANSIBLE_VAULT;1.1;AES256
+64326266376663626435303764383036326164336561303030633464333131373161336461326162
+3630623962383434623834313737616435613966343637390a353562636535376539353538353061
+31383933313734646661633661353836386266393565633830353137646431613431663236376137
+6362613731386433370a323934373363383565323337373239666434353036333435613061666231
+34346134313038366165343861316233326331393732353334303039616535633866366261623764
+64653630353830396665363862633730396432633062363932636335643136613237373339613139
+32386330396237396363383638653431663864333162303936663563313535343536376139343166
+65316137326533306335643833353338376533633733393333623131316662386334653633353332
+66363734636237363637303863323638393339373364356433666466643038343930616166396136
+61666232356337613431316662353766393335306232616266363933653032656536386562373665
+36306234636233313237623364613033313261393431633139343037623732646431663139383062
+30396230326432376335303362356534613937306431636361663335376265363139366463656638
+31386430393037306233663161333465616236383134623961343732383633386665333231363036
+64346630633337643961653464613336623363303737626231326138633736656530653138326537
+35386161656461313034343935353863333635376664386565393530633532613965646662363634
+65396137646561353534373536616162353631383130363466356637643639323333643964323638
+3535
@@ -0,0 +1,34 @@
+---
+- name: Generate PEP config
+  template:
+    src: templates/nginx.conf.j2
+    dest: "{{ target_path }}/nginx.conf"
+
+- name: Generate PEP default config
+  when: pep is defined and pep == True
+  template:
+    src: templates/nginx.default.conf.j2
+    dest: "{{ target_path }}/nginx.default.conf"
+
+- name: Generate PEP default config
+  when: pep is not defined or pep == False
+  template:
+    src: templates/nginx.default.conf.nopep.j2
+    dest: "{{ target_path }}/nginx.default.conf"
+
+- name: Generate config.js
+  when: pep is defined and pep == True
+  template:
+    src: templates/config.js.j2
+    dest: "{{ target_path }}/config.js"
+
+- name: Generate pep.js
+  when: pep is defined and pep == True
+  template:
+    src: templates/pep.js.j2
+    dest: "{{ target_path }}/pep.js"
+
+- name: Generate pep-docker-swarm
+  template:
+    src: templates/pep-swarm.yaml.j2
+    dest: "{{ target_path }}/pep-swarm.yaml"
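Note: the two "Generate PEP default config" tasks are mutually exclusive on the pep flag: with pep true the enforcing nginx.default.conf.j2 is rendered, otherwise the nopep variant. A one-line sketch of the selection logic (hypothetical helper, shown only to make the branching explicit):

    def default_conf_template(pep: bool = False) -> str:
        # Mirrors the mutually exclusive 'when:' guards of the two tasks above.
        return ("templates/nginx.default.conf.j2" if pep
                else "templates/nginx.default.conf.nopep.j2")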
@@ -0,0 +1,98 @@
export default { config };

var config = {
    "pep-credentials" : "{{ pep_credentials }}",
    "hosts" : [
        {
            "host": "conductor-server",
            "allow-basic-auth" : true,
            "pip" : [ { claim: "context", operator : "get-contexts" } ],
            "paths" : [
                {
                    "name" : "metadata",
                    "path" : "^/api/metadata/(taskdefs|workflow)/?.*$",
                    "methods" : [
                        { "method" : "GET", "scopes" : ["get","list"] }
                    ]
                },
                {
                    "name" : "metadata.taskdefs",
                    "path" : "^/api/metadata/taskdefs/?.*$",
                    "methods" : [
                        { "method" : "POST", "scopes" : ["create"] },
                        { "method" : "DELETE", "scopes" : ["delete"] },
                        { "method" : "PUT", "scopes" : ["update"] }
                    ]
                },
                {
                    "name" : "metadata.workflow",
                    "path" : "^/api/metadata/workflow/?.*$",
                    "methods" : [
                        { "method" : "POST", "scopes" : ["create"] },
                        { "method" : "DELETE", "scopes" : ["delete"] },
                        { "method" : "PUT", "scopes" : ["update"] }
                    ]
                },
                {
                    "name" : "workflow",
                    "path" : "^/api/workflow/?.*$",
                    "methods" : [
                        { "method" : "GET", "scopes" : ["get"] },
                        { "method" : "POST", "scopes" : ["start"] },
                        { "method" : "DELETE", "scopes" : ["terminate"] }
                    ]
                },
                {
                    "name" : "task",
                    "path" : "^/api/tasks/poll/.+$",
                    "methods" : [
                        { "method" : "GET", "scopes" : ["poll"] }
                    ]
                },
                {
                    "name" : "task",
                    "path" : "^/api/tasks$",
                    "methods" : [
                        { "method" : "POST", "scopes" : ["update"] }
                    ]
                }
            ]
        }
    ]
}
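Read as data, each hosts entry above binds a path regex plus HTTP method to the UMA scopes the PEP will request from the IAM. A minimal standalone sketch of that lookup, for illustration only (requiredScopes is not part of the commit; pep.js does the equivalent via getHost/getPath):

// Sketch: how a request resolves to scopes under the config above.
function requiredScopes(config, host, uri, method) {
    var h = config.hosts.filter(function (x) { return x.host === host; })[0];
    if (!h) { return null; }                    // host not protected by this config
    var candidates = (h.paths || []).filter(function (p) {
        return uri.match(p.path) !== null;      // same regex test as getPath()
    });
    for (var i = 0; i < candidates.length; i++) {
        var ms = (candidates[i].methods || []).filter(function (m) {
            return m.method.toUpperCase() === method;
        });
        if (ms.length > 0) { return ms[0].scopes; }  // strongest match: path + method
    }
    // A path may match with no method entry; pep.js then requests the bare
    // resource without scopes.
    return [];
}

// With the config above:
//   requiredScopes(config, "conductor-server", "/api/workflow", "POST")
//   -> ["start"]   (sent to the IAM as "permission=workflow#start")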
@@ -0,0 +1,18 @@
load_module modules/ngx_http_js_module.so;

worker_processes 1;

events {
    worker_connections 1024;
}

http {

{% if pep is defined and pep == True %}
    js_import pep.js;
    js_set $authorization pep.enforce;
    proxy_cache_path /var/cache/nginx/pep keys_zone=token_responses:1m max_size=2m;
{% endif %}
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
@@ -0,0 +1,96 @@
upstream _conductor-server {
    ip_hash;
    server conductor-server:8080;
}

upstream _conductor-ui {
    ip_hash;
    server conductor-ui:5000;
}

map $http_authorization $source_auth {
    default "";
}

js_var $auth_token;
js_var $pep_credentials;

server {

    listen *:80;
    listen [::]:80;
    server_name conductor-server;

    location / {
        proxy_pass http://_conductor-server;
    }

    location /api/ {
        js_content pep.enforce;
    }

    location @backend {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Original-URI $request_uri;
        proxy_pass http://_conductor-server;
    }

    location /jwt_verify_request {
        internal;
        proxy_method POST;
        proxy_http_version 1.1;
        proxy_set_header Authorization $pep_credentials;
        proxy_set_header Content-Type "application/x-www-form-urlencoded";
        proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token/introspect";

        proxy_cache token_responses;    # Enable caching
        proxy_cache_key $source_auth;   # Cache for each source authentication
        proxy_cache_lock on;            # Duplicate tokens must wait
        proxy_cache_valid 200 10s;      # How long to use each response
        proxy_ignore_headers Cache-Control Expires Set-Cookie;
    }

    location /jwt_request {
        internal;
        proxy_method POST;
        proxy_http_version 1.1;
        proxy_set_header Authorization $pep_credentials;
        proxy_set_header Content-Type "application/x-www-form-urlencoded";
        proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token";
    }

    location /permission_request {
        internal;
        proxy_method POST;
        proxy_http_version 1.1;
        proxy_set_header Content-Type "application/x-www-form-urlencoded";
        proxy_set_header Authorization "Bearer $auth_token";
        proxy_pass "{{ iam_host }}/auth/realms/d4science/protocol/openid-connect/token";
    }

}

server {

    listen *:80 default_server;
    listen [::]:80 default_server;
    server_name conductor-ui;

    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass http://_conductor-ui;
    }

}
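Here js_content pep.enforce hands every /api/ request to the njs handler, which must either answer the request itself or internally redirect to the named @backend location; the internal locations above are reachable only through r.subrequest(). A minimal handler of that shape, as a sketch only (assumes njs with async/await support, i.e. 0.7 or later; this is not the committed pep.js):

// Sketch of an njs content handler wired as "js_content pep.enforce;".
async function enforce(r) {
    // Introspect the caller's token via the internal location defined above.
    var reply = await r.subrequest("/jwt_verify_request",
                                   { body: "token=..." });   // token elided here
    var active = reply.status === 200 && JSON.parse(reply.responseBody).active;
    if (active) {
        r.internalRedirect("@backend");   // nginx then proxies to conductor-server
    } else {
        r.return(401);                    // otherwise refuse the request
    }
}
export default { enforce };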
@@ -0,0 +1,40 @@
upstream _conductor-server {
    ip_hash;
    server conductor-server:8080;
}

upstream _conductor-ui {
    ip_hash;
    server conductor-ui:5000;
}

server {

    listen *:80;
    listen [::]:80;
    server_name conductor-server;

    location / {
        proxy_pass http://_conductor-server;
    }

}

server {

    listen *:80 default_server;
    listen [::]:80 default_server;
    server_name conductor-ui;

    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass http://_conductor-ui;
    }

}
@@ -0,0 +1,35 @@
version: '3.6'

services:

  pep:
    image: nginx:stable-alpine
    networks:
      - conductor-network
    ports:
      - "{{ pep_port }}:80"
{% if pep is defined and pep == True %}
    volumes:
      - "./pep.js:/etc/nginx/pep.js"
      - "./config.js:/etc/nginx/config.js"
{% endif %}
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        window: 120s
    configs:
      - source: nginxconf
        target: /etc/nginx/templates/default.conf.template
      - source: nginxbaseconf
        target: /etc/nginx/nginx.conf

networks:
  conductor-network:

configs:
  nginxconf:
    file: ./nginx.default.conf
  nginxbaseconf:
    file: ./nginx.conf
@@ -0,0 +1,299 @@
export default { enforce };

import defaultExport from './config.js';

function log(c, s) {
    c.request.error(s)
}

function enforce(r) {

    var context = {
        request: r,
        config: defaultExport["config"],
        backend: (defaultExport.backend ? defaultExport.backend : "@backend"),
        // identity pass-through unless config.js overrides backendHeaders
        export_backend_headers: (defaultExport.backendHeaders ? defaultExport.backendHeaders : wkf.export_backend_headers)
    }

    log(context, "Inside NJS enforce for " + r.method + " @ " + r.headersIn.host + "/" + r.uri)

    context = computeProtection(context)

    wkf.run(wkf.build(context), context)
}

// ######## WORKFLOW FUNCTIONS ###############
var wkf = {

    build: (context) => {
        var actions = [
            "export_pep_credentials",
            "parse_authentication",
            "check_authentication",
            "export_authn_token",
            "pip",
            "pdp",
            "export_backend_headers",
            "pass"
        ]
        return actions
    },

    // Fold the action names into one promise chain; any failure ends in 401.
    run: (actions, context) => {
        context.request.error("Starting workflow with " + njs.dump(actions))
        var w = actions.reduce(
            (acc, f) => acc.then(typeof (f) === "function" ? f : wkf[f]),
            Promise.resolve().then(() => context)
        )
        w.catch(e => { context.request.error(njs.dump(e)); context.request.return(401) })
    },

    export_pep_credentials: exportPepCredentials,
    export_authn_token: exportAuthToken,
    export_backend_headers: c => c,
    parse_authentication: parseAuthentication,
    check_authentication: checkAuthentication,
    verify_token: verifyToken,
    request_token: requestToken,
    pip: pipExecutor,
    pdp: pdpExecutor,
    pass: pass,

    //PIP utilities
    "get-path-component": (c, i) => c.request.uri.split("/")[i],
    "get-token-field": getTokenField,
    "get-contexts": (c) => {
        var ra = c.authn.verified_token["resource_access"]
        if (ra) {
            var out = [];
            for (var k in ra) {
                if (ra[k].roles && ra[k].roles.length !== 0) out.push(k)
            }
        }
        return out;
    }
}

function getTokenField(context, f) {
    return context.authn.verified_token[f]
}

function exportVariable(context, name, value) {
    context.request.variables[name] = value
    log(context, "Exported variables:" + njs.dump(context.request.variables))
    return context
}

function exportPepCredentials(context) {
    if (!context.config["pep-credentials"]) {
        throw new Error("Need PEP credentials")
    }
    return exportVariable(context, "pep_credentials", "Basic " + context.config["pep-credentials"])
}

function exportAuthToken(context) {
    return exportVariable(context, "auth_token", context.authn.token)
}

function checkAuthentication(context) {
    return context.authn.type === "bearer" ? wkf.verify_token(context) : wkf.request_token(context)
}

function parseAuthentication(context) {
    context.request.log("Inside parseAuthentication")
    var incomingauth = context.request.headersIn["Authorization"]

    if (!incomingauth) throw new Error("Authentication required");

    var arr = incomingauth.trim().replace(/\s\s+/g, " ").split(" ")
    if (arr.length != 2) throw new Error("Unknown authentication scheme");

    var type = arr[0].toLowerCase()
    if (type === "basic" && context.authz.host && context.authz.host["allow-basic-auth"]) {
        var unamepass = Buffer.from(arr[1], 'base64').toString().split(":")
        if (unamepass.length != 2) return null;
        context.authn = { type: type, raw: arr[1], user: unamepass[0], password: unamepass[1] }
        return context
    } else if (type === "bearer") {
        context.authn = { type: type, raw: arr[1], token: arr[1] }
        return context
    }
    throw new Error("Unknown authentication scheme");
}

function verifyToken(context) {
    log(context, "Inside verifyToken")
    var options = {
        "body": "token=" + context.authn.token + "&token_type_hint=access_token"
    }
    return context.request.subrequest("/jwt_verify_request", options)
        .then(reply => {
            if (reply.status === 200) {
                var response = JSON.parse(reply.responseBody);
                if (response.active === true) {
                    return response
                } else {
                    throw new Error("Unauthorized")
                }
            } else {
                throw new Error("Unauthorized")
            }
        }).then(verified_token => {
            // keep the decoded JWT payload for PIP claim extraction
            context.authn.verified_token =
                JSON.parse(Buffer.from(context.authn.token.split('.')[1], 'base64url').toString())
            return context
        })
}

function requestToken(context) {
    log(context, "Inside requestToken")
    var options = {
        "body": "grant_type=password&username=" + context.authn.user + "&password=" + context.authn.password
    }
    return context.request.subrequest("/jwt_request", options)
        .then(reply => {
            if (reply.status === 200) {
                var response = JSON.parse(reply.responseBody);
                context.authn.token = response.access_token
                context.authn.verified_token =
                    JSON.parse(Buffer.from(context.authn.token.split('.')[1], 'base64url').toString())
                return context
            } else {
                throw new Error("Unauthorized")
            }
        })
}

function pipExecutor(context) {
    log(context, "Inside extra claims PIP")
    context.authz.pip.forEach(extra => {
        //call extra claim pip function
        try {
            var operator = extra.operator
            var result = wkf[operator](context, extra.args)
            //ensure array and add to extra_claims
            if (!(result instanceof Array)) result = [result]
            if (!context.extra_claims) context.extra_claims = {};
            context.extra_claims[extra.claim] = result
        } catch (error) {
            log(context, "Skipping invalid extra claim " + njs.dump(error))
        }
    })
    log(context, "Extra claims are " + njs.dump(context.extra_claims))
    return context
}

function pdpExecutor(context) {
    log(context, "Inside PDP")
    return context.authz.pdp(context)
}

function umaCall(context) {
    log(context, "Inside UMA call")
    var options = { "body": computePermissionRequestBody(context) };
    return context.request.subrequest("/permission_request", options)
        .then(reply => {
            if (reply.status === 200) {
                return context
            } else {
                throw new Error("Response for authorization request is not ok " + reply.status + " " + njs.dump(reply.responseBody))
            }
        })
}

function pass(context) {
    log(context, "Inside pass");
    if (typeof (context.backend) === "string") context.request.internalRedirect(context.backend);
    else if (typeof (context.backend) === "function") context.request.internalRedirect(context.backend(context))
    return context;
}

// ######## AUTHORIZATION PART ###############
function computePermissionRequestBody(context) {

    if (!context.authz.host || !context.authz.path) {
        throw new Error("Enforcement mode is always enforcing. Host or path not found...")
    }

    var audience = computeAudience(context)
    var grant = "grant_type=urn:ietf:params:oauth:grant-type:uma-ticket"
    var mode = "response_mode=decision"
    var permissions = computePermissions(context)
    var extra = ""
    if (context.extra_claims) {
        // base64url-encode the JSON claims (string -> Buffer -> base64url)
        extra =
            "claim_token_format=urn:ietf:params:oauth:token-type:jwt&claim_token=" +
            Buffer.from(JSON.stringify(context.extra_claims)).toString("base64url")
    }
    var body = audience + "&" + grant + "&" + permissions + "&" + mode + "&" + extra
    context.request.error("Computed permission request body is " + body)
    return body
}

function computeAudience(context) {
    var aud = context.request.headersIn.host
    if (context.authz.host) {
        aud = context.authz.host.audience || context.authz.host.host
    }
    return "audience=" + aud
}

function computePermissions(context) {
    var resource = context.request.uri
    if (context.authz.path) {
        resource = context.authz.path.name || context.authz.path.path
    }
    var scopes = []
    if (context.authz.method && context.authz.method.scopes) {
        scopes = context.authz.method.scopes
    }
    if (scopes.length > 0) {
        return scopes.map(s => "permission=" + resource + "#" + s).join("&")
    }
    return "permission=" + resource
}

function getPath(hostconfig, incomingpath, incomingmethod) {
    var paths = hostconfig.paths || []
    // prefer entries matching path and method ("strong") over path-only ("weak")
    var matchingpaths = paths
        .filter(p => { return incomingpath.match(p.path) != null })
        .reduce((acc, p) => {
            if (!p.methods || p.methods.length === 0) acc.weak.push({ path: p });
            else {
                var matchingmethods = p.methods.filter(m => m.method.toUpperCase() === incomingmethod)
                if (matchingmethods.length > 0) acc.strong.push({ method: matchingmethods[0], path: p });
            }
            return acc;
        }, { strong: [], weak: [] })
    return matchingpaths.strong.concat(matchingpaths.weak)[0]
}

function getHost(config, host) {
    var matching = config.hosts.filter(h => {
        return h.host === host
    })
    return matching.length > 0 ? matching[0] : null
}

function computeProtection(context) {
    log(context, "Getting by host " + context.request.headersIn.host)
    context.authz = {}
    context.authz.host = getHost(context.config, context.request.headersIn.host)
    if (context.authz.host !== null) {
        context.authz.pip = context.authz.host.pip ? context.authz.host.pip : [];
        context.authz.pdp = context.authz.host.pdp ? context.authz.host.pdp : umaCall;
        var pathandmethod = getPath(context.authz.host, context.request.uri, context.request.method);
        if (pathandmethod) {
            context.authz.path = pathandmethod.path;
            context.authz.pip = context.authz.path.pip ? context.authz.pip.concat(context.authz.path.pip) : context.authz.pip;
            context.authz.pdp = context.authz.path.pdp ? context.authz.path.pdp : context.authz.pdp;
            context.authz.method = pathandmethod.method;
            if (context.authz.method) {
                context.authz.pip = context.authz.method.pip ? context.authz.pip.concat(context.authz.method.pip) : context.authz.pip;
                context.authz.pdp = context.authz.method.pdp ? context.authz.method.pdp : context.authz.pdp;
            }
        }
    }
    log(context, "Leaving protection computation: ")
    return context
}
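The wkf.run above folds the action list into a single promise chain, so each named step receives the context produced by the previous one, synchronous and asynchronous steps mix freely, and one catch maps any failure to 401. The same pattern in isolation, with toy step names that are illustrative rather than from the commit:

// Toy reduction of the pipeline idea used by wkf.run.
var steps = [
    ctx => { ctx.trace.push("parse"); return ctx; },            // sync step
    ctx => Promise.resolve(ctx)
              .then(c => { c.trace.push("verify"); return c; }) // async step
];

steps.reduce(
    (acc, f) => acc.then(f),             // each step is fed the running context
    Promise.resolve({ trace: [] })       // seed the chain with a fresh context
).then(ctx => console.log(ctx.trace.join(" -> ")))   // prints "parse -> verify"
 .catch(e => console.log("deny: " + e));             // one catch guards all steps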
@@ -1,9 +0,0 @@
---
use_jdbc: True
postgres_service_name: 'postgresdb'
postgres_replicas: 1
conductor_db: postgres
postgres_jdbc_user: conductor
postgres_jdbc_pass: password
postgres_jdbc_db: conductor
postgres_jdbc_url: jdbc:postgresql://{{ postgres_service_name }}:5432/{{ postgres_jdbc_db }}
@@ -1,5 +0,0 @@
---
- name: Generate postgres swarm
  template:
    src: templates/postgres-swarm.yaml.j2
    dest: "{{ target_path }}/postgres-swarm.yaml"
@@ -1,31 +0,0 @@
version: '3.6'

services:

  {{ postgres_service_name }}:
    image: postgres
    ports:
      - "5432:5432"
    environment:
      POSTGRES_USER: "{{ postgres_jdbc_user }}"
      POSTGRES_PASSWORD: "{{ postgres_jdbc_pass }}"
      POSTGRES_DB: "{{ postgres_jdbc_db }}"
{% if init_db %}
    configs:
      - source: db-init
        target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
    networks:
      - {{ conductor_network }}
    deploy:
      replicas: {{ postgres_replicas }}
      placement:
        constraints: [node.role == worker]

networks:
  {{ conductor_network }}:
{% if init_db %}
configs:
  db-init:
    file: {{ target_path }}/conductor-db-init.sql
{% endif %}
@@ -1,5 +1,5 @@
 ---
-conductor_workers_server: http://conductor-dev.int.d4science.net/api
+conductor_workers_server: http://conductor-server:8080/api

 conductor_workers: [ { service: 'base', image: 'nubisware/nubisware-conductor-worker-py-base', replicas: 2, threads: 1, pollrate: 1 }]

@@ -8,6 +8,7 @@ pymail_user: "conductor_{{ infrastructure }}"
 pymail_protocol: "starttls"
 pymail_port: "587"

+#smtp_local_pwd: ""
 #smtp_dev_pwd: in vault
 #smtp_pre_pwd: in vault
 #smtp_prod_pwd: in vault
@@ -14,12 +14,13 @@ services:
     deploy:
       mode: replicated
       replicas: {{ workers.replicas }}
+{% if infrastructure != 'local' %}
       placement:
         constraints: [node.role == worker]
+{% endif %}
       restart_policy:
         condition: on-failure
         delay: 5s
-        max_attempts: 3
         window: 120s
     logging:
       driver: "journald"
@@ -1,8 +1,8 @@
 [common]
 loglevel = {{ item.get('loglevel', 'info') }}
 #server =
-threads = 3
-pollrate = .1
+threads = 1
+pollrate = 1
 {% if "domain" in item.keys() %}
 domain={{ item.domain }}
 {% endif %}
@@ -1,18 +1,25 @@
 ---
 - hosts: dev_infra
   vars_files:
     - roles/workers/defaults/smtp.yaml
+    - roles/pep/defaults/pep_credentials.yaml
+    - roles/conductor/defaults/conductor_ui_secrets.yaml
   vars:
-    cluster_check: true
-    infrastructure: dev
+    infrastructure: "dev"
     pymail_password: "{{ smtp_dev_pwd }}"
-    conductor_workers_server: http://conductor-server:8080/api
+    iam_host: https://accounts.dev.d4science.org
+    pep: True
+    pep_credentials: "{{ dev_pep_credentials }}"
+    conductor_ui_secret: "{{ dev_conductor_ui_secret }}"
+    conductor_auth: oauth2
+    conductor_replicas: 2
+    conductor_ui_replicas: 2
   roles:
     - common
     - databases
     - conductor
     - workers
-    - cluster-replacement
+    - pep
   tasks:
     - name: Start {{ db|default('postgres', true) }} and es
       docker_stack:
@@ -36,12 +43,12 @@
           - "{{ target_path }}/conductor-swarm.yaml"
       when: dry is not defined or not dry|bool

-    - name: Start haproxy
+    - name: Start pep
       docker_stack:
         name: 'conductor-{{ infrastructure }}'
         state: present
         compose:
-          - "{{ target_path }}/haproxy-swarm.yaml"
+          - "{{ target_path }}/pep-swarm.yaml"
       when: dry is not defined or not dry|bool

     - name: Start workers
@@ -0,0 +1,52 @@
---
- hosts: localhost
  vars_files:
    - roles/workers/defaults/smtp.yaml
  vars:
    infrastructure: "local"
    pymail_password: "{{ smtp_local_pwd }}"
    smtp_local_pwd: ""
  roles:
    - common
    - databases
    - conductor
    - workers
    - pep
  tasks:
    - name: Start {{ db|default('postgres', true) }} and es
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/{{ db|default('postgres', true) }}-swarm.yaml"
          - "{{ target_path }}/elasticsearch-swarm.yaml"
      when: dry is not defined or not dry|bool

    - name: Waiting for databases
      pause:
        seconds: 20
      when: dry is not defined or not dry|bool

    - name: Start conductor
      docker_stack:
        name: "conductor-{{ infrastructure }}"
        state: present
        compose:
          - "{{ target_path }}/conductor-swarm.yaml"
      when: dry is not defined or not dry|bool

    - name: Start pep
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/pep-swarm.yaml"
      when: dry is not defined or not dry|bool

    - name: Start workers
      docker_stack:
        name: "conductor-{{ infrastructure }}"
        state: present
        compose:
          - "{{ target_path }}/conductor-workers-swarm.yaml"
      when: dry is not defined or not dry|bool
@@ -0,0 +1,61 @@
---
- hosts: localhost
#- hosts: nw_cluster_infra
  vars_files:
    - roles/workers/defaults/smtp.yaml
    - roles/pep/defaults/pep_credentials.yaml
    - roles/conductor/defaults/conductor_ui_secrets.yaml
  vars:
    infrastructure: "nw-cluster"
    pymail_password: "{{ smtp_dev_pwd }}"
    iam_host: https://accounts.dev.d4science.org
    pep: True
    pep_credentials: "{{ nw_cluster_pep_credentials }}"
    conductor_ui_secret: "{{ nw_cluster_conductor_ui_secret }}"
    conductor_auth: oauth2
    conductor_replicas: 2
    conductor_ui_replicas: 2
  roles:
    - common
    - databases
    - conductor
    - workers
    - pep
  tasks:
    - name: Start {{ db|default('postgres', true) }} and es
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/{{ db|default('postgres', true) }}-swarm.yaml"
          - "{{ target_path }}/elasticsearch-swarm.yaml"
      when: dry is not defined or not dry|bool

    - name: Waiting for databases
      pause:
        seconds: 20
      when: dry is not defined or not dry|bool

    - name: Start conductor
      docker_stack:
        name: "conductor-{{ infrastructure }}"
        state: present
        compose:
          - "{{ target_path }}/conductor-swarm.yaml"
      when: dry is not defined or not dry|bool

    - name: Start pep
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/pep-swarm.yaml"
      when: dry is not defined or not dry|bool

    - name: Start workers
      docker_stack:
        name: "conductor-{{ infrastructure }}"
        state: present
        compose:
          - "{{ target_path }}/conductor-workers-swarm.yaml"
      when: dry is not defined or not dry|bool
@@ -1,19 +1,26 @@
 ---
-- hosts: pre_infra
+- hosts: localhost
+#- hosts: pre_infra
   vars_files:
-    - roles/external-postgres/defaults/vault_main.yaml
     - roles/workers/defaults/smtp.yaml
+    - roles/pep/defaults/pep_credentials.yaml
+    - roles/conductor/defaults/conductor_ui_secrets.yaml
   vars:
-    cluster_check: true
-    infrastructure: pre
+    infrastructure: "pre"
     pymail_password: "{{ smtp_pre_pwd }}"
-    conductor_workers_server: http://conductor-server:8080/api
+    iam_host: https://accounts.pre.d4science.org
+    pep: True
+    pep_credentials: "{{ pre_pep_credentials }}"
+    conductor_ui_secret: "{{ pre_conductor_ui_secret }}"
+    conductor_auth: oauth2
+    conductor_replicas: 2
+    conductor_ui_replicas: 2
   roles:
     - common
     - databases
-    - cluster-replacement
     - conductor
     - workers
+    - pep
   tasks:
     - name: Start {{ db|default('postgres', true) }} and es
       docker_stack:
@@ -26,7 +33,7 @@

     - name: Waiting for databases
       pause:
         seconds: 20
       when: dry is not defined or not dry|bool

     - name: Start conductor
@@ -36,7 +43,15 @@
         compose:
           - "{{ target_path }}/conductor-swarm.yaml"
       when: dry is not defined or not dry|bool

+    - name: Start pep
+      docker_stack:
+        name: 'conductor-{{ infrastructure }}'
+        state: present
+        compose:
+          - "{{ target_path }}/pep-swarm.yaml"
+      when: dry is not defined or not dry|bool
+
     - name: Start workers
       docker_stack:
         name: "conductor-{{ infrastructure }}"
@@ -1,20 +1,27 @@
 ---
-- hosts: prod_infra
+- hosts: localhost
+#- hosts: prod_infra
   vars_files:
-    - roles/external-postgres/defaults/vault_main.yaml
     - roles/workers/defaults/smtp.yaml
+    - roles/pep/defaults/pep_credentials.yaml
+    - roles/conductor/defaults/conductor_ui_secrets.yaml
   vars:
-    cluster_check: true
-    conductor_workers_server: http://conductor-server:8080/api
+    infrastructure: "prod"
     pymail_password: "{{ smtp_prod_pwd }}"
-    postgres_jdbc_pass: '{{ jdbc_pass }}'
+    iam_host: https://accounts.d4science.org
+    pep: True
+    pep_credentials: "{{ prod_pep_credentials }}"
+    conductor_ui_secret: "{{ prod_conductor_ui_secret }}"
+    conductor_auth: oauth2
+    conductor_replicas: 2
+    conductor_ui_replicas: 2
   roles:
     - common
-    - external-postgres
-    - elasticsearch
-    - cluster-replacement
+    - elasticsearch
+    - external-postgres
     - conductor
     - workers
+    - pep
   tasks:
     - name: Start es
       docker_stack:
@@ -36,7 +43,15 @@
         compose:
           - "{{ target_path }}/conductor-swarm.yaml"
       when: dry is not defined or not dry|bool

+    - name: Start pep
+      docker_stack:
+        name: 'conductor-{{ infrastructure }}'
+        state: present
+        compose:
+          - "{{ target_path }}/pep-swarm.yaml"
+      when: dry is not defined or not dry|bool
+
     - name: Start workers
       docker_stack:
         name: "conductor-{{ infrastructure }}"
site.yaml
@@ -1,56 +0,0 @@
---
- hosts: pre_infra:dev_infra
  roles:
    - common
    - role: cluster-replacement
      when:
        - cluster_replacement is defined and cluster_replacement|bool
    - role: databases
    - conductor
    - role: workers
      when:
        - no_workers is not defined or not no_workers|bool
  tasks:
    - name: Start {{ db|default('postgres', true) }} and es
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/{{ db|default('postgres', true) }}-swarm.yaml"
          - "{{ target_path }}/elasticsearch-swarm.yaml"
      when: dry is not defined or not dry|bool

    - name: Waiting for databases
      pause:
        seconds: 10
      when: dry is not defined or not dry|bool

    - name: Start conductor
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/conductor-swarm.yaml"
      when: dry is not defined or not dry|bool

    - name: Start haproxy
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/haproxy-swarm.yaml"
      when:
        - dry is not defined or not dry|bool
        - cluster_replacement is defined
        - cluster_replacement|bool

    - name: Start workers
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/conductor-workers-swarm.yaml"
      when:
        - dry is not defined or not dry|bool
        - no_workers is not defined or not no_workers|bool