added local-site with pep

master
dcore94 3 years ago
parent ca0b62bcfe
commit bf1bf82c0f

@ -0,0 +1,23 @@ auth.cfg
{
"strategy": "local",
"strategySettings":{
"users": {
"admin": {
"hash": "098039dd5e84e486f83eadefc31ce038ccc90d6d62323528181049371c9460b4",
"salt": "salt",
"displayName": "Admin",
"email": "marco.lettere@nubisware.com",
"roles": [ "admin", "viewer" ]
}
}
},
"audit": true,
"acl": [
"POST /(.*) admin",
"PUT /(.*) admin",
"DELETE /(.*) admin",
"GET /api/(.*) viewer",
"GET /(.*) *"
]
}
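
A note on the "hash"/"salt" pair above: the local strategy compares a digest of the submitted password. A minimal sketch for generating a new user entry, assuming the UI computes a hex-encoded SHA-256 over salt + password (verify the exact scheme against the conductor-ui local-strategy code before relying on it):

import hashlib

def local_user_hash(password: str, salt: str = "salt") -> str:
    # Assumption: hex SHA-256 of salt concatenated with the password.
    return hashlib.sha256((salt + password).encode("utf-8")).hexdigest()

print(local_user_hash("changeme"))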

@ -0,0 +1,14 @@ base-config.cfg
[common]
loglevel = info
server = http://conductor-server:8080/api
threads = 1
pollrate = 1
[pymail]
server=smtp-relay.d4science.org
user=conductor_dev
password=d20d6ea975b01bc
protocol=starttls
port=587
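
For reference, the [pymail] block above maps onto a standard STARTTLS submission on port 587. A minimal sketch of the send path with Python's smtplib (sender and recipient addresses are placeholders):

import smtplib
from email.message import EmailMessage

msg = EmailMessage()
msg["From"] = "conductor_dev@d4science.org"   # hypothetical sender
msg["To"] = "someone@example.org"             # placeholder recipient
msg["Subject"] = "Conductor notification"
msg.set_content("Hello from the pymail worker.")

with smtplib.SMTP("smtp-relay.d4science.org", 587) as smtp:
    smtp.starttls()                           # protocol=starttls, port=587
    smtp.login("conductor_dev", "<password>")
    smtp.send_message(msg)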

@ -0,0 +1,188 @@ conductor-db-init.sql
-- V1__initial_schema.sql --
-- --------------------------------------------------------------------------------------------------------------
-- SCHEMA FOR METADATA DAO
-- --------------------------------------------------------------------------------------------------------------
CREATE TABLE meta_event_handler (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
name varchar(255) NOT NULL,
event varchar(255) NOT NULL,
active boolean NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE INDEX event_handler_name_index ON meta_event_handler (name);
CREATE INDEX event_handler_event_index ON meta_event_handler (event);
CREATE TABLE meta_task_def (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
name varchar(255) NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_task_def_name ON meta_task_def (name);
CREATE TABLE meta_workflow_def (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
name varchar(255) NOT NULL,
version int NOT NULL,
latest_version int NOT NULL DEFAULT 0,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_name_version ON meta_workflow_def (name,version);
CREATE INDEX workflow_def_name_index ON meta_workflow_def (name);
-- --------------------------------------------------------------------------------------------------------------
-- SCHEMA FOR EXECUTION DAO
-- --------------------------------------------------------------------------------------------------------------
CREATE TABLE event_execution (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
event_handler_name varchar(255) NOT NULL,
event_name varchar(255) NOT NULL,
message_id varchar(255) NOT NULL,
execution_id varchar(255) NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_event_execution ON event_execution (event_handler_name,event_name,message_id);
CREATE TABLE poll_data (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
queue_name varchar(255) NOT NULL,
domain varchar(255) NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_poll_data ON poll_data (queue_name,domain);
CREATE INDEX poll_data_queue_name_index ON poll_data (queue_name);
CREATE TABLE task_scheduled (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_id varchar(255) NOT NULL,
task_key varchar(255) NOT NULL,
task_id varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_id_task_key ON task_scheduled (workflow_id,task_key);
CREATE TABLE task_in_progress (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
task_def_name varchar(255) NOT NULL,
task_id varchar(255) NOT NULL,
workflow_id varchar(255) NOT NULL,
in_progress_status boolean NOT NULL DEFAULT false,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_task_def_task_id1 ON task_in_progress (task_def_name,task_id);
CREATE TABLE task (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
task_id varchar(255) NOT NULL,
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_task_id ON task (task_id);
CREATE TABLE workflow (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_id varchar(255) NOT NULL,
correlation_id varchar(255),
json_data TEXT NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_id ON workflow (workflow_id);
CREATE TABLE workflow_def_to_workflow (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_def varchar(255) NOT NULL,
date_str varchar(60),
workflow_id varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_def_date_str ON workflow_def_to_workflow (workflow_def,date_str,workflow_id);
CREATE TABLE workflow_pending (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_type varchar(255) NOT NULL,
workflow_id varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_type_workflow_id ON workflow_pending (workflow_type,workflow_id);
CREATE INDEX workflow_type_index ON workflow_pending (workflow_type);
CREATE TABLE workflow_to_task (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
workflow_id varchar(255) NOT NULL,
task_id varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_workflow_to_task_id ON workflow_to_task (workflow_id,task_id);
CREATE INDEX workflow_id_index ON workflow_to_task (workflow_id);
-- --------------------------------------------------------------------------------------------------------------
-- SCHEMA FOR QUEUE DAO
-- --------------------------------------------------------------------------------------------------------------
CREATE TABLE queue (
id SERIAL,
created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
queue_name varchar(255) NOT NULL,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_queue_name ON queue (queue_name);
CREATE TABLE queue_message (
id SERIAL,
created_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deliver_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
queue_name varchar(255) NOT NULL,
message_id varchar(255) NOT NULL,
priority integer DEFAULT 0,
popped boolean DEFAULT false,
offset_time_seconds BIGINT,
payload TEXT,
PRIMARY KEY (id)
);
CREATE UNIQUE INDEX unique_queue_name_message_id ON queue_message (queue_name,message_id);
CREATE INDEX combo_queue_message ON queue_message (queue_name,popped,deliver_on,created_on);
-- V2__1009_Fix_PostgresExecutionDAO_Index.sql --
DROP INDEX IF EXISTS unique_event_execution;
CREATE UNIQUE INDEX unique_event_execution ON event_execution (event_handler_name,event_name,execution_id);
-- V3__correlation_id_index.sql --
DROP INDEX IF EXISTS workflow_corr_id_index;
CREATE INDEX workflow_corr_id_index ON workflow (correlation_id);
-- V4__new_qm_index_with_priority.sql --
DROP INDEX IF EXISTS combo_queue_message;
CREATE INDEX combo_queue_message ON queue_message (queue_name,priority,popped,deliver_on,created_on);
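
The combo_queue_message index rebuilt in V4 is shaped for the queue pop query: filter by queue_name and popped, honour deliver_on, order by priority then created_on. A sketch of that pop, approximated in Python with psycopg2 (the exact statement Conductor issues may differ):

import psycopg2

conn = psycopg2.connect("postgresql://conductor:password@postgresdb:5432/conductor")
with conn, conn.cursor() as cur:
    cur.execute("""
        SELECT queue_name, message_id, payload
          FROM queue_message
         WHERE queue_name = %s
           AND popped = false
           AND deliver_on <= CURRENT_TIMESTAMP
         ORDER BY priority DESC, created_on
         LIMIT 1
         FOR UPDATE SKIP LOCKED
    """, ("pymail",))
    row = cur.fetchone()
    if row:
        # Mark the message as taken so other pollers skip it.
        cur.execute(
            "UPDATE queue_message SET popped = true WHERE queue_name = %s AND message_id = %s",
            (row[0], row[1]),
        )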

@ -0,0 +1,42 @@ conductor-swarm-config.properties
# Servers.
conductor.jetty.server.enabled=true
conductor.grpc.server.enabled=false
# Database persistence model. Possible values are memory, redis, dynomite and postgres.
# If omitted, in-memory persistence is used.
#
# memory   : The data is stored in memory and lost when the server dies. Useful for testing or demo.
# redis    : non-Dynomite based Redis instance.
# dynomite : Dynomite cluster. Use this for HA configuration.
# postgres : PostgreSQL backend, configured through the jdbc.* properties below.
db=postgres
jdbc.url=jdbc:postgresql://postgresdb:5432/conductor
jdbc.username=conductor
jdbc.password=password
conductor.postgres.connection.pool.size.max=10
conductor.postgres.connection.pool.idle.min=2
flyway.enabled=false
# Elastic search instance type. Possible values are memory and external.
# If not specified, the instance type will be embedded in memory
#
# memory: The instance is created in memory and lost when the server dies. Useful for development and testing.
# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when
# the server dies. Useful for more stable environments like staging or production.
workflow.elasticsearch.instanceType=external
# Transport address to elasticsearch
workflow.elasticsearch.url=elasticsearch:9300
# Name of the elasticsearch index
workflow.elasticsearch.index.name=conductor
# Additional modules (optional)
# conductor.additional.modules=class_extending_com.google.inject.AbstractModule
# Additional modules for metrics collection (optional)
# conductor.additional.modules=com.netflix.conductor.contribs.metrics.MetricsRegistryModule,com.netflix.conductor.contribs.metrics.LoggingMetricsModule
# com.netflix.conductor.contribs.metrics.LoggingMetricsModule.reportPeriodSeconds=15
# Load sample kitchen sink workflow
loadSample=false
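
A quick smoke test for this configuration, assuming the standard Conductor v2 REST paths: the /metadata endpoints exercise the Postgres DAOs, while /workflow/search goes through the external Elasticsearch index.

import requests

base = "http://conductor-server:8080/api"
# Metadata is served from Postgres (jdbc.* above):
print(requests.get(f"{base}/metadata/taskdefs", timeout=5).status_code)
# Search is served by the external Elasticsearch instance:
print(requests.get(f"{base}/workflow/search", params={"query": ""}, timeout=5).status_code)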

@ -0,0 +1,32 @@
version: '3.6'
services:
base:
environment:
CONDUCTOR_SERVER: http://conductor-dev.int.d4science.net/api
configs:
- source: base-config
target: /app/config.cfg
image: 'nubisware/nubisware-conductor-worker-py-base'
networks:
- conductor-network
deploy:
mode: replicated
replicas: 2
placement:
constraints: [node.role == worker]
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
logging:
driver: "journald"
networks:
conductor-network:
configs:
base-config:
file: base-config.cfg
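
The py-base replicas above run a poll/execute/update loop against CONDUCTOR_SERVER. A stripped-down sketch of one iteration, using the standard Conductor v2 task endpoints (the task type "echo" is a placeholder):

import requests

server = "http://conductor-dev.int.d4science.net/api"
resp = requests.get(f"{server}/tasks/poll/echo", params={"workerid": "base-1"})
if resp.status_code == 200 and resp.text:   # an empty body means nothing is queued
    task = resp.json()
    requests.post(f"{server}/tasks", json={
        "taskId": task["taskId"],
        "workflowInstanceId": task["workflowInstanceId"],
        "status": "COMPLETED",
        "outputData": {"echoed": task.get("inputData", {})},
    })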

@ -0,0 +1,142 @@
version: '3.6'
services:
postgresdb:
image: postgres
environment:
POSTGRES_USER: "conductor"
POSTGRES_PASSWORD: "password"
POSTGRES_DB: "conductor"
configs:
- source: db-init
target: "/docker-entrypoint-initdb.d/db-init.sql"
networks:
- conductor-network
deploy:
replicas: 1
conductor-server:
environment:
- CONFIG_PROP=conductor-swarm-config.properties
image: nubisware/conductor-server
networks:
- conductor-network
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 15s
max_attempts: 10
window: 120s
configs:
- source: swarm-config
target: /app/config/conductor-swarm-config.properties
logging:
driver: "journald"
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- transport.host=0.0.0.0
- discovery.type=single-node
- xpack.security.enabled=false
networks:
conductor-network:
aliases:
- es
logging:
driver: "journald"
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
conductor-ui:
environment:
- WF_SERVER=http://conductor-server:8080/api/
- AUTH_CONFIG_PATH=/app/config/auth.config
image: nubisware/conductor-ui
networks:
- conductor-network
configs:
- source: auth-config
target: /app/config/auth.config
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 15s
max_attempts: 10
window: 120s
base:
environment:
CONDUCTOR_SERVER: http://conductor-dev.int.d4science.net/api
configs:
- source: base-config
target: /app/config.cfg
image: 'nubisware/nubisware-conductor-worker-py-base'
networks:
- conductor-network
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
logging:
driver: "journald"
pep:
image: nginx:1.19.8-alpine
networks:
- conductor-network
ports:
- "80:80"
env_file:
- nginx.env
volumes:
- "${PWD}/keycloak.js:/etc/nginx/keycloak.js"
# uncomment for debugging purposes
#command: [nginx-debug, '-g', 'daemon off;']
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
# max_attempts: 3
window: 120s
configs:
- source: nginxconf
target: /etc/nginx/templates/default.conf.template
- source: nginxbaseconf
target: /etc/nginx/nginx.conf
networks:
conductor-network:
configs:
swarm-config:
file: ./conductor-swarm-config.properties
auth-config:
file: ./auth.cfg
db-init:
file: ./conductor-db-init.sql
base-config:
file: base-config.cfg
nginxconf:
file: ${PWD}/nginx.default.conf
nginxbaseconf:
file: ${PWD}/nginx.conf
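
Once the auth_request lines in nginx.default.conf are uncommented, every call through the pep needs a Keycloak bearer token. A sketch of a client round-trip (client id/secret are placeholders; the Host header selects the conductor-server virtual host):

import requests

token = requests.post(
    "https://accounts.dev.d4science.org/auth/realms/master/protocol/openid-connect/token",
    data={"grant_type": "client_credentials",
          "client_id": "<client>", "client_secret": "<secret>"},
).json()["access_token"]

r = requests.get("http://localhost/api/health",
                 headers={"Host": "conductor-server",
                          "Authorization": f"Bearer {token}"})
print(r.status_code)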

@ -0,0 +1,20 @@ keycloak.js
export default { introspectAccessToken };

// Validate the Bearer token by delegating to the internal
// /jwt_verify_request location, which proxies to the Keycloak
// introspection endpoint (see nginx.default.conf).
function introspectAccessToken(r) {
    // r.error() is used so the trace reaches the error log at any log level.
    r.error("Inside introspectAccessToken " + njs.dump(r.variables));
    r.subrequest("/jwt_verify_request",
        function(reply) {
            if (reply.status == 200) {
                var response = JSON.parse(reply.responseBody);
                r.error("Response is " + reply.responseBody);
                if (response.active === true) {
                    r.return(204); // Token is valid: success, no content
                } else {
                    r.return(403); // Token is invalid: forbidden
                }
            } else {
                r.return(401); // Unexpected response: authentication required
            }
        }
    );
}
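
What the subrequest above performs is an RFC 7662 token introspection. The same call, expressed directly (the credentials are the Basic pair configured in nginx.default.conf, shown here as placeholders):

import requests

resp = requests.post(
    "http://accounts.dev.d4science.org/auth/realms/master/protocol/openid-connect/token/introspect",
    auth=("<pep client id>", "<pep client secret>"),   # sent as the Basic header
    data={"token": "<access token>", "token_type_hint": "access_token"},
)
print(resp.json().get("active"))   # True -> njs returns 204, False -> 403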

@ -0,0 +1,22 @@ nginx.conf
load_module modules/ngx_http_js_module.so;
worker_processes 1;
events {
worker_connections 1024;
}
http {
js_import keycloak.js;
proxy_cache_path /var/cache/nginx/keycloak keys_zone=token_responses:1m max_size=2m;
# js_import json_log.js;
# js_set $json_debug_log json_log.debugLog;
# log_format access_debug escape=none $json_debug_log; # Offload to njs
# access_log /var/log/nginx/access.log access_debug;
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}

@ -0,0 +1,78 @@ nginx.default.conf
upstream _conductor-server {
ip_hash;
server conductor-server:8080;
}
upstream _conductor-ui {
ip_hash;
server conductor-ui:5000;
}
map $http_authorization $source_token {
default "";
"~*^Bearer\s+(?<token>[\S]+)$" $token;
}
server {
listen *:80;
listen [::]:80;
server_name conductor-server;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
location / {
#auth_request /jwt_verify;
proxy_pass http://_conductor-server;
}
location = /jwt_verify {
internal;
js_content keycloak.introspectAccessToken;
}
location /jwt_verify_request {
internal;
proxy_method POST;
proxy_http_version 1.1;
proxy_set_header Host "127.0.0.1";
proxy_set_header Authorization "Basic Z2F5YV9wZXA6NWJiN2RjYWItN2NlNy00YTQ3LTlmNTUtZmE4MWFlYmNjM2I4";
proxy_set_header Content-Type "application/x-www-form-urlencoded";
proxy_set_body "token=$source_token&token_type_hint=access_token";
proxy_pass http://accounts.dev.d4science.org/auth/realms/master/protocol/openid-connect/token/introspect;
proxy_cache token_responses; # Enable caching
proxy_cache_key $source_token; # Cache for each access token
proxy_cache_lock on; # Duplicate tokens must wait
proxy_cache_valid 200 10s; # How long to use each response
proxy_ignore_headers Cache-Control Expires Set-Cookie;
}
}
server {
listen *:80 default_server;
listen [::]:80 default_server;
server_name conductor-ui;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
location / {
#auth_request /jwt_verify;
proxy_pass http://_conductor-ui;
}
}
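
The map block at the top of this file strips the Bearer prefix from the Authorization header into $source_token. Its behaviour, restated as a small Python check:

import re

def source_token(authorization: str) -> str:
    # Case-insensitive match, like the ~* modifier in the nginx map.
    m = re.match(r"(?i)^Bearer\s+(\S+)$", authorization or "")
    return m.group(1) if m else ""

assert source_token("Bearer abc.def.ghi") == "abc.def.ghi"
assert source_token("Basic Z2F5YQ==") == ""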

@ -0,0 +1,4 @@ nginx.env
NGINX_PORT=80
CAMUNDA_PORT=8080
PGADMIN_PORT=80
KEYCLOAK_PORT=8080

@ -0,0 +1,28 @@
frontend http
bind *:80
mode http
option http-keep-alive
use_backend conductor-server_bck if { hdr_dom(host) -i conductor-server.local.net }
use_backend conductor-ui_bck if { hdr_dom(host) -i conductor-ui.local.net }
#
# Backends
#
backend conductor-server_bck
mode http
option httpchk
balance roundrobin
http-check send meth GET uri /api/health ver HTTP/1.1 hdr Host localhost
http-check expect rstatus (2|3)[0-9][0-9]
server-template conductor-server- 2 conductor-local_conductor-server:8080 check resolvers docker init-addr libc,none
backend conductor-ui_bck
mode http
option httpchk
balance roundrobin
http-check send meth GET uri / ver HTTP/1.1 hdr Host localhost
http-check expect rstatus (2|3)[0-9][0-9]
server-template conductor-ui- 2 conductor-local_conductor-ui:5000 check resolvers docker init-addr libc,none
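
The two http-check directives translate to a plain GET with a forced Host header, accepting any 2xx/3xx status. The same probe, reproduced for troubleshooting (the service DNS name matches the server-template above):

import requests

r = requests.get("http://conductor-local_conductor-server:8080/api/health",
                 headers={"Host": "localhost"}, timeout=5)
assert 200 <= r.status_code < 400   # http-check expect rstatus (2|3)[0-9][0-9]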