added swarms for elasticsearch and conductor

pull/1/head
dcore94 4 years ago
parent cb033e7163
commit 4c5975abbe

@@ -3,3 +3,5 @@
roles:
- common
- dynomite
- elasticsearch
- conductor

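For context, these roles run in order from the enclosing play. A minimal sketch of what that play might look like (the host group name swarm_managers is an assumption, not part of this commit):

- hosts: swarm_managers
  become: yes
  roles:
    - common         # base setup shared by all roles
    - dynomite       # renders the Dynomite seed list and stack file
    - elasticsearch  # renders elasticsearch-swarm.yaml
    - conductor      # renders conductor-swarm.yaml and copies its config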
@@ -1,12 +1,3 @@
---
target_path: /tmp/conductor_setup_test
conductor_network: conductor-network
dynomite_shards: 3
dynomite_replicas: 3
elasticsearch_replicas: 1
conductor_replicas: 2

@@ -0,0 +1,58 @@
# Servers.
conductor.jetty.server.enabled=true
conductor.grpc.server.enabled=false
# Database persistence model. Possible values are memory, redis, and dynomite.
# If omitted, the persistence used is memory
#
# memory   : The data is stored in memory and lost when the server dies. Useful for testing or demos
# redis    : non-Dynomite based redis instance
# dynomite : Dynomite cluster. Use this for HA configuration.
db=dynomite
# Dynomite Cluster details.
# Format is host:port:rack entries separated by semicolons
workflow.dynomite.cluster.hosts=dynomite1:8102:us-east-1b;dynomite2:8102:us-east-1b;dynomite3:8102:us-east-2b;dynomite4:8102:us-east-2b
# Dynomite cluster name
workflow.dynomite.cluster.name=dyno1
# Namespace for the keys stored in Dynomite/Redis
workflow.namespace.prefix=conductor
# Namespace prefix for the dyno queues
workflow.namespace.queue.prefix=conductor_queues
# No. of threads allocated to dyno-queues (optional)
queues.dynomite.threads=10
# Non-quorum port used to connect to local redis. Used by dyno-queues.
# When using redis directly, set this to the same port as redis server
# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite.
queues.dynomite.nonQuorum.port=22122
# Elasticsearch instance type. Possible values are memory and external.
# If not specified, an embedded in-memory instance is used.
#
# memory: The instance is created in memory and lost when the server dies. Useful for development and testing.
# external: The Elasticsearch instance runs outside of the server. Data is persisted and does not get lost when
#           the server dies. Useful for more stable environments like staging or production.
workflow.elasticsearch.instanceType=external
# Transport address to elasticsearch
workflow.elasticsearch.url=elasticsearch:9300
# Name of the elasticsearch index
workflow.elasticsearch.index.name=conductor
# Additional modules (optional)
# conductor.additional.modules=class_extending_com.google.inject.AbstractModule
# Additional modules for metrics collection (optional)
# conductor.additional.modules=com.netflix.conductor.contribs.metrics.MetricsRegistryModule,com.netflix.conductor.contribs.metrics.LoggingMetricsModule
# com.netflix.conductor.contribs.metrics.LoggingMetricsModule.reportPeriodSeconds=15
# Load sample kitchen sink workflow
loadSample=false

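The workflow.dynomite.cluster.hosts value above is a static example. Were it derived from the Ansible inventory instead, a Jinja2 expression along these lines could render it (the dynomite inventory group and the per-host rack variable are assumptions, not part of this commit):

{# hypothetical: build host:port:rack entries from a 'dynomite' inventory group #}
workflow.dynomite.cluster.hosts={% for h in groups['dynomite'] %}{{ h }}:8102:{{ hostvars[h].rack | default('us-east-1b') }}{% if not loop.last %};{% endif %}{% endfor %}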
@@ -0,0 +1,3 @@
---
conductor_replicas: 2
conductor_config: conductor-swarm-config.properties

@@ -0,0 +1,10 @@
---
- name: Generate conductor-swarm
  template:
    src: templates/conductor-swarm.yaml.j2
    dest: "{{ target_path }}/conductor-swarm.yaml"
- name: Copy conductor config
  copy:
    src: "{{ conductor_config }}"
    dest: "{{ target_path }}/{{ conductor_config }}"

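Note that these tasks only render files under {{ target_path }}; nothing here deploys the stack. A sketch of a follow-up task that would do so (the task and the stack name conductor are assumptions, not part of this commit):

- name: Deploy conductor stack (hypothetical)
  command: docker stack deploy -c {{ target_path }}/conductor-swarm.yaml conductor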
@@ -0,0 +1,55 @@
version: '3.6'
services:
  conductor-server:
    environment:
      - CONFIG_PROP={{ conductor_config }}
    image: nubisware/conductor-server
    networks:
      - {{ conductor_network }}
    ports:
      - "8080:8080"
    deploy:
      mode: replicated
      replicas: {{ conductor_replicas }}
      #endpoint_mode: dnsrr
      placement:
        constraints: [node.role == worker]
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 120s
    configs:
      - source: swarm-config
        target: /app/config/{{ conductor_config }}
    logging:
      driver: "journald"
  conductor-ui:
    environment:
      - WF_SERVER=http://conductor-server:8080/api/
    image: nubisware/conductor-ui
    networks:
      - {{ conductor_network }}
    ports:
      - "5000:5000"
    deploy:
      mode: replicated
      replicas: {{ conductor_replicas }}
      #endpoint_mode: dnsrr
      placement:
        constraints: [node.role == worker]
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 120s
networks:
  {{ conductor_network }}:
configs:
  swarm-config:
    file: ./{{ conductor_config }}

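One caveat: this template and the elasticsearch one below both declare {{ conductor_network }}. Deployed as two separate stacks, each stack would create its own scoped network, and conductor-server could not resolve elasticsearch:9300 across them. A common workaround, assuming the overlay network is created up front (e.g. docker network create --driver overlay {{ conductor_network }}), is to mark it external in both templates:

networks:
  {{ conductor_network }}:
    external: true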
@@ -0,0 +1,3 @@
---
dynomite_shards: 3
dynomite_replicas: 3

@@ -1,7 +1,4 @@
---
- name: Hello world dynomite
  command: echo "Hello world"
- name: Generate seedlist
  template:
    src: templates/seeds.list.j2

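The referenced templates/seeds.list.j2 is not shown in this hunk. For orientation only: Dynomite seed files list one peer per line as host:port:rack:dc:token, so a hypothetical template driven by the defaults below might look like this (host names, peer port 8101, the dc name, and the token scheme are all assumptions, not part of this commit):

{% for r in range(dynomite_replicas) %}{% for s in range(dynomite_shards) %}
dynomite{{ r * dynomite_shards + s + 1 }}:8101:rack{{ r + 1 }}:dc1:{{ (4294967295 // dynomite_shards) * s }}
{% endfor %}{% endfor %}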
@@ -0,0 +1,2 @@
---
elasticsearch_replicas: 1

@@ -0,0 +1,5 @@
---
- name: Generate elasticsearch swarm
  template:
    src: templates/elasticsearch-swarm.yaml.j2
    dest: "{{ target_path }}/elasticsearch-swarm.yaml"

@@ -0,0 +1,31 @@
version: '3.6'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - transport.host=0.0.0.0
      - discovery.type=single-node
      - xpack.security.enabled=false
    networks:
      {{ conductor_network }}:
        aliases:
          - es
    logging:
      driver: "journald"
    deploy:
      mode: replicated
      replicas: {{ elasticsearch_replicas }}
      #endpoint_mode: dnsrr
      placement:
        constraints: [node.role == worker]
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 120s
networks:
  {{ conductor_network }}:
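Worth noting: discovery.type=single-node makes every replica an independent one-node cluster, so elasticsearch_replicas only really makes sense at 1 here. Scaling out on Elasticsearch 5.x would instead need zen discovery settings along these lines (a sketch, not part of this commit):

      - discovery.zen.ping.unicast.hosts=es
      - discovery.zen.minimum_master_nodes=2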