# Conductor server properties — Jinja2 template.
# The persistence backend is chosen by the `conductor_db` variable
# (postgres when undefined, or 'postgres' / 'mysql' / anything-else -> dynomite).

# Servers.
conductor.jetty.server.enabled=true
conductor.grpc.server.enabled=false

# Database persistence model. Possible values are memory, redis, and dynomite.
# If omitted, the persistence used is memory
#
# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo
# redis : non-Dynomite based redis instance
# dynomite : Dynomite cluster. Use this for HA configuration.
#
# NOTE: this template only ever renders db=postgres, db=mysql, or db=dynomite
# (selected below); the memory/redis values above are Conductor's own docs.
{% if conductor_db is not defined or conductor_db == 'postgres' %}
db=postgres
jdbc.url={{ postgres_jdbc_url }}
jdbc.username={{ postgres_jdbc_user }}
jdbc.password={{ postgres_jdbc_pass }}
# The backend name is written literally here: this branch is also taken when
# conductor_db is undefined, and interpolating the variable would then render
# the invalid key "conductor..connection.pool.size.max".
conductor.postgres.connection.pool.size.max=10
conductor.postgres.connection.pool.idle.min=2
flyway.enabled=false

{% elif conductor_db is defined and conductor_db == 'mysql' %}
db=mysql
jdbc.url={{ mysql_jdbc_url }}
jdbc.username={{ mysql_jdbc_user }}
jdbc.password={{ mysql_jdbc_pass }}
# Inside this branch conductor_db is exactly 'mysql', so write the literal
# backend name instead of interpolating the variable (keeps the key constant
# and consistent with the postgres branch).
conductor.mysql.connection.pool.size.max=10
conductor.mysql.connection.pool.idle.min=2
flyway.enabled=false


{% else %}
db=dynomite

# Dynomite Cluster details.
# format is host:port:rack separated by semicolon
# The first seed's rack is taken as the availability zone and only seeds in
# that rack are listed; the port is forced to the Dynomite client port 8102.
# NOTE(review): if the final entry of `seeds` belongs to a different rack,
# the rendered list may end with a trailing ';' — verify against the consumer.
workflow.dynomite.cluster.hosts={% set ns = namespace() %}
{% set ns.availability_zone = "" %}
{% for seed in seeds %}
{% set ns.seed_tokens = seed.split(':') %}
{% if ns.availability_zone == "" %}
{% set ns.availability_zone = ns.seed_tokens[2] %}
{% endif %}
{% if ns.availability_zone == ns.seed_tokens[2] %}
{{ ns.seed_tokens[0] }}:8102:{{ ns.availability_zone }}{%- if not loop.last %};{%- endif %}
{% endif %}
{%- endfor %}


# If you are running using dynomite, also add the following line to the property
# to set the rack/availability zone of the conductor server to be same as dynomite cluster config
# NOTE: the misspelling "AVAILABILTY" matches the property name Conductor
# itself reads (upstream typo) — do not correct it here.
EC2_AVAILABILTY_ZONE={{ ns.availability_zone }}

# Dynomite cluster name
workflow.dynomite.cluster.name=dyno1

# Namespace for the keys stored in Dynomite/Redis
workflow.namespace.prefix=conductor

# Namespace prefix for the dyno queues
workflow.namespace.queue.prefix=conductor_queues

# No. of threads allocated to dyno-queues (optional)
queues.dynomite.threads=3

# Non-quorum port used to connect to local redis. Used by dyno-queues.
# When using redis directly, set this to the same port as redis server
# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite.
queues.dynomite.nonQuorum.port=22122
{% endif %}

# Elastic search instance type. Possible values are memory and external.
# If not specified, the instance type will be embedded in memory
#
# memory: The instance is created in memory and lost when the server dies. Useful for development and testing.
# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when
# the server dies. Useful for more stable environments like staging or production.
workflow.elasticsearch.instanceType=external

# Transport address to elasticsearch
workflow.elasticsearch.url=elasticsearch:9300

# Name of the elasticsearch index used by Conductor
workflow.elasticsearch.index.name=conductor

# Additional modules (optional)
# conductor.additional.modules=class_extending_com.google.inject.AbstractModule

# Additional modules for metrics collection (optional)
# conductor.additional.modules=com.netflix.conductor.contribs.metrics.MetricsRegistryModule,com.netflix.conductor.contribs.metrics.LoggingMetricsModule
# com.netflix.conductor.contribs.metrics.LoggingMetricsModule.reportPeriodSeconds=15

# Load sample kitchen sink workflow
loadSample=false