forked from gCubeSystem/conductor-setup
Merge branch 'master' of https://bitbucket.org/Nubisware/conductor-setup
commit 1887adf73b

@@ -44,6 +44,11 @@ Other setting can be fine tuned by checking the variables in the proper roles wh

- *postgres*: defaults, templates and tasks for starting a single PostgreSQL instance in the swarm
- *workers*: defaults and tasks for starting a replicated set of workers in the swarm that execute HTTP, Shell and Eval operations

## Examples

The following example, run as user `username` against the remote hosts listed in `hosts`, deploys a swarm with 2 replicas of the Conductor server and UI, 1 PostgreSQL instance, 1 Elasticsearch instance, 2 replicas of a simple PyExec worker, and an HAProxy that acts as load balancer.

`ansible-playbook -u username -i hosts site.yaml -e target_path=/tmp/conductor -e cluster_replacement=true`
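
For reference, a minimal `hosts` inventory for such a run could look like the sketch below; the group names mirror the inventories shipped in this repository, while the host names and the manager-node flag are placeholders to adapt to the actual swarm.

```ini
[dev_infra:children]
nw_cluster

[nw_cluster]
swarm1.example.org docker_swarm_manager_main_node=True
swarm2.example.org
swarm3.example.org
```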

## Change log

See [CHANGELOG.md](CHANGELOG.md).

@@ -0,0 +1,506 @@
# config file for ansible -- https://ansible.com/
# ===============================================

# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first

[defaults]

# some basic default values...

#inventory = /etc/ansible/hosts
#library = ./modules
#module_utils = /usr/share/my_module_utils/
#remote_tmp = ~/.ansible/tmp
#local_tmp = ~/.ansible/tmp
#plugin_filters_cfg = /etc/ansible/plugin_filters.yml
#forks = 5
#poll_interval = 15
#sudo_user = root
#ask_sudo_pass = True
#ask_pass = True
#transport = smart
#remote_port = 22
#module_lang = C
#module_set_locale = False

# plays will gather facts by default, which contain information about
# the remote system.
#
# smart - gather by default, but don't regather if already gathered
# implicit - gather by default, turn off with gather_facts: False
# explicit - do not gather by default, must say gather_facts: True
gathering = smart

# This only affects the gathering done by a play's gather_facts directive,
# by default gathering retrieves all facts subsets
# all - gather all subsets
# network - gather min and network facts
# hardware - gather hardware facts (longest facts to retrieve)
# virtual - gather min and virtual facts
# facter - import facts from facter
# ohai - import facts from ohai
# You can combine them using comma (ex: network,virtual)
# You can negate them using ! (ex: !hardware,!facter,!ohai)
# A minimal set of facts is always gathered.
#gather_subset = all

# some hardware related facts are collected
# with a maximum timeout of 10 seconds. This
# option lets you increase or decrease that
# timeout to something more suitable for the
# environment.
# gather_timeout = 10

# Ansible facts are available inside the ansible_facts.* dictionary
# namespace. This setting maintains the behaviour which was the default prior
# to 2.5, duplicating these variables into the main namespace, each with a
# prefix of 'ansible_'.
# This variable is set to True by default for backwards compatibility. It
# will be changed to a default of 'False' in a future release.
# ansible_facts.
# inject_facts_as_vars = True

# additional paths to search for roles in, colon separated
#roles_path = /etc/ansible/roles

# uncomment this to disable SSH key host checking
host_key_checking = False

# change the default callback, you can only have one 'stdout' type enabled at a time.
#stdout_callback = skippy


## Ansible ships with some plugins that require whitelisting,
## this is done to avoid running all of a type by default.
## These setting lists those that you want enabled for your system.
## Custom plugins should not need this unless plugin author specifies it.

# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
callback_whitelist = timer,profile_roles,profile_tasks,mail

# Determine whether includes in tasks and handlers are "static" by
# default. As of 2.0, includes are dynamic by default. Setting these
# values to True will make includes behave more like they did in the
# 1.x versions.
#task_includes_static = False
#handler_includes_static = False

# Controls if a missing handler for a notification event is an error or a warning
#error_on_missing_handler = True

# change this for alternative sudo implementations
#sudo_exe = sudo

# What flags to pass to sudo
# WARNING: leaving out the defaults might create unexpected behaviours
#sudo_flags = -H -S -n

# SSH timeout
#timeout = 10

# default user to use for playbooks if user is not specified
# (/usr/bin/ansible will use current user as default)
#remote_user = root
remote_user = ansible

# logging is off by default unless this path is defined
# if so defined, consider logrotate
#log_path = /var/log/ansible.log

# default module name for /usr/bin/ansible
#module_name = command

# use this shell for commands executed under sudo
# you may need to change this to bin/bash in rare instances
# if sudo is constrained
#executable = /bin/sh

# if inventory variables overlap, does the higher precedence one win
# or are hash values merged together? The default is 'replace' but
# this can also be set to 'merge'.
#hash_behaviour = replace

# by default, variables from roles will be visible in the global variable
# scope. To prevent this, the following option can be enabled, and only
# tasks and handlers within the role will see the variables there
#private_role_vars = yes

# list any Jinja2 extensions to enable here:
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n

# if set, always use this private key file for authentication, same as
# if passing --private-key to ansible or ansible-playbook
#private_key_file = /path/to/file

# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
#vault_password_file = /path/to/vault_password_file

# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.
# replacing {file}, {host} and {uid} and strftime codes with proper values.
ansible_managed = Ansible managed: {file} on {host}
# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
# in some situations so the default is a static string:
#ansible_managed = Ansible managed

# by default, ansible-playbook will display "Skipping [host]" if it determines a task
# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
# messages. NOTE: the task header will still be shown regardless of whether or not the
# task is skipped.
#display_skipped_hosts = True

# by default, if a task in a playbook does not include a name: field then
# ansible-playbook will construct a header that includes the task's action but
# not the task's args. This is a security feature because ansible cannot know
# if the *module* considers an argument to be no_log at the time that the
# header is printed. If your environment doesn't have a problem securing
# stdout from ansible-playbook (or you have manually specified no_log in your
# playbook on all of the tasks where you have secret information) then you can
# safely set this to True to get more informative messages.
#display_args_to_stdout = False

# by default (as of 1.3), Ansible will raise errors when attempting to dereference
# Jinja2 variables that are not set in templates or action lines. Uncomment this line
# to revert the behavior to pre-1.3.
#error_on_undefined_vars = False

# by default (as of 1.6), Ansible may display warnings based on the configuration of the
# system running ansible itself. This may include warnings about 3rd party packages or
# other conditions that should be resolved if possible.
# to disable these warnings, set the following value to False:
#system_warnings = True

# by default (as of 1.4), Ansible may display deprecation warnings for language
# features that should no longer be used and will be removed in future versions.
# to disable these warnings, set the following value to False:
#deprecation_warnings = True

# (as of 1.8), Ansible can optionally warn when usage of the shell and
# command module appear to be simplified by using a default Ansible module
# instead. These warnings can be silenced by adjusting the following
# setting or adding warn=yes or warn=no to the end of the command line
# parameter string. This will for example suggest using the git module
# instead of shelling out to the git command.
command_warnings = True
ssh_args = -o ControlMaster=auto -o ControlPersist=600s
control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r


# set plugin path directories here, separate with colons
action_plugins = /usr/share/ansible/plugins/action
#cache_plugins = /usr/share/ansible/plugins/cache
callback_plugins = /usr/share/ansible/plugins/callback
connection_plugins = /usr/share/ansible/plugins/connection
lookup_plugins = /usr/share/ansible/plugins/lookup
#inventory_plugins = /usr/share/ansible/plugins/inventory
vars_plugins = /usr/share/ansible/plugins/vars
filter_plugins = /usr/share/ansible/plugins/filter
test_plugins = /usr/share/ansible/plugins/test
#terminal_plugins = /usr/share/ansible/plugins/terminal
#strategy_plugins = /usr/share/ansible/plugins/strategy


# by default, ansible will use the 'linear' strategy but you may want to try
# another one
#strategy = free

# by default callbacks are not loaded for /bin/ansible, enable this if you
# want, for example, a notification or logging callback to also apply to
# /bin/ansible runs
bin_ansible_callbacks = True


# don't like cows? that's unfortunate.
# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
#nocows = 1

# set which cowsay stencil you'd like to use by default. When set to 'random',
# a random stencil will be selected for each task. The selection will be filtered
# against the `cow_whitelist` option below.
#cow_selection = default
#cow_selection = random

# when using the 'random' option for cowsay, stencils will be restricted to this list.
# it should be formatted as a comma-separated list with no spaces between names.
# NOTE: line continuations here are for formatting purposes only, as the INI parser
# in python does not support them.
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
#              hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
#              stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www

# don't like colors either?
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
#nocolor = 1

# if set to a persistent type (not 'memory', for example 'redis') fact values
# from previous runs in Ansible will be stored. This may be useful when
# wanting to use, for example, IP information from one group of servers
# without having to talk to them in the same playbook run to get their
# current IP information.
fact_caching = memory

#This option tells Ansible where to cache facts. The value is plugin dependent.
#For the jsonfile plugin, it should be a path to a local directory.
#For the redis plugin, the value is a host:port:database triplet: fact_caching_connection = localhost:6379:0

fact_caching_connection=$HOME/.ansible/facts
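# Illustrative note, not part of the original file: with 'fact_caching = memory'
# the connection path above is effectively unused; persisting facts across runs
# would typically mean switching to a persistent cache plugin, e.g.:
#fact_caching = jsonfile
#fact_caching_connection = $HOME/.ansible/facts
#fact_caching_timeout = 86400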

# retry files
# When a playbook fails by default a .retry file will be created in ~/
# You can disable this feature by setting retry_files_enabled to False
# and you can change the location of the files by setting retry_files_save_path

retry_files_enabled = False
retry_files_save_path = ~/.ansible_retry

# squash actions
# Ansible can optimise actions that call modules with list parameters
# when looping. Instead of calling the module once per with_ item, the
# module is called once with all items at once. Currently this only works
# under limited circumstances, and only with parameters named 'name'.
#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper

# prevents logging of task data, off by default
#no_log = False

# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
no_target_syslog = False

# controls whether Ansible will raise an error or warning if a task has no
# choice but to create world readable temporary files to execute a module on
# the remote machine. This option is False by default for security. Users may
# turn this on to have behaviour more like Ansible prior to 2.1.x. See
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
# for more secure ways to fix this than enabling this option.
#allow_world_readable_tmpfiles = False

# controls the compression level of variables sent to
# worker processes. At the default of 0, no compression
# is used. This value must be an integer from 0 to 9.
#var_compression_level = 9

# controls what compression method is used for new-style ansible modules when
# they are sent to the remote system. The compression types depend on having
# support compiled into both the controller's python and the client's python.
# The names should match with the python Zipfile compression types:
# * ZIP_STORED (no compression. available everywhere)
# * ZIP_DEFLATED (uses zlib, the default)
# These values may be set per host via the ansible_module_compression inventory
# variable
#module_compression = 'ZIP_DEFLATED'

# This controls the cutoff point (in bytes) on --diff for files
# set to 0 for unlimited (RAM may suffer!).
#max_diff_size = 1048576

# This controls how ansible handles multiple --tags and --skip-tags arguments
# on the CLI. If this is True then multiple arguments are merged together. If
# it is False, then the last specified argument is used and the others are ignored.
# This option will be removed in 2.8.
#merge_multiple_cli_flags = True

# Controls showing custom stats at the end, off by default
show_custom_stats = True

# Controls which files to ignore when using a directory as inventory with
# possibly multiple sources (both static and dynamic)
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo

# This family of modules use an alternative execution path optimized for network appliances
# only update this setting if you know how this works, otherwise it can break module execution
#network_group_modules=eos, nxos, ios, iosxr, junos, vyos

# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
# jinja2 templating language which will be run through the templating engine.
# ENABLING THIS COULD BE A SECURITY RISK
#allow_unsafe_lookups = False

# set default errors for all plays
#any_errors_fatal = False

[inventory]
# enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini', 'auto'
#enable_plugins = host_list, virtualbox, yaml, constructed

# ignore these extensions when parsing a directory as inventory source
#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry

# ignore files matching these patterns when parsing a directory as inventory source
#ignore_patterns=

# If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise.
#unparsed_is_failed=False

[privilege_escalation]
become=True
become_method=sudo
become_user=root
become_ask_pass=False

[paramiko_connection]

# uncomment this line to cause the paramiko connection plugin to not record new host
# keys encountered. Increases performance on new host additions. Setting works independently of the
# host key checking setting above.
record_host_keys=False

# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
# line to disable this behaviour.
#pty=False

# paramiko will default to looking for SSH keys initially when trying to
# authenticate to remote devices. This is a problem for some network devices
# that close the connection after a key failure. Uncomment this line to
# disable the Paramiko look for keys function
#look_for_keys = False

# When using persistent connections with Paramiko, the connection runs in a
# background process. If the host doesn't already have a valid SSH key, by
# default Ansible will prompt to add the host key. This will cause connections
# running in background processes to fail. Uncomment this line to have
# Paramiko automatically add host keys.
#host_key_auto_add = True

[ssh_connection]

# ssh arguments to use
# Leaving off ControlPersist will result in poor performance, so use
# paramiko on older platforms rather than removing it, -C controls compression use
ssh_args = -C -o ControlMaster=auto -o ControlPersist=120s

# The base directory for the ControlPath sockets.
# This is the "%(directory)s" in the control_path option
#
# Example:
# control_path_dir = /tmp/.ansible/cp
#control_path_dir = ~/.ansible/cp

# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
# port and username (empty string in the config). The hash mitigates a common problem users
# found with long hostames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
# In those cases, a "too long for Unix domain socket" ssh error would occur.
#
# Example:
#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
#control_path =

# Enabling pipelining reduces the number of SSH operations required to
# execute a module on the remote server. This can result in a significant
# performance improvement when enabled, however when using "sudo:" you must
# first disable 'requiretty' in /etc/sudoers
#
# By default, this option is disabled to preserve compatibility with
# sudoers configurations that have requiretty (the default on many distros).
#
pipelining = True
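# Illustrative note, not part of the original file: on the managed hosts this
# requires a sudoers configuration that does not enforce requiretty for the
# connecting user, e.g. a 'Defaults !requiretty' line (or no 'Defaults requiretty' at all).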

# Control the mechanism for transferring files (old)
# * smart = try sftp and then try scp [default]
# * True = use scp only
# * False = use sftp only
#scp_if_ssh = smart

# Control the mechanism for transferring files (new)
# If set, this will override the scp_if_ssh option
# * sftp = use sftp to transfer files
# * scp = use scp to transfer files
# * piped = use 'dd' over SSH to transfer files
# * smart = try sftp, scp, and piped, in that order [default]
transfer_method = smart

# if False, sftp will not use batch mode to transfer files. This may cause some
# types of file transfer failures impossible to catch however, and should
# only be disabled if your sftp version has problems with batch mode
#sftp_batch_mode = False

# The -tt argument is passed to ssh when pipelining is not enabled because sudo
# requires a tty by default.
#use_tty = True

# Number of times to retry an SSH connection to a host, in case of UNREACHABLE.
# For each retry attempt, there is an exponential backoff,
# so after the first attempt there is 1s wait, then 2s, 4s etc. up to 30s (max).
retries = 3

[persistent_connection]

# Configures the persistent connection timeout value in seconds. This value is
# how long the persistent connection will remain idle before it is destroyed.
# If the connection doesn't receive a request before the timeout value
# expires, the connection is shutdown. The default value is 30 seconds.
connect_timeout = 120

# Configures the persistent connection retry timeout. This value configures the
# the retry timeout that ansible-connection will wait to connect
# to the local domain socket. This value must be larger than the
# ssh timeout (timeout) and less than persistent connection idle timeout (connect_timeout).
# The default value is 15 seconds.
#connect_retry_timeout = 15

# The command timeout value defines the amount of time to wait for a command
# or RPC call before timing out. The value for the command timeout must
# be less than the value of the persistent connection idle timeout (connect_timeout)
# The default value is 10 second.
#command_timeout = 10

[accelerate]
#accelerate_port = 5099
#accelerate_timeout = 30
#accelerate_connect_timeout = 5.0

# The daemon timeout is measured in minutes. This time is measured
# from the last activity to the accelerate daemon.
#accelerate_daemon_timeout = 30

# If set to yes, accelerate_multi_key will allow multiple
# private keys to be uploaded to it, though each user must
# have access to the system via SSH to add a new key. The default
# is "no".
#accelerate_multi_key = yes

[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p

# Set this to yes to allow libvirt_lxc connections to work without SELinux.
#libvirt_lxc_noseclabel = yes

[colors]
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
#debug = dark gray
#deprecate = purple
#skip = cyan
#unreachable = red
#ok = green
#changed = yellow
#diff_add = green
#diff_remove = red
#diff_lines = cyan


[diff]
# Always print diff when running ( same as always running with -D/--diff )
# always = no

# Set how many context lines to show in diff
# context = 3

[ara]
api_client = http
api_timeout = 30
api_server = http://127.0.0.1:8000

@@ -0,0 +1,3 @@
---
infrastructure: dev
conductor_workers_server: http://conductor-dev.int.d4science.net/api

@@ -0,0 +1,3 @@
---
infrastructure: pre
conductor_workers_server: https://conductor.pre.d4science.org/api

@@ -0,0 +1,5 @@
[dev_infra:children]
nw_cluster

[nw_cluster]
nubis1.int.d4science.net

@@ -0,0 +1,5 @@
[pre_infra:children]
pre_cluster

[pre_cluster]
docker-swarm1.int.d4science.net docker_swarm_manager_main_node=True

@@ -21,7 +21,7 @@ haproxy_docker_swarm_additional_networks: []

haproxy_docker_swarm_haproxy_constraints:
  - 'node.role == manager'
haproxy_docker_swarm_additional_services: [{ acl_name: 'conductor-server', acl_rule: 'hdr_dom(host) -i conductor-dev.int.d4science.net', stack_name: 'conductor', service_name: 'conductor-server', service_replica_num: '2', service_port: '8080', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri /api/health ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }, { acl_name: 'conductor-ui', acl_rule: 'hdr_dom(host) -i conductorui-dev.int.d4science.net', stack_name: 'conductor', service_name: 'conductor-ui', service_replica_num: '2', service_port: '5000', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }]
haproxy_docker_swarm_additional_services: [{ acl_name: 'conductor-server', acl_rule: 'hdr_dom(host) -i conductor-dev.int.d4science.net', stack_name: 'conductor-{{ infrastructure }}', service_name: 'conductor-server', service_replica_num: '2', service_port: '8080', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri /api/health ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }, { acl_name: 'conductor-ui', acl_rule: 'hdr_dom(host) -i conductorui-dev.int.d4science.net', stack_name: 'conductor-{{ infrastructure }}', service_name: 'conductor-ui', service_replica_num: '2', service_port: '5000', service_overlay_network: 'conductor-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth GET uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }]
# - { acl_name: 'service', acl_rule: 'hdr_dom(host) -i service.example.com', stack_name: 'stack', service_name: 'service', service_replica_num: '1', service_port: '9999', service_overlay_network: 'service-network', stick_sessions: False, stick_on_cookie: True, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: True, http_check: 'meth HEAD uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]', allowed_networks: '192.168.1.0/24 192.168.2.0/24' }

haproxy_default_port: 80

@@ -5,7 +5,7 @@
    dest: "{{ target_path }}/conductor-swarm.yaml"

- name: Generate conductor config from dynomite seeds
  when: not use_jdbc
  when: conductor_db is defined and conductor_db == 'dynomite'
  vars:
    seeds: "{{ lookup('file', '{{ target_path}}/seeds.list').splitlines() }}"
  template:

@@ -13,13 +13,13 @@
    dest: "{{ target_path }}/{{ conductor_config }}"

- name: Generate conductor config for JDBC DB
  when: use_jdbc
  when: conductor_db is not defined or conductor_db != 'dynomite'
  template:
    src: "templates/{{ conductor_config_template }}"
    dest: "{{ target_path }}/{{ conductor_config }}"

- name: Copy conductor SQL schema init for JDBC DB
  when: use_jdbc and init_db
  when: (conductor_db is not defined or conductor_db != 'dynomite') and init_db
  template:
    src: "templates/conductor-db-init-{{ conductor_db }}.sql.j2"
    dest: "{{ target_path }}/conductor-db-init.sql"

@@ -1,7 +1,7 @@
CREATE DATABASE IF NOT EXISTS {{ jdbc_db }};
GRANT ALL PRIVILEGES ON {{ jdbc_db }}.* TO {{ jdbc_user}};
CREATE DATABASE IF NOT EXISTS {{ mysql_jdbc_db }};
GRANT ALL PRIVILEGES ON {{ jdbc_db }}.* TO {{ mysql_jdbc_user}};
FLUSH PRIVILEGES;
USE {{ jdbc_db }};
USE {{ mysql_jdbc_db }};

-- V1__initial_schema.sql --
-- --------------------------------------------------------------------------------------------------------------

@@ -8,15 +8,25 @@ conductor.grpc.server.enabled=false
# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo
# redis : non-Dynomite based redis instance
# dynomite : Dynomite cluster. Use this for HA configuration.
{% if use_jdbc is defined and use_jdbc %}
db={{ conductor_db }}
jdbc.url={{ jdbc_url }}
jdbc.username={{ jdbc_user }}
jdbc.password={{ jdbc_pass }}
{% if conductor_db is not defined or conductor_db == 'postgres' %}
db=postgres
jdbc.url={{ postgres_jdbc_url }}
jdbc.username={{ postgres_jdbc_user }}
jdbc.password={{ postgres_jdbc_pass }}
conductor.{{ conductor_db }}.connection.pool.size.max=10
conductor.{{ conductor_db }}.connection.pool.idle.min=2
flyway.enabled=false

{% elif conductor_db is defined and conductor_db == 'mysql' %}
db=mysql
jdbc.url={{ mysql_jdbc_url }}
jdbc.username={{ mysql_jdbc_user }}
jdbc.password={{ mysql_jdbc_pass }}
conductor.{{ conductor_db }}.connection.pool.size.max=10
conductor.{{ conductor_db }}.connection.pool.idle.min=2
flyway.enabled=false

{% else %}
db=dynomite

@@ -0,0 +1,20 @@
---
elasticsearch_replicas: 1

postgres_service_name: 'postgresdb'
postgres_replicas: 1
postgres_conductor_db: postgres
postgres_jdbc_user: conductor
postgres_jdbc_pass: password
postgres_jdbc_db: conductor
postgres_jdbc_url: jdbc:postgresql://{{ postgres_service_name }}:5432/{{ postgres_jdbc_db }}

mysql_image_name: 'mariadb'
mysql_service_name: 'mysqldb'
mysql_replicas: 1
mysql_conductor_db: mysql
mysql_jdbc_user: conductor
mysql_jdbc_pass: password
mysql_jdbc_db: conductor
mysql_jdbc_url: jdbc:mysql://{{ mysql_service_name }}:3306/{{ mysql_jdbc_db }}?useSSL=false&allowPublicKeyRetrieval=true
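# Illustrative note, not part of the original file: with the defaults above the
# two JDBC URLs render to
#   jdbc:postgresql://postgresdb:5432/conductor
#   jdbc:mysql://mysqldb:3306/conductor?useSSL=false&allowPublicKeyRetrieval=true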

@@ -0,0 +1,17 @@
---
- name: Generate elasticsearch swarm
  template:
    src: templates/elasticsearch-swarm.yaml.j2
    dest: "{{ target_path }}/elasticsearch-swarm.yaml"

- name: Generate postgres swarm
  template:
    src: templates/postgres-swarm.yaml.j2
    dest: "{{ target_path }}/postgres-swarm.yaml"
  when: conductor_db is not defined or conductor_db == 'postgres'

- name: "Generate mysql swarm, image used: {{ mysql_image_name }}"
  template:
    src: templates/mysql-swarm.yaml.j2
    dest: "{{ target_path }}/mysql-swarm.yaml"
  when: conductor_db is defined and conductor_db == 'mysql'
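# Illustrative usage, not part of the original role: the backend is selected via
# the 'conductor_db' variable (postgres when undefined or set to 'postgres'), e.g.
#   ansible-playbook -u username -i hosts site.yaml -e target_path=/tmp/conductor -e conductor_db=mysql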

@@ -0,0 +1,31 @@
version: '3.6'

services:

  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - transport.host=0.0.0.0
      - discovery.type=single-node
      - xpack.security.enabled=false
    networks:
      {{ conductor_network }}:
        aliases:
          - es
    logging:
      driver: "journald"
    deploy:
      mode: replicated
      replicas: {{ elasticsearch_replicas }}
      #endpoint_mode: dnsrr
      placement:
        constraints: [node.role == worker]
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 120s

networks:
  {{ conductor_network }}:

@@ -0,0 +1,30 @@
version: '3.6'

services:

  {{ mysql_service_name }}:
    image: {{ mysql_image_name }}
    environment:
      MYSQL_USER: {{ mysql_jdbc_user }}
      MYSQL_PASSWORD: {{ mysql_jdbc_pass }}
      MYSQL_ROOT_PASSWORD: {{ mysql_jdbc_pass }}
      MYSQL_DB: {{ mysql_jdbc_db }}
{% if init_db %}
    configs:
      - source: db-init
        target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
    networks:
      - {{ conductor_network }}
    deploy:
      replicas: {{ mysql_replicas }}
      placement:
        constraints: [node.role == worker]

networks:
  {{ conductor_network }}:
{% if init_db %}
configs:
  db-init:
    file: {{ target_path }}/conductor-db-init.sql
{% endif %}

@@ -0,0 +1,31 @@
version: '3.6'

services:

  {{ postgres_service_name }}:
    image: postgres
    ports:
      - "5432:5432"
    environment:
      POSTGRES_USER: "{{ postgres_jdbc_user }}"
      POSTGRES_PASSWORD: "{{ postgres_jdbc_pass }}"
      POSTGRES_DB: "{{ postgres_jdbc_db }}"
{% if init_db %}
    configs:
      - source: db-init
        target: "/docker-entrypoint-initdb.d/db-init.sql"
{% endif %}
    networks:
      - {{ conductor_network }}
    deploy:
      replicas: {{ postgres_replicas }}
      placement:
        constraints: [node.role == worker]

networks:
  {{ conductor_network }}:
{% if init_db %}
configs:
  db-init:
    file: {{ target_path }}/conductor-db-init.sql
{% endif %}

@@ -7,4 +7,4 @@ conductor_db: mysql
jdbc_user: conductor
jdbc_pass: password
jdbc_db: conductor
jdbc_url: jdbc:mysql://{{ mysql_service_name }}:3306/{{ jdbc_db }}?useSSL=false&allowPublicKeyRetrieval=true
jdbc_url: jdbc:mysql://{{ mysql_service_name }}:3306/{{ mysql_jdbc_db }}?useSSL=false&allowPublicKeyRetrieval=true

@@ -5,9 +5,9 @@ services:
  {{ mysql_service_name }}:
    image: {{ mysql_image_name }}
    environment:
      MYSQL_USER: {{ jdbc_user }}
      MYSQL_PASSWORD: {{ jdbc_pass }}
      MYSQL_ROOT_PASSWORD: {{ jdbc_pass }}
      MYSQL_USER: {{ mysql_jdbc_user }}
      MYSQL_PASSWORD: {{ mysql_jdbc_pass }}
      MYSQL_ROOT_PASSWORD: {{ mysql_jdbc_pass }}
      MYSQL_DB: {{ jdbc_db }}
{% if init_db %}
    configs:

@@ -6,4 +6,4 @@ conductor_db: postgres
jdbc_user: conductor
jdbc_pass: password
jdbc_db: conductor
jdbc_url: jdbc:postgresql://{{ postgres_service_name }}:5432/{{ jdbc_db }}
jdbc_url: jdbc:postgresql://{{ postgres_service_name }}:5432/{{ postgres_jdbc_db }}

@@ -7,9 +7,9 @@ services:
    ports:
      - "5432:5432"
    environment:
      POSTGRES_USER: "{{ jdbc_user }}"
      POSTGRES_PASSWORD: "{{ jdbc_pass }}"
      POSTGRES_DB: "{{ jdbc_db }}"
      POSTGRES_USER: "{{ postgres_jdbc_user }}"
      POSTGRES_PASSWORD: "{{ postgres_jdbc_pass }}"
      POSTGRES_DB: "{{ postgres_jdbc_db }}"
{% if init_db %}
    configs:
      - source: db-init

@@ -8,4 +8,3 @@
    src: templates/config.cfg.j2
    dest: "{{ target_path }}/{{ item.service }}-config.cfg"
  loop: "{{ conductor_workers }}"


@@ -0,0 +1,85 @@
#!/bin/bash
#
# The "directory/directory.yml" is the old way that we used to simplify jobs execution.
# The "directory/site.yml" is the syntax used by roles (from ansible version 1.2)
#
# Otherwise we can directly execute a single play (file)
#
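#
# Illustrative usage, not part of the original script (the script name below is a placeholder):
#   ./run.sh                 # runs site.yml against ./hosts or ./inventory/hosts, if present
#   ./run.sh myplay.yml -vv  # runs an explicit play file; extra arguments go to ansible-playbook
#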

PAR=50
TIMEOUT=15
PLAY=site.yml
HOSTS_DIR=.
ANSIBLE_HOSTS=

export TMPDIR=/var/tmp/${USER}
if [ ! -d ${TMPDIR} ] ; then
  mkdir -p ${TMPDIR}
fi

if [ -f ./ansible.cfg ] ; then
  export ANSIBLE_CONFIG="./ansible.cfg"
fi

# No cows!
export ANSIBLE_NOCOWS=1

export ANSIBLE_ERROR_ON_UNDEFINED_VARS=True
export ANSIBLE_HOST_KEY_CHECKING=False
export ANSIBLE_LIBRARY="/usr/share/ansible:./modules:../modules:$ANSIBLE_LIBRARY"

# Update the galaxy requirements
if [ -f requirements.yml ] ; then
  ansible-galaxy install --ignore-errors -f -r requirements.yml
fi

PLAY_OPTS="-T $TIMEOUT -f $PAR"

if [ -f "$1" ] ; then
  PLAY=$1
elif [ ! -f $PLAY ] ; then
  echo "No play file available."
  exit 1
fi

if [ -f "${PLAY}" ] ; then
  MAIN="${PLAY}"
  shift
elif [ -f "${PLAY}.yml" ]; then
  MAIN="${PLAY}.yml"
  shift
fi

if [ -f ${HOSTS_DIR}/hosts ] ; then
  ANSIBLE_HOSTS=${HOSTS_DIR}/hosts
fi
if [ -f ${HOSTS_DIR}/inventory/hosts ] ; then
  ANSIBLE_HOSTS=${HOSTS_DIR}/inventory/hosts
fi
if [ ! -z "$ANSIBLE_HOSTS" ] ; then
  PLAY_OPTS="-i $ANSIBLE_HOSTS"
fi

#echo "Find vault encrypted files if any"
if [ -d ./group_vars ] ; then
  VAULT_GROUP_FILES=$( find ./group_vars -name \*vault\* )
fi
if [ -d ./host_vars ] ; then
  VAULT_HOST_FILES=$( find ./host_vars -name \*vault\* )
fi

if [ -n "$VAULT_GROUP_FILES" ] || [ -n "$VAULT_HOST_FILES" ] ; then
  # Vault requires a password.
  # To encrypt a password for a user: python -c "from passlib.hash import sha512_crypt; print sha512_crypt.encrypt('<password>')"
  if [ -f ~/.conductor_ansible_vault_pass.txt ] ; then
    PLAY_OPTS="$PLAY_OPTS --vault-password-file=~/.conductor_ansible_vault_pass.txt"
  else
    echo "There are password protected encrypted files, we will ask for password before proceeding"
    PLAY_OPTS="$PLAY_OPTS --ask-vault-pass"
  fi
fi

# Main
ansible-playbook $PLAY_OPTS $MAIN $@

rm -f /tmp/passwordfile

site.yaml

@@ -1,55 +1,56 @@
---
- hosts: localhost
- hosts: pre_infra:dev_infra
  roles:
    - common
    - role: cluster-replacement
      when:
        - cluster_replacement is defined and cluster_replacement|bool
    - role: postgres
      when: db is not defined or db == 'postgres'
    - role: mysql
      when: db is defined and db == 'mysql'
    - elasticsearch
    - role: databases
    - conductor
    - role: workers
      when:
        - no_workers is not defined or not no_workers|bool
  tasks:
    - name: Start {{ db|default('postgres', true) }} and es
      docker_stack:
        name: conductor
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/{{ db|default('postgres', true) }}-swarm.yaml"
          - "{{ target_path }}/elasticsearch-swarm.yaml"
      when: dry is not defined or dry|bool
      when: dry is not defined or not dry|bool

    - name: Waiting for databases
      pause:
        seconds: 10
      when: dry is not defined or dry|bool
      when: dry is not defined or not dry|bool

    - name: Start conductor
      docker_stack:
        name: conductor
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/conductor-swarm.yaml"
      when: dry is not defined or dry|bool
      when: dry is not defined or not dry|bool

    - name: Start haproxy
      docker_stack:
        name: conductor
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/haproxy-swarm.yaml"
      when:
        - dry is not defined or dry|bool
        - dry is not defined or not dry|bool
        - cluster_replacement is defined
        - cluster_replacement|bool

    - name: Start workers
      include_role:
        name: workers
      docker_stack:
        name: 'conductor-{{ infrastructure }}'
        state: present
        compose:
          - "{{ target_path }}/conductor-workers-swarm.yaml"
      when:
        - dry is not defined or dry|bool
        - workers is defined
        - workers|bool
        - dry is not defined or not dry|bool
        - no_workers is not defined or not no_workers|bool