diff --git a/ansible/roles/cluster-replacement/defaults/main.yml b/ansible/roles/cluster-replacement/defaults/main.yml
new file mode 100644
index 0000000..48c30ea
--- /dev/null
+++ b/ansible/roles/cluster-replacement/defaults/main.yml
@@ -0,0 +1,56 @@
+---
+haproxy_latest_release: true
+# Quoted to avoid YAML float typing (e.g. a future '2.20' would parse as 2.2)
+haproxy_version: '2.2'
+haproxy_repo_key: 'https://haproxy.debian.net/bernat.debian.org.gpg'
+haproxy_debian_latest_repo: "deb http://haproxy.debian.net {{ ansible_lsb.codename }}-backports-{{ haproxy_version }} main"
+haproxy_ubuntu_latest_repo: "ppa:vbernat/haproxy-{{ haproxy_version }}"
+haproxy_pkg_state: present
+haproxy_enabled: true
+haproxy_loglevel: info
+haproxy_k_bind_non_local_ip: true
+haproxy_docker_container: false
+haproxy_docker_version: '{{ haproxy_version }}.4'
+haproxy_docker_image: 'haproxytech/haproxy-debian:{{ haproxy_version }}.4'
+haproxy_docker_compose_dir: /srv/haproxy_swarm
+haproxy_docker_restart_policy: 'on-failure'
+
+haproxy_ha_with_keepalived: false
+haproxy_docker_swarm_networks:
+  - '{{ docker_swarm_portainer_network }}'
+haproxy_docker_swarm_additional_networks: []
+
+haproxy_docker_swarm_haproxy_constraints:
+  - 'node.role == manager'
+haproxy_docker_swarm_additional_services: [{ acl_name: 'conductor-server', acl_rule: 'hdr_dom(host) -i conductor-server.int.d4science.net', stack_name: 'conductor', service_name: 'conductor-server', service_replica_num: '2', service_port: '8080', service_overlay_network: 'conductor-network', stick_sessions: false, stick_on_cookie: true, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: true, http_check: 'meth GET uri /api/health ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }, { acl_name: 'conductor-ui', acl_rule: 'hdr_dom(host) -i conductor-ui.int.d4science.net', stack_name: 'conductor', service_name: 'conductor-ui', service_replica_num: '2', service_port: '5000', service_overlay_network: 'conductor-network', stick_sessions: false, stick_on_cookie: true, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: true, http_check: 'meth GET uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]' }]
+# - { acl_name: 'service', acl_rule: 'hdr_dom(host) -i service.example.com', stack_name: 'stack', service_name: 'service', service_replica_num: '1', service_port: '9999', service_overlay_network: 'service-network', stick_sessions: false, stick_on_cookie: true, stick_cookie: 'JSESSIONID', stick_table: 'type ip size 2m expire 180m', balance_type: 'roundrobin', backend_options: '', http_check_enabled: true, http_check: 'meth HEAD uri / ver HTTP/1.1 hdr Host localhost', http_check_expect: 'rstatus (2|3)[0-9][0-9]', allowed_networks: '192.168.1.0/24 192.168.2.0/24' }
+
+haproxy_default_port: 80
+haproxy_terminate_tls: false
+haproxy_ssl_port: 443
+haproxy_admin_port: 8880
+haproxy_admin_socket: /run/haproxy/admin.sock
+
+haproxy_install_additional_pkgs: false
+haproxy_additional_pkgs:
+  - haproxyctl
+  - haproxy-log-analysis
+
+haproxy_nagios_check: false
+# It's a percentage
+haproxy_nagios_check_w: 70
+haproxy_nagios_check_c: 90
+
+# Used by some other role as defaults, eg docker-swarm
+haproxy_spread_checks: 5
+haproxy_connect_timeout: 10s
+haproxy_client_timeout: 120s
+haproxy_server_timeout: 480s
+haproxy_global_keepalive_timeout: 10s
+haproxy_client_keepalive_timeout: 5184000s
+haproxy_backend_maxconn: 2048
+haproxy_check_interval: 3s
+haproxy_check_timeout: 2s
+haproxy_maxconns: 4096
+
+haproxy_sysctl_conntrack_max: 131072
+
diff --git a/ansible/roles/cluster-replacement/tasks/main.yml b/ansible/roles/cluster-replacement/tasks/main.yml
new file mode 100644
index 0000000..1ca5ff7
--- /dev/null
+++ b/ansible/roles/cluster-replacement/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+- name: Generate haproxy config
+  template:
+    src: templates/haproxy.cfg.j2
+    dest: "{{ target_path }}/haproxy.cfg"
+
+- name: Generate haproxy-docker-swarm
+  template:
+    src: templates/haproxy-docker-swarm.yaml.j2
+    dest: "{{ target_path }}/haproxy-swarm.yaml"
+
+- name: Create the overlay network that will be joined by the proxied services
+  docker_network:
+    name: '{{ haproxy_docker_overlay_network }}'
+    driver: overlay
+    scope: swarm
diff --git a/ansible/roles/cluster-replacement/templates/haproxy-docker-swarm.yaml.j2 b/ansible/roles/cluster-replacement/templates/haproxy-docker-swarm.yaml.j2
new file mode 100644
index 0000000..7424aba
--- /dev/null
+++ b/ansible/roles/cluster-replacement/templates/haproxy-docker-swarm.yaml.j2
@@ -0,0 +1,56 @@
+version: '3.6'
+
+services:
+  haproxy:
+    image: {{ haproxy_docker_image }}
+    configs:
+      - source: haproxy-config
+        target: /usr/local/etc/haproxy/haproxy.cfg
+    networks:
+      - {{ haproxy_docker_overlay_network }}
+    volumes:
+      #- /etc/haproxy:/usr/local/etc/haproxy:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+    ports:
+      - target: {{ haproxy_default_port }}
+        published: {{ haproxy_default_port }}
+        protocol: tcp
+        mode: host
+      - target: {{ haproxy_ssl_port }}
+        published: {{ haproxy_ssl_port }}
+        protocol: tcp
+        mode: host
+      - target: {{ haproxy_admin_port }}
+        published: {{ haproxy_admin_port }}
+        protocol: tcp
+        mode: host
+    dns: [127.0.0.11]
+    deploy:
+      mode: replicated
+      replicas: 1
+      update_config:
+        parallelism: 1
+        delay: 20s
+      placement:
+        constraints:
+          - "node.role==manager"
+      restart_policy:
+        condition: {{ haproxy_docker_restart_policy }}
+        delay: 20s
+        max_attempts: 5
+        window: 120s
+      resources:
+        limits:
+          cpus: '2.0'
+          memory: 768M
+        reservations:
+          cpus: '1.0'
+          memory: 384M
+    logging:
+      driver: 'journald'
+configs:
+  haproxy-config:
+    file: ./haproxy.cfg
+networks:
+  {{ haproxy_docker_overlay_network }}:
+    external: true
diff --git a/ansible/roles/cluster-replacement/templates/haproxy.cfg.j2 b/ansible/roles/cluster-replacement/templates/haproxy.cfg.j2
new file mode 100644
index 0000000..2d1932f
--- /dev/null
+++ b/ansible/roles/cluster-replacement/templates/haproxy.cfg.j2
@@ -0,0 +1,72 @@
+global
+    log fd@2 local2
+    chroot /var/lib/haproxy
+    pidfile /var/run/haproxy.pid
+    maxconn 4000
+    user haproxy
+    group haproxy
+    stats socket /var/lib/haproxy/stats expose-fd listeners
+    master-worker
+
+resolvers docker
+    nameserver dns1 127.0.0.11:53
+    resolve_retries 3
+    timeout resolve 1s
+    timeout retry 1s
+    hold other 10s
+    hold refused 10s
+    hold nx 10s
+    hold timeout 10s
+    hold valid 10s
+    hold obsolete 10s
+
+# NOTE: connect/client/server timeouts are set once, from role variables.
+defaults
+    log global
+    monitor-uri /_haproxy_health_check
+    timeout http-keep-alive {{ haproxy_global_keepalive_timeout }}
+    timeout connect {{ haproxy_connect_timeout }}
+    timeout client {{ haproxy_client_timeout }}
+    timeout server {{ haproxy_server_timeout }}
+    timeout check {{ haproxy_check_timeout }}
+    timeout http-request 10s # slowloris protection
+    default-server inter 3s fall 2 rise 2 slowstart 60s
+
+# Needed to preserve the stick tables
+peers mypeers
+    peer local_haproxy 127.0.0.1:1024
+
+frontend http
+
+    bind *:{{ haproxy_default_port }}
+
+    mode http
+    option http-keep-alive
+
+{% for srv in haproxy_docker_swarm_additional_services %}
+    use_backend {{ srv.acl_name }}_bck if { {{ srv.acl_rule }} }
+{% endfor %}
+
+#
+# Backends
+#
+
+{% for srv in haproxy_docker_swarm_additional_services %}
+backend {{ srv.acl_name }}_bck
+    mode http
+    option httpchk
+    balance {{ srv.balance_type | default('roundrobin') }}
+{% if srv.http_check_enabled is defined and srv.http_check_enabled %}
+    http-check send {{ srv.http_check }}
+    http-check expect {{ srv.http_check_expect }}
+{% endif %}
+{% if srv.stick_sessions %}
+{% if srv.stick_on_cookie %}
+    cookie {{ srv.stick_cookie }}
+{% else %}
+    stick on src
+    stick-table {{ srv.stick_table }}
+{% endif %}
+{% endif %}
+    server-template {{ srv.service_name }}- {{ srv.service_replica_num }} {{ srv.stack_name }}_{{ srv.service_name }}:{{ srv.service_port }} {{ srv.backend_options | default('') }} check resolvers docker init-addr libc,none
+{% endfor %}
diff --git a/ansible/roles/cluster-replacement/vars/main.yml b/ansible/roles/cluster-replacement/vars/main.yml
new file mode 100644
index 0000000..b3a4f44
--- /dev/null
+++ b/ansible/roles/cluster-replacement/vars/main.yml
@@ -0,0 +1,3 @@
+---
+cluster_replacement: true
+haproxy_docker_overlay_network: 'haproxy-public'
diff --git a/ansible/roles/common/defaults/main.yaml b/ansible/roles/common/defaults/main.yaml
index 8dd9588..f11b70b 100644
--- a/ansible/roles/common/defaults/main.yaml
+++ b/ansible/roles/common/defaults/main.yaml
@@ -1,3 +1,3 @@
 ---
-target_path: /tmp/conductor_stack
+target_path: /tmp/conductor_stack_haproxy
 conductor_network: conductor-network
diff --git a/ansible/roles/conductor/defaults/main.yaml b/ansible/roles/conductor/defaults/main.yaml
index d74fa72..00a8a60 100644
--- a/ansible/roles/conductor/defaults/main.yaml
+++ b/ansible/roles/conductor/defaults/main.yaml
@@ -2,3 +2,4 @@
 conductor_replicas: 2
 conductor_config: conductor-swarm-config.properties
 conductor_config_template: "{{ conductor_config }}.j2"
+
diff --git a/ansible/roles/conductor/templates/conductor-swarm.yaml.j2 b/ansible/roles/conductor/templates/conductor-swarm.yaml.j2
index 42ff507..d653c38 100644
--- a/ansible/roles/conductor/templates/conductor-swarm.yaml.j2
+++ b/ansible/roles/conductor/templates/conductor-swarm.yaml.j2
@@ -7,12 +7,19 @@ services:
     image: nubisware/conductor-server
     networks:
       - {{ conductor_network }}
+{% if (cluster_replacement is defined and cluster_replacement) or (cluster_check is defined and cluster_check) %}
+      - {{ haproxy_docker_overlay_network }}
+{% endif %}
+{% if not ((cluster_replacement is defined and cluster_replacement) or (cluster_check is defined and cluster_check)) %}
     ports:
       - "8080:8080"
+{% endif %}
     deploy:
       mode: replicated
       replicas: {{ conductor_replicas }}
-      #endpoint_mode: dnsrr
+{% if (cluster_replacement is defined and cluster_replacement) or (cluster_check is defined and cluster_check) %}
+      endpoint_mode: dnsrr
+{% endif %}
       placement:
         constraints: [node.role == worker]
       restart_policy:
@@ -33,12 +40,19 @@ services:
     image: nubisware/conductor-ui
     networks:
       - {{ conductor_network }}
+{% if (cluster_replacement is defined and cluster_replacement) or (cluster_check is defined and cluster_check) %}
+      - {{ haproxy_docker_overlay_network }}
+{% endif %}
+{% if not ((cluster_replacement is defined and cluster_replacement) or (cluster_check is defined and cluster_check)) %}
     ports:
       - "5000:5000"
+{% endif %}
     deploy:
       mode: replicated
       replicas: {{ conductor_replicas }}
-      #endpoint_mode: dnsrr
+{% if (cluster_replacement is defined and cluster_replacement) or (cluster_check is defined and cluster_check) %}
+      endpoint_mode: dnsrr
+{% endif %}
       placement:
         constraints: [node.role == worker]
       restart_policy:
@@ -49,6 +63,10 @@ services:
 
 networks:
   {{ conductor_network }}:
+{% if (cluster_replacement is defined and cluster_replacement) or (cluster_check is defined and cluster_check) %}
+  {{ haproxy_docker_overlay_network }}:
+    external: true
+{% endif %}
 
 configs:
   swarm-config:
diff --git a/ansible/site-with-cluster-replacement.yaml b/ansible/site-with-cluster-replacement.yaml
new file mode 100644
index 0000000..94812c9
--- /dev/null
+++ b/ansible/site-with-cluster-replacement.yaml
@@ -0,0 +1,29 @@
+---
+- hosts: localhost
+  roles:
+    - common
+    - elasticsearch
+    - dynomite
+  tasks:
+    - name: Start dynomite and es
+      docker_stack:
+        name: conductor
+        state: present
+        compose:
+          - "{{ target_path }}/dynomite-swarm.yaml"
+          - "{{ target_path }}/elasticsearch-swarm.yaml"
+
+
+- hosts: localhost
+  roles:
+    - common
+    - cluster-replacement
+    - conductor
+  tasks:
+    - name: Start conductor
+      docker_stack:
+        name: conductor
+        state: present
+        compose:
+          - "{{ target_path }}/conductor-swarm.yaml"
+          - "{{ target_path }}/haproxy-swarm.yaml"