#!/usr/bin/env bash

. ./variables.sh

#
# Docker Swarm
#
# Network for the NFS traffic with the Docker Swarm
#
openstack --os-cloud ${os_infra} network create --no-share --mtu 8942 swarm-nfs-net
openstack --os-cloud ${os_infra} subnet create --network swarm-nfs-net --dhcp --gateway none --subnet-range 192.168.1.0/24 --allocation-pool start=192.168.1.5,end=192.168.1.150 --dns-publish-fixed-ip swarm-nfs-subnet

#
# Security group (ingress allows everything)
#
openstack --os-cloud ${os_infra} security group create \
--description "Docker Swarm internal traffic" \
"Docker Swarm"
rules_to_delete=$(openstack --os-cloud ${os_infra} security group show -c rules "Docker Swarm" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud ${os_infra} security group rule delete $r
done
fi
openstack --os-cloud ${os_infra} security group rule create \
--description "TCP traffic" --ingress --protocol tcp \
--remote-ip ${os_private_network} "Docker Swarm"
openstack --os-cloud ${os_infra} security group rule create \
--description "UDP traffic" --ingress --protocol udp \
--remote-ip ${os_private_network} "Docker Swarm"
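# Optional check (illustrative, not part of the original provisioning): list the
# resulting rules to confirm that only the ingress rules defined above remain after
# the default egress rules have been deleted.
# openstack --os-cloud ${os_infra} security group rule list "Docker Swarm"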

openstack --os-cloud ${os_infra} security group create \
--description "Traffic between the Docker Swarm and the NFS server" \
"Docker Swarm NFS"
rules_to_delete=$(openstack --os-cloud ${os_infra} security group show -c rules "Docker Swarm NFS" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud ${os_infra} security group rule delete $r
done
fi
openstack --os-cloud ${os_infra} security group rule create \
--description "TCP traffic" --ingress --protocol tcp \
--remote-ip 192.168.1.0/24 "Docker Swarm NFS"
openstack --os-cloud ${os_infra} security group rule create \
--description "UDP traffic" --ingress --protocol udp \
--remote-ip 192.168.1.0/24 "Docker Swarm NFS"

# Server group (soft anti-affinity)
openstack --os-cloud ${os_infra} server group create --policy soft-anti-affinity docker-managers
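# Note (illustrative): the server group IDs used in the --hint options of the
# 'server create' commands below (docker-managers here, docker-workers further down)
# are hard-coded; they could also be looked up at run time, e.g.:
# managers_group_id=$(openstack --os-cloud ${os_infra} server group show docker-managers -f value -c id)
# (the variable name is only an example)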

# VMs for the manager nodes
openstack --os-cloud ${os_infra} server create \
--image Ubuntu-Jammy-22.04 --flavor m1.medium \
--key-name adellam-ed25519 \
--network ${os_infra}-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 3 --max 3 \
--security-group default --security-group "Docker Swarm" \
--hint group=50d520fd-d63c-4a66-9dbf-dba271971299 \
swarm-mgr

for i in 1 2 3; do
openstack --os-cloud ${os_infra} volume create --size 100 --description "Swarm mgr $i data disk" swarm-mgr-data-$i
openstack --os-cloud ${os_infra} server add volume swarm-mgr-$i swarm-mgr-data-$i --device /dev/vdb
done
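# Optional check (illustrative): verify that each data disk has been attached as
# /dev/vdb; the exact output column may differ between client versions.
# openstack --os-cloud ${os_infra} server show swarm-mgr-1 -c volumes_attached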

# VMs for the worker nodes (flavor m1.xlarge)
openstack --os-cloud ${os_infra} server group create --policy soft-anti-affinity docker-workers
openstack --os-cloud ${os_infra} server create \
--image Ubuntu-Jammy-22.04 --flavor m1.xlarge \
--key-name adellam-ed25519 \
--network ${os_infra}-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 5 --max 5 \
--security-group default --security-group "Docker Swarm" \
--hint group=5eaa8fbb-1ac4-4249-a33b-32a97ec99cd5 \
swarm-worker

for i in 1 2 3 4 5 ; do
openstack --os-cloud ${os_infra} volume create --size 100 --description "Swarm worker $i data disk" swarm-worker-data-$i
openstack --os-cloud ${os_infra} server add volume swarm-worker-$i swarm-worker-data-$i --device /dev/vdb
done

openstack --os-cloud ${os_infra} server create \
--image Ubuntu-Jammy-22.04 --flavor m1.medium \
--key-name adellam-ed25519 \
--network ${os_infra}-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 1 --max 1 \
--security-group default --security-group "Docker Swarm NFS" \
swarm-nfs-server

openstack --os-cloud ${os_infra} volume create --size 100 --description "Swarm NFS server data disk" swarm-nfs-server-data
openstack --os-cloud ${os_infra} server add volume swarm-nfs-server swarm-nfs-server-data --device /dev/vdb

#
# Octavia
#
# Create the swarm load balancer
openstack --os-cloud ${os_infra} loadbalancer create --description "D4Science dev L4 balancer for Docker Swarm" --flavor octavia_amphora-mvcpu-ha --enable --wait --name "l4-swarm-dev" --vip-network-id ${os_infra}-cloud-main
# Create and assign a floating IP address
openstack --os-cloud ${os_infra} floating ip create --description "D4Science dev Docker Swarm load balancer" --dns-domain ${os_dns_zone}. --dns-name swarm-lb external-network
# Add the floating IP to the load balancer
# the vip_port_id is shown by 'openstack loadbalancer show l4-swarm-dev'
# openstack --os-cloud ${os_infra} floating ip set --port <load_balancer_vip_port_id> <floating_ip_id>
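# For example (illustrative, variable names are placeholders), the two IDs can be
# retrieved with:
# lb_vip_port_id=$(openstack --os-cloud ${os_infra} loadbalancer show l4-swarm-dev -f value -c vip_port_id)
# lb_fip_id=$(openstack --os-cloud ${os_infra} floating ip list --floating-ip-address <floating_ip_address> -f value -c ID)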
openstack --os-cloud ${os_infra} floating ip set --port 1a3077e6-68aa-43d2-b117-44a5bb6852cb 146.48.30.38

#
# listener for the haproxy stats
openstack --os-cloud ${os_infra} loadbalancer listener create --name "l4-swarm-haproxy-stats" --description "Stats of the Docker Swarm HAPROXY" --protocol TCP --protocol-port 8880 --allowed-cidr 146.48.122.27/32 --allowed-cidr 146.48.122.49/32 --allowed-cidr 146.48.28.10/32 --allowed-cidr 146.48.28.11/32 --enable --wait "l4-swarm-dev"
# Pool for the haproxy stats
openstack --os-cloud ${os_infra} loadbalancer pool create --name swarm-haproxy-stats --description "Stats of the Docker Swarm HAPROXY" --protocol TCP --listener "l4-swarm-haproxy-stats" --lb-algorithm ROUND_ROBIN --enable --disable-tls
# Pool members for the haproxy stats
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 8880 swarm-haproxy-stats
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 8880 swarm-haproxy-stats
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 8880 swarm-haproxy-stats
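# Note (illustrative): the member addresses above are assumed to be the managers'
# addresses on the ${os_infra}-cloud-main network; they can be checked with, e.g.:
# openstack --os-cloud ${os_infra} server show swarm-mgr-1 -f value -c addresses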
# Create a health check for the pool
openstack --os-cloud ${os_infra} loadbalancer healthmonitor create --name swarm-haproxy-stats-check --type TCP --enable --wait --delay 5 --timeout 5 --max-retries 3 swarm-haproxy-stats
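# Optional check (illustrative): review the operating status of the load balancer,
# its listeners, pools and members.
# openstack --os-cloud ${os_infra} loadbalancer status show l4-swarm-dev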

#
# listener for the swarm haproxy plain http on port 8080
openstack --os-cloud ${os_infra} loadbalancer listener create --name "l4-swarm-haproxy-8080" --description "Docker Swarm HAPROXY port 8080" --protocol TCP --protocol-port 8080 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud ${os_infra} loadbalancer pool create --name swarm-haproxy-8080 --description "Docker Swarm HAPROXY 8080" --protocol PROXYV2 --listener l4-swarm-haproxy-8080 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Pool members for the haproxy on port 8080
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud ${os_infra} loadbalancer healthmonitor create --name swarm-haproxy-8080-check --type HTTP --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-8080

#
# listener for the swarm haproxy plain http on port 80
openstack --os-cloud ${os_infra} loadbalancer listener create --name "l4-swarm-haproxy-80" --description "Docker Swarm HAPROXY port 80" --protocol TCP --protocol-port 80 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud ${os_infra} loadbalancer pool create --name swarm-haproxy-80 --description "Docker Swarm HAPROXY 80" --protocol PROXYV2 --listener l4-swarm-haproxy-80 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Pool members for the haproxy on port 80
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud ${os_infra} loadbalancer healthmonitor create --name swarm-haproxy-http-check --type HTTP --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-80

#
# listener for the swarm haproxy on port 443
openstack --os-cloud ${os_infra} loadbalancer listener create --name "l4-swarm-haproxy-443" --description "Docker Swarm HAPROXY port 443" --protocol TCP --protocol-port 443 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud ${os_infra} loadbalancer pool create --name swarm-haproxy-443 --description "Docker Swarm HAPROXY 443" --protocol PROXYV2 --listener l4-swarm-haproxy-443 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Pool members for the haproxy on port 443
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud ${os_infra} loadbalancer healthmonitor create --name swarm-haproxy-https-check --type HTTPS --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-443

#
# Add a CNAME for portainer
#
openstack --os-cloud ${os_infra} recordset create --record swarm-lb.${os_dns_zone}. --type CNAME ${os_dns_zone}. portainer

# CNAMEs for CCP, CDN and the conductor
openstack --os-cloud ${os_infra} recordset create --record swarm-lb.${os_dns_zone}. --type CNAME ${os_dns_zone}. ccp
openstack --os-cloud ${os_infra} recordset create --record swarm-lb.${os_dns_zone}. --type CNAME ${os_dns_zone}. cdn
openstack --os-cloud ${os_infra} recordset create --record swarm-lb.${os_dns_zone}. --type CNAME ${os_dns_zone}. conductor
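
# Optional check (illustrative): list the recordsets of the zone to verify the CNAMEs.
# openstack --os-cloud ${os_infra} recordset list ${os_dns_zone}.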