First commit of dev and preprod scripts.

Author: Andrea Dell'Amico, 2023-10-24 12:50:35 +02:00
parent 345d9635d8
commit 8cb83faf78
Signed by: andrea.dellamico (GPG key ID: 147ABE6CEB9E20FF)
8 changed files with 555 additions and 0 deletions

@@ -0,0 +1,39 @@
#!/usr/bin/env bash
#
# PostgreSQL
#
# Separate network for the DB traffic
openstack --os-cloud d4s-dev network create --no-share --mtu 8942 postgresql-srv-net
openstack --os-cloud d4s-dev subnet create --network postgresql-srv-net --dhcp --gateway none --subnet-range 192.168.2.0/24 postgresql-srv-subnet
#
# Security group that allows postgresql traffic on the dedicated subnet only
openstack --os-cloud d4s-dev security group create \
--description "PostgreSQL internal traffic" \
"PostgreSQL service"
# Drop the default egress rules: extract the egress rule IDs from the 'rules' column and delete them one by one
rules_to_delete=$(openstack --os-cloud d4s-dev security group show -c rules "PostgreSQL service" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud d4s-dev security group rule delete $r
done
fi
openstack --os-cloud d4s-dev security group rule create \
--description "TCP traffic" \
--ingress --protocol tcp --dst-port 5432 \
--remote-ip 192.168.2.0/24 "PostgreSQL service"
#
# PostgreSQL VM
openstack --os-cloud d4s-dev server create \
--image Ubuntu-Jammy-22.04 --flavor m1.large \
--key-name adellam-ed25519 \
--network d4s-dev-cloud-main \
--network postgresql-srv-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 1 --max 1 \
--security-group default --security-group "PostgreSQL service" \
postgresql-server
#
# Data volume for PostgreSQL
openstack --os-cloud d4s-dev volume create --size 200 --description "PostgreSQL server data disk" postgresql-server-data
openstack --os-cloud d4s-dev server add volume postgresql-server postgresql-server-data --device /dev/vdb
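# On the VM itself the data disk still has to be formatted and mounted; a minimal sketch,
# assuming an ext4 filesystem and a /data/postgresql mount point (both are assumptions, not
# part of this script), run as root on postgresql-server:
#   mkfs.ext4 -L pgdata /dev/vdb
#   mkdir -p /data/postgresql
#   echo 'LABEL=pgdata /data/postgresql ext4 defaults,noatime 0 2' >> /etc/fstab
#   mount /data/postgresql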

@@ -0,0 +1,168 @@
#!/bin/bash
#
# Docker Swarm
#
# Security group (ingress allows all TCP/UDP traffic from the private network)
#
openstack --os-cloud d4s-dev security group create \
--description "Docker Swarm internal traffic" \
"Docker Swarm"
rules_to_delete=$(openstack --os-cloud d4s-dev security group show -c rules "Docker Swarm" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud d4s-dev security group rule delete $r
done
fi
openstack --os-cloud d4s-dev security group rule create \
--description "TCP traffic" --ingress --protocol tcp \
--remote-ip 10.1.28.0/22 "Docker Swarm"
openstack --os-cloud d4s-dev security group rule create \
--description "UDP traffic" --ingress --protocol udp \
--remote-ip 10.1.28.0/22 "Docker Swarm"
openstack --os-cloud d4s-dev security group create \
--description "Traffic between the Docker Swarm and the NFS server" \
"Docker Swarm NFS"
rules_to_delete=$(openstack --os-cloud d4s-dev security group show -c rules "Docker Swarm NFS" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud d4s-dev security group rule delete $r
done
fi
openstack --os-cloud d4s-dev security group rule create \
--description "TCP traffic" --ingress --protocol tcp \
--remote-ip 192.168.1.0/24 "Docker Swarm NFS"
openstack --os-cloud d4s-dev security group rule create \
--description "UDP traffic" --ingress --protocol udp \
--remote-ip 192.168.1.0/24 "Docker Swarm NFS"
# Separate network for the NFS traffic
openstack --os-cloud d4s-dev network create --no-share --mtu 8942 swarm-nfs-net
openstack --os-cloud d4s-dev subnet create --network swarm-nfs-net --dhcp --gateway none --subnet-range 192.168.1.0/24 swarm-nfs-subnet
# Server group (soft anti affinity)
openstack --os-cloud d4s-dev server group create --policy soft-anti-affinity docker-managers
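# The server group UUID used in the --hint below could also be looked up instead of copied
# by hand; a read-only sketch:
docker_managers_group_id=$(openstack --os-cloud d4s-dev server group show docker-managers -c id -f value)
echo "docker-managers server group id: ${docker_managers_group_id}"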
# VMs for the manager nodes
openstack --os-cloud d4s-dev server create \
--image Ubuntu-Jammy-22.04 --flavor m1.medium \
--key-name adellam-ed25519 \
--network d4s-dev-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 3 --max 3 \
--security-group default --security-group "Docker Swarm" \
--hint group=50d520fd-d63c-4a66-9dbf-dba271971299 \
swarm-mgr
for i in 1 2 3; do
openstack --os-cloud d4s-dev volume create --size 100 --description "Swarm mgr $i data disk" swarm-mgr-data-$i
openstack --os-cloud d4s-dev server add volume swarm-mgr-$i swarm-mgr-data-$i --device /dev/vdb
done
# VMs for the worker nodes (flavor m1.xlarge)
openstack --os-cloud d4s-dev server group create --policy soft-anti-affinity docker-workers
openstack --os-cloud d4s-dev server create \
--image Ubuntu-Jammy-22.04 --flavor m1.xlarge \
--key-name adellam-ed25519 \
--network d4s-dev-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 5 --max 5 \
--security-group default --security-group "Docker Swarm" \
--hint group=5eaa8fbb-1ac4-4249-a33b-32a97ec99cd5 \
swarm-worker
for i in 1 2 3 4 5 ; do
openstack --os-cloud d4s-dev volume create --size 100 --description "Swarm worker $i data disk" swarm-worker-data-$i
openstack --os-cloud d4s-dev server add volume swarm-worker-$i swarm-worker-data-$i --device /dev/vdb
done
openstack --os-cloud d4s-dev server create \
--image Ubuntu-Jammy-22.04 --flavor m1.medium \
--key-name adellam-ed25519 \
--network d4s-dev-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 1 --max 1 \
--security-group default --security-group "Docker Swarm NFS" \
swarm-nfs-server
openstack --os-cloud d4s-dev volume create --size 100 --description "Swarm NFS server data disk" swarm-nfs-server-data
openstack --os-cloud d4s-dev server add volume swarm-nfs-server swarm-nfs-server-data --device /dev/vdb
#
# Octavia
#
# Create the swarm load balancer
openstack --os-cloud d4s-dev loadbalancer create --description "D4Science dev L4 balancer for Docker Swarm" --flavor octavia_amphora-mvcpu-ha --enable --wait --name "l4-swarm-dev" --vip-network-id d4s-dev-cloud-main
# Create and assign a floating IP address
openstack --os-cloud d4s-dev floating ip create --description "D4Science dev Docker Swarm load balancer" --dns-domain cloud-dev.d4science.org. --dns-name swarm-lb external-network
# Add the floating IP to the load balancer
# the vip_port_id is shown by 'openstack loadbalancer show l4-swarm-dev'
# openstack --os-cloud d4s-dev floating ip set --port <load_balancer_vip_port_id> <floating_ip_id>
openstack --os-cloud d4s-dev floating ip set --port 1a3077e6-68aa-43d2-b117-44a5bb6852cb 146.48.30.38
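# A sketch of how the VIP port ID could be fetched instead of copied by hand (kept commented
# out because the command above already performs the assignment with the literal values):
# lb_vip_port=$(openstack --os-cloud d4s-dev loadbalancer show l4-swarm-dev -c vip_port_id -f value)
# openstack --os-cloud d4s-dev floating ip set --port "$lb_vip_port" 146.48.30.38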
#
# listener for the haproxy stats
openstack --os-cloud d4s-dev loadbalancer listener create --name "l4-swarm-haproxy-stats" --description "Stats of the Docker Swarm HAPROXY" --protocol TCP --protocol-port 8880 --allowed-cidr 146.48.122.27/32 --allowed-cidr 146.48.122.49/32 --allowed-cidr 146.48.28.10/32 --allowed-cidr 146.48.28.11/32 --enable --wait "l4-swarm-dev"
# Pool for the haproxy stats
openstack --os-cloud d4s-dev loadbalancer pool create --name swarm-haproxy-stats --description "Stats of the Docker Swarm HAPROXY" --protocol TCP --listener "l4-swarm-haproxy-stats" --lb-algorithm ROUND_ROBIN --enable --disable-tls
# Add the members to the haproxy stats pool
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 8880 swarm-haproxy-stats
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 8880 swarm-haproxy-stats
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 8880 swarm-haproxy-stats
# Create a health check for the pool
openstack --os-cloud d4s-dev loadbalancer healthmonitor create --name swarm-haproxy-stats-check --type TCP --enable --wait --delay 5 --timeout 5 --max-retries 3 swarm-haproxy-stats
#
# listener for the swarm haproxy plain http on port 8080
openstack --os-cloud d4s-dev loadbalancer listener create --name "l4-swarm-haproxy-8080" --description "Docker Swarm HAPROXY port 8080" --protocol TCP --protocol-port 8080 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud d4s-dev loadbalancer pool create --name swarm-haproxy-8080 --description "Docker Swarm HAPROXY 8080" --protocol PROXYV2 --listener l4-swarm-haproxy-8080 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Add the members to the port 8080 pool
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud d4s-dev loadbalancer healthmonitor create --name swarm-haproxy-8080-check --type HTTP --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-8080
#
# listener for the swarm haproxy plain http on port 80
openstack --os-cloud d4s-dev loadbalancer listener create --name "l4-swarm-haproxy-80" --description "Docker Swarm HAPROXY port 80" --protocol TCP --protocol-port 80 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud d4s-dev loadbalancer pool create --name swarm-haproxy-80 --description "Docker Swarm HAPROXY 80" --protocol PROXYV2 --listener l4-swarm-haproxy-80 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Add the members to the port 80 pool
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud d4s-dev loadbalancer healthmonitor create --name swarm-haproxy-http-check --type HTTP --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-80
#
# listener for the swarm haproxy on port 443
openstack --os-cloud d4s-dev loadbalancer listener create --name "l4-swarm-haproxy-443" --description "Docker Swarm HAPROXY port 443" --protocol TCP --protocol-port 443 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud d4s-dev loadbalancer pool create --name swarm-haproxy-443 --description "Docker Swarm HAPROXY 443" --protocol PROXYV2 --listener l4-swarm-haproxy-443 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Add the members to the port 443 pool
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud d4s-dev loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud d4s-dev loadbalancer healthmonitor create --name swarm-haproxy-https-check --type HTTPS --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-443
#
# Add a CNAME for portainer
#
openstack --os-cloud d4s-dev recordset create --record swarm-lb.cloud-dev.d4science.org. --type CNAME cloud-dev.d4science.org. portainer
# CNAMES for CCP, CDN and the conductor
openstack --os-cloud d4s-dev recordset create --record swarm-lb.cloud-dev.d4science.org. --type CNAME cloud-dev.d4science.org. ccp
openstack --os-cloud d4s-dev recordset create --record swarm-lb.cloud-dev.d4science.org. --type CNAME cloud-dev.d4science.org. cdn
openstack --os-cloud d4s-dev recordset create --record swarm-lb.cloud-dev.d4science.org. --type CNAME cloud-dev.d4science.org. conductor
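# Read-only check of the recordsets just created (a sketch, assuming the Designate plugin is available):
openstack --os-cloud d4s-dev recordset list cloud-dev.d4science.org.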

@@ -0,0 +1,41 @@
#!/usr/bin/env sh
. ./variables.sh
#
# VM shell
#
openstack --os-cloud d4s-pre server create \
--image Ubuntu-Jammy-22.04 --flavor m2.small --description "SSH Proxy Jump" \
--key-name adellam-ed25519 \
--network d4s-pre-cloud-main --hostname pre-shell \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 100 \
--security-group default --security-group "Limited SSH access" \
pre-shell
# Security group rules that allow traffic from the shell (jump) server
openstack --os-cloud d4s-pre security group \
rule create --description "HTTPS from the jump proxy" \
--ingress --protocol tcp --dst-port 443 --remote-ip <private IP of pre-shell> \
default
openstack --os-cloud d4s-pre security group \
rule create --description "HTTP from the jump proxy" \
--ingress --protocol tcp --dst-port 80 --remote-ip <private IP of pre-shell> \
default
openstack --os-cloud d4s-pre security group \
rule create --description "Tomcat debugging on port 8100 from the jump proxy" \
--ingress --protocol tcp --dst-port 8100 --remote-ip <private IP of pre-shell> \
default
# VM internal CA
openstack --os-cloud d4s-pre server create \
--image Ubuntu-Jammy-22.04 --flavor m1.small --description "Internal CA" \
--key-name adellam-ed25519 --network d4s-pre-cloud-main --hostname internal-ca \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 15 --security-group default \
internal-ca
# VM prometheus (flavor m1.large): still to be created
# Server group (anti-affinity) for HAPROXY
# 2 HAPROXY VMs (flavor m1.medium): still to be created
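# A sketch of what the notes above could translate to, following the pattern of the other VMs
# in this commit; names, volume sizes and the server group are assumptions, so the commands
# are left commented out:
# openstack --os-cloud d4s-pre server create \
#   --image Ubuntu-Jammy-22.04 --flavor m1.large --description "Prometheus server" \
#   --key-name adellam-ed25519 --network d4s-pre-cloud-main --hostname prometheus \
#   --user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
#   --boot-from-volume 10 --security-group default \
#   prometheus
# openstack --os-cloud d4s-pre server group create --policy anti-affinity haproxy-l7
# openstack --os-cloud d4s-pre server create \
#   --image Ubuntu-Jammy-22.04 --flavor m1.medium --description "HAPROXY L7" \
#   --key-name adellam-ed25519 --network d4s-pre-cloud-main \
#   --user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
#   --boot-from-volume 10 --security-group default \
#   --min 2 --max 2 \
#   --hint group=<haproxy-l7 server group UUID> \
#   haproxy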

@@ -0,0 +1,169 @@
#!/usr/bin/env sh
. ./variables.sh
#
# Docker Swarm
#
# Network for the NFS traffic with the Docker Swarm
#
openstack --os-cloud ${os_infra} network create --no-share --mtu 8942 swarm-nfs-net
openstack --os-cloud ${os_infra} subnet create --network swarm-nfs-net --dhcp --gateway none --subnet-range 192.168.1.0/24 --allocation-pool start=192.168.1.5,end=192.168.1.150 --dns-publish-fixed-ip swarm-nfs-subnet
#
# Security group (ingress allows all TCP/UDP traffic from the private network)
#
openstack --os-cloud ${os_infra} security group create \
--description "Docker Swarm internal traffic" \
"Docker Swarm"
rules_to_delete=$(openstack --os-cloud ${os_infra} security group show -c rules "Docker Swarm" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud ${os_infra} security group rule delete $r
done
fi
openstack --os-cloud ${os_infra} security group rule create \
--description "TCP traffic" --ingress --protocol tcp \
--remote-ip ${os_private_network} "Docker Swarm"
openstack --os-cloud ${os_infra} security group rule create \
--description "UDP traffic" --ingress --protocol udp \
--remote-ip ${os_private_network} "Docker Swarm"
openstack --os-cloud ${os_infra} security group create \
--description "Traffic between the Docker Swarm and the NFS server" \
"Docker Swarm NFS"
rules_to_delete=$(openstack --os-cloud ${os_infra} security group show -c rules "Docker Swarm NFS" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud ${os_infra} security group rule delete $r
done
fi
openstack --os-cloud ${os_infra} security group rule create \
--description "TCP traffic" --ingress --protocol tcp \
--remote-ip 192.168.1.0/24 "Docker Swarm NFS"
openstack --os-cloud ${os_infra} security group rule create \
--description "UDP traffic" --ingress --protocol udp \
--remote-ip 192.168.1.0/24 "Docker Swarm NFS"
# Server group (soft anti affinity)
openstack --os-cloud ${os_infra} server group create --policy soft-anti-affinity docker-managers
# VMs for the manager nodes
openstack --os-cloud ${os_infra} server create \
--image Ubuntu-Jammy-22.04 --flavor m1.medium \
--key-name adellam-ed25519 \
--network ${os_infra}-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 3 --max 3 \
--security-group default --security-group "Docker Swarm" \
--hint group=50d520fd-d63c-4a66-9dbf-dba271971299 \
swarm-mgr
for i in 1 2 3; do
openstack --os-cloud ${os_infra} volume create --size 100 --description "Swarm mgr $i data disk" swarm-mgr-data-$i
openstack --os-cloud ${os_infra} server add volume swarm-mgr-$i swarm-mgr-data-$i --device /dev/vdb
done
# VMs for the worker nodes (flavor m1.xlarge)
openstack --os-cloud ${os_infra} server group create --policy soft-anti-affinity docker-workers
openstack --os-cloud ${os_infra} server create \
--image Ubuntu-Jammy-22.04 --flavor m1.xlarge \
--key-name adellam-ed25519 \
--network ${os_infra}-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 5 --max 5 \
--security-group default --security-group "Docker Swarm" \
--hint group=5eaa8fbb-1ac4-4249-a33b-32a97ec99cd5 \
swarm-worker
for i in 1 2 3 4 5 ; do
openstack --os-cloud ${os_infra} volume create --size 100 --description "Swarm worker $i data disk" swarm-worker-data-$i
openstack --os-cloud ${os_infra} server add volume swarm-worker-$i swarm-worker-data-$i --device /dev/vdb
done
openstack --os-cloud ${os_infra} server create \
--image Ubuntu-Jammy-22.04 --flavor m1.medium \
--key-name adellam-ed25519 \
--network ${os_infra}-cloud-main \
--network swarm-nfs-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 1 --max 1 \
--security-group default --security-group "Docker Swarm NFS" \
swarm-nfs-server
openstack --os-cloud ${os_infra} volume create --size 100 --description "Swarm NFS server data disk" swarm-nfs-server-data
openstack --os-cloud ${os_infra} server add volume swarm-nfs-server swarm-nfs-server-data --device /dev/vdb
#
# Octavia
#
# Create the swarm load balancer
openstack --os-cloud ${os_infra} loadbalancer create --description "D4Science dev L4 balancer for Docker Swarm" --flavor octavia_amphora-mvcpu-ha --enable --wait --name "l4-swarm-dev" --vip-network-id ${os_infra}-cloud-main
# Create and assign a floating IP address
openstack --os-cloud ${os_infra} floating ip create --description "D4Science dev Docker Swarm load balancer" --dns-domain ${os_dns_zone}. --dns-name swarm-lb external-network
# Add the floating IP to the load balancer
# the vip_port_id is shown by 'openstack loadbalancer show l4-swarm-dev'
# openstack --os-cloud ${os_infra} floating ip set --port <load_balancer_vip_port_id> <floating_ip_id>
openstack --os-cloud ${os_infra} floating ip set --port 1a3077e6-68aa-43d2-b117-44a5bb6852cb 146.48.30.38
#
# listener for the haproxy stats
openstack --os-cloud ${os_infra} loadbalancer listener create --name "l4-swarm-haproxy-stats" --description "Stats of the Docker Swarm HAPROXY" --protocol TCP --protocol-port 8880 --allowed-cidr 146.48.122.27/32 --allowed-cidr 146.48.122.49/32 --allowed-cidr 146.48.28.10/32 --allowed-cidr 146.48.28.11/32 --enable --wait "l4-swarm-dev"
# Pool for the haproxy stats
openstack --os-cloud ${os_infra} loadbalancer pool create --name swarm-haproxy-stats --description "Stats of the Docker Swarm HAPROXY" --protocol TCP --listener "l4-swarm-haproxy-stats" --lb-algorithm ROUND_ROBIN --enable --disable-tls
# Add the members to the haproxy stats pool
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 8880 swarm-haproxy-stats
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 8880 swarm-haproxy-stats
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 8880 swarm-haproxy-stats
# Create a health check for the pool
openstack --os-cloud ${os_infra} loadbalancer healthmonitor create --name swarm-haproxy-stats-check --type TCP --enable --wait --delay 5 --timeout 5 --max-retries 3 swarm-haproxy-stats
#
# listener for the swarm haproxy plain http on port 8080
openstack --os-cloud ${os_infra} loadbalancer listener create --name "l4-swarm-haproxy-8080" --description "Docker Swarm HAPROXY port 8080" --protocol TCP --protocol-port 8080 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud ${os_infra} loadbalancer pool create --name swarm-haproxy-8080 --description "Docker Swarm HAPROXY 8080" --protocol PROXYV2 --listener l4-swarm-haproxy-8080 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Add the members to the port 8080 pool
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 8080 swarm-haproxy-8080
openstack --os-cloud ${os_infra} loadbalancer healthmonitor create --name swarm-haproxy-8080-check --type HTTP --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-8080
#
# listener for the swarm haproxy plain http on port 80
openstack --os-cloud ${os_infra} loadbalancer listener create --name "l4-swarm-haproxy-80" --description "Docker Swarm HAPROXY port 80" --protocol TCP --protocol-port 80 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud ${os_infra} loadbalancer pool create --name swarm-haproxy-80 --description "Docker Swarm HAPROXY 80" --protocol PROXYV2 --listener l4-swarm-haproxy-80 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Add the members to the port 80 pool
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 80 swarm-haproxy-80
openstack --os-cloud ${os_infra} loadbalancer healthmonitor create --name swarm-haproxy-http-check --type HTTP --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-80
#
# listener for the swarm haproxy on port 443
openstack --os-cloud ${os_infra} loadbalancer listener create --name "l4-swarm-haproxy-443" --description "Docker Swarm HAPROXY port 443" --protocol TCP --protocol-port 443 --allowed-cidr 0.0.0.0/0 --enable --wait "l4-swarm-dev"
# Pool for the listener
openstack --os-cloud ${os_infra} loadbalancer pool create --name swarm-haproxy-443 --description "Docker Swarm HAPROXY 443" --protocol PROXYV2 --listener l4-swarm-haproxy-443 --lb-algorithm LEAST_CONNECTIONS --enable --disable-tls
# Add the members to the port 443 pool
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-1 --address 10.1.29.205 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-2 --address 10.1.30.212 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud ${os_infra} loadbalancer member create --name swarm-mgr-3 --address 10.1.30.206 --protocol-port 443 swarm-haproxy-443
openstack --os-cloud ${os_infra} loadbalancer healthmonitor create --name swarm-haproxy-https-check --type HTTPS --http-method GET --url-path "/_haproxy_health_check" --enable --wait --delay 5 --timeout 5 --max-retries 3 --expected-codes 200 swarm-haproxy-443
#
# Add a CNAME for portainer
#
openstack --os-cloud ${os_infra} recordset create --record swarm-lb.${os_dns_zone}. --type CNAME ${os_dns_zone}. portainer
# CNAMES for CCP, CDN and the conductor
openstack --os-cloud ${os_infra} recordset create --record swarm-lb.${os_dns_zone}. --type CNAME ${os_dns_zone}. ccp
openstack --os-cloud ${os_infra} recordset create --record swarm-lb.${os_dns_zone}. --type CNAME ${os_dns_zone}. cdn
openstack --os-cloud ${os_infra} recordset create --record swarm-lb.${os_dns_zone}. --type CNAME ${os_dns_zone}. conductor

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
. ./variables.sh
openstack --os-cloud ${os_infra} zone create --email postmaster@isti.cnr.it ${os_dns_zone}.
openstack --os-cloud ${os_infra} network create --description "D4Science Preprod private network (use this as the main network)" --no-share --mtu 8942 --dns-domain ${os_dns_zone}. ${os_infra}-cloud-main
openstack --os-cloud ${os_infra} subnet create --network ${os_infra}-cloud-main --dhcp --dns-nameserver 146.48.29.97 --dns-nameserver 146.48.29.98 --dns-nameserver 146.48.29.99 --subnet-range ${os_private_network} --allocation-pool start=${os_private_network_prefix}.${os_private_network_start_octet}.5,end=${os_private_network_prefix}.${os_private_network_allocation_end_octet}.255 --gateway ${os_private_gw} --dns-publish-fixed-ip ${os_infra}-cloud-sub
openstack --os-cloud ${os_infra} router create --description "D4Science Preprod main router" --external-gateway external-network ${os_infra}-cloud-external-router
openstack --os-cloud ${os_infra} router add subnet ${os_infra}-cloud-external-router ${os_infra}-cloud-sub
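# Read-only verification of what was just created (a sketch, not part of the original setup):
openstack --os-cloud ${os_infra} zone show ${os_dns_zone}.
openstack --os-cloud ${os_infra} subnet show ${os_infra}-cloud-sub
openstack --os-cloud ${os_infra} router show ${os_infra}-cloud-external-router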

@@ -0,0 +1,41 @@
#!/usr/bin/env sh
. ./variables.sh
#
# PostgreSQL
#
# Network for the DB traffic
openstack --os-cloud ${os_infra} network create --no-share --mtu 8942 postgresql-srv-net
openstack --os-cloud ${os_infra} subnet create --network postgresql-srv-net --dhcp --gateway none --subnet-range 192.168.2.0/23 --allocation-pool start=192.168.2.5,end=192.168.3.150 --dns-publish-fixed-ip postgresql-srv-subnet
# Security group that allows postgresql traffic on the dedicated subnet only
openstack --os-cloud ${os_infra} security group create \
--description "PostgreSQL internal traffic" \
"PostgreSQL service"
rules_to_delete=$(openstack --os-cloud ${os_infra} security group show -c rules "PostgreSQL service" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud ${os_infra} security group rule delete $r
done
fi
openstack --os-cloud ${os_infra} security group rule create \
--description "TCP traffic" \
--ingress --protocol tcp --dst-port 5432 \
--remote-ip 192.168.2.0/24 "PostgreSQL service"
#
# PostgreSQL VM
openstack --os-cloud ${os_infra} server create \
--image Ubuntu-Jammy-22.04 --flavor m1.large \
--key-name adellam-ed25519 \
--network ${os_infra}-cloud-main \
--network postgresql-srv-net \
--user-data $HOME/Projects/infrascience/cloud-vms-data/cloud-init-openstack-ubuntu.sh \
--boot-from-volume 10 \
--min 1 --max 1 \
--security-group default --security-group "PostgreSQL service" \
postgresql-server
#
# Data volume for PostgreSQL
openstack --os-cloud ${os_infra} volume create --size 200 --description "PostgreSQL server data disk" postgresql-server-data
openstack --os-cloud ${os_infra} server add volume postgresql-server postgresql-server-data --device /dev/vdb

@@ -0,0 +1,70 @@
#!/usr/bin/env bash
. ./variables.sh
#
# Part of the security groups
#
# Remove the ingress rules from the default group
# openstack --os-cloud d4s-pre security group show default
rules_to_delete=$(openstack --os-cloud d4s-pre security group show -c rules default | grep ingress | grep -v protocol | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud d4s-pre security group rule delete $r
done
fi
openstack --os-cloud d4s-pre security group rule create \
--description "Allow ICMP" --ingress --protocol icmp \
--remote-ip 0.0.0.0/0 default
openstack --os-cloud d4s-pre security group rule create \
--description "Prometheus node exporter" --ingress --protocol tcp \
--dst-port "9100" \
--remote-ip 10.1.32.0/22 default
# SSH access
openstack --os-cloud d4s-pre security group create \
--description "Access to the SSH Proxy Jump server" \
"Limited SSH access"
rules_to_delete=$(openstack --os-cloud d4s-pre security group show -c rules "Limited SSH access" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud d4s-pre security group rule delete $r
done
fi
openstack --os-cloud d4s-pre security group rule create \
--description "Access from S2I2S vpn 1" --ingress --protocol tcp --dst-port "22" \
--remote-ip 146.48.28.10/32 "Limited SSH access"
openstack --os-cloud d4s-pre security group rule create \
--description "Access from S2I2S vpn 2" --ingress --protocol tcp --dst-port "22" \
--remote-ip 146.48.28.11/32 "Limited SSH access"
openstack --os-cloud d4s-pre security group rule create \
--description "Access from D4Science VPN 1" --ingress --protocol tcp --dst-port "22" \
--remote-ip 146.48.122.27/32 "Limited SSH access"
openstack --os-cloud d4s-pre security group rule create \
--description "Access from D4Science VPN 2" --ingress --protocol tcp --dst-port "22" \
--remote-ip 146.48.122.49/32 "Limited SSH access"
openstack --os-cloud d4s-pre security group rule create \
--description "Access from shell.d4science.org" --ingress --protocol tcp --dst-port "22" \
--remote-ip 146.48.122.95/32 "Limited SSH access"
openstack --os-cloud d4s-pre security group rule create \
--description "SSH from the InfraScience network (because of the masquerading on the Xen hypervisors)" --ingress --protocol tcp --dst-port "22" \
--remote-ip 146.48.122.0/23 "Limited SSH access"
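# Read-only check of the SSH rules defined above (sketch):
openstack --os-cloud d4s-pre security group rule list "Limited SSH access"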
# Limited HTTPS access
openstack --os-cloud d4s-pre security group create \
--description "Limited HTTPS and public HTTP, for servers with public IP addresses that can be accessed from our VPN only" \
"Limited HTTPS access"
rules_to_delete=$(openstack --os-cloud d4s-pre security group show -c rules "Limited HTTPS access" | grep egress | awk -F id= '{ print $2 }' | awk -F \' '{ print $2 }')
if [ -n "$rules_to_delete" ] ; then
for r in $(echo $rules_to_delete) ; do
openstack --os-cloud d4s-pre security group rule delete $r
done
fi
# HAPROXY, Prometheus, shell-jump: create the VMs with fixed IP addresses so that the security groups can be defined statically
# HAPROXY L7 security group id: 20ff5149-54d6-49b4-b7e4-31fef6f08b3f
dest_ports="8880 9999"
for port in $dest_ports ; do
openstack --os-cloud ${os_infra} security group rule create --ingress --protocol tcp --dst-port "$port" --remote-ip 10.1.30.180/32 20ff5149-54d6-49b4-b7e4-31fef6f08b3f
openstack --os-cloud ${os_infra} security group rule create --ingress --protocol tcp --dst-port "$port" --remote-ip 10.1.29.161/32 20ff5149-54d6-49b4-b7e4-31fef6f08b3f
done
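# Read-only check of the rules just added to the HAPROXY L7 group (sketch):
openstack --os-cloud ${os_infra} security group rule list 20ff5149-54d6-49b4-b7e4-31fef6f08b3f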

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Basic project setup
# - Network
# - Subnet
# - Router
# - DNS zone
# Variables that must be sourced by all the other scripts
#
os_infra=d4s-pre
os_dns_zone=cloud-pre.d4science.org
os_private_network_prefix="10.1"
os_private_network_start_octet=32
os_private_network_allocation_end_octet=35
os_private_network="${os_private_network_prefix}.${os_private_network_start_octet}.0/22"
os_private_gw=10.1.32.1
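# With the values above the project setup script ends up using:
#   os_private_network = 10.1.32.0/22
#   allocation pool    = 10.1.32.5 - 10.1.35.255
#   gateway            = 10.1.32.1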