K8s resources at garr-ct1.

Andrea Dell'Amico 2024-07-02 17:56:51 +02:00
parent e28b7ed747
commit dbdee08c44
Signed by: andrea.dellamico
GPG Key ID: 147ABE6CEB9E20FF
6 changed files with 4942 additions and 321 deletions


@ -26,3 +26,15 @@ variable "k8s_workers_data" {
    availability_zone_hints = "nova"
  }
}

variable "octavia_kubernetes_data" {
  type = map(string)
  default = {
    k8s_lb_name        = "d4s-garr-ct1-k8s-lb-l4"
    k8s_lb_description = "L4 balancer that serves the D4Science Kubernetes at GARR-CT1"
    # openstack --os-cloud garr-ct1 loadbalancer flavor list
    # does not return any flavor
    # octavia_flavor    = ""
    # octavia_flavor_id = ""
  }
}


@ -0,0 +1,94 @@
#
# Octavia
#
# Kubernetes load balancer. L4, backed by Octavia
resource "openstack_lb_loadbalancer_v2" "k8s_lb" {
  vip_subnet_id = data.terraform_remote_state.privnet_dns_router.outputs.main_private_subnet.id
  name          = var.octavia_kubernetes_data.k8s_lb_name
  description   = var.octavia_kubernetes_data.k8s_lb_description
  # flavor_id   = var.octavia_kubernetes_data.octavia_flavor_id
  loadbalancer_provider = "amphora"
}

# Allocate a floating IP
resource "openstack_networking_floatingip_v2" "k8s_lb_ip" {
  pool = data.terraform_remote_state.privnet_dns_router.outputs.external_network.name
  # The DNS association does not work because of a bug in the OpenStack API
  # dns_name   = "main-lb"
  # dns_domain = var.dns_zone.zone_name
  description = var.octavia_kubernetes_data.k8s_lb_description
}

resource "openstack_networking_floatingip_associate_v2" "k8s_lb" {
  floating_ip = openstack_networking_floatingip_v2.k8s_lb_ip.address
  port_id     = openstack_lb_loadbalancer_v2.k8s_lb.vip_port_id
}
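Until the floating IP/DNS association works, the A record can be managed with an explicit recordset, as the removed Swarm configuration below does. The following is only a minimal sketch, assuming that dns_zone_id and dns_zone variables (and the "main-lb" hostname from the commented dns_name) are also available in this context:

resource "openstack_dns_recordset_v2" "k8s_lb_dns_recordset" {
  zone_id     = var.dns_zone_id
  name        = "main-lb.${var.dns_zone.zone_name}"
  description = "Public IP address of the load balancer in front of the k8s ingress"
  ttl         = 8600
  type        = "A"
  records     = [openstack_networking_floatingip_v2.k8s_lb_ip.address]
}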
# HAPROXY HTTP
resource "openstack_lb_listener_v2" "k8s_http_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.k8s_lb.id
  protocol        = "TCP"
  protocol_port   = 80
  description     = "HTTP listener of the k8s ingress"
  name            = "k8s_http_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "k8s_http_pool" {
  listener_id = openstack_lb_listener_v2.k8s_http_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "k8s-lb-http"
  description = "Pool for the HTTP listener of the k8s service"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}
resource "openstack_lb_members_v2" "k8s_http_pool_members" {
count = var.k8s_controllers_data.count
pool_id = openstack_lb_pool_v2.k8s_http_pool.id
member {
name = format("k8s controller %02d", count.index + 1)
address = openstack_compute_instance_v2.docker_k8s_controllers[count.index].network.0.fixed_ip_v4
protocol_port = 80
}
}
# HAPROXY HTTPS
resource "openstack_lb_listener_v2" "k8s_https_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.k8s_lb.id
  protocol        = "TCP"
  protocol_port   = 443
  description     = "HTTPS listener of the main k8s service"
  name            = "k8s_https_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "k8s_https_pool" {
  listener_id = openstack_lb_listener_v2.k8s_https_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "k8s-lb-https"
  description = "Pool for the HTTPS listener of the k8s service"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}
resource "openstack_lb_members_v2" "k8s_https_pool_members" {
count = var.k8s_controllers_data.count
pool_id = openstack_lb_pool_v2.k8s_https_pool.id
member {
name = format("k8s controller %02d", count.index + 1)
address = openstack_compute_instance_v2.docker_k8s_controllers[count.index].network.0.fixed_ip_v4
protocol_port = 443
}
}
output "k8s_loadbalancer_ip" {
description = "Kubernetes Load balancer IP address"
value = openstack_lb_loadbalancer_v2.k8s_lb.vip_address
}
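Note that, unlike the removed Swarm configuration below, the new pools have no health monitors attached, so their members are never marked down. A monitor can be attached per pool; the following is only a sketch for the HTTP pool, with the resource name and the timing values chosen here rather than taken from this commit:

resource "openstack_lb_monitor_v2" "k8s_http_monitor" {
  pool_id        = openstack_lb_pool_v2.k8s_http_pool.id
  name           = "k8s_http_monitor"
  type           = "TCP"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}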


@ -1,319 +0,0 @@
#
# Octavia
#
# Swarm load balancer. L4, backed by Octavia
resource "openstack_lb_loadbalancer_v2" "swarm_lb" {
  vip_subnet_id = var.main_private_subnet_id
  name          = var.octavia_swarm_data.swarm_lb_name
  description   = var.octavia_swarm_data.swarm_lb_description
  flavor_id     = var.octavia_swarm_data.octavia_flavor_id
  vip_address   = var.octavia_swarm_data.swarm_octavia_main_ip
  # availability_zone = var.availability_zones_names.availability_zone_no_gpu
  loadbalancer_provider = "amphora"
}

# Allocate a floating IP
resource "openstack_networking_floatingip_v2" "swarm_lb_ip" {
  pool = var.floating_ip_pools.main_public_ip_pool
  # The DNS association does not work because of a bug in the OpenStack API
  # dns_name   = "main-lb"
  # dns_domain = var.dns_zone.zone_name
  description = var.octavia_swarm_data.swarm_lb_description
}

resource "openstack_networking_floatingip_associate_v2" "swarm_lb" {
  floating_ip = openstack_networking_floatingip_v2.swarm_lb_ip.address
  port_id     = openstack_lb_loadbalancer_v2.swarm_lb.vip_port_id
}

locals {
  swarm_recordset_name     = "${var.octavia_swarm_data.swarm_lb_hostname}.${var.dns_zone.zone_name}"
  portainer_recordset_name = "portainer.${var.dns_zone.zone_name}"
  ccp_recordset_name       = "ccp.${var.dns_zone.zone_name}"
  cdn_recordset_name       = "cdn.${var.dns_zone.zone_name}"
  conductor_recordset_name = "conductor.${var.dns_zone.zone_name}"
}

resource "openstack_dns_recordset_v2" "swarm_lb_dns_recordset" {
  zone_id     = var.dns_zone_id
  name        = local.swarm_recordset_name
  description = "Public IP address of the load balancer in front of Docker Swarm"
  ttl         = 8600
  type        = "A"
  records     = [openstack_networking_floatingip_v2.swarm_lb_ip.address]
}
resource "openstack_dns_recordset_v2" "swarm_portainer_dns_recordset" {
zone_id = var.dns_zone_id
name = local.portainer_recordset_name
description = "Portainer hostname"
ttl = 8600
type = "CNAME"
records = [local.swarm_recordset_name]
}
resource "openstack_dns_recordset_v2" "ccp_dns_recordset" {
zone_id = var.dns_zone_id
name = local.ccp_recordset_name
description = "CCP hostname"
ttl = 8600
type = "CNAME"
records = [local.swarm_recordset_name]
}
resource "openstack_dns_recordset_v2" "cdn_dns_recordset" {
zone_id = var.dns_zone_id
name = local.cdn_recordset_name
description = "CDN hostname"
ttl = 8600
type = "CNAME"
records = [local.swarm_recordset_name]
}
resource "openstack_dns_recordset_v2" "conductor_dns_recordset" {
zone_id = var.dns_zone_id
name = local.conductor_recordset_name
description = "Conductor hostname"
ttl = 8600
type = "CNAME"
records = [local.swarm_recordset_name]
}
# Main HAPROXY stats listener
resource "openstack_lb_listener_v2" "swarm_haproxy_stats_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 8880
description = "Listener for the stats of the Docker Swarm HAPROXY instances"
name = "swarm_haproxy_stats_listener"
allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
}
resource "openstack_lb_pool_v2" "swarm_haproxy_stats_pool" {
listener_id = openstack_lb_listener_v2.swarm_haproxy_stats_listener.id
protocol = "TCP"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-stats"
description = "Pool for the stats of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
}
resource "openstack_lb_members_v2" "swarm_haproxy_stats_pool_members" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 8880
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 8880
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 8880
}
}
resource "openstack_lb_monitor_v2" "swarm_haproxy_stats_monitor" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
name = "swarm_haproxy_stats_monitor"
type = "TCP"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# HAPROXY HTTP
resource "openstack_lb_listener_v2" "swarm_haproxy_http_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 80
  description     = "HTTP listener of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_http_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_http_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_http_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-http"
  description = "Pool for the HTTP listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_http_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 80
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 80
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 80
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_http_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
  name           = "swarm_haproxy_http_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

# HAPROXY HTTPS
resource "openstack_lb_listener_v2" "swarm_haproxy_https_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 443
  description     = "HTTPS listener of the main HAPROXY instances"
  name            = "swarm_haproxy_https_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_https_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_https_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-https"
  description = "Pool for the HTTPS listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_https_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 443
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 443
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 443
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_https_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
  name           = "swarm_haproxy_https_monitor"
  type           = "HTTPS"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# HAPROXY HTTP on port 8080
resource "openstack_lb_listener_v2" "swarm_haproxy_8080_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 8080
  description     = "HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_8080_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_8080_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_8080_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-http-8080"
  description = "Pool for the HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_8080_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 8080
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 8080
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 8080
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_8080_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
  name           = "swarm_haproxy_8080_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

output "swarm_loadbalancer_ip" {
  description = "Docker Swarm Load balancer IP address"
  value       = openstack_lb_loadbalancer_v2.swarm_lb.vip_address
}

output "swarm_manager_nodes" {
  description = "Docker Swarm Manager nodes data"
  value       = openstack_compute_instance_v2.docker_swarm_managers
  sensitive   = true
}

output "swarm_worker_nodes" {
  description = "Docker Swarm Worker nodes data"
  value       = openstack_compute_instance_v2.docker_swarm_workers
  sensitive   = true
}
output "swarm_managers_nfs_ip_ports" {
description = "IP addresses in the share NFS network"
value = openstack_networking_port_v2.swarm_mgr_nfs_port
}
output "swarm_workers_nfs_ip_ports" {
description = "IP addresses in the share NFS network"
value = openstack_networking_port_v2.swarm_workers_nfs_port
}


@ -4,6 +4,6 @@ output "k8s_controllers" {
}

output "k8s_workers" {
  value     = openstack_compute_instance_v2.docker_k8s_workers
  value     = openstack_compute_instance_v2.docker_k8s_workers
  sensitive = true
}

File diff suppressed because it is too large.


@ -2,7 +2,7 @@ variable "ubuntu_2204" {
  type = map(string)
  default = {
    name           = "Ubuntu 22.04 - GARR"
    uuid           = "09d879ad-70ee-4f05-9fd8-ffb7b76e2d1b"
    uuid           = "94618f26-de42-4b1a-80a0-a88b73391a0a"
    user_data_file = "../../openstack_vm_data_scripts/ubuntu2204.sh"
  }
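The image UUID above changes whenever GARR replaces the base image. Looking the image up by name with a data source avoids hardcoding the UUID; a minimal sketch, assuming the image keeps the name used above:

data "openstack_images_image_v2" "ubuntu_2204" {
  name        = "Ubuntu 22.04 - GARR"
  most_recent = true
}

The instances would then reference data.openstack_images_image_v2.ubuntu_2204.id instead of var.ubuntu_2204.uuid.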
}