# infrastructure-as-code/openstack-tf/modules/d4science_infra_setup/haproxy.tf

#
# HAPROXY L7 behind the main Octavia balancer
#
# FIXME: the Terraform OpenStack provider does not return the Octavia VRRP addresses, so they
# have to be looked up manually before creating the security group that allows the traffic
# between Octavia and the haproxy instances, e.g.:
#
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
# 5cc2354e-4465-4a1d-8390-c214e208c6de octavia-lb-vrrp-72392023-a774-4b58-a025-c1e99c5d152a fa:16:3e:62:24:2c [{'subnet_id': 'cd77a2fd-4a36-4254-b1d0-70b3874c6d04', 'ip_address': '10.1.34.232'}] ACTIVE
# 8aa4e97f-723d-4a2a-b79f-912fa7651653 octavia-lb-vrrp-fbfcf712-0ceb-4a38-82da-0c9ebef5dff3 fa:16:3e:79:62:a5 [{'subnet_id': 'cd77a2fd-4a36-4254-b1d0-70b3874c6d04', 'ip_address': '10.1.33.229'}] ACTIVE
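#
# One possible way to make the manually discovered addresses available to this module is a
# variable like the one referenced (commented out) in the security group rules below. This is
# only a sketch, assuming "octavia_information" is not already declared in the module's
# variables.tf; the default values are the addresses from the example lookup above and are
# environment specific.
#
# variable "octavia_information" {
#   type = map(string)
#   default = {
#     octavia_vrrp_ip_1 = "10.1.34.232/32"
#     octavia_vrrp_ip_2 = "10.1.33.229/32"
#   }
# }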
#
# Server group
#
resource "openstack_compute_servergroup_v2" "main_haproxy_l7" {
name = "main_haproxy_l7"
policies = ["anti-affinity"]
}
# Security group
resource "openstack_networking_secgroup_v2" "main_lb_to_haproxy_l7" {
  name                 = "traffic_from_main_lb_to_haproxy_l7"
  delete_default_rules = true
  description          = "Traffic coming from the main L4 lb directed to the haproxy l7 servers"
}
resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_1_peer" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Peer traffic from haproxy l7 1 to l7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 10000
port_range_max = 10000
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_2_peer" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Peer traffic from haproxy l7 2 to l7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 10000
port_range_max = 10000
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "octavia_to_haproxy_l7_80" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the octavia lb instance to HAPROXY l7 port 80"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
# remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
remote_ip_prefix = var.main_private_subnet.cidr
}
resource "openstack_networking_secgroup_rule_v2" "octavia_to_haproxy_l7_443" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the octavia lb instance to HAPROXY l7 port 443"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
# remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
remote_ip_prefix = var.main_private_subnet.cidr
}
resource "openstack_networking_secgroup_rule_v2" "octavia_to_haproxy_l7_8880" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the octavia lb instance to HAPROXY l7 port 8880"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8880
port_range_max = 8880
# remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
remote_ip_prefix = var.main_private_subnet.cidr
}
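# If the VRRP addresses are known (for instance through the "octavia_information" variable
# sketched at the top of this file), the three subnet-wide rules above could be narrowed to
# per-address rules. A commented sketch only, not applied here because the addresses cannot
# be discovered by Terraform itself:
#
# resource "openstack_networking_secgroup_rule_v2" "octavia_vrrp_to_haproxy_l7" {
#   for_each = {
#     for pair in setproduct([80, 443, 8880], values(var.octavia_information)) :
#     "${pair[0]}-${pair[1]}" => pair
#   }
#   security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
#   description       = "Traffic from the Octavia VRRP address ${each.value[1]} to HAPROXY l7 port ${each.value[0]}"
#   direction         = "ingress"
#   ethertype         = "IPv4"
#   protocol          = "tcp"
#   port_range_min    = each.value[0]
#   port_range_max    = each.value[0]
#   remote_ip_prefix  = each.value[1]
# }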
# Instance
resource "openstack_compute_instance_v2" "main_haproxy_l7" {
  count                   = var.haproxy_l7_data.vm_count
  name                    = format("%s-%02d", var.haproxy_l7_data.name, count.index + 1)
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.haproxy_l7_data.flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [var.default_security_group_name, openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.name]
  scheduler_hints {
    group = openstack_compute_servergroup_v2.main_haproxy_l7.id
  }
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }
  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.main_haproxy_l7_ip[count.index]
  }
  user_data = file(var.ubuntu2204_data_file)
  # Do not replace the instances when the ssh key, the user data or the network change
  lifecycle {
    ignore_changes = [
      key_pair, user_data, network
    ]
  }
}
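# The fixed addresses of these instances are what the main Octavia balancer forwards to. A
# commented sketch of an output that would expose them (e.g. for registering the pool members
# elsewhere), assuming no equivalent output already exists in the module's outputs.tf:
#
# output "main_haproxy_l7_ips" {
#   description = "Fixed IPs of the HAPROXY L7 instances"
#   value       = openstack_compute_instance_v2.main_haproxy_l7[*].network[0].fixed_ip_v4
# }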