#
# Server groups for both the masters and the workers
#
resource "openstack_compute_servergroup_v2" "swarm_masters" {
  name     = "swarm_masters"
  policies = ["anti-affinity"]
}

resource "openstack_compute_servergroup_v2" "swarm_workers" {
  name     = "swarm_workers"
  policies = ["soft-anti-affinity"]
}

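# "anti-affinity" is a hard constraint: the scheduler refuses to place two
# manager instances on the same hypervisor. "soft-anti-affinity" is best
# effort: workers are spread across hosts when possible, but can still share
# a host if the cluster has no free capacity.
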
#
# Network for the NFS traffic
#
resource "openstack_networking_network_v2" "swarm_nfs_net" {
  name                  = var.swarm_nfs_private_network.network_name
  admin_state_up        = "true"
  external              = "false"
  description           = var.swarm_nfs_private_network.network_description
  dns_domain            = var.dns_zone.zone_name
  mtu                   = var.mtu_size
  port_security_enabled = true
  shared                = false
  region                = var.main_region
}

# Subnet
resource "openstack_networking_subnet_v2" "swarm_nfs_subnet" {
  name            = "swarm-nfs-net"
  description     = "Subnet used by the Swarm cluster and the NFS service"
  network_id      = openstack_networking_network_v2.swarm_nfs_net.id
  cidr            = var.swarm_nfs_private_network.network_cidr
  dns_nameservers = var.resolvers_ip
  ip_version      = 4
  enable_dhcp     = true
  no_gateway      = true
  allocation_pool {
    start = var.swarm_nfs_private_network.allocation_pool_start
    end   = var.swarm_nfs_private_network.allocation_pool_end
  }
}

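# A minimal sketch of the object this file expects in
# var.swarm_nfs_private_network. The authoritative definition lives in the
# variables file; the field names below are simply the ones referenced here,
# and the types are assumptions:
#
# variable "swarm_nfs_private_network" {
#   type = object({
#     network_name          = string
#     network_description   = string
#     network_cidr          = string
#     allocation_pool_start = string
#     allocation_pool_end   = string
#     server_ip             = string
#   })
# }
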
#
# Security groups
#
resource "openstack_networking_secgroup_v2" "swarm_internal_traffic" {
  name                 = "swarm_internal_docker_traffic"
  delete_default_rules = "true"
  description          = "Traffic between the Docker Swarm nodes"
}

resource "openstack_networking_secgroup_rule_v2" "everything_udp" {
  security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
  description       = "UDP traffic between Swarm nodes"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  remote_ip_prefix  = var.main_private_subnet.cidr
}

resource "openstack_networking_secgroup_rule_v2" "everything_tcp" {
  security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
  description       = "TCP traffic between Swarm nodes"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  remote_ip_prefix  = var.main_private_subnet.cidr
}

resource "openstack_networking_secgroup_v2" "swarm_nfs_traffic" {
  name                 = "docker_swarm_nfs"
  delete_default_rules = "true"
  description          = "Traffic between Docker Swarm and the NFS service"
}

resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_udp" {
  security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
  description       = "UDP traffic"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  remote_ip_prefix  = var.swarm_nfs_private_network.network_cidr
}

resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_tcp" {
  security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
  description       = "TCP traffic"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  remote_ip_prefix  = var.swarm_nfs_private_network.network_cidr
}

#
# Swarm Manager VMs
#
# Instance
resource "openstack_compute_instance_v2" "docker_swarm_managers" {
  count                   = var.docker_swarm_data.mgr_count
  name                    = format("%s-%02d", var.docker_swarm_data.mgr_name, count.index + 1)
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.docker_swarm_data.mgr_flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_internal_traffic.name]

  scheduler_hints {
    group = openstack_compute_servergroup_v2.swarm_masters.id
  }

  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  block_device {
    source_type           = "blank"
    volume_size           = var.docker_swarm_data.mgr_data_disk_size
    boot_index            = -1
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.swarm_managers_ip[count.index]
  }

  network {
    name = var.swarm_nfs_private_network.network_name
  }

  user_data  = file("${var.ubuntu2204_data_file}")
  depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]

  # Do not replace the instance when the SSH key changes
  lifecycle {
    ignore_changes = [
      # Changes to the key pair, the user data or the attached networks are
      # handled out of band and must not trigger a rebuild of the instance.
      key_pair, user_data, network
    ]
  }
}

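# A rough sketch of the fields this plan expects in var.docker_swarm_data,
# collected from the references in this file (managers, workers, the NFS
# server and the HAPROXY pool members below). The types and the exact
# definition are assumptions; the real one lives in the variables file:
#
# variable "docker_swarm_data" {
#   type = object({
#     mgr_count                   = number
#     mgr_name                    = string
#     mgr_flavor                  = string
#     mgr_data_disk_size          = number
#     mgr1_ip                     = string
#     mgr2_ip                     = string
#     mgr3_ip                     = string
#     worker_count                = number
#     worker_name                 = string
#     worker_flavor               = string
#     worker_data_disk_size       = number
#     nfs_server_name             = string
#     nfs_server_flavor           = string
#     nfs_server_data_disk_name   = string
#     nfs_server_data_disk_size   = number
#     nfs_server_data_disk_device = string
#   })
# }
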
# Swarm worker nodes
resource "openstack_compute_instance_v2" "docker_swarm_workers" {
  count                   = var.docker_swarm_data.worker_count
  name                    = format("%s-%02d", var.docker_swarm_data.worker_name, count.index + 1)
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.docker_swarm_data.worker_flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = ["default", var.default_security_group_name, openstack_networking_secgroup_v2.swarm_internal_traffic.name]

  scheduler_hints {
    group = openstack_compute_servergroup_v2.swarm_workers.id
  }

  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  block_device {
    source_type           = "blank"
    volume_size           = var.docker_swarm_data.worker_data_disk_size
    boot_index            = -1
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name = var.main_private_network.name
  }

  network {
    name = var.swarm_nfs_private_network.network_name
  }

  network {
    name = var.networks_list.shared_postgresql
  }

  user_data  = file("${var.ubuntu2204_data_file}")
  depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]

  # Do not replace the instance when the SSH key changes
  lifecycle {
    ignore_changes = [
      # Changes to the key pair, the user data or the attached networks are
      # handled out of band and must not trigger a rebuild of the instance.
      key_pair, user_data, network
    ]
  }
}

#
# Manila NFS Share
#
# Managers
resource "openstack_networking_port_v2" "swarm_mgr_nfs_port" {
  count          = var.docker_swarm_data.mgr_count
  name           = format("%s-%02d", var.docker_swarm_data.mgr_name, count.index + 1)
  network_id     = data.terraform_remote_state.privnet_dns_router.outputs.storage_nfs_network_id
  admin_state_up = "true"
  fixed_ip {
    subnet_id = data.terraform_remote_state.privnet_dns_router.outputs.storage_nfs_subnet_id
  }
}

resource "openstack_networking_port_secgroup_associate_v2" "swarm_mgr_nfs_port_secgroup" {
  count              = var.docker_swarm_data.mgr_count
  port_id            = openstack_networking_port_v2.swarm_mgr_nfs_port[count.index].id
  security_group_ids = [data.terraform_remote_state.privnet_dns_router.outputs.nfs_share_no_ingress_secgroup_id]
}

resource "openstack_compute_interface_attach_v2" "nfs_port_to_swarm_mgr" {
  count       = var.docker_swarm_data.mgr_count
  instance_id = openstack_compute_instance_v2.docker_swarm_managers[count.index].id
  port_id     = openstack_networking_port_v2.swarm_mgr_nfs_port[count.index].id
}

# Workers
resource "openstack_networking_port_v2" "swarm_workers_nfs_port" {
  count          = var.docker_swarm_data.worker_count
  name           = format("%s-%02d", var.docker_swarm_data.worker_name, count.index + 1)
  network_id     = data.terraform_remote_state.privnet_dns_router.outputs.storage_nfs_network_id
  admin_state_up = "true"
  fixed_ip {
    subnet_id = data.terraform_remote_state.privnet_dns_router.outputs.storage_nfs_subnet_id
  }
}

resource "openstack_networking_port_secgroup_associate_v2" "swarm_worker_nfs_port_secgroup" {
  count              = var.docker_swarm_data.worker_count
  port_id            = openstack_networking_port_v2.swarm_workers_nfs_port[count.index].id
  security_group_ids = [data.terraform_remote_state.privnet_dns_router.outputs.nfs_share_no_ingress_secgroup_id]
}

resource "openstack_compute_interface_attach_v2" "nfs_port_to_swarm_workers" {
  count       = var.docker_swarm_data.worker_count
  instance_id = openstack_compute_instance_v2.docker_swarm_workers[count.index].id
  port_id     = openstack_networking_port_v2.swarm_workers_nfs_port[count.index].id
}

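# The storage network, subnet and security group IDs used above come from the
# outputs of another Terraform state. A sketch of how that data source is
# typically declared (the backend type and path are assumptions, not taken
# from this repository):
#
# data "terraform_remote_state" "privnet_dns_router" {
#   backend = "local"
#   config = {
#     path = "../project-setup/terraform.tfstate"
#   }
# }
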
# NFS standalone server
# Block device
resource "openstack_blockstorage_volume_v3" "swarm_nfs_data_vol" {
  name = var.docker_swarm_data.nfs_server_data_disk_name
  size = var.docker_swarm_data.nfs_server_data_disk_size
}

# Instance
resource "openstack_compute_instance_v2" "swarm_nfs_server" {
  name                    = var.docker_swarm_data.nfs_server_name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.docker_swarm_data.nfs_server_flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_nfs_traffic.name]

  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name = var.main_private_network.name
  }

  network {
    name        = var.swarm_nfs_private_network.network_name
    fixed_ip_v4 = var.swarm_nfs_private_network.server_ip
  }

  user_data  = file("${var.ubuntu2204_data_file}")
  depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
}

# Attach the additional volume
resource "openstack_compute_volume_attach_v2" "swarm_nfs_data_attach_vol" {
  instance_id = openstack_compute_instance_v2.swarm_nfs_server.id
  volume_id   = openstack_blockstorage_volume_v3.swarm_nfs_data_vol.id
  device      = var.docker_swarm_data.nfs_server_data_disk_device
  depends_on  = [openstack_compute_instance_v2.swarm_nfs_server]
}

#
# Octavia
#
# Swarm load balancer. L4, backed by Octavia
resource "openstack_lb_loadbalancer_v2" "swarm_lb" {
  vip_subnet_id = var.main_private_subnet_id
  name          = var.octavia_swarm_data.swarm_lb_name
  description   = var.octavia_swarm_data.swarm_lb_description
  flavor_id     = var.octavia_swarm_data.octavia_flavor_id
  vip_address   = var.octavia_swarm_data.swarm_octavia_main_ip
  # availability_zone = var.availability_zones_names.availability_zone_no_gpu
  loadbalancer_provider = "amphora"
}

# Allocate a floating IP
resource "openstack_networking_floatingip_v2" "swarm_lb_ip" {
  pool = var.floating_ip_pools.main_public_ip_pool
  # The DNS association does not work because of a bug in the OpenStack API
  # dns_name   = "main-lb"
  # dns_domain = var.dns_zone.zone_name
  description = var.octavia_swarm_data.swarm_lb_description
}

resource "openstack_networking_floatingip_associate_v2" "swarm_lb" {
  floating_ip = openstack_networking_floatingip_v2.swarm_lb_ip.address
  port_id     = openstack_lb_loadbalancer_v2.swarm_lb.vip_port_id
}

locals {
  swarm_recordset_name     = "${var.octavia_swarm_data.swarm_lb_hostname}.${var.dns_zone.zone_name}"
  portainer_recordset_name = "portainer.${var.dns_zone.zone_name}"
  ccp_recordset_name       = "ccp.${var.dns_zone.zone_name}"
  cdn_recordset_name       = "cdn.${var.dns_zone.zone_name}"
  conductor_recordset_name = "conductor.${var.dns_zone.zone_name}"
}

resource "openstack_dns_recordset_v2" "swarm_lb_dns_recordset" {
|
|
zone_id = var.dns_zone_id
|
|
name = local.swarm_recordset_name
|
|
description = "Public IP address of the load balancer in front of Docker Swarm"
|
|
ttl = 8600
|
|
type = "A"
|
|
records = [openstack_networking_floatingip_v2.swarm_lb_ip.address]
|
|
}
|
|
|
|
resource "openstack_dns_recordset_v2" "swarm_portainer_dns_recordset" {
|
|
zone_id = var.dns_zone_id
|
|
name = local.portainer_recordset_name
|
|
description = "Portainer hostname"
|
|
ttl = 8600
|
|
type = "CNAME"
|
|
records = [local.swarm_recordset_name]
|
|
}
|
|
|
|
resource "openstack_dns_recordset_v2" "ccp_dns_recordset" {
|
|
zone_id = var.dns_zone_id
|
|
name = local.ccp_recordset_name
|
|
description = "CCP hostname"
|
|
ttl = 8600
|
|
type = "CNAME"
|
|
records = [local.swarm_recordset_name]
|
|
}
|
|
|
|
resource "openstack_dns_recordset_v2" "cdn_dns_recordset" {
|
|
zone_id = var.dns_zone_id
|
|
name = local.cdn_recordset_name
|
|
description = "CDN hostname"
|
|
ttl = 8600
|
|
type = "CNAME"
|
|
records = [local.swarm_recordset_name]
|
|
}
|
|
|
|
resource "openstack_dns_recordset_v2" "conductor_dns_recordset" {
|
|
zone_id = var.dns_zone_id
|
|
name = local.conductor_recordset_name
|
|
description = "Conductor hostname"
|
|
ttl = 8600
|
|
type = "CNAME"
|
|
records = [local.swarm_recordset_name]
|
|
}
|
|
|
|
# Main HAPROXY stats listener
resource "openstack_lb_listener_v2" "swarm_haproxy_stats_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 8880
  description     = "Listener for the stats of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_stats_listener"
  allowed_cidrs   = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
}

resource "openstack_lb_pool_v2" "swarm_haproxy_stats_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_stats_listener.id
  protocol    = "TCP"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-stats"
  description = "Pool for the stats of the main HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
}

resource "openstack_lb_members_v2" "swarm_haproxy_stats_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 8880
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 8880
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 8880
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_stats_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
  name           = "swarm_haproxy_stats_monitor"
  type           = "TCP"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

# HAPROXY HTTP
resource "openstack_lb_listener_v2" "swarm_haproxy_http_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 80
  description     = "HTTP listener of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_http_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_http_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_http_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-http"
  description = "Pool for the HTTP listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_http_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 80
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 80
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 80
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_http_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
  name           = "swarm_haproxy_http_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

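# Note on the pool protocol: the listeners stay plain TCP so that TLS is
# terminated by HAPROXY on the Swarm managers, while the HTTP/HTTPS/8080 pools
# use PROXYV2 so that Octavia prepends a PROXY protocol v2 header and HAPROXY
# can recover the real client IP. This assumes the HAPROXY instances accept
# the PROXY protocol on those ports (e.g. "accept-proxy" on their bind lines).
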
# HAPROXY HTTPS
resource "openstack_lb_listener_v2" "swarm_haproxy_https_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 443
  description     = "HTTPS listener of the main HAPROXY instances"
  name            = "swarm_haproxy_https_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_https_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_https_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-https"
  description = "Pool for the HTTPS listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_https_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 443
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 443
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 443
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_https_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
  name           = "swarm_haproxy_https_monitor"
  type           = "HTTPS"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

# HAPROXY HTTP on port 8080
resource "openstack_lb_listener_v2" "swarm_haproxy_8080_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 8080
  description     = "HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_8080_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_8080_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_8080_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-http-8080"
  description = "Pool for the HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_8080_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 8080
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 8080
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 8080
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_8080_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
  name           = "swarm_haproxy_8080_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

output "swarm_loadbalancer_ip" {
|
|
description = "Docker Swarm Load balancer IP address"
|
|
value = openstack_lb_loadbalancer_v2.swarm_lb.vip_address
|
|
}
|
|
|
|
output "swarm_manager_nodes" {
|
|
description = "Docker Swarm Manager nodes data"
|
|
value = openstack_compute_instance_v2.docker_swarm_managers
|
|
sensitive = true
|
|
}
|
|
|
|
output "swarm_worker_nodes" {
|
|
description = "Docker Swarm Worker nodes data"
|
|
value = openstack_compute_instance_v2.docker_swarm_workers
|
|
sensitive = true
|
|
}
|
|
|
|
output "swarm_managers_nfs_ip_ports" {
|
|
description = "IP addresses in the share NFS network"
|
|
value = openstack_networking_port_v2.swarm_mgr_nfs_port
|
|
}
|
|
output "swarm_workers_nfs_ip_ports" {
|
|
description = "IP addresses in the share NFS network"
|
|
value = openstack_networking_port_v2.swarm_workers_nfs_port
|
|
}
|
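
# A sketch of how another plan could consume these outputs through a
# terraform_remote_state data source (the data source name and backend
# settings are assumptions, for illustration only):
#
# data "terraform_remote_state" "swarm" {
#   backend = "local"
#   config = {
#     path = "../docker-swarm/terraform.tfstate"
#   }
# }
#
# # e.g. reference the load balancer address from the other plan:
# # data.terraform_remote_state.swarm.outputs.swarm_loadbalancer_ip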