Cleanup: prepare for modules.
This commit is contained in:
parent b915cbb949
commit dc6f15bedf
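The recurring change in this commit swaps every module.common_variables.* reference for a plain var.* input, so the files can be collected into the d4science_infra_setup module introduced below. A minimal sketch of the intended wiring, assuming the module declares variables with the same names as the var.* references in the hunks (the diff itself passes no inputs, so defaults are presumably relied on):

module "d4science_infra_setup" {
  source = "../../modules/d4science_infra_setup"

  # assumed inputs, named after the var.* references below:
  # default_security_group_name = "default_for_all"
  # basic_services_ip           = { prometheus = "10.1.40.10", ... }
}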
@@ -5,11 +5,11 @@ resource "openstack_blockstorage_volume_v3" "prometheus_data_vol" {
 }

 resource "openstack_compute_instance_v2" "prometheus_server" {
 name = var.prometheus_server_data.name
 availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
 flavor_name = var.prometheus_server_data.flavor
 key_pair = module.ssh_settings.ssh_key_name
-security_groups = [var.default_security_group_name,openstack_networking_secgroup_v2.restricted_web.name,openstack_networking_secgroup_v2.prometheus_access_from_grafana.name]
+security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.restricted_web.name, openstack_networking_secgroup_v2.prometheus_access_from_grafana.name]
 block_device {
 uuid = var.ubuntu_2204.uuid
 source_type = "image"
@@ -20,23 +20,23 @@ resource "openstack_compute_instance_v2" "prometheus_server" {
 }

 network {
 name = var.main_private_network.name
 fixed_ip_v4 = var.basic_services_ip.prometheus
 }
-user_data = "${file("${var.ubuntu2204_data_file}")}"
+user_data = file("${var.ubuntu2204_data_file}")
 }

 resource "openstack_compute_volume_attach_v2" "prometheus_data_attach_vol" {
 instance_id = openstack_compute_instance_v2.prometheus_server.id
 volume_id = openstack_blockstorage_volume_v3.prometheus_data_vol.id
 device = var.prometheus_server_data.vol_data_device
 }

 # Floating IP and DNS record
 resource "openstack_networking_floatingip_v2" "prometheus_server_ip" {
 pool = var.floating_ip_pools.main_public_ip_pool
 # The DNS association does not work because of a bug in the OpenStack API
 description = "Prometheus server"
 }

 resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
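The user_data change above drops the legacy interpolation-only wrapper. Under Terraform 0.12+ syntax the inner "${...}" is redundant as well, so an equivalent, fully reduced form would be:

user_data = file(var.ubuntu2204_data_file)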
@@ -45,7 +45,7 @@ resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
 }

 locals {
 prometheus_recordset_name = "${var.prometheus_server_data.name}.${var.dns_zone.zone_name}"
 alertmanager_recordset_name = "alertmanager.${var.dns_zone.zone_name}"
 }

@@ -1 +0,0 @@
-../../common_setups/15-security-groups.tf
@@ -1 +0,0 @@
-../../common_setups/20-octavia.tf
@@ -1 +0,0 @@
-../../common_setups/25-ssh-jump-proxy.tf
@@ -1 +0,0 @@
-../../common_setups/30-internal-ca.tf
@@ -1 +0,0 @@
-../../common_setups/35-prometheus.tf
@@ -1 +0,0 @@
-../../common_setups/40-postgresql.tf
@@ -1 +0,0 @@
-../../common_setups/45-haproxy.tf
@@ -1 +0,0 @@
-../../modules/docker_swarm/docker-swarm.tf
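The eight one-line removals above match how git renders deleted symlinks: a symlink is stored as a file whose content is the link target, so each hunk records a per-environment link into common_setups (or the docker_swarm module) going away. Later hunks add the replacements pointing into modules/d4science_infra_setup.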
@@ -1,3 +1,5 @@
+echo "Do not use"
+
 # Define required providers
 terraform {
 required_version = ">= 0.14.0"
@@ -17,10 +19,7 @@ data "terraform_remote_state" "privnet_dns_router" {
 }
 }

-# module "variables" {
-# source = "../variables"
-# }
+module "d4science_infra_setup" {
+source = "../../modules/d4science_infra_setup"
+}

-# module "basic_setup" {
-# source = "../../modules/basic_setup"
-# }
@@ -1 +0,0 @@
-../../modules/docker_swarm/swarm-variables.tf
@@ -1 +0,0 @@
-../variables/variables-dev.tf
@@ -1 +0,0 @@
-../../modules/common_variables/variables.tf
@@ -1,3 +1,4 @@
+echo "Do not use."
 # Define required providers
 terraform {
 required_version = ">= 0.14.0"
@@ -1,11 +0,0 @@
-# Main services
-
-* Load balancer as a service (openstack), L4.
-
-> * Main HAPROXY load balancer
-
-* Two VMs as HAPROXY L7 instances for the main services. The dataminers will be also served by this load balancer.
-* A shell server, with floating IP address, that will be used as a proxy to reach all the other VMs.
-* A internal CA service.
-* A Prometheus instance.
-* A PostgreSQL server instance, with a dedicated network
@@ -1,22 +0,0 @@
-# Define required providers
-terraform {
-required_version = ">= 0.14.0"
-required_providers {
-openstack = {
-source = "terraform-provider-openstack/openstack"
-version = "~> 1.53.0"
-}
-}
-}
-
-data "terraform_remote_state" "privnet_dns_router" {
-backend = "local"
-
-config = {
-path = "../project-setup/terraform.tfstate"
-}
-}
-
-module "d4science_infra_setup" {
-source = "../../modules/d4science_infra_setup"
-}
@@ -1,3 +0,0 @@
-provider "openstack" {
-cloud = "d4s-pre"
-}
File diff suppressed because it is too large
@@ -1 +0,0 @@
-../../common_setups/15-security-groups.tf
@@ -1 +0,0 @@
-../../common_setups/20-octavia.tf
@@ -1 +0,0 @@
-../../common_setups/25-ssh-jump-proxy.tf
@@ -1 +0,0 @@
-../../common_setups/30-internal-ca.tf
@@ -1 +0,0 @@
-../../common_setups/35-prometheus.tf
@@ -1 +0,0 @@
-../../common_setups/40-postgresql.tf
@@ -7,6 +7,6 @@ resource "openstack_blockstorage_volume_v3" "shared_postgresql_backup_vol" {
 resource "openstack_compute_volume_attach_v2" "shared_postgresql_backup_attach_vol" {
 instance_id = openstack_compute_instance_v2.shared_postgresql_server.id
 volume_id = openstack_blockstorage_volume_v3.shared_postgresql_backup_vol.id
 device = var.shared_postgresql_server_data.vol_backup_device
 depends_on = [openstack_compute_instance_v2.shared_postgresql_server]
 }
@@ -1 +0,0 @@
-../../common_setups/45-haproxy.tf
@@ -0,0 +1 @@
+../../modules/d4science_infra_setup/haproxy.tf
@@ -0,0 +1 @@
+../../modules/d4science_infra_setup/internal-ca.tf
@@ -0,0 +1 @@
+../../modules/d4science_infra_setup/octavia.tf
@@ -0,0 +1 @@
+../../modules/d4science_infra_setup/postgresql.tf
@@ -1,62 +1,62 @@
 default_security_group_name = "default_for_all"

 shared_postgresql_server_data = {
-name ="shared-postgresql-server"
+name = "shared-postgresql-server"
 flavor = "m1.large"
 vol_data_name = "shared-postgresql-data"
 vol_data_size = "300"
 vol_data_device = "/dev/vdb"
 vol_backup_name = "shared-postgresql-backup-data"
 vol_backup_size = "100"
 vol_backup_device = "/dev/vdc"
 network_name = "postgresql-srv-net"
 network_description = "Network used to communicate with the shared postgresql service"
 network_cidr = "192.168.0.0/22"
 allocation_pool_start = "192.168.0.100"
 allocation_pool_end = "192.168.3.254"
 server_ip = "192.168.0.5"
 server_cidr = "192.168.0.5/22"
 }

 # Provided in the output of the project setup
 main_private_network_id = "020df98d-ae72-452a-b376-3b6dc289acac"
 main_private_subnet_id = "5d7b83ad-e058-4a3a-bfd8-d20ba6d42e1a"
 dns_zone_id = "74135b34-1a9c-4c01-8cf0-22450a5660c4"

 octavia_information = {
 main_lb_name = "d4s-production-cloud-l4-load-balancer"
 main_lb_description = "Main L4 load balancer for the D4Science production"
 swarm_lb_name = "d4s-production-cloud-l4-swarm-load-balancer"
 octavia_flavor = "octavia_amphora-mvcpu-ha"
 octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
 main_lb_hostname = "main-lb"
 # The following aren't available when the module runs so we have to get them with the command
 # openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
 # This means that the execution will fail
 octavia_vrrp_ip_1 = "10.1.42.119/32"
 octavia_vrrp_ip_2 = "10.1.42.188/32"
 }

 docker_swarm_data = {
 mgr_name = "swarm-mgr"
 mgr1_ip = "10.1.40.31"
 mgr1_cidr = "10.1.40.31/32"
 mgr2_ip = "10.1.40.32"
 mgr2_cidr = "10.1.40.32/32"
 mgr3_ip = "10.1.40.33"
 mgr3_cidr = "10.1.40.33/32"
 mgr_count = 3
 mgr_flavor = "m1.large"
 mgr_data_disk_size = 100
 worker_name = "swarm-worker"
 worker_count = 8
 worker_flavor = "m1.xxl"
 worker_data_disk_size = 200
 nfs_server_name = "swarm-nfs-server"
 nfs_server_flavor = "m1.medium"
 nfs_server_data_disk_name = "Swarm NFS server data Disk"
 nfs_server_data_disk_size = 200
 nfs_server_data_disk_device = "/dev/vdb"
 }

 swarm_managers_ip = ["10.1.40.31", "10.1.40.32", "10.1.40.33"]
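The only textual change in this hunk is the spacing in name = "shared-postgresql-server"; the hunks in this commit that show no visible difference at all are presumably alignment-only edits that the side-by-side view could not preserve, the kind of normalization terraform fmt produces.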
@@ -1,15 +1,15 @@
 octavia_swarm_data = {
 swarm_lb_name = "d4s-production-cloud-swarm-l4"
 swarm_lb_description = "L4 balancer that serves the D4Science production Docker Swarm cluster"
 swarm_lb_name = "d4s-production-cloud-swarm-l4"
 octavia_flavor = "octavia_amphora-mvcpu-ha"
 octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
 swarm_lb_hostname = "swarm-lb"
 swarm_octavia_main_ip = "10.1.40.30"
 swarm_octavia_main_cidr = "10.1.40.30/32"
 # The following aren't available when the module runs so we have to get them with the command
 # openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
 # This means that the execution will fail
 octavia_vrrp_ip_1 = "10.1.43.97/32"
 octavia_vrrp_ip_2 = "10.1.44.78/32"
 }
@@ -0,0 +1 @@
+../../modules/d4science_infra_setup/prometheus.tf
@@ -1,6 +1,6 @@
 # Define required providers
 terraform {
 required_version = ">= 0.14.0"
 required_providers {
 openstack = {
 source = "terraform-provider-openstack/openstack"
@@ -10,5 +10,5 @@ required_version = ">= 0.14.0"
 }

 provider "openstack" {
 cloud = "d4s-production"
 }
@@ -0,0 +1 @@
+../../modules/d4science_infra_setup/security-groups.tf
@@ -0,0 +1 @@
+../../modules/d4science_infra_setup/ssh-jump-proxy.tf
@@ -6,13 +6,13 @@ variable "os_project_data" {
 }

 variable "dns_zone" {
 type = map(string)
 default = {
 zone_name = "cloud.d4science.org."
 email = "postmaster@isti.cnr.it"
 description = "DNS primary zone for the d4s-production-cloud project"
 ttl = 8600
 }
 }

 variable "dns_zone_id" {
@@ -27,9 +27,9 @@ variable "default_security_group_name" {
 variable "main_private_network" {
 type = map(string)
 default = {
 name = "d4s-production-cloud-main"
 description = "D4Science Production private network (use this as the main network)"
 }
 }

 variable "main_private_network_id" {
@@ -40,13 +40,13 @@ variable "main_private_network_id" {
 variable "main_private_subnet" {
 type = map(string)
 default = {
 name = "d4s-production-cloud-main-subnet"
 description = "D4Science Production main private subnet"
 cidr = "10.1.40.0/21"
 gateway_ip = "10.1.40.1"
 allocation_start = "10.1.41.100"
 allocation_end = "10.1.47.254"
 }
 }

 variable "main_private_subnet_id" {
@@ -57,45 +57,45 @@ variable "main_private_subnet_id" {
 variable "external_router" {
 type = map(string)
 default = {
 name = "d4s-production-cloud-external-router"
 description = "D4Science Production main router"
 id = "cc26064a-bb08-4c0b-929f-d0cb39f934a3"
 }
 }

 variable "basic_services_ip" {
 type = map(string)
 default = {
 ca = "10.1.40.4"
 ca_cidr = "10.1.40.4/32"
 ssh_jump = "10.1.40.5"
 ssh_jump_cidr = "10.1.40.5/32"
 prometheus = "10.1.40.10"
 prometheus_cidr = "10.1.40.10/32"
 haproxy_l7_1 = "10.1.40.11"
 haproxy_l7_1_cidr = "10.1.40.11/32"
 haproxy_l7_2 = "10.1.40.12"
 haproxy_l7_2_cidr = "10.1.40.12/32"
 octavia_main = "10.1.40.20"
 octavia_main_cidr = "10.1.40.20/32"
 }
 }

 variable "main_haproxy_l7_ip" {
 type = list(string)
 default = ["10.1.40.11", "10.1.40.12"]

 }

 variable "octavia_information" {
 type = map(string)
 default = {
 main_lb_name = "d4s-production-cloud-l4-load-balancer"
 main_lb_description = "Main L4 load balancer for the D4Science production"
 swarm_lb_name = "d4s-production-cloud-l4-swarm-load-balancer"
 octavia_flavor = "octavia_amphora-mvcpu-ha"
 octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
 main_lb_hostname = "main-lb"
 }
 }

@@ -62,14 +62,14 @@ variable "networks_with_d4s_services" {
 }

 variable "dns_zone" {
 type = map(string)
 default = {
 zone_name = ""
 email = "postmaster@isti.cnr.it"
 description = ""
 ttl = 8600
 id = ""
 }
 }

 variable "dns_zone_id" {
@@ -80,9 +80,9 @@ variable "dns_zone_id" {
 variable "main_private_network" {
 type = map(string)
 default = {
 name = ""
 description = ""
 }
 }

 variable "main_private_network_id" {
@@ -93,13 +93,13 @@ variable "main_private_network_id" {
 variable "main_private_subnet" {
 type = map(string)
 default = {
 name = ""
 description = ""
 cidr = ""
 gateway_ip = ""
 allocation_start = ""
 allocation_end = ""
 }
 }

 variable "main_private_subnet_id" {
@@ -110,10 +110,10 @@ variable "main_private_subnet_id" {
 variable "external_router" {
 type = map(string)
 default = {
 name = ""
 description = ""
 id = ""
 }
 }

 variable "ubuntu_1804" {
@@ -247,42 +247,42 @@ variable "default_security_group_name" {
 variable "basic_services_ip" {
 type = map(string)
 default = {
 ca = ""
 ca_cidr = ""
 ssh_jump = ""
 ssh_jump_cidr = ""
 prometheus = ""
 prometheus_cidr = ""
 haproxy_l7_1 = ""
 haproxy_l7_1_cidr = ""
 haproxy_l7_2 = ""
 haproxy_l7_2_cidr = ""
 octavia_main = ""
 octavia_main_cidr = ""
 }
 }

 variable "main_haproxy_l7_ip" {
 type = list(string)
 default = []

 }

 variable "octavia_information" {
 type = map(string)
 default = {
 main_lb_name = ""
 main_lb_description = ""
 swarm_lb_name = ""
 octavia_flavor = ""
 octavia_flavor_id = ""
 main_lb_hostname = ""
 # The following aren't available when the module runs so we have to get them with the command
 # openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
 # This means that the execution will fail
 octavia_vrrp_ip_1 = ""
 octavia_vrrp_ip_2 = ""
 }
 }

 # Added by Francesco
@@ -15,113 +15,113 @@ resource "openstack_compute_servergroup_v2" "main_haproxy_l7" {
 }
 # Security group
 resource "openstack_networking_secgroup_v2" "main_lb_to_haproxy_l7" {
 name = "traffic_from_main_lb_to_haproxy_l7"
 delete_default_rules = "true"
 description = "Traffic coming the main L4 lb directed to the haproxy l7 servers"
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_1_peer" {
 security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
 description = "Peer traffic from haproxy l7 1 to l7 2"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 10000
 port_range_max = 10000
-remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
+remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_2_peer" {
 security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
 description = "Peer traffic from haproxy l7 2 to l7 1"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 10000
 port_range_max = 10000
-remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
+remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_80" {
 security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
 description = "Traffic from the first main lb instance to HAPROXY l7 1 port 80"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 80
 port_range_max = 80
-remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_1
+remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
 }

 resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_443" {
 security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
 description = "Traffic from the first main lb instance to HAPROXY l7 1 port 443"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 443
 port_range_max = 443
-remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_1
+remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
 }

 resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_8080" {
 security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
 description = "Traffic from the first main lb instance to HAPROXY l7 1 port 8080"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 8080
 port_range_max = 8080
-remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_1
+remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
 }

 resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_80" {
 security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
 description = "Traffic from the first main lb instance to HAPROXY l7 2 port 80"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 80
 port_range_max = 80
-remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_2
+remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_2
 }

 resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_443" {
 security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
 description = "Traffic from the first main lb instance to HAPROXY l7 2 port 443"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 443
 port_range_max = 443
-remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_2
+remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_2
 }

 resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_8080" {
 security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
 description = "Traffic from the first main lb instance to HAPROXY l7 2 port 8080"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 8080
 port_range_max = 8080
-remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_2
+remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_2
 }


 # Instance
 resource "openstack_compute_instance_v2" "main_haproxy_l7" {
-count = module.common_variables.haproxy_l7_data.vm_count
-name = format("%s-%02d", module.common_variables.haproxy_l7_data.name, count.index+1)
-availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
-flavor_name = module.common_variables.haproxy_l7_data.flavor
+count = var.haproxy_l7_data.vm_count
+name = format("%s-%02d", var.haproxy_l7_data.name, count.index + 1)
+availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
+flavor_name = var.haproxy_l7_data.flavor
 key_pair = module.ssh_settings.ssh_key_name
-security_groups = [module.common_variables.default_security_group_name,openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.name]
+security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.name]
 scheduler_hints {
 group = openstack_compute_servergroup_v2.main_haproxy_l7.id
 }
 block_device {
-uuid = module.common_variables.ubuntu_2204.uuid
+uuid = var.ubuntu_2204.uuid
 source_type = "image"
 volume_size = 10
 boot_index = 0
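In the instance block above, format("%s-%02d", var.haproxy_l7_data.name, count.index + 1) derives one-based, zero-padded instance names. A worked example with assumed values, since haproxy_l7_data itself is not shown in this diff:

# assuming haproxy_l7_data = { name = "main-haproxy-l7", vm_count = 2 }:
# count.index = 0  ->  "main-haproxy-l7-01"
# count.index = 1  ->  "main-haproxy-l7-02"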
@@ -130,9 +130,9 @@ resource "openstack_compute_instance_v2" "main_haproxy_l7" {
 }

 network {
-name = module.common_variables.main_private_network.name
-fixed_ip_v4 = module.common_variables.main_haproxy_l7_ip.*[count.index]
+name = var.main_private_network.name
+fixed_ip_v4 = var.main_haproxy_l7_ip.* [count.index]
 }

-user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
+user_data = file("${var.ubuntu2204_data_file}")
 }
@@ -1,11 +1,11 @@
 resource "openstack_compute_instance_v2" "internal_ca" {
-name = module.common_variables.internal_ca_data.name
-availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
-flavor_name = module.common_variables.internal_ca_data.flavor
+name = var.internal_ca_data.name
+availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
+flavor_name = var.internal_ca_data.flavor
 key_pair = module.ssh_settings.ssh_key_name
-security_groups = [module.common_variables.default_security_group_name]
+security_groups = [var.default_security_group_name]
 block_device {
-uuid = module.common_variables.ubuntu_2204.uuid
+uuid = var.ubuntu_2204.uuid
 source_type = "image"
 volume_size = 10
 boot_index = 0
@@ -14,8 +14,8 @@ resource "openstack_compute_instance_v2" "internal_ca" {
 }

 network {
-name = module.common_variables.main_private_network.name
-fixed_ip_v4 = module.common_variables.basic_services_ip.ca
+name = var.main_private_network.name
+fixed_ip_v4 = var.basic_services_ip.ca
 }
-user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
+user_data = file("${var.ubuntu2204_data_file}")
 }
@@ -1,33 +1,33 @@
 # Main load balancer. L4, backed by Octavia
 resource "openstack_lb_loadbalancer_v2" "main_lb" {
-vip_subnet_id = module.common_variables.main_private_subnet_id
-name = module.common_variables.octavia_information.main_lb_name
-description = module.common_variables.octavia_information.main_lb_description
-flavor_id = module.common_variables.octavia_information.octavia_flavor_id
-vip_address = module.common_variables.basic_services_ip.octavia_main
+vip_subnet_id = var.main_private_subnet_id
+name = var.octavia_information.main_lb_name
+description = var.octavia_information.main_lb_description
+flavor_id = var.octavia_information.octavia_flavor_id
+vip_address = var.basic_services_ip.octavia_main
 loadbalancer_provider = "amphora"
 }

 # Allocate a floating IP
 resource "openstack_networking_floatingip_v2" "main_lb_ip" {
-pool = module.common_variables.floating_ip_pools.main_public_ip_pool
+pool = var.floating_ip_pools.main_public_ip_pool
 # The DNS association does not work because of a bug in the OpenStack API
 # dns_name = "main-lb"
-# dns_domain = module.common_variables.dns_zone.zone_name
-description = module.common_variables.octavia_information.main_lb_description
+# dns_domain = var.dns_zone.zone_name
+description = var.octavia_information.main_lb_description
 }

 resource "openstack_networking_floatingip_associate_v2" "main_lb" {
 floating_ip = openstack_networking_floatingip_v2.main_lb_ip.address
 port_id = openstack_lb_loadbalancer_v2.main_lb.vip_port_id
 }

 locals {
-recordset_name = "${module.common_variables.octavia_information.main_lb_hostname}.${module.common_variables.dns_zone.zone_name}"
+recordset_name = "${var.octavia_information.main_lb_hostname}.${var.dns_zone.zone_name}"
 }

 resource "openstack_dns_recordset_v2" "main_lb_dns_recordset" {
-zone_id = module.common_variables.dns_zone_id
+zone_id = var.dns_zone_id
 name = local.recordset_name
 description = "Public IP address of the main load balancer"
 ttl = 8600
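With the production values shown earlier in this diff (main_lb_hostname = "main-lb", zone_name = "cloud.d4science.org."), the recordset_name local above evaluates to:

# "${var.octavia_information.main_lb_hostname}.${var.dns_zone.zone_name}"
#   => "main-lb.cloud.d4science.org."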
@@ -37,146 +37,146 @@ resource "openstack_dns_recordset_v2" "main_lb_dns_recordset" {

 # Main HAPROXY stats listener
 resource "openstack_lb_listener_v2" "main_haproxy_stats_listener" {
 loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
 protocol = "TCP"
 protocol_port = 8880
 description = "Listener for the stats of the main HAPROXY instances"
 name = "main_haproxy_stats_listener"
-allowed_cidrs = [module.common_variables.ssh_sources.d4s_vpn_1_cidr,module.common_variables.ssh_sources.d4s_vpn_2_cidr,module.common_variables.ssh_sources.s2i2s_vpn_1_cidr,module.common_variables.ssh_sources.s2i2s_vpn_2_cidr]
+allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]

 }

 resource "openstack_lb_pool_v2" "main_haproxy_stats_pool" {
 listener_id = openstack_lb_listener_v2.main_haproxy_stats_listener.id
 protocol = "TCP"
 lb_method = "LEAST_CONNECTIONS"
 name = "main-haproxy-lb-stats"
 description = "Pool for the stats of the main HAPROXY instances"
 persistence {
 type = "SOURCE_IP"
 }
 }

 resource "openstack_lb_members_v2" "main_haproxy_stats_pool_members" {
 pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
 member {
 name = "haproxy l7 1"
-address = module.common_variables.basic_services_ip.haproxy_l7_1
+address = var.basic_services_ip.haproxy_l7_1
 protocol_port = 8880
 }
 member {
 name = "haproxy l7 2"
-address = module.common_variables.basic_services_ip.haproxy_l7_2
+address = var.basic_services_ip.haproxy_l7_2
 protocol_port = 8880
 }
 }

 resource "openstack_lb_monitor_v2" "main_haproxy_stats_monitor" {
 pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
 name = "main_haproxy_stats_monitor"
 type = "TCP"
 delay = 20
 timeout = 5
 max_retries = 3
 admin_state_up = true
 }

 # Main HAPROXY HTTP
 resource "openstack_lb_listener_v2" "main_haproxy_http_listener" {
 loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
 protocol = "TCP"
 protocol_port = 80
 description = "HTTP listener of the main HAPROXY instances"
 name = "main_haproxy_http_listener"
 admin_state_up = true
 }

 resource "openstack_lb_pool_v2" "main_haproxy_http_pool" {
 listener_id = openstack_lb_listener_v2.main_haproxy_http_listener.id
 protocol = "PROXYV2"
 lb_method = "LEAST_CONNECTIONS"
 name = "main-haproxy-lb-http"
 description = "Pool for the HTTP listener of the main HAPROXY instances"
 persistence {
 type = "SOURCE_IP"
 }
 admin_state_up = true
 }

 resource "openstack_lb_members_v2" "main_haproxy_http_pool_members" {
 pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
 member {
 name = "haproxy l7 1"
-address = module.common_variables.basic_services_ip.haproxy_l7_1
+address = var.basic_services_ip.haproxy_l7_1
 protocol_port = 80
 }
 member {
 name = "haproxy l7 2"
-address = module.common_variables.basic_services_ip.haproxy_l7_2
+address = var.basic_services_ip.haproxy_l7_2
 protocol_port = 80
 }
 }

 resource "openstack_lb_monitor_v2" "main_haproxy_http_monitor" {
 pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
 name = "main_haproxy_http_monitor"
 type = "HTTP"
 http_method = "GET"
 url_path = "/_haproxy_health_check"
 expected_codes = "200"
 delay = 20
 timeout = 5
 max_retries = 3
 admin_state_up = true
 }

 # Main HAPROXY HTTPS
 resource "openstack_lb_listener_v2" "main_haproxy_https_listener" {
 loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
 protocol = "TCP"
 protocol_port = 443
 description = "HTTPS listener of the main HAPROXY instances"
 name = "main_haproxy_https_listener"
 admin_state_up = true
 }

 resource "openstack_lb_pool_v2" "main_haproxy_https_pool" {
 listener_id = openstack_lb_listener_v2.main_haproxy_https_listener.id
 protocol = "PROXYV2"
 lb_method = "LEAST_CONNECTIONS"
 name = "main-haproxy-lb-https"
 description = "Pool for the HTTPS listener of the main HAPROXY instances"
 persistence {
 type = "SOURCE_IP"
 }
 admin_state_up = true
 }

 resource "openstack_lb_members_v2" "main_haproxy_https_pool_members" {
 pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
 member {
 name = "haproxy l7 1"
-address = module.common_variables.basic_services_ip.haproxy_l7_1
+address = var.basic_services_ip.haproxy_l7_1
 protocol_port = 443
 }
 member {
 name = "haproxy l7 2"
-address = module.common_variables.basic_services_ip.haproxy_l7_2
+address = var.basic_services_ip.haproxy_l7_2
 protocol_port = 443
 }
 }

 resource "openstack_lb_monitor_v2" "main_haproxy_https_monitor" {
 pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
 name = "main_haproxy_https_monitor"
 type = "HTTPS"
 http_method = "GET"
 url_path = "/_haproxy_health_check"
 expected_codes = "200"
 delay = 20
 timeout = 5
 max_retries = 3
 admin_state_up = true
 }

 output "main_loadbalancer_ip" {
@@ -0,0 +1 @@
+../common_variables/outputs.tf
@@ -1,66 +1,66 @@
 # PostgreSQL shared server
 # Network
 resource "openstack_networking_network_v2" "shared_postgresql_net" {
-name = module.common_variables.shared_postgresql_server_data.network_name
+name = var.shared_postgresql_server_data.network_name
 admin_state_up = "true"
 external = "false"
-description = module.common_variables.shared_postgresql_server_data.network_description
-dns_domain = module.common_variables.dns_zone.zone_name
-mtu = module.common_variables.mtu_size
+description = var.shared_postgresql_server_data.network_description
+dns_domain = var.dns_zone.zone_name
+mtu = var.mtu_size
 port_security_enabled = true
 shared = false
-region = module.common_variables.main_region
+region = var.main_region
 }

 # Subnet
 resource "openstack_networking_subnet_v2" "shared_postgresql_subnet" {
 name = "shared-postgresql-subnet"
 description = "subnet used to connect to the shared PostgreSQL service"
 network_id = openstack_networking_network_v2.shared_postgresql_net.id
-cidr = module.common_variables.shared_postgresql_server_data.network_cidr
-dns_nameservers = module.common_variables.resolvers_ip
+cidr = var.shared_postgresql_server_data.network_cidr
+dns_nameservers = var.resolvers_ip
 ip_version = 4
 enable_dhcp = true
 no_gateway = true
 allocation_pool {
-start = module.common_variables.shared_postgresql_server_data.allocation_pool_start
-end = module.common_variables.shared_postgresql_server_data.allocation_pool_end
+start = var.shared_postgresql_server_data.allocation_pool_start
+end = var.shared_postgresql_server_data.allocation_pool_end
 }
 }

 # Security group
 resource "openstack_networking_secgroup_v2" "shared_postgresql_access" {
 name = "access_to_the_shared_postgresql_service"
 delete_default_rules = "true"
 description = "Access the shared PostgreSQL service using the dedicated network"
 }

 resource "openstack_networking_secgroup_rule_v2" "shared_postgresql_access_from_dedicated_subnet" {
 security_group_id = openstack_networking_secgroup_v2.shared_postgresql_access.id
 description = "Allow connections to port 5432 from the 192.168.2.0/22 network"
 direction = "ingress"
 ethertype = "IPv4"
 protocol = "tcp"
 port_range_min = 5432
 port_range_max = 5432
-remote_ip_prefix = module.common_variables.shared_postgresql_server_data.network_cidr
+remote_ip_prefix = var.shared_postgresql_server_data.network_cidr
 }

 # Block device
 resource "openstack_blockstorage_volume_v3" "shared_postgresql_data_vol" {
-name = module.common_variables.shared_postgresql_server_data.vol_data_name
-size = module.common_variables.shared_postgresql_server_data.vol_data_size
+name = var.shared_postgresql_server_data.vol_data_name
+size = var.shared_postgresql_server_data.vol_data_size
 }

 # Instance
 resource "openstack_compute_instance_v2" "shared_postgresql_server" {
-name = module.common_variables.shared_postgresql_server_data.name
-availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
-flavor_name = module.common_variables.shared_postgresql_server_data.flavor
+name = var.shared_postgresql_server_data.name
+availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
+flavor_name = var.shared_postgresql_server_data.flavor
 key_pair = module.ssh_settings.ssh_key_name
-security_groups = [module.common_variables.default_security_group_name,openstack_networking_secgroup_v2.shared_postgresql_access.name]
+security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.shared_postgresql_access.name]
 block_device {
-uuid = module.common_variables.ubuntu_2204.uuid
+uuid = var.ubuntu_2204.uuid
 source_type = "image"
 volume_size = 10
 boot_index = 0
@@ -69,19 +69,19 @@ resource "openstack_compute_instance_v2" "shared_postgresql_server" {
 }

 network {
-name = module.common_variables.main_private_network.name
+name = var.main_private_network.name
 }
 network {
-name = module.common_variables.shared_postgresql_server_data.network_name
-fixed_ip_v4 = module.common_variables.shared_postgresql_server_data.server_ip
+name = var.shared_postgresql_server_data.network_name
+fixed_ip_v4 = var.shared_postgresql_server_data.server_ip
 }

-user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
+user_data = file("${var.ubuntu2204_data_file}")
 }

 resource "openstack_compute_volume_attach_v2" "shared_postgresql_data_attach_vol" {
 instance_id = openstack_compute_instance_v2.shared_postgresql_server.id
 volume_id = openstack_blockstorage_volume_v3.shared_postgresql_data_vol.id
-device = module.common_variables.shared_postgresql_server_data.vol_data_device
+device = var.shared_postgresql_server_data.vol_data_device
 depends_on = [openstack_compute_instance_v2.shared_postgresql_server]
 }
@@ -1,17 +1,17 @@
 # Promertheus server. A floating IP is required
 resource "openstack_blockstorage_volume_v3" "prometheus_data_vol" {
-name = module.common_variables.prometheus_server_data.vol_data_name
-size = module.common_variables.prometheus_server_data.vol_data_size
+name = var.prometheus_server_data.vol_data_name
+size = var.prometheus_server_data.vol_data_size
 }

 resource "openstack_compute_instance_v2" "prometheus_server" {
-name = module.common_variables.prometheus_server_data.name
-availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
-flavor_name = module.common_variables.prometheus_server_data.flavor
+name = var.prometheus_server_data.name
+availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
+flavor_name = var.prometheus_server_data.flavor
 key_pair = module.ssh_settings.ssh_key_name
-security_groups = [module.common_variables.default_security_group_name,openstack_networking_secgroup_v2.restricted_web.name,openstack_networking_secgroup_v2.prometheus_access_from_grafana.name]
+security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.restricted_web.name, openstack_networking_secgroup_v2.prometheus_access_from_grafana.name]
 block_device {
-uuid = module.common_variables.ubuntu_2204.uuid
+uuid = var.ubuntu_2204.uuid
 source_type = "image"
 volume_size = 10
 boot_index = 0
@ -20,23 +20,23 @@ resource "openstack_compute_instance_v2" "prometheus_server" {
|
||||||
}
|
}
|
||||||
|
|
||||||
network {
|
network {
|
||||||
name = module.common_variables.main_private_network.name
|
name = var.main_private_network.name
|
||||||
fixed_ip_v4 = module.common_variables.basic_services_ip.prometheus
|
fixed_ip_v4 = var.basic_services_ip.prometheus
|
||||||
}
|
}
|
||||||
user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
|
user_data = file("${var.ubuntu2204_data_file}")
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_volume_attach_v2" "prometheus_data_attach_vol" {
|
resource "openstack_compute_volume_attach_v2" "prometheus_data_attach_vol" {
|
||||||
instance_id = openstack_compute_instance_v2.prometheus_server.id
|
instance_id = openstack_compute_instance_v2.prometheus_server.id
|
||||||
volume_id = openstack_blockstorage_volume_v3.prometheus_data_vol.id
|
volume_id = openstack_blockstorage_volume_v3.prometheus_data_vol.id
|
||||||
device = module.common_variables.prometheus_server_data.vol_data_device
|
device = var.prometheus_server_data.vol_data_device
|
||||||
}
|
}
|
||||||
|
|
||||||
# Floating IP and DNS record
|
# Floating IP and DNS record
|
||||||
resource "openstack_networking_floatingip_v2" "prometheus_server_ip" {
|
resource "openstack_networking_floatingip_v2" "prometheus_server_ip" {
|
||||||
pool = module.common_variables.floating_ip_pools.main_public_ip_pool
|
pool = var.floating_ip_pools.main_public_ip_pool
|
||||||
# The DNS association does not work because of a bug in the OpenStack API
|
# The DNS association does not work because of a bug in the OpenStack API
|
||||||
description = "Prometheus server"
|
description = "Prometheus server"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
|
resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
|
||||||
|
@ -45,12 +45,12 @@ resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
|
||||||
}
|
}
|
||||||
|
|
||||||
locals {
|
locals {
|
||||||
prometheus_recordset_name = "${module.common_variables.prometheus_server_data.name}.${module.common_variables.dns_zone.zone_name}"
|
prometheus_recordset_name = "${var.prometheus_server_data.name}.${var.dns_zone.zone_name}"
|
||||||
alertmanager_recordset_name = "alertmanager.${module.common_variables.dns_zone.zone_name}"
|
alertmanager_recordset_name = "alertmanager.${var.dns_zone.zone_name}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_dns_recordset_v2" "prometheus_server_recordset" {
|
resource "openstack_dns_recordset_v2" "prometheus_server_recordset" {
|
||||||
zone_id = module.common_variables.dns_zone_id
|
zone_id = var.dns_zone_id
|
||||||
name = local.prometheus_recordset_name
|
name = local.prometheus_recordset_name
|
||||||
description = "Public IP address of the Prometheus server"
|
description = "Public IP address of the Prometheus server"
|
||||||
ttl = 8600
|
ttl = 8600
|
||||||
|
@ -59,7 +59,7 @@ resource "openstack_dns_recordset_v2" "prometheus_server_recordset" {
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_dns_recordset_v2" "alertmanager_server_recordset" {
|
resource "openstack_dns_recordset_v2" "alertmanager_server_recordset" {
|
||||||
zone_id = module.common_variables.dns_zone_id
|
zone_id = var.dns_zone_id
|
||||||
name = local.alertmanager_recordset_name
|
name = local.alertmanager_recordset_name
|
||||||
description = "Prometheus alertmanager"
|
description = "Prometheus alertmanager"
|
||||||
ttl = 8600
|
ttl = 8600
|
||||||
|
|
|
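After this change, `prometheus_server_data` is consumed as a plain input variable instead of a module output. A minimal sketch of the matching declaration, assuming an object type whose attributes are the ones referenced in this commit (names and types inferred, not shown in the diff):

    variable "prometheus_server_data" {
      # Assumed shape; the real declaration lives in the shared common_variables definitions
      type = object({
        name                       = string # instance and DNS record name
        flavor                     = string # compute flavor
        vol_data_name              = string # name of the data volume
        vol_data_size              = number # size of the data volume, in GB
        vol_data_device            = string # device path used by the volume attachment
        public_grafana_server_cidr = string # CIDR allowed to reach Prometheus from Grafana
      })
    }
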
@@ -1,373 +1,373 @@
 #
 # This is the security group that should be added to every instance
 resource "openstack_networking_secgroup_v2" "default" {
-  name                 = module.common_variables.default_security_group_name
+  name                 = var.default_security_group_name
   delete_default_rules = "true"
   description          = "Default security group with rules for ssh access via jump proxy, prometheus scraping"
 }

 resource "openstack_networking_secgroup_rule_v2" "egress-ipv4" {
   security_group_id = openstack_networking_secgroup_v2.default.id
   direction         = "egress"
   ethertype         = "IPv4"
 }

 resource "openstack_networking_secgroup_rule_v2" "ingress-icmp" {
   security_group_id = openstack_networking_secgroup_v2.default.id
   description       = "Allow ICMP from remote"
   direction         = "ingress"
   ethertype         = "IPv4"
   remote_ip_prefix  = "0.0.0.0/0"
   protocol          = "icmp"
 }

 resource "openstack_networking_secgroup_rule_v2" "ssh-jump-proxy" {
   security_group_id = openstack_networking_secgroup_v2.default.id
   description       = "SSH traffic from the jump proxy"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 22
   port_range_max    = 22
-  remote_ip_prefix  = module.common_variables.basic_services_ip.ssh_jump_cidr
+  remote_ip_prefix  = var.basic_services_ip.ssh_jump_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "prometheus-node" {
   security_group_id = openstack_networking_secgroup_v2.default.id
   description       = "Prometheus access to the node exporter"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 9100
   port_range_max    = 9100
-  remote_ip_prefix  = module.common_variables.basic_services_ip.prometheus_cidr
+  remote_ip_prefix  = var.basic_services_ip.prometheus_cidr
 }

 #
 # SSH access to the jump proxy. Used by the jump proxy VM only
 resource "openstack_networking_secgroup_v2" "access_to_the_jump_proxy" {
   name                 = "ssh_access_to_the_jump_node"
   delete_default_rules = "true"
   description          = "Security group that allows SSH access to the jump node from a limited set of sources"
 }

 resource "openstack_networking_secgroup_rule_v2" "ssh-s2i2s-vpn-1" {
   security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
   description       = "SSH traffic from S2I2S VPN 1"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 22
   port_range_max    = 22
-  remote_ip_prefix  = module.common_variables.ssh_sources.s2i2s_vpn_1_cidr
+  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "ssh-s2i2s-vpn-2" {
   security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
   description       = "SSH traffic from S2I2S VPN 2"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 22
   port_range_max    = 22
-  remote_ip_prefix  = module.common_variables.ssh_sources.s2i2s_vpn_2_cidr
+  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_2_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "ssh-d4s-vpn-1" {
   security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
   description       = "SSH traffic from D4Science VPN 1"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 22
   port_range_max    = 22
-  remote_ip_prefix  = module.common_variables.ssh_sources.d4s_vpn_1_cidr
+  remote_ip_prefix  = var.ssh_sources.d4s_vpn_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "ssh-d4s-vpn-2" {
   security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
   description       = "SSH traffic from D4Science VPN 2"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 22
   port_range_max    = 22
-  remote_ip_prefix  = module.common_variables.ssh_sources.d4s_vpn_2_cidr
+  remote_ip_prefix  = var.ssh_sources.d4s_vpn_2_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "ssh-shell-d4s" {
   security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
   description       = "SSH traffic from shell.d4science.org"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 22
   port_range_max    = 22
-  remote_ip_prefix  = module.common_variables.ssh_sources.shell_d4s_cidr
+  remote_ip_prefix  = var.ssh_sources.shell_d4s_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "ssh-infrascience-net" {
   security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
   description       = "SSH traffic from the InfraScience network"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 22
   port_range_max    = 22
-  remote_ip_prefix  = module.common_variables.ssh_sources.infrascience_net_cidr
+  remote_ip_prefix  = var.ssh_sources.infrascience_net_cidr
 }

 # Debug via tunnel from the jump proxy node
 resource "openstack_networking_secgroup_v2" "debugging" {
   name                 = "debugging_from_jump_node"
   delete_default_rules = "true"
   description          = "Security group that allows web app debugging via tunnel from the ssh jump node"
 }

 resource "openstack_networking_secgroup_rule_v2" "shell_8100" {
   security_group_id = openstack_networking_secgroup_v2.debugging.id
   description       = "Tomcat debug on port 8100 from the shell jump proxy"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 8100
   port_range_max    = 8100
-  remote_ip_prefix  = module.common_variables.basic_services_ip.ssh_jump_cidr
+  remote_ip_prefix  = var.basic_services_ip.ssh_jump_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "shell_80" {
   security_group_id = openstack_networking_secgroup_v2.debugging.id
   description       = "http debug port 80 from the shell jump proxy"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 80
   port_range_max    = 80
-  remote_ip_prefix  = module.common_variables.basic_services_ip.ssh_jump_cidr
+  remote_ip_prefix  = var.basic_services_ip.ssh_jump_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "shell_443" {
   security_group_id = openstack_networking_secgroup_v2.debugging.id
   description       = "https debug port 443 from the shell jump proxy"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.basic_services_ip.ssh_jump_cidr
+  remote_ip_prefix  = var.basic_services_ip.ssh_jump_cidr
 }

 # Traffic from the main HAPROXY load balancers
 # Use on the web services that are exposed through the main HAPROXY
 resource "openstack_networking_secgroup_v2" "traffic_from_main_haproxy" {
   name                 = "traffic_from_the_main_load_balancers"
   delete_default_rules = "true"
   description          = "Allow traffic from the main L7 HAPROXY load balancers"
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-80" {
   security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
   description       = "HTTP traffic from HAPROXY L7 1"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 80
   port_range_max    = 80
-  remote_ip_prefix  = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
+  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-80" {
   security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
   description       = "HTTP traffic from HAPROXY L7 2"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 80
   port_range_max    = 80
-  remote_ip_prefix  = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
+  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-443" {
   security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
   description       = "HTTPS traffic from HAPROXY L7 1"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
+  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-443" {
   security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
   description       = "HTTPS traffic from HAPROXY L7 2"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
+  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-8080" {
   security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
   description       = "HTTP traffic from HAPROXY L7 1"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 8080
   port_range_max    = 8080
-  remote_ip_prefix  = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
+  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8080" {
   security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
   description       = "HTTP traffic from HAPROXY L7 2"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 8080
   port_range_max    = 8080
-  remote_ip_prefix  = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
+  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-8888" {
   security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
   description       = "HTTP traffic from HAPROXY L7 1"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 8888
   port_range_max    = 8888
-  remote_ip_prefix  = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
+  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8888" {
   security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
   description       = "HTTP traffic from HAPROXY L7 2"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 8888
   port_range_max    = 8888
-  remote_ip_prefix  = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
+  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
 }

 # Security group that exposes web services directly. A floating IP is required.
 resource "openstack_networking_secgroup_v2" "public_web" {
   name                 = "public_web_service"
   delete_default_rules = "true"
   description          = "Security group that allows HTTPS and HTTP from everywhere, for the services that are not behind any load balancer"
 }

 resource "openstack_networking_secgroup_rule_v2" "public_http" {
   security_group_id = openstack_networking_secgroup_v2.public_web.id
   description       = "Allow HTTP from everywhere"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 80
   port_range_max    = 80
   remote_ip_prefix  = "0.0.0.0/0"
 }

 resource "openstack_networking_secgroup_rule_v2" "public_https" {
   security_group_id = openstack_networking_secgroup_v2.public_web.id
   description       = "Allow HTTPS from everywhere"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
   remote_ip_prefix  = "0.0.0.0/0"
 }

 # HTTP and HTTPS access through the VPN nodes. Floating IP is required
 resource "openstack_networking_secgroup_v2" "restricted_web" {
   name                 = "restricted_web_service"
   delete_default_rules = "true"
   description          = "Security group that restricts HTTPS sources to the VPN nodes and shell.d4science.org. HTTP is open to all, because letsencrypt"
 }

 resource "openstack_networking_secgroup_rule_v2" "http_from_everywhere" {
   security_group_id = openstack_networking_secgroup_v2.restricted_web.id
   description       = "Allow HTTP from everywhere"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 80
   port_range_max    = 80
   remote_ip_prefix  = "0.0.0.0/0"
 }

 resource "openstack_networking_secgroup_rule_v2" "https_from_d4s_vpn_1" {
   security_group_id = openstack_networking_secgroup_v2.restricted_web.id
   description       = "Allow HTTPS from D4Science VPN 1"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.ssh_sources.d4s_vpn_1_cidr
+  remote_ip_prefix  = var.ssh_sources.d4s_vpn_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "https_from_d4s_vpn_2" {
   security_group_id = openstack_networking_secgroup_v2.restricted_web.id
   description       = "Allow HTTPS from D4Science VPN 2"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.ssh_sources.d4s_vpn_2_cidr
+  remote_ip_prefix  = var.ssh_sources.d4s_vpn_2_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "https_from_s2i2s_vpn_1" {
   security_group_id = openstack_networking_secgroup_v2.restricted_web.id
   description       = "Allow HTTPS from S2I2S VPN 1"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.ssh_sources.s2i2s_vpn_1_cidr
+  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_1_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "https_from_s2i2s_vpn_2" {
   security_group_id = openstack_networking_secgroup_v2.restricted_web.id
   description       = "Allow HTTPS from S2I2S VPN 2"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.ssh_sources.s2i2s_vpn_2_cidr
+  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_2_cidr
 }

 resource "openstack_networking_secgroup_rule_v2" "https_from_shell_d4s" {
   security_group_id = openstack_networking_secgroup_v2.restricted_web.id
   description       = "Allow HTTPS from shell.d4science.org"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.ssh_sources.shell_d4s_cidr
+  remote_ip_prefix  = var.ssh_sources.shell_d4s_cidr
 }

 resource "openstack_networking_secgroup_v2" "prometheus_access_from_grafana" {
   name                 = "prometheus_access_from_grafana"
   delete_default_rules = "true"
   description          = "The public grafana server must be able to get data from Prometheus"
 }

 resource "openstack_networking_secgroup_rule_v2" "grafana_d4s" {
   security_group_id = openstack_networking_secgroup_v2.prometheus_access_from_grafana.id
   description       = "Allow HTTPS from grafana.d4science.org"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 443
   port_range_max    = 443
-  remote_ip_prefix  = module.common_variables.prometheus_server_data.public_grafana_server_cidr
+  remote_ip_prefix  = var.prometheus_server_data.public_grafana_server_cidr
 }

@@ -1,12 +1,12 @@
 # VM used as jump proxy. A floating IP is required
 resource "openstack_compute_instance_v2" "ssh_jump_proxy" {
-  name                    = module.common_variables.ssh_jump_proxy.name
+  name                    = var.ssh_jump_proxy.name
-  availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
+  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
-  flavor_name             = module.common_variables.ssh_jump_proxy.flavor
+  flavor_name             = var.ssh_jump_proxy.flavor
   key_pair                = module.ssh_settings.ssh_key_name
-  security_groups         = [module.common_variables.default_security_group_name,openstack_networking_secgroup_v2.access_to_the_jump_proxy.name]
+  security_groups         = [var.default_security_group_name, openstack_networking_secgroup_v2.access_to_the_jump_proxy.name]
   block_device {
-    uuid = module.common_variables.ubuntu_2204.uuid
+    uuid = var.ubuntu_2204.uuid
     source_type = "image"
     volume_size = 30
     boot_index  = 0
@@ -15,17 +15,17 @@ resource "openstack_compute_instance_v2" "ssh_jump_proxy" {
   }

   network {
-    name        = module.common_variables.main_private_network.name
+    name        = var.main_private_network.name
-    fixed_ip_v4 = module.common_variables.basic_services_ip.ssh_jump
+    fixed_ip_v4 = var.basic_services_ip.ssh_jump
   }
-  user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
+  user_data = file("${var.ubuntu2204_data_file}")
 }

 # Floating IP and DNS record
 resource "openstack_networking_floatingip_v2" "ssh_jump_proxy_ip" {
-  pool = module.common_variables.floating_ip_pools.main_public_ip_pool
+  pool = var.floating_ip_pools.main_public_ip_pool
   # The DNS association does not work because of a bug in the OpenStack API
   description = "SSH Proxy Jump Server"
 }

 resource "openstack_compute_floatingip_associate_v2" "ssh_jump_proxy" {
@@ -34,11 +34,11 @@ resource "openstack_compute_floatingip_associate_v2" "ssh_jump_proxy" {
 }

 locals {
-  ssh_recordset_name = "${module.common_variables.ssh_jump_proxy.name}.${module.common_variables.dns_zone.zone_name}"
+  ssh_recordset_name = "${var.ssh_jump_proxy.name}.${var.dns_zone.zone_name}"
 }

 resource "openstack_dns_recordset_v2" "ssh_jump_proxy_recordset" {
-  zone_id     = module.common_variables.dns_zone_id
+  zone_id     = var.dns_zone_id
   name        = local.ssh_recordset_name
   description = "Public IP address of the SSH Proxy Jump server"
   ttl         = 8600

@@ -17,11 +17,11 @@ data "terraform_remote_state" "privnet_dns_router" {
   }
 }

-module "common_variables" {
+# module "common_variables" {
-  source = "../../modules/common_variables"
+#   source = "../../modules/common_variables"
-}
+# }

-module "ssh_settings" {
+# module "ssh_settings" {
-  source = "../../modules/ssh-key-ref"
+#   source = "../../modules/ssh-key-ref"
-}
+# }

@@ -0,0 +1 @@
+../common_variables/variables.tf

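A one-line file whose content is a relative path is how git records a symbolic link, so the `@@ -0,0 +1 @@` hunk above adds a symlink pointing at the shared `common_variables/variables.tf`: each project now pulls in the common variable declarations by reference, instead of instantiating the `common_variables` module that is commented out just before. A minimal sketch of one such declaration, assuming it is what the symlinked file provides (type and description are illustrative, not shown in the diff):

    # Assumed content of the symlinked common_variables/variables.tf:
    variable "dns_zone_id" {
      type        = string
      description = "ID of the DNS zone that holds the public records"
    }

    # after which module.common_variables.dns_zone_id is written as var.dns_zone_id
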
@ -13,27 +13,27 @@ resource "openstack_compute_servergroup_v2" "swarm_workers" {
|
||||||
# Network for the NFS traffic
|
# Network for the NFS traffic
|
||||||
#
|
#
|
||||||
resource "openstack_networking_network_v2" "swarm_nfs_net" {
|
resource "openstack_networking_network_v2" "swarm_nfs_net" {
|
||||||
name = var.swarm_nfs_private_network.network_name
|
name = var.swarm_nfs_private_network.network_name
|
||||||
admin_state_up = "true"
|
admin_state_up = "true"
|
||||||
external = "false"
|
external = "false"
|
||||||
description = var.swarm_nfs_private_network.network_description
|
description = var.swarm_nfs_private_network.network_description
|
||||||
dns_domain = var.dns_zone.zone_name
|
dns_domain = var.dns_zone.zone_name
|
||||||
mtu = var.mtu_size
|
mtu = var.mtu_size
|
||||||
port_security_enabled = true
|
port_security_enabled = true
|
||||||
shared = false
|
shared = false
|
||||||
region = var.main_region
|
region = var.main_region
|
||||||
}
|
}
|
||||||
|
|
||||||
# Subnet
|
# Subnet
|
||||||
resource "openstack_networking_subnet_v2" "swarm_nfs_subnet" {
|
resource "openstack_networking_subnet_v2" "swarm_nfs_subnet" {
|
||||||
name = "swarm-nfs-net"
|
name = "swarm-nfs-net"
|
||||||
description = "Subnet used by the Swarm cluster and the NFS service"
|
description = "Subnet used by the Swarm cluster and the NFS service"
|
||||||
network_id = openstack_networking_network_v2.swarm_nfs_net.id
|
network_id = openstack_networking_network_v2.swarm_nfs_net.id
|
||||||
cidr = var.swarm_nfs_private_network.network_cidr
|
cidr = var.swarm_nfs_private_network.network_cidr
|
||||||
dns_nameservers = var.resolvers_ip
|
dns_nameservers = var.resolvers_ip
|
||||||
ip_version = 4
|
ip_version = 4
|
||||||
enable_dhcp = true
|
enable_dhcp = true
|
||||||
no_gateway = true
|
no_gateway = true
|
||||||
allocation_pool {
|
allocation_pool {
|
||||||
start = var.swarm_nfs_private_network.allocation_pool_start
|
start = var.swarm_nfs_private_network.allocation_pool_start
|
||||||
end = var.swarm_nfs_private_network.allocation_pool_end
|
end = var.swarm_nfs_private_network.allocation_pool_end
|
||||||
|
@ -44,46 +44,46 @@ resource "openstack_networking_subnet_v2" "swarm_nfs_subnet" {
|
||||||
# Security groups
|
# Security groups
|
||||||
#
|
#
|
||||||
resource "openstack_networking_secgroup_v2" "swarm_internal_traffic" {
|
resource "openstack_networking_secgroup_v2" "swarm_internal_traffic" {
|
||||||
name = "swarm_internal_docker_traffic"
|
name = "swarm_internal_docker_traffic"
|
||||||
delete_default_rules = "true"
|
delete_default_rules = "true"
|
||||||
description = "Traffic between the Docker Swarm nodes"
|
description = "Traffic between the Docker Swarm nodes"
|
||||||
}
|
}
|
||||||
resource "openstack_networking_secgroup_rule_v2" "everything_udp" {
|
resource "openstack_networking_secgroup_rule_v2" "everything_udp" {
|
||||||
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
|
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
|
||||||
description = "UDP traffic between Swarm nodes"
|
description = "UDP traffic between Swarm nodes"
|
||||||
direction = "ingress"
|
direction = "ingress"
|
||||||
ethertype = "IPv4"
|
ethertype = "IPv4"
|
||||||
protocol = "udp"
|
protocol = "udp"
|
||||||
remote_ip_prefix = var.main_private_subnet.cidr
|
remote_ip_prefix = var.main_private_subnet.cidr
|
||||||
}
|
}
|
||||||
resource "openstack_networking_secgroup_rule_v2" "everything_tcp" {
|
resource "openstack_networking_secgroup_rule_v2" "everything_tcp" {
|
||||||
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
|
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
|
||||||
description = "TCP traffic between Swarm nodes"
|
description = "TCP traffic between Swarm nodes"
|
||||||
direction = "ingress"
|
direction = "ingress"
|
||||||
ethertype = "IPv4"
|
ethertype = "IPv4"
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
remote_ip_prefix = var.main_private_subnet.cidr
|
remote_ip_prefix = var.main_private_subnet.cidr
|
||||||
}
|
}
|
||||||
resource "openstack_networking_secgroup_v2" "swarm_nfs_traffic" {
|
resource "openstack_networking_secgroup_v2" "swarm_nfs_traffic" {
|
||||||
name = "docker_swarm_nfs"
|
name = "docker_swarm_nfs"
|
||||||
delete_default_rules = "true"
|
delete_default_rules = "true"
|
||||||
description = "Traffic between Docker Swarm and the NFS service"
|
description = "Traffic between Docker Swarm and the NFS service"
|
||||||
}
|
}
|
||||||
resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_udp" {
|
resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_udp" {
|
||||||
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
|
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
|
||||||
description = "UDP traffic"
|
description = "UDP traffic"
|
||||||
direction = "ingress"
|
direction = "ingress"
|
||||||
ethertype = "IPv4"
|
ethertype = "IPv4"
|
||||||
protocol = "udp"
|
protocol = "udp"
|
||||||
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
|
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
|
||||||
}
|
}
|
||||||
resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_tcp" {
|
resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_tcp" {
|
||||||
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
|
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
|
||||||
description = "TCP traffic"
|
description = "TCP traffic"
|
||||||
direction = "ingress"
|
direction = "ingress"
|
||||||
ethertype = "IPv4"
|
ethertype = "IPv4"
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
|
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
|
||||||
}
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
|
@ -91,12 +91,12 @@ resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_tcp" {
|
||||||
#
|
#
|
||||||
# Instance
|
# Instance
|
||||||
resource "openstack_compute_instance_v2" "docker_swarm_managers" {
|
resource "openstack_compute_instance_v2" "docker_swarm_managers" {
|
||||||
count = var.docker_swarm_data.mgr_count
|
count = var.docker_swarm_data.mgr_count
|
||||||
name = format("%s-%02d", var.docker_swarm_data.mgr_name, count.index+1)
|
name = format("%s-%02d", var.docker_swarm_data.mgr_name, count.index + 1)
|
||||||
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
|
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
|
||||||
flavor_name = var.docker_swarm_data.mgr_flavor
|
flavor_name = var.docker_swarm_data.mgr_flavor
|
||||||
key_pair = var.ssh_key_file.name
|
key_pair = var.ssh_key_file.name
|
||||||
security_groups = [openstack_networking_secgroup_v2.default.name,openstack_networking_secgroup_v2.swarm_internal_traffic.name]
|
security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_internal_traffic.name]
|
||||||
scheduler_hints {
|
scheduler_hints {
|
||||||
group = openstack_compute_servergroup_v2.swarm_masters.id
|
group = openstack_compute_servergroup_v2.swarm_masters.id
|
||||||
}
|
}
|
||||||
|
@ -118,25 +118,25 @@ resource "openstack_compute_instance_v2" "docker_swarm_managers" {
|
||||||
}
|
}
|
||||||
|
|
||||||
network {
|
network {
|
||||||
name = var.main_private_network.name
|
name = var.main_private_network.name
|
||||||
fixed_ip_v4 = var.swarm_managers_ip.*[count.index]
|
fixed_ip_v4 = var.swarm_managers_ip.* [count.index]
|
||||||
}
|
}
|
||||||
network {
|
network {
|
||||||
name = var.swarm_nfs_private_network.network_name
|
name = var.swarm_nfs_private_network.network_name
|
||||||
}
|
}
|
||||||
|
|
||||||
user_data = "${file("${var.ubuntu2204_data_file}")}"
|
user_data = file("${var.ubuntu2204_data_file}")
|
||||||
depends_on = [ openstack_networking_subnet_v2.swarm_nfs_subnet ]
|
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
|
||||||
}
|
}
|
||||||
|
|
||||||
# Swarm worker nodes
|
# Swarm worker nodes
|
||||||
resource "openstack_compute_instance_v2" "docker_swarm_workers" {
|
resource "openstack_compute_instance_v2" "docker_swarm_workers" {
|
||||||
count = var.docker_swarm_data.worker_count
|
count = var.docker_swarm_data.worker_count
|
||||||
name = format("%s-%02d", var.docker_swarm_data.worker_name, count.index+1)
|
name = format("%s-%02d", var.docker_swarm_data.worker_name, count.index + 1)
|
||||||
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
|
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
|
||||||
flavor_name = var.docker_swarm_data.worker_flavor
|
flavor_name = var.docker_swarm_data.worker_flavor
|
||||||
key_pair = var.ssh_key_file.name
|
key_pair = var.ssh_key_file.name
|
||||||
security_groups = [openstack_networking_secgroup_v2.default.name,openstack_networking_secgroup_v2.swarm_internal_traffic.name]
|
security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_internal_traffic.name]
|
||||||
scheduler_hints {
|
scheduler_hints {
|
||||||
group = openstack_compute_servergroup_v2.swarm_workers.id
|
group = openstack_compute_servergroup_v2.swarm_workers.id
|
||||||
}
|
}
|
||||||
|
@ -164,8 +164,8 @@ resource "openstack_compute_instance_v2" "docker_swarm_workers" {
|
||||||
name = var.swarm_nfs_private_network.network_name
|
name = var.swarm_nfs_private_network.network_name
|
||||||
}
|
}
|
||||||
|
|
||||||
user_data = "${file("${var.ubuntu2204_data_file}")}"
|
user_data = file("${var.ubuntu2204_data_file}")
|
||||||
depends_on = [ openstack_networking_subnet_v2.swarm_nfs_subnet ]
|
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
|
||||||
}
|
}
|
||||||
|
|
||||||
# NFS server
|
# NFS server
|
||||||
|
@ -177,11 +177,11 @@ resource "openstack_blockstorage_volume_v3" "swarm_nfs_data_vol" {
|
||||||
|
|
||||||
# Instance
|
# Instance
|
||||||
resource "openstack_compute_instance_v2" "swarm_nfs_server" {
|
resource "openstack_compute_instance_v2" "swarm_nfs_server" {
|
||||||
name = var.docker_swarm_data.nfs_server_name
|
name = var.docker_swarm_data.nfs_server_name
|
||||||
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
|
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
|
||||||
flavor_name = var.docker_swarm_data.nfs_server_flavor
|
flavor_name = var.docker_swarm_data.nfs_server_flavor
|
||||||
key_pair = var.ssh_key_file.name
|
key_pair = var.ssh_key_file.name
|
||||||
security_groups = [openstack_networking_secgroup_v2.default.name,openstack_networking_secgroup_v2.swarm_nfs_traffic.name]
|
security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_nfs_traffic.name]
|
||||||
block_device {
|
block_device {
|
||||||
uuid = var.ubuntu_2204.uuid
|
uuid = var.ubuntu_2204.uuid
|
||||||
source_type = "image"
|
source_type = "image"
|
||||||
|
@ -195,20 +195,20 @@ resource "openstack_compute_instance_v2" "swarm_nfs_server" {
|
||||||
name = var.main_private_network.name
|
name = var.main_private_network.name
|
||||||
}
|
}
|
||||||
network {
|
network {
|
||||||
name = var.swarm_nfs_private_network.network_name
|
name = var.swarm_nfs_private_network.network_name
|
||||||
fixed_ip_v4 = var.swarm_nfs_private_network.server_ip
|
fixed_ip_v4 = var.swarm_nfs_private_network.server_ip
|
||||||
}
|
}
|
||||||
|
|
||||||
user_data = "${file("${var.ubuntu2204_data_file}")}"
|
user_data = file("${var.ubuntu2204_data_file}")
|
||||||
depends_on = [ openstack_networking_subnet_v2.swarm_nfs_subnet ]
|
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
|
||||||
}
|
}
|
||||||
|
|
||||||
# Attach the additional volume
|
# Attach the additional volume
|
||||||
resource "openstack_compute_volume_attach_v2" "swarm_nfs_data_attach_vol" {
|
resource "openstack_compute_volume_attach_v2" "swarm_nfs_data_attach_vol" {
|
||||||
instance_id = openstack_compute_instance_v2.swarm_nfs_server.id
|
instance_id = openstack_compute_instance_v2.swarm_nfs_server.id
|
||||||
volume_id = openstack_blockstorage_volume_v3.swarm_nfs_data_vol.id
|
volume_id = openstack_blockstorage_volume_v3.swarm_nfs_data_vol.id
|
||||||
device = var.docker_swarm_data.nfs_server_data_disk_device
|
device = var.docker_swarm_data.nfs_server_data_disk_device
|
||||||
depends_on = [openstack_compute_instance_v2.swarm_nfs_server]
|
depends_on = [openstack_compute_instance_v2.swarm_nfs_server]
|
||||||
}
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
|
@ -216,33 +216,33 @@ resource "openstack_compute_volume_attach_v2" "swarm_nfs_data_attach_vol" {
|
||||||
#
|
#
|
||||||
# Swarm load balancer. L4, backed by Octavia
|
# Swarm load balancer. L4, backed by Octavia
|
||||||
resource "openstack_lb_loadbalancer_v2" "swarm_lb" {
|
resource "openstack_lb_loadbalancer_v2" "swarm_lb" {
|
||||||
vip_subnet_id = var.main_private_subnet_id
|
vip_subnet_id = var.main_private_subnet_id
|
||||||
name = var.octavia_swarm_data.swarm_lb_name
|
name = var.octavia_swarm_data.swarm_lb_name
|
||||||
description = var.octavia_swarm_data.swarm_lb_description
|
description = var.octavia_swarm_data.swarm_lb_description
|
||||||
flavor_id = var.octavia_swarm_data.octavia_flavor_id
|
flavor_id = var.octavia_swarm_data.octavia_flavor_id
|
||||||
vip_address = var.octavia_swarm_data.swarm_octavia_main_ip
|
vip_address = var.octavia_swarm_data.swarm_octavia_main_ip
|
||||||
loadbalancer_provider = "amphora"
|
loadbalancer_provider = "amphora"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Allocate a floating IP
|
# Allocate a floating IP
|
||||||
resource "openstack_networking_floatingip_v2" "swarm_lb_ip" {
|
resource "openstack_networking_floatingip_v2" "swarm_lb_ip" {
|
||||||
pool = var.floating_ip_pools.main_public_ip_pool
|
pool = var.floating_ip_pools.main_public_ip_pool
|
||||||
# The DNS association does not work because of a bug in the OpenStack API
|
# The DNS association does not work because of a bug in the OpenStack API
|
||||||
# dns_name = "main-lb"
|
# dns_name = "main-lb"
|
||||||
# dns_domain = var.dns_zone.zone_name
|
# dns_domain = var.dns_zone.zone_name
|
||||||
description = var.octavia_swarm_data.swarm_lb_description
|
description = var.octavia_swarm_data.swarm_lb_description
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_networking_floatingip_associate_v2" "swarm_lb" {
|
resource "openstack_networking_floatingip_associate_v2" "swarm_lb" {
|
||||||
floating_ip = openstack_networking_floatingip_v2.swarm_lb_ip.address
|
floating_ip = openstack_networking_floatingip_v2.swarm_lb_ip.address
|
||||||
port_id = openstack_lb_loadbalancer_v2.swarm_lb.vip_port_id
|
port_id = openstack_lb_loadbalancer_v2.swarm_lb.vip_port_id
|
||||||
}
|
}
|
||||||
|
|
||||||
locals {
|
locals {
|
||||||
swarm_recordset_name = "${var.octavia_swarm_data.swarm_lb_hostname}.${var.dns_zone.zone_name}"
|
swarm_recordset_name = "${var.octavia_swarm_data.swarm_lb_hostname}.${var.dns_zone.zone_name}"
|
||||||
portainer_recordset_name = "portainer.${var.dns_zone.zone_name}"
|
portainer_recordset_name = "portainer.${var.dns_zone.zone_name}"
|
||||||
ccp_recordset_name = "ccp.${var.dns_zone.zone_name}"
|
ccp_recordset_name = "ccp.${var.dns_zone.zone_name}"
|
||||||
cdn_recordset_name = "cdn.${var.dns_zone.zone_name}"
|
cdn_recordset_name = "cdn.${var.dns_zone.zone_name}"
|
||||||
conductor_recordset_name = "conductor.${var.dns_zone.zone_name}"
|
conductor_recordset_name = "conductor.${var.dns_zone.zone_name}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -293,215 +293,215 @@ resource "openstack_dns_recordset_v2" "conductor_dns_recordset" {
|
||||||
|
|
||||||
# Main HAPROXY stats listener
|
# Main HAPROXY stats listener
|
||||||
resource "openstack_lb_listener_v2" "swarm_haproxy_stats_listener" {
|
resource "openstack_lb_listener_v2" "swarm_haproxy_stats_listener" {
|
||||||
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
|
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
|
||||||
protocol = "TCP"
|
protocol = "TCP"
|
||||||
protocol_port = 8880
|
protocol_port = 8880
|
||||||
description = "Listener for the stats of the Docker Swarm HAPROXY instances"
|
description = "Listener for the stats of the Docker Swarm HAPROXY instances"
|
||||||
name = "swarm_haproxy_stats_listener"
|
name = "swarm_haproxy_stats_listener"
|
||||||
allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr,var.ssh_sources.d4s_vpn_2_cidr,var.ssh_sources.s2i2s_vpn_1_cidr,var.ssh_sources.s2i2s_vpn_2_cidr]
|
allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_lb_pool_v2" "swarm_haproxy_stats_pool" {
|
resource "openstack_lb_pool_v2" "swarm_haproxy_stats_pool" {
|
||||||
listener_id = openstack_lb_listener_v2.swarm_haproxy_stats_listener.id
|
listener_id = openstack_lb_listener_v2.swarm_haproxy_stats_listener.id
|
||||||
protocol = "TCP"
|
protocol = "TCP"
|
||||||
lb_method = "LEAST_CONNECTIONS"
|
lb_method = "LEAST_CONNECTIONS"
|
||||||
name = "swarm-haproxy-lb-stats"
|
name = "swarm-haproxy-lb-stats"
|
||||||
description = "Pool for the stats of the main HAPROXY instances"
|
description = "Pool for the stats of the main HAPROXY instances"
|
||||||
persistence {
|
persistence {
|
||||||
type = "SOURCE_IP"
|
type = "SOURCE_IP"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||

resource "openstack_lb_members_v2" "swarm_haproxy_stats_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 8880
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 8880
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 8880
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_stats_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
  name           = "swarm_haproxy_stats_monitor"
  type           = "TCP"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
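# All the monitors below follow the same cadence as this one: a probe every
# 20 seconds (delay), a 5-second reply window (timeout), and a member is
# marked down after 3 consecutive failed probes (max_retries).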

# HAPROXY HTTP
resource "openstack_lb_listener_v2" "swarm_haproxy_http_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 80
  description     = "HTTP listener of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_http_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_http_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_http_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-http"
  description = "Pool for the HTTP listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_http_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 80
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 80
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 80
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_http_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
  name           = "swarm_haproxy_http_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

# HAPROXY HTTPS
resource "openstack_lb_listener_v2" "swarm_haproxy_https_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 443
  description     = "HTTPS listener of the main HAPROXY instances"
  name            = "swarm_haproxy_https_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_https_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_https_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-https"
  description = "Pool for the HTTPS listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_https_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 443
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 443
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 443
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_https_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
  name           = "swarm_haproxy_https_monitor"
  type           = "HTTPS"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

# HAPROXY HTTP on port 8080
resource "openstack_lb_listener_v2" "swarm_haproxy_8080_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 8080
  description     = "HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_8080_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "swarm_haproxy_8080_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_8080_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-http-8080"
  description = "Pool for the HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "swarm_haproxy_8080_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 8080
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 8080
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 8080
  }
}

resource "openstack_lb_monitor_v2" "swarm_haproxy_8080_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
  name           = "swarm_haproxy_8080_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}

output "swarm_loadbalancer_ip" {

@@ -1,57 +1,57 @@
variable "docker_swarm_data" {
|
variable "docker_swarm_data" {
|
||||||
type = map(string)
|
type = map(string)
|
||||||
default = {
|
default = {
|
||||||
mgr_name = "swarm-mgr"
|
mgr_name = "swarm-mgr"
|
||||||
mgr1_ip = "10.1.40.31"
|
mgr1_ip = "10.1.40.31"
|
||||||
mgr1_cidr = "10.1.40.31/32"
|
mgr1_cidr = "10.1.40.31/32"
|
||||||
mgr2_ip = "10.1.40.32"
|
mgr2_ip = "10.1.40.32"
|
||||||
mgr2_cidr = "10.1.40.32/32"
|
mgr2_cidr = "10.1.40.32/32"
|
||||||
mgr3_ip = "10.1.40.33"
|
mgr3_ip = "10.1.40.33"
|
||||||
mgr3_cidr = "10.1.40.33/32"
|
mgr3_cidr = "10.1.40.33/32"
|
||||||
mgr_count = 3
|
mgr_count = 3
|
||||||
mgr_flavor = "m1.large"
|
mgr_flavor = "m1.large"
|
||||||
mgr_data_disk_size = 100
|
mgr_data_disk_size = 100
|
||||||
worker_name = "swarm-worker"
|
worker_name = "swarm-worker"
|
||||||
worker_count = 5
|
worker_count = 5
|
||||||
worker_flavor = "m1.xlarge"
|
worker_flavor = "m1.xlarge"
|
||||||
worker_data_disk_size = 100
|
worker_data_disk_size = 100
|
||||||
nfs_server_name = "swarm-nfs-server"
|
nfs_server_name = "swarm-nfs-server"
|
||||||
nfs_server_flavor = "m1.medium"
|
nfs_server_flavor = "m1.medium"
|
||||||
nfs_server_data_disk_name = "Swarm NFS server data Disk"
|
nfs_server_data_disk_name = "Swarm NFS server data Disk"
|
||||||
nfs_server_data_disk_size = 100
|
nfs_server_data_disk_size = 100
|
||||||
nfs_server_data_disk_device = "/dev/vdb"
|
nfs_server_data_disk_device = "/dev/vdb"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
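# A minimal sketch of one way this map can drive the manager instances,
# assuming hypothetical var.ubuntu_2204, var.main_private_network and
# module.ssh_settings inputs like those used for the other servers; the fixed
# per-manager IPs (mgr1_ip..mgr3_ip) would still need to be wired in, and
# mgr_count is a string inside a map(string), hence the tonumber():
resource "openstack_compute_instance_v2" "docker_swarm_mgr_sketch" {
  count           = tonumber(var.docker_swarm_data.mgr_count)
  name            = format("%s-%02d", var.docker_swarm_data.mgr_name, count.index + 1)
  flavor_name     = var.docker_swarm_data.mgr_flavor
  key_pair        = module.ssh_settings.ssh_key_name
  security_groups = [var.default_security_group_name]

  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name = var.main_private_network.name
  }
}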

variable "swarm_managers_ip" {
  type    = list(string)
  default = ["10.1.40.31", "10.1.40.32", "10.1.40.33"]
}
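# A minimal sketch: the /32 CIDRs kept by hand in docker_swarm_data above can
# also be derived from this list; formatlist is standard Terraform, the local
# name is illustrative:
locals {
  swarm_manager_cidrs_sketch = formatlist("%s/32", var.swarm_managers_ip)
}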

variable "octavia_swarm_data" {
  type = map(string)
  default = {
    swarm_lb_name           = "d4s-production-cloud-swarm-l4"
    swarm_lb_description    = "L4 balancer that serves the D4Science production Docker Swarm cluster"
    octavia_flavor          = "octavia_amphora-mvcpu-ha"
    octavia_flavor_id       = "394988b5-6603-4a1e-a939-8e177c6681c7"
    swarm_lb_hostname       = "swarm-lb"
    swarm_octavia_main_ip   = "10.1.40.30"
    swarm_octavia_main_cidr = "10.1.40.30/32"
  }
}
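# A minimal sketch of how this map can feed the load balancer itself, along
# the lines of the swarm_lb referenced by the listeners above;
# var.main_private_subnet.id is an assumption, not a value from this commit:
resource "openstack_lb_loadbalancer_v2" "swarm_lb_sketch" {
  name          = var.octavia_swarm_data.swarm_lb_name
  description   = var.octavia_swarm_data.swarm_lb_description
  flavor_id     = var.octavia_swarm_data.octavia_flavor_id
  vip_subnet_id = var.main_private_subnet.id # assumed subnet variable
  vip_address   = var.octavia_swarm_data.swarm_octavia_main_ip
}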

variable "swarm_nfs_private_network" {
  type = map(string)
  default = {
    network_name          = "swarm-nfs-net"
    network_description   = "Network used by the swarm nodes and the NFS service"
    network_cidr          = "192.168.4.0/23"
    allocation_pool_start = "192.168.4.100"
    allocation_pool_end   = "192.168.5.254"
    server_ip             = "192.168.4.10"
    server_cidr           = "192.168.4.5/23"
  }
}
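# A minimal sketch of the network and subnet these values describe; the
# resource names here are illustrative, not taken from this commit:
resource "openstack_networking_network_v2" "swarm_nfs_net_sketch" {
  name           = var.swarm_nfs_private_network.network_name
  description    = var.swarm_nfs_private_network.network_description
  admin_state_up = true
}

resource "openstack_networking_subnet_v2" "swarm_nfs_subnet_sketch" {
  name        = "${var.swarm_nfs_private_network.network_name}-subnet"
  network_id  = openstack_networking_network_v2.swarm_nfs_net_sketch.id
  cidr        = var.swarm_nfs_private_network.network_cidr
  enable_dhcp = true

  allocation_pool {
    start = var.swarm_nfs_private_network.allocation_pool_start
    end   = var.swarm_nfs_private_network.allocation_pool_end
  }
}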