Resize the postgresql instances.
This commit is contained in:
parent 9ec3095aab
commit 703ef4da39
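For context: the state changes below show the same instance (id 9ede65c7-70ca-4698-8551-754aa4f6fa1e) moving from flavor m1.medium (flavor_id 4) to m1.large (flavor_id 9), i.e. an in-place resize driven by a variable change. A minimal sketch of how such a flavor variable is typically consumed follows; the resource name and the network block are assumptions for illustration, not code taken from this repository:

# Hypothetical sketch: the server resource is assumed to read its flavor
# from the shared_postgresql_server_data map, so editing "flavor" in the
# tfvars resizes the existing VM on the next apply instead of recreating it.
resource "openstack_compute_instance_v2" "shared_postgresql_server" {
  name        = var.shared_postgresql_server_data.name
  flavor_name = var.shared_postgresql_server_data.flavor # m1.medium -> m1.large

  network {
    name = var.main_private_network.name # assumed network variable
  }
}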
@@ -1,7 +1,7 @@
 {
   "version": 4,
   "terraform_version": "1.7.5",
-  "serial": 265,
+  "serial": 267,
   "lineage": "6a53b692-c1a8-ed53-bc6c-b7fb5e017eb8",
   "outputs": {
     "almalinux_9": {
@@ -344,7 +344,7 @@
       "value": {
         "allocation_pool_end": "192.168.3.254",
         "allocation_pool_start": "192.168.0.100",
-        "flavor": "m1.medium",
+        "flavor": "m1.large",
         "name": "shared-postgresql-server",
         "network_cidr": "192.168.0.0/22",
         "network_description": "Network used to communicate with the shared postgresql service",
@@ -1317,8 +1317,8 @@
             ],
             "config_drive": null,
             "created": "2023-11-05 14:54:15 +0000 UTC",
-            "flavor_id": "4",
-            "flavor_name": "m1.medium",
+            "flavor_id": "9",
+            "flavor_name": "m1.large",
             "floating_ip": null,
             "force_delete": false,
             "id": "9ede65c7-70ca-4698-8551-754aa4f6fa1e",
@@ -1361,8 +1361,8 @@
             "stop_before_destroy": false,
             "tags": [],
             "timeouts": null,
-            "updated": "2023-11-05 14:54:48 +0000 UTC",
-            "user_data": "bb83b25fd1219aa1b850ece9be8d7b0f31714608",
+            "updated": "2024-06-06 15:43:37 +0000 UTC",
+            "user_data": "",
             "vendor_options": [],
             "volume": []
           },
@@ -4,7 +4,7 @@ terraform {
   required_providers {
     openstack = {
       source = "terraform-provider-openstack/openstack"
-      version = "~> 1.53.0"
+      version = ">= 1.54.0"
     }
   }
 }
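Side note on the constraint bumped here and in several modules below: "~> 1.53.0" is a pessimistic constraint that only admits 1.53.x patch releases, while ">= 1.54.0" accepts 1.54.0 and anything newer. After the change the block reads:

terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = ">= 1.54.0" # previously "~> 1.53.0", i.e. 1.53.x only
    }
  }
}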
@@ -2,7 +2,7 @@ default_security_group_name = "default_for_all"
 
 shared_postgresql_server_data = {
   name = "shared-postgresql-server"
-  flavor = "m1.large"
+  flavor = "m1.xxl"
   vol_data_name = "shared-postgresql-data"
   vol_data_size = "300"
   vol_data_device = "/dev/vdb"
@@ -4,7 +4,7 @@ terraform {
   required_providers {
     openstack = {
      source = "terraform-provider-openstack/openstack"
-      version = "~> 1.53.0"
+      version = ">= 1.54.0"
     }
   }
 }
File diff suppressed because it is too large
@@ -0,0 +1,28 @@
+# Define required providers
+terraform {
+  required_version = ">= 0.16.0"
+  required_providers {
+    openstack = {
+      source = "terraform-provider-openstack/openstack"
+      version = ">= 1.53.0"
+    }
+  }
+}
+
+data "terraform_remote_state" "privnet_dns_router" {
+  backend = "local"
+
+  config = {
+    path = "../project-setup/terraform.tfstate"
+  }
+}
+
+# SSH settings
+module "ssh_settings" {
+  source = "../../modules/ssh-key-ref"
+}
+
+# Global variables (constants, really)
+module "common_variables" {
+  source = "../../modules/garr_common_variables"
+}
@@ -0,0 +1,319 @@
+#
+# Octavia
+#
+# Swarm load balancer. L4, backed by Octavia
+resource "openstack_lb_loadbalancer_v2" "swarm_lb" {
+  vip_subnet_id = var.main_private_subnet_id
+  name = var.octavia_swarm_data.swarm_lb_name
+  description = var.octavia_swarm_data.swarm_lb_description
+  flavor_id = var.octavia_swarm_data.octavia_flavor_id
+  vip_address = var.octavia_swarm_data.swarm_octavia_main_ip
+  # availability_zone = var.availability_zones_names.availability_zone_no_gpu
+  loadbalancer_provider = "amphora"
+}
+
+# Allocate a floating IP
+resource "openstack_networking_floatingip_v2" "swarm_lb_ip" {
+  pool = var.floating_ip_pools.main_public_ip_pool
+  # The DNS association does not work because of a bug in the OpenStack API
+  # dns_name = "main-lb"
+  # dns_domain = var.dns_zone.zone_name
+  description = var.octavia_swarm_data.swarm_lb_description
+}
+
+resource "openstack_networking_floatingip_associate_v2" "swarm_lb" {
+  floating_ip = openstack_networking_floatingip_v2.swarm_lb_ip.address
+  port_id = openstack_lb_loadbalancer_v2.swarm_lb.vip_port_id
+}
+
+locals {
+  swarm_recordset_name = "${var.octavia_swarm_data.swarm_lb_hostname}.${var.dns_zone.zone_name}"
+  portainer_recordset_name = "portainer.${var.dns_zone.zone_name}"
+  ccp_recordset_name = "ccp.${var.dns_zone.zone_name}"
+  cdn_recordset_name = "cdn.${var.dns_zone.zone_name}"
+  conductor_recordset_name = "conductor.${var.dns_zone.zone_name}"
+}
+
+resource "openstack_dns_recordset_v2" "swarm_lb_dns_recordset" {
+  zone_id = var.dns_zone_id
+  name = local.swarm_recordset_name
+  description = "Public IP address of the load balancer in front of Docker Swarm"
+  ttl = 8600
+  type = "A"
+  records = [openstack_networking_floatingip_v2.swarm_lb_ip.address]
+}
+
+resource "openstack_dns_recordset_v2" "swarm_portainer_dns_recordset" {
+  zone_id = var.dns_zone_id
+  name = local.portainer_recordset_name
+  description = "Portainer hostname"
+  ttl = 8600
+  type = "CNAME"
+  records = [local.swarm_recordset_name]
+}
+
+resource "openstack_dns_recordset_v2" "ccp_dns_recordset" {
+  zone_id = var.dns_zone_id
+  name = local.ccp_recordset_name
+  description = "CCP hostname"
+  ttl = 8600
+  type = "CNAME"
+  records = [local.swarm_recordset_name]
+}
+
+resource "openstack_dns_recordset_v2" "cdn_dns_recordset" {
+  zone_id = var.dns_zone_id
+  name = local.cdn_recordset_name
+  description = "CDN hostname"
+  ttl = 8600
+  type = "CNAME"
+  records = [local.swarm_recordset_name]
+}
+
+resource "openstack_dns_recordset_v2" "conductor_dns_recordset" {
+  zone_id = var.dns_zone_id
+  name = local.conductor_recordset_name
+  description = "Conductor hostname"
+  ttl = 8600
+  type = "CNAME"
+  records = [local.swarm_recordset_name]
+}
+
+# Main HAPROXY stats listener
+resource "openstack_lb_listener_v2" "swarm_haproxy_stats_listener" {
+  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
+  protocol = "TCP"
+  protocol_port = 8880
+  description = "Listener for the stats of the Docker Swarm HAPROXY instances"
+  name = "swarm_haproxy_stats_listener"
+  allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
+
+}
+
+resource "openstack_lb_pool_v2" "swarm_haproxy_stats_pool" {
+  listener_id = openstack_lb_listener_v2.swarm_haproxy_stats_listener.id
+  protocol = "TCP"
+  lb_method = "LEAST_CONNECTIONS"
+  name = "swarm-haproxy-lb-stats"
+  description = "Pool for the stats of the main HAPROXY instances"
+  persistence {
+    type = "SOURCE_IP"
+  }
+}
+
+resource "openstack_lb_members_v2" "swarm_haproxy_stats_pool_members" {
+  pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
+  member {
+    name = "swarm mgr haproxy 1"
+    address = var.docker_swarm_data.mgr1_ip
+    protocol_port = 8880
+  }
+  member {
+    name = "swarm mgr haproxy 2"
+    address = var.docker_swarm_data.mgr2_ip
+    protocol_port = 8880
+  }
+  member {
+    name = "swarm mgr haproxy 3"
+    address = var.docker_swarm_data.mgr3_ip
+    protocol_port = 8880
+  }
+}
+
+resource "openstack_lb_monitor_v2" "swarm_haproxy_stats_monitor" {
+  pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
+  name = "swarm_haproxy_stats_monitor"
+  type = "TCP"
+  delay = 20
+  timeout = 5
+  max_retries = 3
+  admin_state_up = true
+}
+
+# HAPROXY HTTP
+resource "openstack_lb_listener_v2" "swarm_haproxy_http_listener" {
+  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
+  protocol = "TCP"
+  protocol_port = 80
+  description = "HTTP listener of the Docker Swarm HAPROXY instances"
+  name = "swarm_haproxy_http_listener"
+  admin_state_up = true
+}
+
+resource "openstack_lb_pool_v2" "swarm_haproxy_http_pool" {
+  listener_id = openstack_lb_listener_v2.swarm_haproxy_http_listener.id
+  protocol = "PROXYV2"
+  lb_method = "LEAST_CONNECTIONS"
+  name = "swarm-haproxy-lb-http"
+  description = "Pool for the HTTP listener of the Docker Swarm HAPROXY instances"
+  persistence {
+    type = "SOURCE_IP"
+  }
+  admin_state_up = true
+}
+
+resource "openstack_lb_members_v2" "swarm_haproxy_http_pool_members" {
+  pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
+  member {
+    name = "swarm mgr haproxy 1"
+    address = var.docker_swarm_data.mgr1_ip
+    protocol_port = 80
+  }
+  member {
+    name = "swarm mgr haproxy 2"
+    address = var.docker_swarm_data.mgr2_ip
+    protocol_port = 80
+  }
+  member {
+    name = "swarm mgr haproxy 3"
+    address = var.docker_swarm_data.mgr3_ip
+    protocol_port = 80
+  }
+}
+
+resource "openstack_lb_monitor_v2" "swarm_haproxy_http_monitor" {
+  pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
+  name = "swarm_haproxy_http_monitor"
+  type = "HTTP"
+  http_method = "GET"
+  url_path = "/_haproxy_health_check"
+  expected_codes = "200"
+  delay = 20
+  timeout = 5
+  max_retries = 3
+  admin_state_up = true
+}
+
+# HAPROXY HTTPS
+resource "openstack_lb_listener_v2" "swarm_haproxy_https_listener" {
+  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
+  protocol = "TCP"
+  protocol_port = 443
+  description = "HTTPS listener of the main HAPROXY instances"
+  name = "swarm_haproxy_https_listener"
+  admin_state_up = true
+}
+
+resource "openstack_lb_pool_v2" "swarm_haproxy_https_pool" {
+  listener_id = openstack_lb_listener_v2.swarm_haproxy_https_listener.id
+  protocol = "PROXYV2"
+  lb_method = "LEAST_CONNECTIONS"
+  name = "swarm-haproxy-lb-https"
+  description = "Pool for the HTTPS listener of the Docker Swarm HAPROXY instances"
+  persistence {
+    type = "SOURCE_IP"
+  }
+  admin_state_up = true
+}
+
+resource "openstack_lb_members_v2" "swarm_haproxy_https_pool_members" {
+  pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
+  member {
+    name = "swarm mgr haproxy 1"
+    address = var.docker_swarm_data.mgr1_ip
+    protocol_port = 443
+  }
+  member {
+    name = "swarm mgr haproxy 2"
+    address = var.docker_swarm_data.mgr2_ip
+    protocol_port = 443
+  }
+  member {
+    name = "swarm mgr haproxy 3"
+    address = var.docker_swarm_data.mgr3_ip
+    protocol_port = 443
+  }
+}
+
+resource "openstack_lb_monitor_v2" "swarm_haproxy_https_monitor" {
+  pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
+  name = "swarm_haproxy_https_monitor"
+  type = "HTTPS"
+  http_method = "GET"
+  url_path = "/_haproxy_health_check"
+  expected_codes = "200"
+  delay = 20
+  timeout = 5
+  max_retries = 3
+  admin_state_up = true
+}
+
+# HAPROXY HTTP on port 8080
+resource "openstack_lb_listener_v2" "swarm_haproxy_8080_listener" {
+  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
+  protocol = "TCP"
+  protocol_port = 8080
+  description = "HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
+  name = "swarm_haproxy_8080_listener"
+  admin_state_up = true
+}
+
+resource "openstack_lb_pool_v2" "swarm_haproxy_8080_pool" {
+  listener_id = openstack_lb_listener_v2.swarm_haproxy_8080_listener.id
+  protocol = "PROXYV2"
+  lb_method = "LEAST_CONNECTIONS"
+  name = "swarm-haproxy-lb-http-8080"
+  description = "Pool for the HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
+  persistence {
+    type = "SOURCE_IP"
+  }
+  admin_state_up = true
+}
+
+resource "openstack_lb_members_v2" "swarm_haproxy_8080_pool_members" {
+  pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
+  member {
+    name = "swarm mgr haproxy 1"
+    address = var.docker_swarm_data.mgr1_ip
+    protocol_port = 8080
+  }
+  member {
+    name = "swarm mgr haproxy 2"
+    address = var.docker_swarm_data.mgr2_ip
+    protocol_port = 8080
+  }
+  member {
+    name = "swarm mgr haproxy 3"
+    address = var.docker_swarm_data.mgr3_ip
+    protocol_port = 8080
+  }
+}
+
+resource "openstack_lb_monitor_v2" "swarm_haproxy_8080_monitor" {
+  pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
+  name = "swarm_haproxy_8080_monitor"
+  type = "HTTP"
+  http_method = "GET"
+  url_path = "/_haproxy_health_check"
+  expected_codes = "200"
+  delay = 20
+  timeout = 5
+  max_retries = 3
+  admin_state_up = true
+}
+
+output "swarm_loadbalancer_ip" {
+  description = "Docker Swarm Load balancer IP address"
+  value = openstack_lb_loadbalancer_v2.swarm_lb.vip_address
+}
+
+output "swarm_manager_nodes" {
+  description = "Docker Swarm Manager nodes data"
+  value = openstack_compute_instance_v2.docker_swarm_managers
+  sensitive = true
+}
+
+output "swarm_worker_nodes" {
+  description = "Docker Swarm Worker nodes data"
+  value = openstack_compute_instance_v2.docker_swarm_workers
+  sensitive = true
+}
+
+output "swarm_managers_nfs_ip_ports" {
+  description = "IP addresses in the share NFS network"
+  value = openstack_networking_port_v2.swarm_mgr_nfs_port
+}
+output "swarm_workers_nfs_ip_ports" {
+  description = "IP addresses in the share NFS network"
+  value = openstack_networking_port_v2.swarm_workers_nfs_port
+}
@@ -0,0 +1,8 @@
+output "scilake_instance" {
+  value = openstack_compute_instance_v2.scilake_intelcomp_server
+  sensitive = true
+}
+
+output "scilake_floating_ip" {
+  value = openstack_networking_floatingip_v2.scilake_catalogue_server_ip
+}
@@ -0,0 +1,4 @@
+provider "openstack" {
+  cloud = "garr-ct1"
+}
+
@@ -0,0 +1,151 @@
+#
+# Server groups for both the controllers and the workers
+#
+resource "openstack_compute_servergroup_v2" "k8s_controllers" {
+  name = "k8s_controllers"
+  policies = ["soft-anti-affinity"]
+}
+resource "openstack_compute_servergroup_v2" "k8s_workers" {
+  name = "k8s_workers"
+  policies = ["soft-anti-affinity"]
+}
+
+#
+# Allow traffic between the cluster nodes
+#
+resource "openstack_networking_secgroup_v2" "k8s_internal_traffic" {
+  name = "k8s_internal_traffic"
+  delete_default_rules = "true"
+  description = "Traffic between the Docker k8s nodes"
+}
+resource "openstack_networking_secgroup_rule_v2" "everything_udp" {
+  security_group_id = openstack_networking_secgroup_v2.k8s_internal_traffic.id
+  description = "UDP traffic between k8s nodes"
+  direction = "ingress"
+  ethertype = "IPv4"
+  protocol = "udp"
+  remote_ip_prefix = data.terraform_remote_state.privnet_dns_router.outputs.main_private_subnet.cidr
+}
+resource "openstack_networking_secgroup_rule_v2" "everything_tcp" {
+  security_group_id = openstack_networking_secgroup_v2.k8s_internal_traffic.id
+  description = "TCP traffic between k8s nodes"
+  direction = "ingress"
+  ethertype = "IPv4"
+  protocol = "tcp"
+  remote_ip_prefix = data.terraform_remote_state.privnet_dns_router.outputs.main_private_subnet.cidr
+}
+resource "openstack_networking_secgroup_rule_v2" "igmp_ingress_between_k8s_nodes" {
+  security_group_id = openstack_networking_secgroup_v2.k8s_internal_traffic.id
+  description = "Ingress IGMP traffic between k8s nodes"
+  direction = "ingress"
+  ethertype = "IPv4"
+  protocol = "igmp"
+  remote_ip_prefix = "0.0.0.0/0"
+}
+
+resource "openstack_networking_secgroup_rule_v2" "igmp_egress_between_k8s_nodes" {
+  security_group_id = openstack_networking_secgroup_v2.k8s_internal_traffic.id
+  description = "Egress IGMP traffic between k8s nodes"
+  direction = "egress"
+  ethertype = "IPv4"
+  protocol = "igmp"
+  remote_ip_prefix = "0.0.0.0/0"
+}
+
+# Controllers VMs
+#
+resource "openstack_compute_instance_v2" "docker_k8s_controllers" {
+  count = var.docker_k8s_data.mgr_count
+  name = format("%s-%02d", var.docker_k8s_data.mgr_name, count.index + 1)
+  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
+  flavor_name = var.docker_k8s_data.mgr_flavor
+  key_pair = module.ssh_settings.ssh_key_name
+  security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.k8s_internal_traffic.name, "default", "nfs_share_no_ingress"]
+  scheduler_hints {
+    group = openstack_compute_servergroup_v2.k8s_masters.id
+  }
+  block_device {
+    uuid = var.ubuntu_2204.uuid
+    source_type = "image"
+    volume_size = 10
+    boot_index = 0
+    destination_type = "volume"
+    delete_on_termination = false
+  }
+
+  block_device {
+    source_type = "blank"
+    volume_size = var.docker_k8s_data.mgr_data_disk_size
+    boot_index = -1
+    destination_type = "volume"
+    delete_on_termination = false
+  }
+
+  network {
+    name = var.main_private_network.name
+    fixed_ip_v4 = var.k8s_controllers_ip.*[count.index]
+  }
+  network {
+    name = var.k8s_nfs_private_network.network_name
+  }
+
+  user_data = file("${var.ubuntu2204_data_file}")
+  depends_on = [openstack_networking_subnet_v2.k8s_nfs_subnet]
+  # Do not replace the instance when the ssh key changes
+  lifecycle {
+    ignore_changes = [
+      # Ignore changes to tags, e.g. because a management agent
+      # updates these based on some ruleset managed elsewhere.
+      key_pair, user_data, network
+    ]
+  }
+}
+
+# k8s worker nodes
+resource "openstack_compute_instance_v2" "docker_k8s_workers" {
+  count = var.docker_k8s_data.worker_count
+  name = format("%s-%02d", var.docker_k8s_data.worker_name, count.index + 1)
+  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
+  flavor_name = var.docker_k8s_data.worker_flavor
+  key_pair = module.ssh_settings.ssh_key_name
+  security_groups = ["default", var.default_security_group_name, openstack_networking_secgroup_v2.k8s_internal_traffic.name, "nfs_share_no_ingress"]
+  scheduler_hints {
+    group = openstack_compute_servergroup_v2.k8s_workers.id
+  }
+  block_device {
+    uuid = var.ubuntu_2204.uuid
+    source_type = "image"
+    volume_size = 10
+    boot_index = 0
+    destination_type = "volume"
+    delete_on_termination = false
+  }
+
+  block_device {
+    source_type = "blank"
+    volume_size = var.docker_k8s_data.worker_data_disk_size
+    boot_index = -1
+    destination_type = "volume"
+    delete_on_termination = false
+  }
+
+  network {
+    name = var.main_private_network.name
+  }
+  network {
+    name = var.k8s_nfs_private_network.network_name
+  }
+  network {
+    name = var.networks_list.shared_postgresql
+  }
+
+  user_data = file("${module.common_variables.ubuntu2204_data_file}")
+  # Do not replace the instance when the ssh key changes
+  lifecycle {
+    ignore_changes = [
+      # Ignore changes to tags, e.g. because a management agent
+      # updates these based on some ruleset managed elsewhere.
+      key_pair, user_data, network
+    ]
+  }
+}
@@ -116,7 +116,7 @@ variable "shared_postgresql_server_data" {
   type = map(string)
   default = {
     name = "shared-postgresql-server"
-    flavor = "m1.medium"
+    flavor = "m1.large"
     vol_data_name = "shared-postgresql-data"
     vol_data_size = "100"
     vol_data_device = "/dev/vdb"
@@ -4,7 +4,7 @@ terraform {
   required_providers {
     openstack = {
       source = "terraform-provider-openstack/openstack"
-      version = "~> 1.53.0"
+      version = ">= 1.54.0"
     }
   }
 }
@@ -4,7 +4,7 @@ required_version = ">= 0.14.0"
   required_providers {
     openstack = {
       source = "terraform-provider-openstack/openstack"
-      version = "~> 1.53.0"
+      version = ">= 1.53.0"
     }
   }
 }
@@ -4,7 +4,7 @@ required_version = ">= 0.14.0"
   required_providers {
     openstack = {
      source = "terraform-provider-openstack/openstack"
-      version = "~> 1.53.0"
+      version = ">= 1.54.0"
     }
   }
 }