Compare commits

...

10 Commits

103 changed files with 2091 additions and 5318 deletions

View File

@ -1,373 +0,0 @@
#
# This is the security group that should be added to every instance.
# It allows all egress, ICMP from anywhere, SSH only via the jump proxy,
# and Prometheus node-exporter scraping.
resource "openstack_networking_secgroup_v2" "default" {
  name = var.default_security_group_name
  # Boolean, not string: drop OpenStack's implicit default rules so only
  # the rules declared below apply.
  delete_default_rules = true
  description          = "Default security group with rules for ssh access via jump proxy, prometheus scraping"
}

# Allow all outbound IPv4 traffic (no protocol or port restriction).
resource "openstack_networking_secgroup_rule_v2" "egress-ipv4" {
  security_group_id = openstack_networking_secgroup_v2.default.id
  direction         = "egress"
  ethertype         = "IPv4"
}

# Allow ICMP (ping, path MTU discovery, …) from any IPv4 source.
resource "openstack_networking_secgroup_rule_v2" "ingress-icmp" {
  security_group_id = openstack_networking_secgroup_v2.default.id
  description       = "Allow ICMP from remote"
  direction         = "ingress"
  ethertype         = "IPv4"
  remote_ip_prefix  = "0.0.0.0/0"
  protocol          = "icmp"
}

# SSH (22/tcp) only from the jump proxy network.
resource "openstack_networking_secgroup_rule_v2" "ssh-jump-proxy" {
  security_group_id = openstack_networking_secgroup_v2.default.id
  description       = "SSH traffic from the jump proxy"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = var.basic_services_ip.ssh_jump_cidr
}

# Node exporter (9100/tcp) only from the Prometheus server network.
resource "openstack_networking_secgroup_rule_v2" "prometheus-node" {
  security_group_id = openstack_networking_secgroup_v2.default.id
  description       = "Prometheus access to the node exporter"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 9100
  port_range_max    = 9100
  remote_ip_prefix  = var.basic_services_ip.prometheus_cidr
}
#
# SSH access to the jump proxy. Used by the jump proxy VM only.
resource "openstack_networking_secgroup_v2" "access_to_the_jump_proxy" {
  name                 = "ssh_access_to_the_jump_node"
  delete_default_rules = "true"
  description          = "Security group that allows SSH access to the jump node from a limited set of sources"
}

# One ingress rule per allowed SSH source network; all are 22/tcp only.
resource "openstack_networking_secgroup_rule_v2" "ssh-s2i2s-vpn-1" {
  security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
  description       = "SSH traffic from S2I2S VPN 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "ssh-s2i2s-vpn-2" {
  security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
  description       = "SSH traffic from S2I2S VPN 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_2_cidr
}

resource "openstack_networking_secgroup_rule_v2" "ssh-d4s-vpn-1" {
  security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
  description       = "SSH traffic from D4Science VPN 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = var.ssh_sources.d4s_vpn_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "ssh-d4s-vpn-2" {
  security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
  description       = "SSH traffic from D4Science VPN 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = var.ssh_sources.d4s_vpn_2_cidr
}

resource "openstack_networking_secgroup_rule_v2" "ssh-shell-d4s" {
  security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
  description       = "SSH traffic from shell.d4science.org"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = var.ssh_sources.shell_d4s_cidr
}

resource "openstack_networking_secgroup_rule_v2" "ssh-infrascience-net" {
  security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
  description       = "SSH traffic from the InfraScience network"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = var.ssh_sources.infrascience_net_cidr
}
# Debug via tunnel from the jump proxy node: ports 8100 (Tomcat debug),
# 80 and 443 are reachable from the jump proxy network only.
resource "openstack_networking_secgroup_v2" "debugging" {
  name                 = "debugging_from_jump_node"
  delete_default_rules = "true"
  description          = "Security group that allows web app debugging via tunnel from the ssh jump node"
}

resource "openstack_networking_secgroup_rule_v2" "shell_8100" {
  security_group_id = openstack_networking_secgroup_v2.debugging.id
  description       = "Tomcat debug on port 8100 from the shell jump proxy"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8100
  port_range_max    = 8100
  remote_ip_prefix  = var.basic_services_ip.ssh_jump_cidr
}

resource "openstack_networking_secgroup_rule_v2" "shell_80" {
  security_group_id = openstack_networking_secgroup_v2.debugging.id
  description       = "http debug port 80 from the shell jump proxy"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = var.basic_services_ip.ssh_jump_cidr
}

resource "openstack_networking_secgroup_rule_v2" "shell_443" {
  security_group_id = openstack_networking_secgroup_v2.debugging.id
  description       = "https debug port 443 from the shell jump proxy"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.basic_services_ip.ssh_jump_cidr
}
# Traffic from the main HAPROXY load balancers.
# Attach to the web services that are exposed through the main HAPROXY.
# Ports 80, 443, 8080 and 8888 are each opened from both HAPROXY L7
# instances (one rule per instance per port).
resource "openstack_networking_secgroup_v2" "traffic_from_main_haproxy" {
  name                 = "traffic_from_the_main_load_balancers"
  delete_default_rules = "true"
  description          = "Allow traffic from the main L7 HAPROXY load balancers"
}

resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-80" {
  security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
  description       = "HTTP traffic from HAPROXY L7 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-80" {
  security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
  description       = "HTTP traffic from HAPROXY L7 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-443" {
  security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
  description       = "HTTPS traffic from HAPROXY L7 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-443" {
  security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
  description       = "HTTPS traffic from HAPROXY L7 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-8080" {
  security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
  description       = "HTTP traffic from HAPROXY L7 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8080
  port_range_max    = 8080
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8080" {
  security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
  description       = "HTTP traffic from HAPROXY L7 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8080
  port_range_max    = 8080
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-8888" {
  security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
  description       = "HTTP traffic from HAPROXY L7 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8888
  port_range_max    = 8888
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8888" {
  security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
  description       = "HTTP traffic from HAPROXY L7 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8888
  port_range_max    = 8888
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
}
# Security group that exposes web services directly (HTTP/HTTPS from
# anywhere). A floating IP is required on the instance that uses it.
resource "openstack_networking_secgroup_v2" "public_web" {
  name                 = "public_web_service"
  delete_default_rules = "true"
  description          = "Security group that allows HTTPS and HTTP from everywhere, for the services that are not behind any load balancer"
}

resource "openstack_networking_secgroup_rule_v2" "public_http" {
  security_group_id = openstack_networking_secgroup_v2.public_web.id
  description       = "Allow HTTP from everywhere"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
}

resource "openstack_networking_secgroup_rule_v2" "public_https" {
  security_group_id = openstack_networking_secgroup_v2.public_web.id
  description       = "Allow HTTPS from everywhere"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = "0.0.0.0/0"
}
# HTTP and HTTPS access through the VPN nodes. A floating IP is required.
# HTTPS is limited to the VPN networks and shell.d4science.org; HTTP stays
# open to all so letsencrypt HTTP-01 challenges keep working.
resource "openstack_networking_secgroup_v2" "restricted_web" {
  name                 = "restricted_web_service"
  delete_default_rules = "true"
  description          = "Security group that restricts HTTPS sources to the VPN nodes and shell.d4science.org. HTTP is open to all, because letsencrypt"
}

resource "openstack_networking_secgroup_rule_v2" "http_from_everywhere" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTP from everywhere"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
}

resource "openstack_networking_secgroup_rule_v2" "https_from_d4s_vpn_1" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from D4Science VPN 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.d4s_vpn_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "https_from_d4s_vpn_2" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from D4Science VPN 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.d4s_vpn_2_cidr
}

resource "openstack_networking_secgroup_rule_v2" "https_from_s2i2s_vpn_1" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from S2I2S VPN 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "https_from_s2i2s_vpn_2" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from S2I2S VPN 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_2_cidr
}

resource "openstack_networking_secgroup_rule_v2" "https_from_shell_d4s" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from shell.d4science.org"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.shell_d4s_cidr
}
# Allow the public Grafana server to query Prometheus over HTTPS.
resource "openstack_networking_secgroup_v2" "prometheus_access_from_grafana" {
  name                 = "prometheus_access_from_grafana"
  delete_default_rules = "true"
  description          = "The public grafana server must be able to get data from Prometheus"
}

resource "openstack_networking_secgroup_rule_v2" "grafana_d4s" {
  security_group_id = openstack_networking_secgroup_v2.prometheus_access_from_grafana.id
  description       = "Allow HTTPS from grafana.d4science.org"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.prometheus_server_data.public_grafana_server_cidr
}

View File

@ -1,186 +0,0 @@
# Main load balancer. L4, backed by Octavia (amphora provider).
resource "openstack_lb_loadbalancer_v2" "main_lb" {
  vip_subnet_id         = var.main_private_subnet_id
  name                  = var.octavia_information.main_lb_name
  description           = var.octavia_information.main_lb_description
  flavor_id             = var.octavia_information.octavia_flavor_id
  vip_address           = var.basic_services_ip.octavia_main
  loadbalancer_provider = "amphora"
}

# Allocate a floating IP for the VIP.
resource "openstack_networking_floatingip_v2" "main_lb_ip" {
  pool = var.floating_ip_pools.main_public_ip_pool
  # The DNS association does not work because of a bug in the OpenStack API
  # dns_name = "main-lb"
  # dns_domain = var.dns_zone.zone_name
  description = var.octavia_information.main_lb_description
}

# Bind the floating IP to the load balancer's VIP port.
resource "openstack_networking_floatingip_associate_v2" "main_lb" {
  floating_ip = openstack_networking_floatingip_v2.main_lb_ip.address
  port_id     = openstack_lb_loadbalancer_v2.main_lb.vip_port_id
}

locals {
  # FQDN published for the load balancer, e.g. "<hostname>.<zone>".
  recordset_name = "${var.octavia_information.main_lb_hostname}.${var.dns_zone.zone_name}"
}

# A record pointing the public hostname at the floating IP.
resource "openstack_dns_recordset_v2" "main_lb_dns_recordset" {
  zone_id     = var.dns_zone_id
  name        = local.recordset_name
  description = "Public IP address of the main load balancer"
  # NOTE(review): ttl 8600 looks like a typo for 3600 or 86400 — confirm.
  ttl     = 8600
  type    = "A"
  records = [openstack_networking_floatingip_v2.main_lb_ip.address]
}
# Main HAPROXY stats listener (8880/tcp), restricted to the VPN networks.
resource "openstack_lb_listener_v2" "main_haproxy_stats_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  protocol        = "TCP"
  protocol_port   = 8880
  description     = "Listener for the stats of the main HAPROXY instances"
  name            = "main_haproxy_stats_listener"
  allowed_cidrs = [
    var.ssh_sources.d4s_vpn_1_cidr,
    var.ssh_sources.d4s_vpn_2_cidr,
    var.ssh_sources.s2i2s_vpn_1_cidr,
    var.ssh_sources.s2i2s_vpn_2_cidr,
  ]
}

resource "openstack_lb_pool_v2" "main_haproxy_stats_pool" {
  listener_id = openstack_lb_listener_v2.main_haproxy_stats_listener.id
  protocol    = "TCP"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "main-haproxy-lb-stats"
  description = "Pool for the stats of the main HAPROXY instances"
  # Pin a client to the same backend across requests.
  persistence {
    type = "SOURCE_IP"
  }
}

# Both HAPROXY L7 instances serve stats on 8880.
resource "openstack_lb_members_v2" "main_haproxy_stats_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 8880
  }
  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 8880
  }
}

# Plain TCP health check: probe every 20s, 5s timeout, 3 retries.
resource "openstack_lb_monitor_v2" "main_haproxy_stats_monitor" {
  pool_id        = openstack_lb_pool_v2.main_haproxy_stats_pool.id
  name           = "main_haproxy_stats_monitor"
  type           = "TCP"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# Main HAPROXY HTTP (80/tcp). The pool speaks PROXY protocol v2 to the
# backends so HAPROXY still sees the real client address.
resource "openstack_lb_listener_v2" "main_haproxy_http_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  protocol        = "TCP"
  protocol_port   = 80
  description     = "HTTP listener of the main HAPROXY instances"
  name            = "main_haproxy_http_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "main_haproxy_http_pool" {
  listener_id = openstack_lb_listener_v2.main_haproxy_http_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "main-haproxy-lb-http"
  description = "Pool for the HTTP listener of the main HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "main_haproxy_http_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 80
  }
  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 80
  }
}

# HTTP health check against HAPROXY's dedicated health endpoint.
resource "openstack_lb_monitor_v2" "main_haproxy_http_monitor" {
  pool_id        = openstack_lb_pool_v2.main_haproxy_http_pool.id
  name           = "main_haproxy_http_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# Main HAPROXY HTTPS (443/tcp), same layout as the HTTP chain above:
# TCP listener, PROXYV2 pool with source-IP persistence, HTTPS monitor.
resource "openstack_lb_listener_v2" "main_haproxy_https_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  protocol        = "TCP"
  protocol_port   = 443
  description     = "HTTPS listener of the main HAPROXY instances"
  name            = "main_haproxy_https_listener"
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "main_haproxy_https_pool" {
  listener_id = openstack_lb_listener_v2.main_haproxy_https_listener.id
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "main-haproxy-lb-https"
  description = "Pool for the HTTPS listener of the main HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}

resource "openstack_lb_members_v2" "main_haproxy_https_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 443
  }
  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 443
  }
}

resource "openstack_lb_monitor_v2" "main_haproxy_https_monitor" {
  pool_id        = openstack_lb_pool_v2.main_haproxy_https_pool.id
  name           = "main_haproxy_https_monitor"
  type           = "HTTPS"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# Expose the load balancer VIP so other stacks/operators can reference it.
output "main_loadbalancer_ip" {
  description = "Main Load balancer IP address"
  value       = openstack_lb_loadbalancer_v2.main_lb.vip_address
}

View File

@ -1,47 +0,0 @@
# VM used as jump proxy. A floating IP is required.
resource "openstack_compute_instance_v2" "ssh_jump_proxy" {
  name                    = var.ssh_jump_proxy.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.ssh_jump_proxy.flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [var.default_security_group_name, openstack_networking_secgroup_v2.access_to_the_jump_proxy.name]

  # Boot from a 30 GB volume created from the Ubuntu 22.04 image; the
  # volume is kept when the instance is destroyed.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 30
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.basic_services_ip.ssh_jump
  }

  # Call file() directly: the legacy "${file("${var…}")}" interpolation
  # wrapping is deprecated since Terraform 0.12 (file requires >= 0.14).
  user_data = file(var.ubuntu2204_data_file)
}
# Floating IP and DNS record for the jump proxy.
resource "openstack_networking_floatingip_v2" "ssh_jump_proxy_ip" {
  pool = var.floating_ip_pools.main_public_ip_pool
  # The DNS association does not work because of a bug in the OpenStack API
  description = "SSH Proxy Jump Server"
}

resource "openstack_compute_floatingip_associate_v2" "ssh_jump_proxy" {
  floating_ip = openstack_networking_floatingip_v2.ssh_jump_proxy_ip.address
  instance_id = openstack_compute_instance_v2.ssh_jump_proxy.id
}

locals {
  # Public FQDN of the jump proxy, e.g. "<name>.<zone>".
  ssh_recordset_name = "${var.ssh_jump_proxy.name}.${var.dns_zone.zone_name}"
}

resource "openstack_dns_recordset_v2" "ssh_jump_proxy_recordset" {
  zone_id     = var.dns_zone_id
  name        = local.ssh_recordset_name
  description = "Public IP address of the SSH Proxy Jump server"
  # NOTE(review): ttl 8600 looks like a typo for 3600 or 86400 — confirm.
  ttl     = 8600
  type    = "A"
  records = [openstack_networking_floatingip_v2.ssh_jump_proxy_ip.address]
}

View File

@ -1,21 +0,0 @@
# Internal certification authority VM, reachable only on the private
# network (default security group only, no floating IP).
resource "openstack_compute_instance_v2" "internal_ca" {
  name                    = var.internal_ca_data.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.internal_ca_data.flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [var.default_security_group_name]

  # Boot from a 10 GB volume created from the Ubuntu 22.04 image; the
  # volume is kept when the instance is destroyed.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.basic_services_ip.ca
  }

  # Call file() directly: the legacy "${file("${var…}")}" interpolation
  # wrapping is deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}

View File

@ -1,68 +0,0 @@
# Prometheus server. A floating IP is required
# Dedicated data volume for the Prometheus TSDB.
resource "openstack_blockstorage_volume_v3" "prometheus_data_vol" {
  name = var.prometheus_server_data.vol_data_name
  size = var.prometheus_server_data.vol_data_size
}

resource "openstack_compute_instance_v2" "prometheus_server" {
  name                    = var.prometheus_server_data.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.prometheus_server_data.flavor
  key_pair                = module.ssh_settings.ssh_key_name
  # Web access restricted to the VPNs plus Grafana-specific access.
  security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.restricted_web.name, openstack_networking_secgroup_v2.prometheus_access_from_grafana.name]

  # Boot from a 10 GB volume created from the Ubuntu 22.04 image; the
  # volume is kept when the instance is destroyed.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.basic_services_ip.prometheus
  }

  # Call file() directly: the legacy "${file("${var…}")}" interpolation
  # wrapping is deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}
# Attach the Prometheus data volume to the server instance.
resource "openstack_compute_volume_attach_v2" "prometheus_data_attach_vol" {
  instance_id = openstack_compute_instance_v2.prometheus_server.id
  volume_id   = openstack_blockstorage_volume_v3.prometheus_data_vol.id
  device      = var.prometheus_server_data.vol_data_device
}

# Floating IP and DNS records.
resource "openstack_networking_floatingip_v2" "prometheus_server_ip" {
  pool = var.floating_ip_pools.main_public_ip_pool
  # The DNS association does not work because of a bug in the OpenStack API
  description = "Prometheus server"
}

resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
  floating_ip = openstack_networking_floatingip_v2.prometheus_server_ip.address
  instance_id = openstack_compute_instance_v2.prometheus_server.id
}

locals {
  prometheus_recordset_name   = "${var.prometheus_server_data.name}.${var.dns_zone.zone_name}"
  alertmanager_recordset_name = "alertmanager.${var.dns_zone.zone_name}"
}

resource "openstack_dns_recordset_v2" "prometheus_server_recordset" {
  zone_id     = var.dns_zone_id
  name        = local.prometheus_recordset_name
  description = "Public IP address of the Prometheus server"
  # NOTE(review): ttl 8600 looks like a typo for 3600 or 86400 — confirm.
  ttl     = 8600
  type    = "A"
  records = [openstack_networking_floatingip_v2.prometheus_server_ip.address]
}

# Alertmanager is served from the same host, published as a CNAME.
resource "openstack_dns_recordset_v2" "alertmanager_server_recordset" {
  zone_id     = var.dns_zone_id
  name        = local.alertmanager_recordset_name
  description = "Prometheus alertmanager"
  # NOTE(review): ttl 8600 looks like a typo; also Designate normally wants
  # a fully-qualified CNAME target ending in "." — confirm both.
  ttl     = 8600
  type    = "CNAME"
  records = [local.prometheus_recordset_name]
}

View File

@ -1,87 +0,0 @@
# PostgreSQL shared server.
# Dedicated network so clients reach the service without traversing the
# main private network.
resource "openstack_networking_network_v2" "shared_postgresql_net" {
  name                  = var.shared_postgresql_server_data.network_name
  admin_state_up        = "true"
  external              = "false"
  description           = var.shared_postgresql_server_data.network_description
  dns_domain            = var.dns_zone.zone_name
  mtu                   = var.mtu_size
  port_security_enabled = true
  shared                = false
  region                = var.main_region
}

# Subnet: DHCP enabled, no gateway (the network is not routed).
resource "openstack_networking_subnet_v2" "shared_postgresql_subnet" {
  name            = "shared-postgresql-subnet"
  description     = "subnet used to connect to the shared PostgreSQL service"
  network_id      = openstack_networking_network_v2.shared_postgresql_net.id
  cidr            = var.shared_postgresql_server_data.network_cidr
  dns_nameservers = var.resolvers_ip
  ip_version      = 4
  enable_dhcp     = true
  no_gateway      = true
  allocation_pool {
    start = var.shared_postgresql_server_data.allocation_pool_start
    end   = var.shared_postgresql_server_data.allocation_pool_end
  }
}

# Security group granting access to the service over this network.
resource "openstack_networking_secgroup_v2" "shared_postgresql_access" {
  name                 = "access_to_the_shared_postgresql_service"
  delete_default_rules = "true"
  description          = "Access the shared PostgreSQL service using the dedicated network"
}
# Allow PostgreSQL (5432/tcp) from the dedicated network. The allowed
# prefix follows the variable; the old description hard-coded
# "192.168.2.0/22", which silently goes stale if the CIDR changes.
resource "openstack_networking_secgroup_rule_v2" "shared_postgresql_access_from_dedicated_subnet" {
  security_group_id = openstack_networking_secgroup_v2.shared_postgresql_access.id
  description       = "Allow connections to port 5432 from the dedicated PostgreSQL network"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 5432
  port_range_max    = 5432
  remote_ip_prefix  = var.shared_postgresql_server_data.network_cidr
}
# Block device holding the PostgreSQL data directory.
resource "openstack_blockstorage_volume_v3" "shared_postgresql_data_vol" {
  name = var.shared_postgresql_server_data.vol_data_name
  size = var.shared_postgresql_server_data.vol_data_size
}

# Instance: attached both to the main private network and to the
# dedicated PostgreSQL network.
resource "openstack_compute_instance_v2" "shared_postgresql_server" {
  name                    = var.shared_postgresql_server_data.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.shared_postgresql_server_data.flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [var.default_security_group_name, openstack_networking_secgroup_v2.shared_postgresql_access.name]

  # Boot from a 10 GB volume created from the Ubuntu 22.04 image; the
  # volume is kept when the instance is destroyed.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name = var.main_private_network.name
  }
  network {
    name        = var.shared_postgresql_server_data.network_name
    fixed_ip_v4 = var.shared_postgresql_server_data.server_ip
  }

  # Call file() directly: the legacy "${file("${var…}")}" interpolation
  # wrapping is deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}

resource "openstack_compute_volume_attach_v2" "shared_postgresql_data_attach_vol" {
  instance_id = openstack_compute_instance_v2.shared_postgresql_server.id
  volume_id   = openstack_blockstorage_volume_v3.shared_postgresql_data_vol.id
  device      = var.shared_postgresql_server_data.vol_data_device
  # Redundant (instance_id already implies the ordering) but kept to make
  # the dependency explicit.
  depends_on = [openstack_compute_instance_v2.shared_postgresql_server]
}

View File

@ -1,138 +0,0 @@
#
# HAPROXY L7 behind the main Octavia balancer
#
# FIXME: terraform does not return the Octavia VRRP addresses, so we have to find them before creating the security group that allows the traffic between octavia and the haproxy instances
#
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
# 5cc2354e-4465-4a1d-8390-c214e208c6de octavia-lb-vrrp-72392023-a774-4b58-a025-c1e99c5d152a fa:16:3e:62:24:2c [{'subnet_id': 'cd77a2fd-4a36-4254-b1d0-70b3874c6d04', 'ip_address': '10.1.34.232'}] ACTIVE
# 8aa4e97f-723d-4a2a-b79f-912fa7651653 octavia-lb-vrrp-fbfcf712-0ceb-4a38-82da-0c9ebef5dff3 fa:16:3e:79:62:a5 [{'subnet_id': 'cd77a2fd-4a36-4254-b1d0-70b3874c6d04', 'ip_address': '10.1.33.229'}] ACTIVE
#
# Server group
#
# Keep the two HAPROXY L7 VMs on different hypervisors.
resource "openstack_compute_servergroup_v2" "main_haproxy_l7" {
  name     = "main_haproxy_l7"
  policies = ["anti-affinity"]
}

# Security group for traffic reaching the HAPROXY L7 servers from the
# main L4 load balancer (fixes the missing "from" in the description).
resource "openstack_networking_secgroup_v2" "main_lb_to_haproxy_l7" {
  name                 = "traffic_from_main_lb_to_haproxy_l7"
  delete_default_rules = "true"
  description          = "Traffic coming from the main L4 lb directed to the haproxy l7 servers"
}
# Peer traffic between the two HAPROXY instances on port 10000/tcp.
resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_1_peer" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Peer traffic from haproxy l7 1 to l7 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10000
  port_range_max    = 10000
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_2_peer" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Peer traffic from haproxy l7 2 to l7 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10000
  port_range_max    = 10000
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
}
# Ingress from the Octavia amphora VRRP addresses to the HAPROXY backends
# on ports 80, 443 and 8080. The lb3_2_* descriptions previously said
# "first main lb instance" although they use octavia_vrrp_ip_2 (second).
resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_80" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the first main lb instance to HAPROXY l7 1 port 80"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_1
}

resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_443" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the first main lb instance to HAPROXY l7 1 port 443"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_1
}

resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_8080" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the first main lb instance to HAPROXY l7 1 port 8080"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8080
  port_range_max    = 8080
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_1
}

resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_80" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the second main lb instance to HAPROXY l7 2 port 80"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_2
}

resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_443" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the second main lb instance to HAPROXY l7 2 port 443"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_2
}

resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_8080" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the second main lb instance to HAPROXY l7 2 port 8080"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8080
  port_range_max    = 8080
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_2
}
# HAPROXY L7 instances, one per entry of var.main_haproxy_l7_ip.
resource "openstack_compute_instance_v2" "main_haproxy_l7" {
  count                   = var.haproxy_l7_data.vm_count
  name                    = format("%s-%02d", var.haproxy_l7_data.name, count.index + 1)
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.haproxy_l7_data.flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [var.default_security_group_name, openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.name]

  # Spread the instances across hypervisors (anti-affinity group).
  scheduler_hints {
    group = openstack_compute_servergroup_v2.main_haproxy_l7.id
  }

  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }

  network {
    name = var.main_private_network.name
    # Fix: `var.main_haproxy_l7_ip.*[count.index]` is not valid HCL2 splat
    # syntax; a plain index into the list variable is what is intended.
    fixed_ip_v4 = var.main_haproxy_l7_ip[count.index]
  }

  # Call file() directly: the legacy "${file("${var…}")}" interpolation
  # wrapping is deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}

View File

@ -1 +0,0 @@
../../common_setups/15-security-groups.tf

View File

@ -1 +0,0 @@
../../common_setups/20-octavia.tf

View File

@ -1 +0,0 @@
../../common_setups/25-ssh-jump-proxy.tf

View File

@ -1 +0,0 @@
../../common_setups/30-internal-ca.tf

View File

@ -1 +0,0 @@
../../common_setups/35-prometheus.tf

View File

@ -1 +0,0 @@
../../common_setups/40-postgresql.tf

View File

@ -1 +0,0 @@
../../common_setups/45-haproxy.tf

View File

@ -1 +0,0 @@
../../modules/docker_swarm/docker-swarm.tf

View File

@ -1,3 +1,5 @@
echo "Do not use"
# Define required providers
terraform {
required_version = ">= 0.14.0"
@ -17,10 +19,7 @@ data "terraform_remote_state" "privnet_dns_router" {
}
}
# module "variables" {
# source = "../variables"
# }
module "d4science_infra_setup" {
source = "../../modules/d4science_infra_setup"
}
# module "basic_setup" {
# source = "../../modules/basic_setup"
# }

View File

@ -1 +0,0 @@
../../modules/docker_swarm/swarm-variables.tf

View File

@ -1 +0,0 @@
../variables/variables-dev.tf

View File

@ -1 +0,0 @@
../../modules/common_variables/variables.tf

View File

@ -1 +0,0 @@
../../modules/common_variables/variables.tf

View File

@ -1,3 +1,28 @@
# Map describing the Smartgears service VMs to create: each entry supplies
# the instance name, flavor, networks, security groups and boot image UUID.
variable "smartgears_service_instances_map" {
type = map(object({
name = string
description = string
flavor = string
networks = list(string)
security_groups = list(string)
block_device_uuid = string
}))
default = {
# Geoportal instance
geoportal_service = {
name = "geoportal-cms",
description = "The Geoportal instance",
# NOTE(review): "m1_large" uses an underscore while other instance maps
# in this file use flavor names like "m1.medium" — confirm it exists.
flavor = "m1_large",
networks = ["d4s-dev-cloud-main", "postgresql-srv-net"],
security_groups = ["default", "http and https from the load balancers"]
block_device_uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89" #ubuntu_18_04.uuid of DEV
}
}
}
#Default smartgears_service is EMPTY. Override it to create a proper smartegears plan
@ -20,42 +45,40 @@
# }
variable "smartgears_service_instances_map" {
type = map(object({
name = string
description = string
flavor = string
networks = list(string)
security_groups = list(string)
block_device_uuid = string
}))
default = {
geoportal_service = {
name = "geoportal-cms",
description = "The Geoportal instance",
flavor = "m1_large",
networks = ["d4s-dev-cloud-main", "postgresql-srv-net"],
security_groups = ["default", "http and https from the load balancers"]
block_device_uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89" #ubuntu_18_04.uuid of DEV
},
# URI-Resolver instance 1
uri_resolver_service_i1 = {
name = "data",
description = "The data instance",
flavor = "m1.medium",
networks = ["d4s-dev-cloud-main"],
security_groups = ["default", "http and https from the load balancers"]
block_device_uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89" #ubuntu_18_04.uuid of DEV
},
# URI-Resolver instance 2
uri_resolver_service_i2 = {
name = "data1",
description = "The data1 instance",
flavor = "m1.medium",
networks = ["d4s-dev-cloud-main"],
security_groups = ["default", "http and https from the load balancers"]
block_device_uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89" #ubuntu_18_04.uuid of DEV
}
}
# locals {
# smartgears_service_instances_map = {
# geoportal_service = {
# name = "geoportal-cms",
# description = "The Geoportal instance",
# flavor = "${var.flavor_list.m1_large}",
# networks = ["d4s-dev-cloud-main", "postgresql-srv-net"],
# security_groups = ["default", "http and https from the load balancers"]
# block_device_uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89" #ubuntu_18_04.uuid of DEV
# },
# # URI-Resolver instance 1
# uri_resolver_service_i1 = {
# name = "data",
# description = "The data instance",
# flavor = "m1.medium",
# networks = ["d4s-dev-cloud-main"],
# security_groups = ["default", "http and https from the load balancers"]
# block_device_uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89" #ubuntu_18_04.uuid of DEV
# }
# }
# }
# variable "smartgears_service_instances_map" {
# type = map(object({
# name = string
# description = string
# flavor = string
# networks = list(string)
# security_groups = list(string)
# block_device_uuid = string
# }))
# default = local.smartgears_service_instances_map
# }
}

View File

@ -0,0 +1,5 @@
orientdb_nodes_count = 3
orientdb_node_flavor = "m1.medium"
orientdb_se_node_flavor = "m1.medium"
orientdb_se_ip = "192.168.12.4"
orientdb_se_cidr = "192.168.12.4/32"

View File

@ -0,0 +1 @@
../variables/dev.auto.tfvars

View File

@ -1,6 +1,6 @@
# Define required providers
terraform {
required_version = ">= 0.14.0"
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
@ -17,10 +17,80 @@ data "terraform_remote_state" "privnet_dns_router" {
}
}
# module "variables" {
# source = "../variables"
# data "terraform_remote_state" "basic_infrastructure" {
# backend = "local"
# config = {
# path = "../basic-infrastructure/terraform.tfstate"
# }
# }
# module "orientdb" {
# source = "../../modules/orientdb"
# os_project_data = {
# id = "e8f8ca72f30648a8b389b4e745ac83a9"
# }
# dns_zone = {
# zone_name = "cloud-dev.d4science.org."
# email = "postmaster@isti.cnr.it"
# description = "DNS primary zone for the d4s-dev-cloud project"
# ttl = 8600
# }
# dns_zone_id = "cbae638a-9d99-44aa-946c-0f5ffb7fc488"
# default_security_group_name = "default"
# main_private_network = {
# name = "d4s-dev-cloud-main"
# description = "D4Science DEV private network (use this as the main network)"
# }
# main_private_subnet = {
# name = "d4s-dev-cloud-sub"
# description = "D4Science DEV main private subnet"
# cidr = "10.1.28.0/22"
# gateway_ip = "10.1.28.1"
# allocation_start = "10.1.28.30"
# allocation_end = "10.1.31.254"
# }
# external_router = {
# name = "d4s-dev-cloud-external-router"
# description = "D4Science DEV main router"
# id = "2ae28c5f-036b-45db-bc9f-5bab8fa3e914"
# }
# main_haproxy_l7_ip = ["10.1.28.50", "10.1.30.241"]
# octavia_information = {
# main_lb_name = "lb-dev-l4"
# main_lb_description = "Main L4 load balancer for the D4Science DEV"
# octavia_flavor = "octavia_amphora-mvcpu-ha"
# octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
# main_lb_hostname = "main-lb"
# }
# basic_services_ip = {
# ca = "10.1.29.247"
# ca_cidr = "10.1.29.247/32"
# ssh_jump = "10.1.29.164"
# ssh_jump_cidr = "10.1.29.164/32"
# prometheus = "10.1.30.129"
# prometheus_cidr = "10.1.30.129/32"
# haproxy_l7_1 = "10.1.28.50"
# haproxy_l7_1_cidr = "10.1.28.50/32"
# haproxy_l7_2 = "10.1.30.241"
# haproxy_l7_2_cidr = "10.1.30.241/32"
# octavia_main = "10.1.28.227"
# octavia_main_cidr = "10.1.28.227/32"
# }
# orientdb_nodes_count = 3
# orientdb_node_flavor = "m1.medium"
# orientdb_se_node_flavor = "m1.medium"
# orientdb_se_ip = "192.168.12.4"
# orientdb_se_cidr = "192.168.12.4/32"
# }

View File

@ -1,7 +0,0 @@
orientdb_nodes_count = 3
orientdb_node_flavor = "m1.medium"
orientdb_se_node_flavor = "m1.medium"
orientdb_se_ip = "192.168.12.4"
orientdb_se_cidr = "192.168.12.4/32"
default_security_group_name = "default"

View File

@ -0,0 +1 @@
../../modules/common_variables/outputs.tf

View File

@ -1,4 +1,4 @@
provider "openstack" {
cloud = "d4s-dev"
cloud = "d4s-dev"
}

View File

@ -1,7 +1,7 @@
{
"version": 4,
"terraform_version": "1.6.4",
"serial": 44,
"serial": 58,
"lineage": "7607c85c-02c0-0227-fd2b-4958c821fe57",
"outputs": {},
"resources": [
@ -138,7 +138,7 @@
"default"
],
"stop_before_destroy": false,
"tags": null,
"tags": [],
"timeouts": null,
"updated": "2023-11-23 10:48:27 +0000 UTC",
"user_data": "bb83b25fd1219aa1b850ece9be8d7b0f31714608",
@ -588,7 +588,7 @@
"region": "isti_area_pi_1",
"segments": [],
"shared": false,
"tags": null,
"tags": [],
"tenant_id": "e8f8ca72f30648a8b389b4e745ac83a9",
"timeouts": null,
"transparent_vlan": false,
@ -672,9 +672,9 @@
"description": "TCP traffic from the load balancers",
"direction": "ingress",
"ethertype": "IPv4",
"id": "6799246f-a17b-4e46-b2fe-8e4e5e21a3de",
"port_range_max": 2424,
"port_range_min": 2424,
"id": "b4cdae7a-0771-4dbf-b6c7-7513eb904acf",
"port_range_max": 2480,
"port_range_min": 2480,
"protocol": "tcp",
"region": "isti_area_pi_1",
"remote_group_id": "",
@ -696,9 +696,9 @@
"description": "TCP traffic from the load balancers",
"direction": "ingress",
"ethertype": "IPv4",
"id": "f2729899-f411-4b13-9c8f-12eb81a9f1e9",
"port_range_max": 2424,
"port_range_min": 2424,
"id": "6e78f200-1ed3-4d70-b803-100ca0f41d0d",
"port_range_max": 2480,
"port_range_min": 2480,
"protocol": "tcp",
"region": "isti_area_pi_1",
"remote_group_id": "",
@ -725,10 +725,10 @@
"index_key": "10.1.29.164/32",
"schema_version": 0,
"attributes": {
"description": "TCP traffic from the resource registries and the SSH jump server",
"description": "TCP traffic from the smart executors and the SSH jump server",
"direction": "ingress",
"ethertype": "IPv4",
"id": "6fd47297-3621-4827-8d19-4cca9db46a05",
"id": "38ffd61d-6ec8-413f-a61a-c95b68687f6c",
"port_range_max": 2490,
"port_range_min": 2424,
"protocol": "tcp",
@ -751,10 +751,10 @@
"index_key": "192.168.12.0/24",
"schema_version": 0,
"attributes": {
"description": "TCP traffic from the resource registries and the SSH jump server",
"description": "TCP traffic from the smart executors and the SSH jump server",
"direction": "ingress",
"ethertype": "IPv4",
"id": "f1c1e536-df8f-4da7-82b9-a4d564ed6744",
"id": "e4f06118-5298-492a-853e-6a45aa97ad13",
"port_range_max": 2490,
"port_range_min": 2424,
"protocol": "tcp",
@ -788,9 +788,9 @@
"description": "TCP traffic from the load balancers",
"direction": "ingress",
"ethertype": "IPv4",
"id": "8180389a-0741-4a6c-9625-0f2ee1e7770f",
"port_range_max": 2424,
"port_range_min": 2424,
"id": "ad34990c-1b8e-4aec-984d-4d68d5dff075",
"port_range_max": 2480,
"port_range_min": 2480,
"protocol": "tcp",
"region": "isti_area_pi_1",
"remote_group_id": "",
@ -812,9 +812,9 @@
"description": "TCP traffic from the load balancers",
"direction": "ingress",
"ethertype": "IPv4",
"id": "a0486ca2-7bf4-471f-9f4b-1b9964eb9dc2",
"port_range_max": 2424,
"port_range_min": 2424,
"id": "0a79ae44-7ff9-47ac-8b07-936e399aa227",
"port_range_max": 2480,
"port_range_min": 2480,
"protocol": "tcp",
"region": "isti_area_pi_1",
"remote_group_id": "",
@ -946,7 +946,7 @@
"attributes": {
"all_tags": [],
"delete_default_rules": true,
"description": "Clients that talk to the OrientDB service",
"description": "Clients that talk to the OrientDB SE service",
"id": "f72e0d63-949e-47b7-95fa-69cc7ff2415a",
"name": "access_to_orientdb_se",
"region": "isti_area_pi_1",
@ -1026,7 +1026,7 @@
"region": "isti_area_pi_1",
"service_types": [],
"subnetpool_id": "",
"tags": null,
"tags": [],
"tenant_id": "e8f8ca72f30648a8b389b4e745ac83a9",
"timeouts": null,
"value_specs": null

View File

@ -1 +0,0 @@
../variables/variables-dev.tf

View File

@ -1,3 +1,4 @@
echo "Do not use."
# Define required providers
terraform {
required_version = ">= 0.14.0"

View File

@ -0,0 +1 @@
../variables/dev.auto.tfvars

View File

@ -1,7 +1,7 @@
{
"version": 4,
"terraform_version": "1.6.4",
"serial": 6,
"serial": 9,
"lineage": "ae8eda4c-51c3-13f0-219b-df8ea9af7818",
"outputs": {},
"resources": [
@ -124,7 +124,7 @@
"default"
],
"stop_before_destroy": false,
"tags": null,
"tags": [],
"timeouts": null,
"updated": "2023-11-23 11:32:53 +0000 UTC",
"user_data": "47d4769e61324c305c4b70ed6673de4fad84150d",
@ -145,7 +145,7 @@
{
"schema_version": 0,
"attributes": {
"access_ip_v4": "10.1.28.203",
"access_ip_v4": "10.1.30.54",
"access_ip_v6": "",
"admin_pass": null,
"all_metadata": {},
@ -168,12 +168,12 @@
}
],
"config_drive": null,
"created": "2023-11-23 11:31:58 +0000 UTC",
"created": "2023-12-01 16:16:53 +0000 UTC",
"flavor_id": "4",
"flavor_name": "m1.medium",
"floating_ip": null,
"force_delete": false,
"id": "deb96f10-afba-414b-bab7-baf0e28fd8c8",
"id": "cf62340b-33dc-4bbb-b20a-712766acd9c7",
"image_id": "Attempt to boot from volume - no image supplied",
"image_name": null,
"key_pair": "adellam",
@ -182,33 +182,43 @@
"network": [
{
"access_network": false,
"fixed_ip_v4": "10.1.28.203",
"fixed_ip_v4": "10.1.30.54",
"fixed_ip_v6": "",
"floating_ip": "",
"mac": "fa:16:3e:53:4f:b2",
"mac": "fa:16:3e:b8:9f:a0",
"name": "d4s-dev-cloud-main",
"port": "",
"uuid": "e0af5eba-f24a-4d0d-8184-bc654b980c4a"
},
{
"access_network": false,
"fixed_ip_v4": "192.168.12.67",
"fixed_ip_v4": "192.168.12.226",
"fixed_ip_v6": "",
"floating_ip": "",
"mac": "fa:16:3e:14:25:f8",
"mac": "fa:16:3e:16:42:de",
"name": "orientdb-se-net",
"port": "",
"uuid": "f3123ccc-f4f9-4b82-95eb-bcd714ad38e6"
},
{
"access_network": false,
"fixed_ip_v4": "192.168.2.113",
"fixed_ip_v4": "192.168.2.95",
"fixed_ip_v6": "",
"floating_ip": "",
"mac": "fa:16:3e:a2:2b:5a",
"mac": "fa:16:3e:8b:a6:5e",
"name": "postgresql-srv-net",
"port": "",
"uuid": "00422a4a-4b8b-4c85-acf9-ef733df842b9"
},
{
"access_network": false,
"fixed_ip_v4": "192.168.11.27",
"fixed_ip_v6": "",
"floating_ip": "",
"mac": "fa:16:3e:1b:0f:2d",
"name": "timescaledb-net",
"port": "",
"uuid": "ec57aa06-17c8-4475-b4c2-3783f3c7ec7a"
}
],
"network_mode": null,
@ -222,7 +232,7 @@
"stop_before_destroy": false,
"tags": null,
"timeouts": null,
"updated": "2023-11-23 11:32:45 +0000 UTC",
"updated": "2023-12-01 16:17:28 +0000 UTC",
"user_data": "47d4769e61324c305c4b70ed6673de4fad84150d",
"vendor_options": [],
"volume": []
@ -306,7 +316,7 @@
"default"
],
"stop_before_destroy": false,
"tags": null,
"tags": [],
"timeouts": null,
"updated": "2023-11-23 11:32:48 +0000 UTC",
"user_data": "47d4769e61324c305c4b70ed6673de4fad84150d",
@ -392,7 +402,7 @@
"default"
],
"stop_before_destroy": false,
"tags": null,
"tags": [],
"timeouts": null,
"updated": "2023-11-23 11:32:50 +0000 UTC",
"user_data": "47d4769e61324c305c4b70ed6673de4fad84150d",
@ -478,7 +488,7 @@
"default"
],
"stop_before_destroy": false,
"tags": null,
"tags": [],
"timeouts": null,
"updated": "2023-11-23 11:32:44 +0000 UTC",
"user_data": "47d4769e61324c305c4b70ed6673de4fad84150d",

View File

@ -1 +0,0 @@
../variables/variables-dev.tf

View File

@ -0,0 +1,24 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/terraform-provider-openstack/openstack" {
version = "1.53.0"
constraints = "~> 1.53.0"
hashes = [
"h1:YLGvYkSuagyP5orUTyKNK+JhzS17EFTUDpZ5R5/fFv4=",
"zh:09da7ca98ffd3de7b9ce36c4c13446212a6e763ba1162be71b50f95d453cb68e",
"zh:14041bcbb87312411d88612056ed185650bfd01284b8ea0761ce8105a331708e",
"zh:35bf4c788fdbc17c8e40ebc7b33c7de4b45a2fa2efaa657b10f0e3bd37c9627f",
"zh:46ede8ef4cfa12d654c538afc1e1ec34a1f3e8eb4e986ee23dceae398b7176a6",
"zh:59675734990dab1e8d87997853ea75e8104bba730b3f5a7146ac735540c9d6bf",
"zh:6de52428849806498670e827b54810be7510a2a79449602c1aede4235a0ec036",
"zh:78b2a20601272afceffac8f8ca78a6b647b84196c0dd8dc710fae297f6be15a4",
"zh:7c41ed3a4fac09677e676ecf9f9edd1e38eef449e656cb01a848d2c799c6de8f",
"zh:852800228f4118a4aa6cfaa4468b851247cbed6f037fd204f08de69eb1edc149",
"zh:86d618e7f9a07d978b8bc4b190be350a00de64ec535f9c8f5dfe133542a55483",
"zh:963a9e72b66d8bcf43de9b14a674ae3ca3719ce2f829217f7a65b66fc3773397",
"zh:a8e72ab67795071bda61f99a6de3d2d40122fb51971768fd75e1324abe874ced",
"zh:ce1890cf3af17d569af3bc7673cec0a8f78e6f5d701767593f3d29c551f44848",
"zh:e6f1b96eb684f527a47f71923f268c86a36d7894751b31ee9e726d7502a639cd",
]
}

View File

@ -0,0 +1 @@
../../modules/generic_smartgears_service/generic_smartgears_service.tf

View File

@ -0,0 +1,33 @@
#The URI-Resolver instances
variable "smartgears_service_instances_map" {
type = map(object({
name = string
description = string
flavor = string
networks = list(string)
security_groups = list(string)
block_device_uuid = string
}))
default = {
# URI-Resolver instance 1
uri_resolver_service_i1 = {
name = "data",
description = "The data instance",
flavor = "m1.medium",
networks = ["d4s-dev-cloud-main"],
security_groups = ["default", "http and https from the load balancers"]
block_device_uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89" #ubuntu_18_04.uuid of DEV
},
# URI-Resolver instance 2
uri_resolver_service_i2 = {
name = "data1",
description = "The data1 instance",
flavor = "m1.medium",
networks = ["d4s-dev-cloud-main"],
security_groups = ["default", "http and https from the load balancers"]
block_device_uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89" #ubuntu_18_04.uuid of DEV
}
}
}

View File

@ -1,6 +1,6 @@
# Define required providers
terraform {
required_version = ">= 0.14.0"
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
@ -17,6 +17,3 @@ data "terraform_remote_state" "privnet_dns_router" {
}
}
module "d4science_infra_setup" {
source = "../../modules/d4science_infra_setup"
}

View File

@ -0,0 +1,3 @@
provider "openstack" {
cloud = "d4s-dev"
}

View File

@ -0,0 +1,60 @@
#
os_project_data = {
id = "e8f8ca72f30648a8b389b4e745ac83a9"
}
dns_zone = {
zone_name = "cloud-dev.d4science.org."
email = "postmaster@isti.cnr.it"
description = "DNS primary zone for the d4s-dev-cloud project"
ttl = 8600
}
dns_zone_id = "cbae638a-9d99-44aa-946c-0f5ffb7fc488"
default_security_group_name = "default"
main_private_network = {
name = "d4s-dev-cloud-main"
description = "D4Science DEV private network (use this as the main network)"
}
main_private_subnet = {
name = "d4s-dev-cloud-sub"
description = "D4Science DEV main private subnet"
cidr = "10.1.28.0/22"
gateway_ip = "10.1.28.1"
allocation_start = "10.1.28.30"
allocation_end = "10.1.31.254"
}
external_router = {
name = "d4s-dev-cloud-external-router"
description = "D4Science DEV main router"
id = "2ae28c5f-036b-45db-bc9f-5bab8fa3e914"
}
main_haproxy_l7_ip = ["10.1.28.50", "10.1.30.241"]
octavia_information = {
main_lb_name = "lb-dev-l4"
main_lb_description = "Main L4 load balancer for the D4Science DEV"
octavia_flavor = "octavia_amphora-mvcpu-ha"
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
main_lb_hostname = "main-lb"
}
basic_services_ip = {
ca = "10.1.29.247"
ca_cidr = "10.1.29.247/32"
ssh_jump = "10.1.29.164"
ssh_jump_cidr = "10.1.29.164/32"
prometheus = "10.1.30.129"
prometheus_cidr = "10.1.30.129/32"
haproxy_l7_1 = "10.1.28.50"
haproxy_l7_1_cidr = "10.1.28.50/32"
haproxy_l7_2 = "10.1.30.241"
haproxy_l7_2_cidr = "10.1.30.241/32"
octavia_main = "10.1.28.227"
octavia_main_cidr = "10.1.28.227/32"
}

View File

@ -0,0 +1,7 @@
output "os_project_data" {
value = var.os_project_data
}
output "main_haproxy_l7_ip" {
value = var.main_haproxy_l7_ip
}

View File

@ -87,59 +87,3 @@ variable "basic_services_ip" {
octavia_main_cidr = "10.1.28.227/32"
}
}
variable "orientdb_net" {
type = map(string)
default = {
network_name = "orientdb-net"
network_description = "Network used by the OrientDB cluster and to access the service"
network_cidr = "192.168.10.0/24"
allocation_pool_start = "192.168.10.11"
allocation_pool_end = "192.168.10.254"
}
}
variable "orientdb_se_net" {
type = map(string)
default = {
network_name = "orientdb-se-net"
network_description = "Network used by the OrientDB for Smart Executor"
network_cidr = "192.168.12.0/24"
allocation_pool_start = "192.168.12.11"
allocation_pool_end = "192.168.12.254"
}
}
variable "orientdb_se_secgroup" {
default = "access_to_orientdb_se"
}
variable "postgresql_secgroup" {
default = "PostgreSQL service"
}
#Added by Francesco
variable "security_group_list" {
type = map(string)
default = {
postgreSQL = "PostgreSQL service"
acaland = "acaland's dev machine"
haproxy = "HAPROXY L7"
access_to_orientdb = "access_to_orientdb"
dataminer-publish = "dataminer-publish"
docker_swarm_NFS = "Docker Swarm NFS"
public_HTTPS = "Public HTTPS"
haproxy = "HAPROXY L7"
orientdb_internal_docker_traffic = "orientdb_internal_docker_traffic"
limited_SSH_access = "Limited SSH access"
access_to_the_timescaledb_service = "access_to_the_timescaledb_service"
docker_swarm = "Docker Swarm"
http_and_https_from_the_load_balancers = "http and https from the load balancers"
limited_HTTPS_access = "Limited HTTPS access"
mongo = "mongo"
limited_SSH_access = "Limited SSH access"
default = "default"
cassandra = "Cassandra"
access_to_orientdb_se = "access_to_orientdb_se"
}
}

View File

@ -1,11 +0,0 @@
# Main services
* Load balancer as a service (openstack), L4.
> * Main HAPROXY load balancer
* Two VMs as HAPROXY L7 instances for the main services. The dataminers will be also served by this load balancer.
* A shell server, with floating IP address, that will be used as a proxy to reach all the other VMs.
* A internal CA service.
* A Prometheus instance.
* A PostgreSQL server instance, with a dedicated network

View File

@ -1,3 +0,0 @@
provider "openstack" {
cloud = "d4s-pre"
}

View File

@ -0,0 +1,24 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/terraform-provider-openstack/openstack" {
version = "1.53.0"
constraints = "~> 1.53.0"
hashes = [
"h1:ZSJPqrlaHQ3sj7wyJuPSG+NblFZbAA6Y0d3GjSJf3o8=",
"zh:09da7ca98ffd3de7b9ce36c4c13446212a6e763ba1162be71b50f95d453cb68e",
"zh:14041bcbb87312411d88612056ed185650bfd01284b8ea0761ce8105a331708e",
"zh:35bf4c788fdbc17c8e40ebc7b33c7de4b45a2fa2efaa657b10f0e3bd37c9627f",
"zh:46ede8ef4cfa12d654c538afc1e1ec34a1f3e8eb4e986ee23dceae398b7176a6",
"zh:59675734990dab1e8d87997853ea75e8104bba730b3f5a7146ac735540c9d6bf",
"zh:6de52428849806498670e827b54810be7510a2a79449602c1aede4235a0ec036",
"zh:78b2a20601272afceffac8f8ca78a6b647b84196c0dd8dc710fae297f6be15a4",
"zh:7c41ed3a4fac09677e676ecf9f9edd1e38eef449e656cb01a848d2c799c6de8f",
"zh:852800228f4118a4aa6cfaa4468b851247cbed6f037fd204f08de69eb1edc149",
"zh:86d618e7f9a07d978b8bc4b190be350a00de64ec535f9c8f5dfe133542a55483",
"zh:963a9e72b66d8bcf43de9b14a674ae3ca3719ce2f829217f7a65b66fc3773397",
"zh:a8e72ab67795071bda61f99a6de3d2d40122fb51971768fd75e1324abe874ced",
"zh:ce1890cf3af17d569af3bc7673cec0a8f78e6f5d701767593f3d29c551f44848",
"zh:e6f1b96eb684f527a47f71923f268c86a36d7894751b31ee9e726d7502a639cd",
]
}

View File

@ -0,0 +1,88 @@
# Define required providers
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}
data "terraform_remote_state" "privnet_dns_router" {
backend = "local"
config = {
path = "../project-setup/terraform.tfstate"
}
}
module "liferay" {
source = "../../modules/liferay"
default_security_group_name = "default_for_all"
# Provided in the output of the project setup
main_private_network_id = "23fd8a99-d551-4ada-8d3a-9859542ebb8c"
main_private_subnet_id = "cd77a2fd-4a36-4254-b1d0-70b3874c6d04"
dns_zone_id = "c1a4b4bc-f167-4387-855d-38f0f99ca05c"
os_project_data = {
id = "6fdc02e2827b405dad99f34698659742"
}
dns_zone = {
zone_name = "cloud-pre.d4science.org."
email = "postmaster@isti.cnr.it"
description = "DNS primary zone for the d4s-pre-cloud project"
ttl = 8600
id = "c1a4b4bc-f167-4387-855d-38f0f99ca05c"
}
main_private_network = {
name = "d4s-pre-cloud-main"
description = "D4Science Preprod private network (use this as the main network)"
}
main_private_subnet = {
name = "d4s-pre-cloud-main-subnet"
description = "D4Science Preprod main private subnet"
cidr = "10.1.32.0/22"
gateway_ip = "10.1.32.1"
allocation_start = "10.1.32.100"
allocation_end = "10.1.35.254"
}
external_router = {
name = "d4s-pre-cloud-external-router"
description = "D4Science Preprod main router"
id = "cc26064a-bb08-4c0b-929f-d0cb39f934a3"
}
basic_services_ip = {
ca = "10.1.32.4"
ca_cidr = "10.1.32.4/32"
ssh_jump = "10.1.32.5"
ssh_jump_cidr = "10.1.32.5/32"
prometheus = "10.1.32.10"
prometheus_cidr = "10.1.32.10/32"
haproxy_l7_1 = "10.1.32.11"
haproxy_l7_1_cidr = "10.1.32.11/32"
haproxy_l7_2 = "10.1.32.12"
haproxy_l7_2_cidr = "10.1.32.12/32"
octavia_main = "10.1.32.20"
octavia_main_cidr = "10.1.32.20/32"
}
main_haproxy_l7_ip = ["10.1.32.11", "10.1.32.12"]
liferay_data = {
affinity_policy = "soft-anti-affinity"
srv_name = "lr62"
vm_count = 2
vm_flavor = "m1.large"
boot_vol_size = 30
}
liferay_ip_addrs = ["10.1.32.24", "10.1.32.25"]
}

View File

@ -0,0 +1,3 @@
provider "openstack" {
cloud = "d4s-pre"
}

View File

@ -0,0 +1,443 @@
{
"version": 4,
"terraform_version": "1.6.4",
"serial": 16,
"lineage": "2cef4407-f7f5-0a46-74de-03956dd178ed",
"outputs": {},
"resources": [
{
"mode": "data",
"type": "terraform_remote_state",
"name": "privnet_dns_router",
"provider": "provider[\"terraform.io/builtin/terraform\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"backend": "local",
"config": {
"value": {
"path": "../project-setup/terraform.tfstate"
},
"type": [
"object",
{
"path": "string"
}
]
},
"defaults": null,
"outputs": {
"value": {
"almalinux9_img": {
"name": "AlmaLinux-9.0-20220718",
"uuid": "541650fc-dd19-4f38-bb1d-7333ed9dd688"
},
"availability_zone_no_gpu_name": "cnr-isti-nova-a",
"availability_zone_with_gpu_name": "cnr-isti-nova-gpu-a",
"centos7_img": {
"name": "CentOS-7",
"uuid": "f0187a99-64f6-462a-ab5f-ef52fe62f2ca"
},
"dns_zone_id": "c1a4b4bc-f167-4387-855d-38f0f99ca05c",
"el7_datafile": "../../openstack_vm_data_scripts/el7.sh",
"external_gateway_ip": "146.48.30.241",
"external_network_id": "1d2ff137-6ff7-4017-be2b-0d6c4af2353b",
"external_network_name": "external-network",
"main_private_network_id": "23fd8a99-d551-4ada-8d3a-9859542ebb8c",
"main_region_name": "isti_area_pi_1",
"main_subnet_network_id": "cd77a2fd-4a36-4254-b1d0-70b3874c6d04",
"mtu_size_value": 8942,
"resolvers_ip": [
"146.48.29.97",
"146.48.29.98",
"146.48.29.99"
],
"ssh_sources_list": {
"d4s_vpn_1_cidr": "146.48.122.27/32",
"d4s_vpn_2_cidr": "146.48.122.49/32",
"infrascience_net_cidr": "146.48.122.0/23",
"s2i2s_vpn_1_cidr": "146.48.28.10/32",
"s2i2s_vpn_2_cidr": "146.48.28.11/32",
"shell_d4s_cidr": "146.48.122.95/32"
},
"ubuntu1804_datafile": "../../openstack_vm_data_scripts/ubuntu1804.sh",
"ubuntu1804_img": {
"name": "Ubuntu-Bionic-18.04",
"uuid": "7ed6a2cd-2b07-482e-8ce4-f018dff16c89"
},
"ubuntu2204_datafile": "../../openstack_vm_data_scripts/ubuntu2204.sh",
"ubuntu2204_img": {
"name": "Ubuntu-Jammy-22.04",
"uuid": "54768889-8556-4be4-a2eb-82a4d9b34627"
}
},
"type": [
"object",
{
"almalinux9_img": [
"map",
"string"
],
"availability_zone_no_gpu_name": "string",
"availability_zone_with_gpu_name": "string",
"centos7_img": [
"map",
"string"
],
"dns_zone_id": "string",
"el7_datafile": "string",
"external_gateway_ip": "string",
"external_network_id": "string",
"external_network_name": "string",
"main_private_network_id": "string",
"main_region_name": "string",
"main_subnet_network_id": "string",
"mtu_size_value": "number",
"resolvers_ip": [
"list",
"string"
],
"ssh_sources_list": [
"map",
"string"
],
"ubuntu1804_datafile": "string",
"ubuntu1804_img": [
"map",
"string"
],
"ubuntu2204_datafile": "string",
"ubuntu2204_img": [
"map",
"string"
]
}
]
},
"workspace": null
},
"sensitive_attributes": []
}
]
},
{
"module": "module.liferay",
"mode": "managed",
"type": "openstack_compute_instance_v2",
"name": "liferay",
"provider": "provider[\"registry.terraform.io/terraform-provider-openstack/openstack\"]",
"instances": [
{
"index_key": 0,
"schema_version": 0,
"attributes": {
"access_ip_v4": "10.1.32.24",
"access_ip_v6": "",
"admin_pass": null,
"all_metadata": {},
"all_tags": [],
"availability_zone": "cnr-isti-nova-a",
"availability_zone_hints": "cnr-isti-nova-a",
"block_device": [
{
"boot_index": 0,
"delete_on_termination": false,
"destination_type": "volume",
"device_type": "",
"disk_bus": "",
"guest_format": "",
"multiattach": false,
"source_type": "image",
"uuid": "7ed6a2cd-2b07-482e-8ce4-f018dff16c89",
"volume_size": 30,
"volume_type": ""
}
],
"config_drive": null,
"created": "2023-12-01 16:51:07 +0000 UTC",
"flavor_id": "9",
"flavor_name": "m1.large",
"floating_ip": null,
"force_delete": false,
"id": "aaf50b2a-40e2-4bbe-8e4e-39f5d83dd08f",
"image_id": "Attempt to boot from volume - no image supplied",
"image_name": null,
"key_pair": "adellam",
"metadata": null,
"name": "lr62-01",
"network": [
{
"access_network": false,
"fixed_ip_v4": "10.1.32.24",
"fixed_ip_v6": "",
"floating_ip": "",
"mac": "fa:16:3e:93:d4:8f",
"name": "d4s-pre-cloud-main",
"port": "",
"uuid": "23fd8a99-d551-4ada-8d3a-9859542ebb8c"
},
{
"access_network": false,
"fixed_ip_v4": "192.168.2.43",
"fixed_ip_v6": "",
"floating_ip": "",
"mac": "fa:16:3e:32:7c:e5",
"name": "postgresql-srv-net",
"port": "",
"uuid": "e25395f4-f1aa-4819-b5a5-36d25ee5af54"
}
],
"network_mode": null,
"personality": [],
"power_state": "active",
"region": "isti_area_pi_1",
"scheduler_hints": [
{
"additional_properties": {},
"build_near_host_ip": "",
"different_cell": [],
"different_host": [],
"group": "ea1d150d-3dc0-4d03-a09f-b40069d0b70c",
"query": [],
"same_host": [],
"target_cell": ""
}
],
"security_groups": [
"default_for_all",
"liferay_cluster_traffic",
"restricted_web_service",
"traffic_from_the_main_load_balancers"
],
"stop_before_destroy": false,
"tags": [],
"timeouts": null,
"updated": "2023-12-01 16:52:40 +0000 UTC",
"user_data": "47d4769e61324c305c4b70ed6673de4fad84150d",
"vendor_options": [],
"volume": []
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxODAwMDAwMDAwMDAwLCJkZWxldGUiOjE4MDAwMDAwMDAwMDAsInVwZGF0ZSI6MTgwMDAwMDAwMDAwMH19",
"dependencies": [
"module.liferay.openstack_compute_servergroup_v2.liferay",
"module.liferay.openstack_networking_secgroup_v2.liferay_cluster_traffic"
]
},
{
"index_key": 1,
"schema_version": 0,
"attributes": {
"access_ip_v4": "10.1.32.25",
"access_ip_v6": "",
"admin_pass": null,
"all_metadata": {},
"all_tags": [],
"availability_zone": "cnr-isti-nova-a",
"availability_zone_hints": "cnr-isti-nova-a",
"block_device": [
{
"boot_index": 0,
"delete_on_termination": false,
"destination_type": "volume",
"device_type": "",
"disk_bus": "",
"guest_format": "",
"multiattach": false,
"source_type": "image",
"uuid": "7ed6a2cd-2b07-482e-8ce4-f018dff16c89",
"volume_size": 30,
"volume_type": ""
}
],
"config_drive": null,
"created": "2023-12-01 16:51:07 +0000 UTC",
"flavor_id": "9",
"flavor_name": "m1.large",
"floating_ip": null,
"force_delete": false,
"id": "0b9deb27-50ba-409f-a9a7-b3a55b5e5b29",
"image_id": "Attempt to boot from volume - no image supplied",
"image_name": null,
"key_pair": "adellam",
"metadata": null,
"name": "lr62-02",
"network": [
{
"access_network": false,
"fixed_ip_v4": "10.1.32.25",
"fixed_ip_v6": "",
"floating_ip": "",
"mac": "fa:16:3e:93:23:75",
"name": "d4s-pre-cloud-main",
"port": "",
"uuid": "23fd8a99-d551-4ada-8d3a-9859542ebb8c"
},
{
"access_network": false,
"fixed_ip_v4": "192.168.2.233",
"fixed_ip_v6": "",
"floating_ip": "",
"mac": "fa:16:3e:01:04:ec",
"name": "postgresql-srv-net",
"port": "",
"uuid": "e25395f4-f1aa-4819-b5a5-36d25ee5af54"
}
],
"network_mode": null,
"personality": [],
"power_state": "active",
"region": "isti_area_pi_1",
"scheduler_hints": [
{
"additional_properties": {},
"build_near_host_ip": "",
"different_cell": [],
"different_host": [],
"group": "ea1d150d-3dc0-4d03-a09f-b40069d0b70c",
"query": [],
"same_host": [],
"target_cell": ""
}
],
"security_groups": [
"default_for_all",
"liferay_cluster_traffic",
"restricted_web_service",
"traffic_from_the_main_load_balancers"
],
"stop_before_destroy": false,
"tags": [],
"timeouts": null,
"updated": "2023-12-01 16:51:47 +0000 UTC",
"user_data": "47d4769e61324c305c4b70ed6673de4fad84150d",
"vendor_options": [],
"volume": []
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxODAwMDAwMDAwMDAwLCJkZWxldGUiOjE4MDAwMDAwMDAwMDAsInVwZGF0ZSI6MTgwMDAwMDAwMDAwMH19",
"dependencies": [
"module.liferay.openstack_compute_servergroup_v2.liferay",
"module.liferay.openstack_networking_secgroup_v2.liferay_cluster_traffic"
]
}
]
},
{
"module": "module.liferay",
"mode": "managed",
"type": "openstack_compute_servergroup_v2",
"name": "liferay",
"provider": "provider[\"registry.terraform.io/terraform-provider-openstack/openstack\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"id": "ea1d150d-3dc0-4d03-a09f-b40069d0b70c",
"members": [
"aaf50b2a-40e2-4bbe-8e4e-39f5d83dd08f",
"0b9deb27-50ba-409f-a9a7-b3a55b5e5b29"
],
"name": "liferay",
"policies": [
"soft-anti-affinity"
],
"region": "isti_area_pi_1",
"rules": [
{
"max_server_per_host": 0
}
],
"value_specs": null
},
"sensitive_attributes": [],
"private": "bnVsbA=="
}
]
},
{
"module": "module.liferay",
"mode": "managed",
"type": "openstack_networking_secgroup_rule_v2",
"name": "traffic_between_liferay_nodes",
"provider": "provider[\"registry.terraform.io/terraform-provider-openstack/openstack\"]",
"instances": [
{
"index_key": 0,
"schema_version": 0,
"attributes": {
"description": "Traffic between liferay nodes",
"direction": "ingress",
"ethertype": "IPv4",
"id": "c06d140b-d14b-4c31-bf55-3115225ac7bd",
"port_range_max": 0,
"port_range_min": 0,
"protocol": "tcp",
"region": "isti_area_pi_1",
"remote_group_id": "",
"remote_ip_prefix": "10.1.32.24/32",
"security_group_id": "67747d93-a58e-41e2-9486-31ef27d389c4",
"tenant_id": "6fdc02e2827b405dad99f34698659742",
"timeouts": null
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjo2MDAwMDAwMDAwMDB9fQ==",
"dependencies": [
"module.liferay.openstack_networking_secgroup_v2.liferay_cluster_traffic"
]
},
{
"index_key": 1,
"schema_version": 0,
"attributes": {
"description": "Traffic between liferay nodes",
"direction": "ingress",
"ethertype": "IPv4",
"id": "1367e3f1-f815-43df-aee9-fd219cb257d9",
"port_range_max": 0,
"port_range_min": 0,
"protocol": "tcp",
"region": "isti_area_pi_1",
"remote_group_id": "",
"remote_ip_prefix": "10.1.32.25/32",
"security_group_id": "67747d93-a58e-41e2-9486-31ef27d389c4",
"tenant_id": "6fdc02e2827b405dad99f34698659742",
"timeouts": null
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjo2MDAwMDAwMDAwMDB9fQ==",
"dependencies": [
"module.liferay.openstack_networking_secgroup_v2.liferay_cluster_traffic"
]
}
]
},
{
"module": "module.liferay",
"mode": "managed",
"type": "openstack_networking_secgroup_v2",
"name": "liferay_cluster_traffic",
"provider": "provider[\"registry.terraform.io/terraform-provider-openstack/openstack\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"all_tags": [],
"delete_default_rules": true,
"description": "Traffic between the Liferay cluster nodes",
"id": "67747d93-a58e-41e2-9486-31ef27d389c4",
"name": "liferay_cluster_traffic",
"region": "isti_area_pi_1",
"tags": [],
"tenant_id": "6fdc02e2827b405dad99f34698659742",
"timeouts": null
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjo2MDAwMDAwMDAwMDB9fQ=="
}
]
}
],
"check_results": null
}

View File

@ -1 +0,0 @@
../../common_setups/15-security-groups.tf

View File

@ -1 +0,0 @@
../../common_setups/20-octavia.tf

View File

@ -1 +0,0 @@
../../common_setups/25-ssh-jump-proxy.tf

View File

@ -1 +0,0 @@
../../common_setups/30-internal-ca.tf

View File

@ -1 +0,0 @@
../../common_setups/35-prometheus.tf

View File

@ -1 +0,0 @@
../../common_setups/40-postgresql.tf

View File

@ -7,6 +7,6 @@ resource "openstack_blockstorage_volume_v3" "shared_postgresql_backup_vol" {
resource "openstack_compute_volume_attach_v2" "shared_postgresql_backup_attach_vol" {
instance_id = openstack_compute_instance_v2.shared_postgresql_server.id
volume_id = openstack_blockstorage_volume_v3.shared_postgresql_backup_vol.id
device = var.shared_postgresql_server_data.vol_backup_device
depends_on = [openstack_compute_instance_v2.shared_postgresql_server]
device = var.shared_postgresql_server_data.vol_backup_device
depends_on = [openstack_compute_instance_v2.shared_postgresql_server]
}

View File

@ -1 +0,0 @@
../../common_setups/45-haproxy.tf

View File

@ -0,0 +1 @@
../../modules/d4science_infra_setup/haproxy.tf

View File

@ -0,0 +1 @@
../../modules/d4science_infra_setup/internal-ca.tf

View File

@ -0,0 +1 @@
../../modules/d4science_infra_setup/octavia.tf

View File

@ -0,0 +1 @@
../../modules/d4science_infra_setup/postgresql.tf

View File

@ -1,62 +1,62 @@
default_security_group_name = "default_for_all"
shared_postgresql_server_data = {
name ="shared-postgresql-server"
flavor = "m1.large"
vol_data_name = "shared-postgresql-data"
vol_data_size = "300"
vol_data_device = "/dev/vdb"
vol_backup_name = "shared-postgresql-backup-data"
vol_backup_size = "100"
vol_backup_device = "/dev/vdc"
network_name = "postgresql-srv-net"
network_description = "Network used to communicate with the shared postgresql service"
network_cidr = "192.168.0.0/22"
name = "shared-postgresql-server"
flavor = "m1.large"
vol_data_name = "shared-postgresql-data"
vol_data_size = "300"
vol_data_device = "/dev/vdb"
vol_backup_name = "shared-postgresql-backup-data"
vol_backup_size = "100"
vol_backup_device = "/dev/vdc"
network_name = "postgresql-srv-net"
network_description = "Network used to communicate with the shared postgresql service"
network_cidr = "192.168.0.0/22"
allocation_pool_start = "192.168.0.100"
allocation_pool_end = "192.168.3.254"
server_ip = "192.168.0.5"
server_cidr = "192.168.0.5/22"
allocation_pool_end = "192.168.3.254"
server_ip = "192.168.0.5"
server_cidr = "192.168.0.5/22"
}
# Provided in the output of the project setup
main_private_network_id = "020df98d-ae72-452a-b376-3b6dc289acac"
main_private_subnet_id = "5d7b83ad-e058-4a3a-bfd8-d20ba6d42e1a"
dns_zone_id = "74135b34-1a9c-4c01-8cf0-22450a5660c4"
main_private_subnet_id = "5d7b83ad-e058-4a3a-bfd8-d20ba6d42e1a"
dns_zone_id = "74135b34-1a9c-4c01-8cf0-22450a5660c4"
octavia_information = {
main_lb_name = "d4s-production-cloud-l4-load-balancer"
main_lb_description = "Main L4 load balancer for the D4Science production"
swarm_lb_name = "d4s-production-cloud-l4-swarm-load-balancer"
octavia_flavor = "octavia_amphora-mvcpu-ha"
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
main_lb_hostname = "main-lb"
# The following aren't available when the module runs so we have to get them with the command
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
# This means that the execution will fail
octavia_vrrp_ip_1 = "10.1.42.119/32"
octavia_vrrp_ip_2 = "10.1.42.188/32"
main_lb_name = "d4s-production-cloud-l4-load-balancer"
main_lb_description = "Main L4 load balancer for the D4Science production"
swarm_lb_name = "d4s-production-cloud-l4-swarm-load-balancer"
octavia_flavor = "octavia_amphora-mvcpu-ha"
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
main_lb_hostname = "main-lb"
# The following aren't available when the module runs so we have to get them with the command
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
# This means that the execution will fail
octavia_vrrp_ip_1 = "10.1.42.119/32"
octavia_vrrp_ip_2 = "10.1.42.188/32"
}
docker_swarm_data = {
mgr_name = "swarm-mgr"
mgr1_ip = "10.1.40.31"
mgr1_cidr = "10.1.40.31/32"
mgr2_ip = "10.1.40.32"
mgr2_cidr = "10.1.40.32/32"
mgr3_ip = "10.1.40.33"
mgr3_cidr = "10.1.40.33/32"
mgr_count = 3
mgr_flavor = "m1.large"
mgr_data_disk_size = 100
worker_name = "swarm-worker"
worker_count = 8
worker_flavor = "m1.xxl"
worker_data_disk_size = 200
nfs_server_name = "swarm-nfs-server"
nfs_server_flavor = "m1.medium"
nfs_server_data_disk_name = "Swarm NFS server data Disk"
nfs_server_data_disk_size = 200
nfs_server_data_disk_device = "/dev/vdb"
mgr_name = "swarm-mgr"
mgr1_ip = "10.1.40.31"
mgr1_cidr = "10.1.40.31/32"
mgr2_ip = "10.1.40.32"
mgr2_cidr = "10.1.40.32/32"
mgr3_ip = "10.1.40.33"
mgr3_cidr = "10.1.40.33/32"
mgr_count = 3
mgr_flavor = "m1.large"
mgr_data_disk_size = 100
worker_name = "swarm-worker"
worker_count = 8
worker_flavor = "m1.xxl"
worker_data_disk_size = 200
nfs_server_name = "swarm-nfs-server"
nfs_server_flavor = "m1.medium"
nfs_server_data_disk_name = "Swarm NFS server data Disk"
nfs_server_data_disk_size = 200
nfs_server_data_disk_device = "/dev/vdb"
}
swarm_managers_ip = ["10.1.40.31", "10.1.40.32", "10.1.40.33"]
swarm_managers_ip = ["10.1.40.31", "10.1.40.32", "10.1.40.33"]

View File

@ -1,15 +1,15 @@
octavia_swarm_data = {
swarm_lb_name = "d4s-production-cloud-swarm-l4"
swarm_lb_description = "L4 balancer that serves the D4Science production Docker Swarm cluster"
swarm_lb_name = "d4s-production-cloud-swarm-l4"
octavia_flavor = "octavia_amphora-mvcpu-ha"
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
swarm_lb_hostname = "swarm-lb"
swarm_octavia_main_ip = "10.1.40.30"
swarm_octavia_main_cidr = "10.1.40.30/32"
# The following aren't available when the module runs so we have to get them with the command
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
# This means that the execution will fail
octavia_vrrp_ip_1 = "10.1.43.97/32"
octavia_vrrp_ip_2 = "10.1.44.78/32"
swarm_lb_name = "d4s-production-cloud-swarm-l4"
swarm_lb_description = "L4 balancer that serves the D4Science production Docker Swarm cluster"
swarm_lb_name = "d4s-production-cloud-swarm-l4"
octavia_flavor = "octavia_amphora-mvcpu-ha"
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
swarm_lb_hostname = "swarm-lb"
swarm_octavia_main_ip = "10.1.40.30"
swarm_octavia_main_cidr = "10.1.40.30/32"
# The following aren't available when the module runs so we have to get them with the command
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
# This means that the execution will fail
octavia_vrrp_ip_1 = "10.1.43.97/32"
octavia_vrrp_ip_2 = "10.1.44.78/32"
}

View File

@ -0,0 +1 @@
../../modules/d4science_infra_setup/prometheus.tf

View File

@ -1,6 +1,6 @@
# Define required providers
terraform {
required_version = ">= 0.14.0"
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
@ -10,5 +10,5 @@ required_version = ">= 0.14.0"
}
provider "openstack" {
cloud = "d4s-production"
cloud = "d4s-production"
}

View File

@ -0,0 +1 @@
../../modules/d4science_infra_setup/security-groups.tf

View File

@ -0,0 +1 @@
../../modules/d4science_infra_setup/ssh-jump-proxy.tf

View File

@ -6,13 +6,13 @@ variable "os_project_data" {
}
variable "dns_zone" {
type = map(string)
type = map(string)
default = {
zone_name = "cloud.d4science.org."
email = "postmaster@isti.cnr.it"
zone_name = "cloud.d4science.org."
email = "postmaster@isti.cnr.it"
description = "DNS primary zone for the d4s-production-cloud project"
ttl = 8600
}
ttl = 8600
}
}
variable "dns_zone_id" {
@ -27,9 +27,9 @@ variable "default_security_group_name" {
variable "main_private_network" {
type = map(string)
default = {
name = "d4s-production-cloud-main"
name = "d4s-production-cloud-main"
description = "D4Science Production private network (use this as the main network)"
}
}
}
variable "main_private_network_id" {
@ -40,13 +40,13 @@ variable "main_private_network_id" {
variable "main_private_subnet" {
type = map(string)
default = {
name = "d4s-production-cloud-main-subnet"
description = "D4Science Production main private subnet"
cidr = "10.1.40.0/21"
gateway_ip = "10.1.40.1"
name = "d4s-production-cloud-main-subnet"
description = "D4Science Production main private subnet"
cidr = "10.1.40.0/21"
gateway_ip = "10.1.40.1"
allocation_start = "10.1.41.100"
allocation_end = "10.1.47.254"
}
allocation_end = "10.1.47.254"
}
}
variable "main_private_subnet_id" {
@ -57,45 +57,45 @@ variable "main_private_subnet_id" {
variable "external_router" {
type = map(string)
default = {
name = "d4s-production-cloud-external-router"
name = "d4s-production-cloud-external-router"
description = "D4Science Production main router"
id = "cc26064a-bb08-4c0b-929f-d0cb39f934a3"
}
id = "cc26064a-bb08-4c0b-929f-d0cb39f934a3"
}
}
variable "basic_services_ip" {
type = map(string)
default = {
ca = "10.1.40.4"
ca_cidr = "10.1.40.4/32"
ssh_jump = "10.1.40.5"
ssh_jump_cidr = "10.1.40.5/32"
prometheus = "10.1.40.10"
prometheus_cidr = "10.1.40.10/32"
haproxy_l7_1 = "10.1.40.11"
ca = "10.1.40.4"
ca_cidr = "10.1.40.4/32"
ssh_jump = "10.1.40.5"
ssh_jump_cidr = "10.1.40.5/32"
prometheus = "10.1.40.10"
prometheus_cidr = "10.1.40.10/32"
haproxy_l7_1 = "10.1.40.11"
haproxy_l7_1_cidr = "10.1.40.11/32"
haproxy_l7_2 = "10.1.40.12"
haproxy_l7_2 = "10.1.40.12"
haproxy_l7_2_cidr = "10.1.40.12/32"
octavia_main = "10.1.40.20"
octavia_main = "10.1.40.20"
octavia_main_cidr = "10.1.40.20/32"
}
}
variable "main_haproxy_l7_ip" {
type = list(string)
type = list(string)
default = ["10.1.40.11", "10.1.40.12"]
}
variable "octavia_information" {
type = map(string)
default = {
main_lb_name = "d4s-production-cloud-l4-load-balancer"
main_lb_name = "d4s-production-cloud-l4-load-balancer"
main_lb_description = "Main L4 load balancer for the D4Science production"
swarm_lb_name = "d4s-production-cloud-l4-swarm-load-balancer"
octavia_flavor = "octavia_amphora-mvcpu-ha"
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
main_lb_hostname = "main-lb"
swarm_lb_name = "d4s-production-cloud-l4-swarm-load-balancer"
octavia_flavor = "octavia_amphora-mvcpu-ha"
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
main_lb_hostname = "main-lb"
}
}

View File

@ -62,14 +62,14 @@ variable "networks_with_d4s_services" {
}
variable "dns_zone" {
type = map(string)
type = map(string)
default = {
zone_name = ""
email = "postmaster@isti.cnr.it"
zone_name = ""
email = "postmaster@isti.cnr.it"
description = ""
ttl = 8600
id = ""
}
ttl = 8600
id = ""
}
}
variable "dns_zone_id" {
@ -80,9 +80,9 @@ variable "dns_zone_id" {
variable "main_private_network" {
type = map(string)
default = {
name = ""
name = ""
description = ""
}
}
}
variable "main_private_network_id" {
@ -93,13 +93,13 @@ variable "main_private_network_id" {
variable "main_private_subnet" {
type = map(string)
default = {
name = ""
description = ""
cidr = ""
gateway_ip = ""
name = ""
description = ""
cidr = ""
gateway_ip = ""
allocation_start = ""
allocation_end = ""
}
allocation_end = ""
}
}
variable "main_private_subnet_id" {
@ -110,10 +110,10 @@ variable "main_private_subnet_id" {
variable "external_router" {
type = map(string)
default = {
name = ""
name = ""
description = ""
id = ""
}
id = ""
}
}
variable "ubuntu_1804" {
@ -247,42 +247,42 @@ variable "default_security_group_name" {
variable "basic_services_ip" {
type = map(string)
default = {
ca = ""
ca_cidr = ""
ssh_jump = ""
ssh_jump_cidr = ""
prometheus = ""
prometheus_cidr = ""
haproxy_l7_1 = ""
ca = ""
ca_cidr = ""
ssh_jump = ""
ssh_jump_cidr = ""
prometheus = ""
prometheus_cidr = ""
haproxy_l7_1 = ""
haproxy_l7_1_cidr = ""
haproxy_l7_2 = ""
haproxy_l7_2 = ""
haproxy_l7_2_cidr = ""
octavia_main = ""
octavia_main = ""
octavia_main_cidr = ""
}
}
variable "main_haproxy_l7_ip" {
type = list(string)
type = list(string)
default = []
}
variable "octavia_information" {
type = map(string)
default = {
main_lb_name = ""
main_lb_name = ""
main_lb_description = ""
swarm_lb_name = ""
octavia_flavor = ""
octavia_flavor_id = ""
main_lb_hostname = ""
swarm_lb_name = ""
octavia_flavor = ""
octavia_flavor_id = ""
main_lb_hostname = ""
# The following aren't available when the module runs so we have to get them with the command
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
# This means that the execution will fail
octavia_vrrp_ip_1 = ""
octavia_vrrp_ip_2 = ""
}
}
}
# Added by Francesco
@ -327,3 +327,41 @@ variable "flavor_list" {
m2_large = "m2.large" #RAM 32 - VCPUs 8
}
}
# Added by Francesco
variable "security_group_list" {
type = map(string)
default = {
postgreSQL = "PostgreSQL service"
acaland = "acaland's dev machine"
haproxy = "HAPROXY L7"
access_to_orientdb = "access_to_orientdb"
dataminer-publish = "dataminer-publish"
docker_swarm_NFS = "Docker Swarm NFS"
public_HTTPS = "Public HTTPS"
haproxy = "HAPROXY L7"
orientdb_internal_docker_traffic = "orientdb_internal_docker_traffic"
limited_SSH_access = "Limited SSH access"
access_to_the_timescaledb_service = "access_to_the_timescaledb_service"
docker_swarm = "Docker Swarm"
http_and_https_from_the_load_balancers = "http and https from the load balancers"
limited_HTTPS_access = "Limited HTTPS access"
mongo = "mongo"
limited_SSH_access = "Limited SSH access"
default = "default"
cassandra = "Cassandra"
access_to_orientdb_se = "access_to_orientdb_se"
}
}
variable "networks_list" {
type = map(string)
default = {
shared_postgresql = "postgresql-srv-net"
swarm = "swarm-nfs-net"
timescaledb = "timescaledb-net"
orientdb = "orientdb-net"
orientdb_se = "orientdb-se-net"
}
}

View File

@ -15,113 +15,113 @@ resource "openstack_compute_servergroup_v2" "main_haproxy_l7" {
}
# Security group
resource "openstack_networking_secgroup_v2" "main_lb_to_haproxy_l7" {
name = "traffic_from_main_lb_to_haproxy_l7"
delete_default_rules = "true"
description = "Traffic coming the main L4 lb directed to the haproxy l7 servers"
name = "traffic_from_main_lb_to_haproxy_l7"
delete_default_rules = "true"
description = "Traffic coming the main L4 lb directed to the haproxy l7 servers"
}
resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_1_peer" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Peer traffic from haproxy l7 1 to l7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 10000
port_range_max = 10000
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Peer traffic from haproxy l7 1 to l7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 10000
port_range_max = 10000
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_2_peer" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Peer traffic from haproxy l7 2 to l7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 10000
port_range_max = 10000
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Peer traffic from haproxy l7 2 to l7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 10000
port_range_max = 10000
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_80" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 1 port 80"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_1
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 1 port 80"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
}
resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_443" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 1 port 443"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_1
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 1 port 443"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
}
resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_8080" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 1 port 8080"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_1
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 1 port 8080"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
}
resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_80" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 2 port 80"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_2
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 2 port 80"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_2
}
resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_443" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 2 port 443"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_2
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 2 port 443"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_2
}
resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_8080" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 2 port 8080"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = module.common_variables.octavia_information.octavia_vrrp_ip_2
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the first main lb instance to HAPROXY l7 2 port 8080"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_2
}
# Instance
resource "openstack_compute_instance_v2" "main_haproxy_l7" {
count = module.common_variables.haproxy_l7_data.vm_count
name = format("%s-%02d", module.common_variables.haproxy_l7_data.name, count.index+1)
availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
flavor_name = module.common_variables.haproxy_l7_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [module.common_variables.default_security_group_name,openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.name]
count = var.haproxy_l7_data.vm_count
name = format("%s-%02d", var.haproxy_l7_data.name, count.index + 1)
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.haproxy_l7_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.name]
scheduler_hints {
group = openstack_compute_servergroup_v2.main_haproxy_l7.id
}
block_device {
uuid = module.common_variables.ubuntu_2204.uuid
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
@ -130,9 +130,9 @@ resource "openstack_compute_instance_v2" "main_haproxy_l7" {
}
network {
name = module.common_variables.main_private_network.name
fixed_ip_v4 = module.common_variables.main_haproxy_l7_ip.*[count.index]
name = var.main_private_network.name
fixed_ip_v4 = var.main_haproxy_l7_ip.* [count.index]
}
user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
user_data = file("${var.ubuntu2204_data_file}")
}

View File

@ -1,11 +1,11 @@
resource "openstack_compute_instance_v2" "internal_ca" {
name = module.common_variables.internal_ca_data.name
availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
flavor_name = module.common_variables.internal_ca_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [module.common_variables.default_security_group_name]
name = var.internal_ca_data.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.internal_ca_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name]
block_device {
uuid = module.common_variables.ubuntu_2204.uuid
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
@ -14,8 +14,8 @@ resource "openstack_compute_instance_v2" "internal_ca" {
}
network {
name = module.common_variables.main_private_network.name
fixed_ip_v4 = module.common_variables.basic_services_ip.ca
name = var.main_private_network.name
fixed_ip_v4 = var.basic_services_ip.ca
}
user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
user_data = file("${var.ubuntu2204_data_file}")
}

View File

@ -1,33 +1,33 @@
# Main load balancer. L4, backed by Octavia
resource "openstack_lb_loadbalancer_v2" "main_lb" {
vip_subnet_id = module.common_variables.main_private_subnet_id
name = module.common_variables.octavia_information.main_lb_name
description = module.common_variables.octavia_information.main_lb_description
flavor_id = module.common_variables.octavia_information.octavia_flavor_id
vip_address = module.common_variables.basic_services_ip.octavia_main
loadbalancer_provider = "amphora"
vip_subnet_id = var.main_private_subnet_id
name = var.octavia_information.main_lb_name
description = var.octavia_information.main_lb_description
flavor_id = var.octavia_information.octavia_flavor_id
vip_address = var.basic_services_ip.octavia_main
loadbalancer_provider = "amphora"
}
# Allocate a floating IP
resource "openstack_networking_floatingip_v2" "main_lb_ip" {
pool = module.common_variables.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
# dns_name = "main-lb"
# dns_domain = module.common_variables.dns_zone.zone_name
description = module.common_variables.octavia_information.main_lb_description
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
# dns_name = "main-lb"
# dns_domain = var.dns_zone.zone_name
description = var.octavia_information.main_lb_description
}
resource "openstack_networking_floatingip_associate_v2" "main_lb" {
floating_ip = openstack_networking_floatingip_v2.main_lb_ip.address
port_id = openstack_lb_loadbalancer_v2.main_lb.vip_port_id
floating_ip = openstack_networking_floatingip_v2.main_lb_ip.address
port_id = openstack_lb_loadbalancer_v2.main_lb.vip_port_id
}
locals {
recordset_name = "${module.common_variables.octavia_information.main_lb_hostname}.${module.common_variables.dns_zone.zone_name}"
recordset_name = "${var.octavia_information.main_lb_hostname}.${var.dns_zone.zone_name}"
}
resource "openstack_dns_recordset_v2" "main_lb_dns_recordset" {
zone_id = module.common_variables.dns_zone_id
zone_id = var.dns_zone_id
name = local.recordset_name
description = "Public IP address of the main load balancer"
ttl = 8600
@ -37,146 +37,146 @@ resource "openstack_dns_recordset_v2" "main_lb_dns_recordset" {
# Main HAPROXY stats listener
resource "openstack_lb_listener_v2" "main_haproxy_stats_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 8880
description = "Listener for the stats of the main HAPROXY instances"
name = "main_haproxy_stats_listener"
allowed_cidrs = [module.common_variables.ssh_sources.d4s_vpn_1_cidr,module.common_variables.ssh_sources.d4s_vpn_2_cidr,module.common_variables.ssh_sources.s2i2s_vpn_1_cidr,module.common_variables.ssh_sources.s2i2s_vpn_2_cidr]
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 8880
description = "Listener for the stats of the main HAPROXY instances"
name = "main_haproxy_stats_listener"
allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
}
resource "openstack_lb_pool_v2" "main_haproxy_stats_pool" {
listener_id = openstack_lb_listener_v2.main_haproxy_stats_listener.id
protocol = "TCP"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-stats"
description = "Pool for the stats of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
listener_id = openstack_lb_listener_v2.main_haproxy_stats_listener.id
protocol = "TCP"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-stats"
description = "Pool for the stats of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
}
resource "openstack_lb_members_v2" "main_haproxy_stats_pool_members" {
pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
member {
name = "haproxy l7 1"
address = module.common_variables.basic_services_ip.haproxy_l7_1
protocol_port = 8880
}
member {
name = "haproxy l7 2"
address = module.common_variables.basic_services_ip.haproxy_l7_2
protocol_port = 8880
}
pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
member {
name = "haproxy l7 1"
address = var.basic_services_ip.haproxy_l7_1
protocol_port = 8880
}
member {
name = "haproxy l7 2"
address = var.basic_services_ip.haproxy_l7_2
protocol_port = 8880
}
}
resource "openstack_lb_monitor_v2" "main_haproxy_stats_monitor" {
pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
name = "main_haproxy_stats_monitor"
type = "TCP"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
name = "main_haproxy_stats_monitor"
type = "TCP"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# Main HAPROXY HTTP
resource "openstack_lb_listener_v2" "main_haproxy_http_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 80
description = "HTTP listener of the main HAPROXY instances"
name = "main_haproxy_http_listener"
admin_state_up = true
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 80
description = "HTTP listener of the main HAPROXY instances"
name = "main_haproxy_http_listener"
admin_state_up = true
}
resource "openstack_lb_pool_v2" "main_haproxy_http_pool" {
listener_id = openstack_lb_listener_v2.main_haproxy_http_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-http"
description = "Pool for the HTTP listener of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
listener_id = openstack_lb_listener_v2.main_haproxy_http_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-http"
description = "Pool for the HTTP listener of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
}
resource "openstack_lb_members_v2" "main_haproxy_http_pool_members" {
pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
member {
name = "haproxy l7 1"
address = module.common_variables.basic_services_ip.haproxy_l7_1
protocol_port = 80
}
member {
name = "haproxy l7 2"
address = module.common_variables.basic_services_ip.haproxy_l7_2
protocol_port = 80
}
pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
member {
name = "haproxy l7 1"
address = var.basic_services_ip.haproxy_l7_1
protocol_port = 80
}
member {
name = "haproxy l7 2"
address = var.basic_services_ip.haproxy_l7_2
protocol_port = 80
}
}
resource "openstack_lb_monitor_v2" "main_haproxy_http_monitor" {
pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
name = "main_haproxy_http_monitor"
type = "HTTP"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
name = "main_haproxy_http_monitor"
type = "HTTP"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# Main HAPROXY HTTPS
resource "openstack_lb_listener_v2" "main_haproxy_https_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 443
description = "HTTPS listener of the main HAPROXY instances"
name = "main_haproxy_https_listener"
admin_state_up = true
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 443
description = "HTTPS listener of the main HAPROXY instances"
name = "main_haproxy_https_listener"
admin_state_up = true
}
resource "openstack_lb_pool_v2" "main_haproxy_https_pool" {
listener_id = openstack_lb_listener_v2.main_haproxy_https_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-https"
description = "Pool for the HTTPS listener of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
listener_id = openstack_lb_listener_v2.main_haproxy_https_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-https"
description = "Pool for the HTTPS listener of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
}
resource "openstack_lb_members_v2" "main_haproxy_https_pool_members" {
pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
member {
name = "haproxy l7 1"
address = module.common_variables.basic_services_ip.haproxy_l7_1
protocol_port = 443
}
member {
name = "haproxy l7 2"
address = module.common_variables.basic_services_ip.haproxy_l7_2
protocol_port = 443
}
pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
member {
name = "haproxy l7 1"
address = var.basic_services_ip.haproxy_l7_1
protocol_port = 443
}
member {
name = "haproxy l7 2"
address = var.basic_services_ip.haproxy_l7_2
protocol_port = 443
}
}
resource "openstack_lb_monitor_v2" "main_haproxy_https_monitor" {
pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
name = "main_haproxy_https_monitor"
type = "HTTPS"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
name = "main_haproxy_https_monitor"
type = "HTTPS"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
output "main_loadbalancer_ip" {

View File

@ -0,0 +1 @@
../common_variables/outputs.tf

View File

@ -1,66 +1,66 @@
# PostgreSQL shared server
# Network
resource "openstack_networking_network_v2" "shared_postgresql_net" {
name = module.common_variables.shared_postgresql_server_data.network_name
admin_state_up = "true"
external = "false"
description = module.common_variables.shared_postgresql_server_data.network_description
dns_domain = module.common_variables.dns_zone.zone_name
mtu = module.common_variables.mtu_size
name = var.shared_postgresql_server_data.network_name
admin_state_up = "true"
external = "false"
description = var.shared_postgresql_server_data.network_description
dns_domain = var.dns_zone.zone_name
mtu = var.mtu_size
port_security_enabled = true
shared = false
region = module.common_variables.main_region
shared = false
region = var.main_region
}
# Subnet
resource "openstack_networking_subnet_v2" "shared_postgresql_subnet" {
name = "shared-postgresql-subnet"
description = "subnet used to connect to the shared PostgreSQL service"
description = "subnet used to connect to the shared PostgreSQL service"
network_id = openstack_networking_network_v2.shared_postgresql_net.id
cidr = module.common_variables.shared_postgresql_server_data.network_cidr
dns_nameservers = module.common_variables.resolvers_ip
ip_version = 4
enable_dhcp = true
no_gateway = true
cidr = var.shared_postgresql_server_data.network_cidr
dns_nameservers = var.resolvers_ip
ip_version = 4
enable_dhcp = true
no_gateway = true
allocation_pool {
start = module.common_variables.shared_postgresql_server_data.allocation_pool_start
end = module.common_variables.shared_postgresql_server_data.allocation_pool_end
start = var.shared_postgresql_server_data.allocation_pool_start
end = var.shared_postgresql_server_data.allocation_pool_end
}
}
# Security group
resource "openstack_networking_secgroup_v2" "shared_postgresql_access" {
name = "access_to_the_shared_postgresql_service"
delete_default_rules = "true"
description = "Access the shared PostgreSQL service using the dedicated network"
name = "access_to_the_shared_postgresql_service"
delete_default_rules = "true"
description = "Access the shared PostgreSQL service using the dedicated network"
}
resource "openstack_networking_secgroup_rule_v2" "shared_postgresql_access_from_dedicated_subnet" {
security_group_id = openstack_networking_secgroup_v2.shared_postgresql_access.id
description = "Allow connections to port 5432 from the 192.168.2.0/22 network"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 5432
port_range_max = 5432
remote_ip_prefix = module.common_variables.shared_postgresql_server_data.network_cidr
security_group_id = openstack_networking_secgroup_v2.shared_postgresql_access.id
description = "Allow connections to port 5432 from the 192.168.2.0/22 network"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 5432
port_range_max = 5432
remote_ip_prefix = var.shared_postgresql_server_data.network_cidr
}
# Block device
resource "openstack_blockstorage_volume_v3" "shared_postgresql_data_vol" {
name = module.common_variables.shared_postgresql_server_data.vol_data_name
size = module.common_variables.shared_postgresql_server_data.vol_data_size
name = var.shared_postgresql_server_data.vol_data_name
size = var.shared_postgresql_server_data.vol_data_size
}
# Instance
resource "openstack_compute_instance_v2" "shared_postgresql_server" {
name = module.common_variables.shared_postgresql_server_data.name
availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
flavor_name = module.common_variables.shared_postgresql_server_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [module.common_variables.default_security_group_name,openstack_networking_secgroup_v2.shared_postgresql_access.name]
name = var.shared_postgresql_server_data.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.shared_postgresql_server_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.shared_postgresql_access.name]
block_device {
uuid = module.common_variables.ubuntu_2204.uuid
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
@ -69,19 +69,19 @@ resource "openstack_compute_instance_v2" "shared_postgresql_server" {
}
network {
name = module.common_variables.main_private_network.name
name = var.main_private_network.name
}
network {
name = module.common_variables.shared_postgresql_server_data.network_name
fixed_ip_v4 = module.common_variables.shared_postgresql_server_data.server_ip
name = var.shared_postgresql_server_data.network_name
fixed_ip_v4 = var.shared_postgresql_server_data.server_ip
}
user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
user_data = file("${var.ubuntu2204_data_file}")
}
resource "openstack_compute_volume_attach_v2" "shared_postgresql_data_attach_vol" {
instance_id = openstack_compute_instance_v2.shared_postgresql_server.id
volume_id = openstack_blockstorage_volume_v3.shared_postgresql_data_vol.id
device = module.common_variables.shared_postgresql_server_data.vol_data_device
depends_on = [openstack_compute_instance_v2.shared_postgresql_server]
device = var.shared_postgresql_server_data.vol_data_device
depends_on = [openstack_compute_instance_v2.shared_postgresql_server]
}

View File

@ -1,17 +1,17 @@
# Promertheus server. A floating IP is required
resource "openstack_blockstorage_volume_v3" "prometheus_data_vol" {
name = module.common_variables.prometheus_server_data.vol_data_name
size = module.common_variables.prometheus_server_data.vol_data_size
name = var.prometheus_server_data.vol_data_name
size = var.prometheus_server_data.vol_data_size
}
resource "openstack_compute_instance_v2" "prometheus_server" {
name = module.common_variables.prometheus_server_data.name
availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
flavor_name = module.common_variables.prometheus_server_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [module.common_variables.default_security_group_name,openstack_networking_secgroup_v2.restricted_web.name,openstack_networking_secgroup_v2.prometheus_access_from_grafana.name]
name = var.prometheus_server_data.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.prometheus_server_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.restricted_web.name, openstack_networking_secgroup_v2.prometheus_access_from_grafana.name]
block_device {
uuid = module.common_variables.ubuntu_2204.uuid
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
@ -20,23 +20,23 @@ resource "openstack_compute_instance_v2" "prometheus_server" {
}
network {
name = module.common_variables.main_private_network.name
fixed_ip_v4 = module.common_variables.basic_services_ip.prometheus
name = var.main_private_network.name
fixed_ip_v4 = var.basic_services_ip.prometheus
}
user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
user_data = file("${var.ubuntu2204_data_file}")
}
resource "openstack_compute_volume_attach_v2" "prometheus_data_attach_vol" {
instance_id = openstack_compute_instance_v2.prometheus_server.id
volume_id = openstack_blockstorage_volume_v3.prometheus_data_vol.id
device = module.common_variables.prometheus_server_data.vol_data_device
device = var.prometheus_server_data.vol_data_device
}
# Floating IP and DNS record
resource "openstack_networking_floatingip_v2" "prometheus_server_ip" {
pool = module.common_variables.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
description = "Prometheus server"
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
description = "Prometheus server"
}
resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
@ -45,12 +45,12 @@ resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
}
locals {
prometheus_recordset_name = "${module.common_variables.prometheus_server_data.name}.${module.common_variables.dns_zone.zone_name}"
alertmanager_recordset_name = "alertmanager.${module.common_variables.dns_zone.zone_name}"
prometheus_recordset_name = "${var.prometheus_server_data.name}.${var.dns_zone.zone_name}"
alertmanager_recordset_name = "alertmanager.${var.dns_zone.zone_name}"
}
resource "openstack_dns_recordset_v2" "prometheus_server_recordset" {
zone_id = module.common_variables.dns_zone_id
zone_id = var.dns_zone_id
name = local.prometheus_recordset_name
description = "Public IP address of the Prometheus server"
ttl = 8600
@ -59,7 +59,7 @@ resource "openstack_dns_recordset_v2" "prometheus_server_recordset" {
}
resource "openstack_dns_recordset_v2" "alertmanager_server_recordset" {
zone_id = module.common_variables.dns_zone_id
zone_id = var.dns_zone_id
name = local.alertmanager_recordset_name
description = "Prometheus alertmanager"
ttl = 8600

View File

@ -1,373 +1,373 @@
#
# This is the security group that should be added to every instance
resource "openstack_networking_secgroup_v2" "default" {
name = module.common_variables.default_security_group_name
delete_default_rules = "true"
description = "Default security group with rules for ssh access via jump proxy, prometheus scraping"
name = var.default_security_group_name
delete_default_rules = "true"
description = "Default security group with rules for ssh access via jump proxy, prometheus scraping"
}
resource "openstack_networking_secgroup_rule_v2" "egress-ipv4" {
security_group_id = openstack_networking_secgroup_v2.default.id
direction = "egress"
ethertype = "IPv4"
security_group_id = openstack_networking_secgroup_v2.default.id
direction = "egress"
ethertype = "IPv4"
}
resource "openstack_networking_secgroup_rule_v2" "ingress-icmp" {
security_group_id = openstack_networking_secgroup_v2.default.id
description = "Allow ICMP from remote"
direction = "ingress"
ethertype = "IPv4"
remote_ip_prefix = "0.0.0.0/0"
protocol = "icmp"
security_group_id = openstack_networking_secgroup_v2.default.id
description = "Allow ICMP from remote"
direction = "ingress"
ethertype = "IPv4"
remote_ip_prefix = "0.0.0.0/0"
protocol = "icmp"
}
resource "openstack_networking_secgroup_rule_v2" "ssh-jump-proxy" {
security_group_id = openstack_networking_secgroup_v2.default.id
description = "SSH traffic from the jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = module.common_variables.basic_services_ip.ssh_jump_cidr
security_group_id = openstack_networking_secgroup_v2.default.id
description = "SSH traffic from the jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.basic_services_ip.ssh_jump_cidr
}
resource "openstack_networking_secgroup_rule_v2" "prometheus-node" {
security_group_id = openstack_networking_secgroup_v2.default.id
description = "Prometheus access to the node exporter"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 9100
port_range_max = 9100
remote_ip_prefix = module.common_variables.basic_services_ip.prometheus_cidr
security_group_id = openstack_networking_secgroup_v2.default.id
description = "Prometheus access to the node exporter"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 9100
port_range_max = 9100
remote_ip_prefix = var.basic_services_ip.prometheus_cidr
}
#
# SSH access to the jump proxy. Used by the jump proxy VM only
resource "openstack_networking_secgroup_v2" "access_to_the_jump_proxy" {
name = "ssh_access_to_the_jump_node"
delete_default_rules = "true"
description = "Security group that allows SSH access to the jump node from a limited set of sources"
name = "ssh_access_to_the_jump_node"
delete_default_rules = "true"
description = "Security group that allows SSH access to the jump node from a limited set of sources"
}
resource "openstack_networking_secgroup_rule_v2" "ssh-s2i2s-vpn-1" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from S2I2S VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = module.common_variables.ssh_sources.s2i2s_vpn_1_cidr
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from S2I2S VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.s2i2s_vpn_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-s2i2s-vpn-2" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from S2I2S VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = module.common_variables.ssh_sources.s2i2s_vpn_2_cidr
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from S2I2S VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.s2i2s_vpn_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-d4s-vpn-1" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from D4Science VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = module.common_variables.ssh_sources.d4s_vpn_1_cidr
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from D4Science VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.d4s_vpn_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-d4s-vpn-2" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from D4Science VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = module.common_variables.ssh_sources.d4s_vpn_2_cidr
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from D4Science VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.d4s_vpn_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-shell-d4s" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from shell.d4science.org"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = module.common_variables.ssh_sources.shell_d4s_cidr
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from shell.d4science.org"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.shell_d4s_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-infrascience-net" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from the InfraScience network"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = module.common_variables.ssh_sources.infrascience_net_cidr
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from the InfraScience network"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.infrascience_net_cidr
}
# Debug via tunnel from the jump proxy node
resource "openstack_networking_secgroup_v2" "debugging" {
name = "debugging_from_jump_node"
delete_default_rules = "true"
description = "Security group that allows web app debugging via tunnel from the ssh jump node"
name = "debugging_from_jump_node"
delete_default_rules = "true"
description = "Security group that allows web app debugging via tunnel from the ssh jump node"
}
resource "openstack_networking_secgroup_rule_v2" "shell_8100" {
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "Tomcat debug on port 8100 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8100
port_range_max = 8100
remote_ip_prefix = module.common_variables.basic_services_ip.ssh_jump_cidr
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "Tomcat debug on port 8100 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8100
port_range_max = 8100
remote_ip_prefix = var.basic_services_ip.ssh_jump_cidr
}
resource "openstack_networking_secgroup_rule_v2" "shell_80" {
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "http debug port 80 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = module.common_variables.basic_services_ip.ssh_jump_cidr
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "http debug port 80 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = var.basic_services_ip.ssh_jump_cidr
}
resource "openstack_networking_secgroup_rule_v2" "shell_443" {
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "https debug port 443 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.basic_services_ip.ssh_jump_cidr
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "https debug port 443 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.basic_services_ip.ssh_jump_cidr
}
# Traffic from the main HAPROXY load balancers
# Use on the web services that are exposed through the main HAPROXY
resource "openstack_networking_secgroup_v2" "traffic_from_main_haproxy" {
name = "traffic_from_the_main_load_balancers"
delete_default_rules = "true"
description = "Allow traffic from the main L7 HAPROXY load balancers"
name = "traffic_from_the_main_load_balancers"
delete_default_rules = "true"
description = "Allow traffic from the main L7 HAPROXY load balancers"
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-80" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-80" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-443" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTPS traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTPS traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-443" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTPS traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTPS traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-8080" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8080" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-8888" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8888
port_range_max = 8888
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_1_cidr
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8888
port_range_max = 8888
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8888" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8888
port_range_max = 8888
remote_ip_prefix = module.common_variables.basic_services_ip.haproxy_l7_2_cidr
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8888
port_range_max = 8888
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
# Security group that exposes web services directly. A floating IP is required.
resource "openstack_networking_secgroup_v2" "public_web" {
name = "public_web_service"
delete_default_rules = "true"
description = "Security group that allows HTTPS and HTTP from everywhere, for the services that are not behind any load balancer"
name = "public_web_service"
delete_default_rules = "true"
description = "Security group that allows HTTPS and HTTP from everywhere, for the services that are not behind any load balancer"
}
resource "openstack_networking_secgroup_rule_v2" "public_http" {
security_group_id = openstack_networking_secgroup_v2.public_web.id
description = "Allow HTTP from everywhere"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = "0.0.0.0/0"
security_group_id = openstack_networking_secgroup_v2.public_web.id
description = "Allow HTTP from everywhere"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = "0.0.0.0/0"
}
resource "openstack_networking_secgroup_rule_v2" "public_https" {
security_group_id = openstack_networking_secgroup_v2.public_web.id
description = "Allow HTTPS from everywhere"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = "0.0.0.0/0"
security_group_id = openstack_networking_secgroup_v2.public_web.id
description = "Allow HTTPS from everywhere"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = "0.0.0.0/0"
}
# HTTP and HTTPS access through the VPN nodes. Floating IP is required
resource "openstack_networking_secgroup_v2" "restricted_web" {
name = "restricted_web_service"
delete_default_rules = "true"
description = "Security group that restricts HTTPS sources to the VPN nodes and shell.d4science.org. HTTP is open to all, because letsencrypt"
name = "restricted_web_service"
delete_default_rules = "true"
description = "Security group that restricts HTTPS sources to the VPN nodes and shell.d4science.org. HTTP is open to all, because letsencrypt"
}
resource "openstack_networking_secgroup_rule_v2" "http_from_everywhere" {
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTP from everywhere"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = "0.0.0.0/0"
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTP from everywhere"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = "0.0.0.0/0"
}
resource "openstack_networking_secgroup_rule_v2" "https_from_d4s_vpn_1" {
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from D4Science VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.ssh_sources.d4s_vpn_1_cidr
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from D4Science VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.ssh_sources.d4s_vpn_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "https_from_d4s_vpn_2" {
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from D4Science VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.ssh_sources.d4s_vpn_2_cidr
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from D4Science VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.ssh_sources.d4s_vpn_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "https_from_s2i2s_vpn_1" {
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from S2I2S VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.ssh_sources.s2i2s_vpn_1_cidr
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from S2I2S VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.ssh_sources.s2i2s_vpn_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "https_from_s2i2s_vpn_2" {
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from S2I2S VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.ssh_sources.s2i2s_vpn_2_cidr
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from S2I2S VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.ssh_sources.s2i2s_vpn_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "https_from_shell_d4s" {
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from shell.d4science.org"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.ssh_sources.shell_d4s_cidr
security_group_id = openstack_networking_secgroup_v2.restricted_web.id
description = "Allow HTTPS from shell.d4science.org"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.ssh_sources.shell_d4s_cidr
}
resource "openstack_networking_secgroup_v2" "prometheus_access_from_grafana" {
name = "prometheus_access_from_grafana"
delete_default_rules = "true"
description = "The public grafana server must be able to get data from Prometheus"
name = "prometheus_access_from_grafana"
delete_default_rules = "true"
description = "The public grafana server must be able to get data from Prometheus"
}
resource "openstack_networking_secgroup_rule_v2" "grafana_d4s" {
security_group_id = openstack_networking_secgroup_v2.prometheus_access_from_grafana.id
description = "Allow HTTPS from grafana.d4science.org"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = module.common_variables.prometheus_server_data.public_grafana_server_cidr
security_group_id = openstack_networking_secgroup_v2.prometheus_access_from_grafana.id
description = "Allow HTTPS from grafana.d4science.org"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.prometheus_server_data.public_grafana_server_cidr
}

View File

@ -1,12 +1,12 @@
# VM used as jump proxy. A floating IP is required
resource "openstack_compute_instance_v2" "ssh_jump_proxy" {
name = module.common_variables.ssh_jump_proxy.name
availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
flavor_name = module.common_variables.ssh_jump_proxy.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [module.common_variables.default_security_group_name,openstack_networking_secgroup_v2.access_to_the_jump_proxy.name]
name = var.ssh_jump_proxy.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.ssh_jump_proxy.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.access_to_the_jump_proxy.name]
block_device {
uuid = module.common_variables.ubuntu_2204.uuid
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 30
boot_index = 0
@ -15,17 +15,17 @@ resource "openstack_compute_instance_v2" "ssh_jump_proxy" {
}
network {
name = module.common_variables.main_private_network.name
fixed_ip_v4 = module.common_variables.basic_services_ip.ssh_jump
name = var.main_private_network.name
fixed_ip_v4 = var.basic_services_ip.ssh_jump
}
user_data = "${file("${module.common_variables.ubuntu2204_data_file}")}"
user_data = file("${var.ubuntu2204_data_file}")
}
# Floating IP and DNS record
resource "openstack_networking_floatingip_v2" "ssh_jump_proxy_ip" {
pool = module.common_variables.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
description = "SSH Proxy Jump Server"
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
description = "SSH Proxy Jump Server"
}
resource "openstack_compute_floatingip_associate_v2" "ssh_jump_proxy" {
@ -34,11 +34,11 @@ resource "openstack_compute_floatingip_associate_v2" "ssh_jump_proxy" {
}
locals {
ssh_recordset_name = "${module.common_variables.ssh_jump_proxy.name}.${module.common_variables.dns_zone.zone_name}"
ssh_recordset_name = "${var.ssh_jump_proxy.name}.${var.dns_zone.zone_name}"
}
resource "openstack_dns_recordset_v2" "ssh_jump_proxy_recordset" {
zone_id = module.common_variables.dns_zone_id
zone_id = var.dns_zone_id
name = local.ssh_recordset_name
description = "Public IP address of the SSH Proxy Jump server"
ttl = 8600

View File

@ -17,11 +17,11 @@ data "terraform_remote_state" "privnet_dns_router" {
}
}
module "common_variables" {
source = "../../modules/common_variables"
}
# module "common_variables" {
# source = "../../modules/common_variables"
# }
module "ssh_settings" {
source = "../../modules/ssh-key-ref"
}
# module "ssh_settings" {
# source = "../../modules/ssh-key-ref"
# }

View File

@ -0,0 +1 @@
../common_variables/variables.tf

View File

@ -13,27 +13,27 @@ resource "openstack_compute_servergroup_v2" "swarm_workers" {
# Network for the NFS traffic
#
resource "openstack_networking_network_v2" "swarm_nfs_net" {
name = var.swarm_nfs_private_network.network_name
admin_state_up = "true"
external = "false"
description = var.swarm_nfs_private_network.network_description
dns_domain = var.dns_zone.zone_name
mtu = var.mtu_size
name = var.swarm_nfs_private_network.network_name
admin_state_up = "true"
external = "false"
description = var.swarm_nfs_private_network.network_description
dns_domain = var.dns_zone.zone_name
mtu = var.mtu_size
port_security_enabled = true
shared = false
region = var.main_region
shared = false
region = var.main_region
}
# Subnet
resource "openstack_networking_subnet_v2" "swarm_nfs_subnet" {
name = "swarm-nfs-net"
description = "Subnet used by the Swarm cluster and the NFS service"
description = "Subnet used by the Swarm cluster and the NFS service"
network_id = openstack_networking_network_v2.swarm_nfs_net.id
cidr = var.swarm_nfs_private_network.network_cidr
dns_nameservers = var.resolvers_ip
ip_version = 4
enable_dhcp = true
no_gateway = true
ip_version = 4
enable_dhcp = true
no_gateway = true
allocation_pool {
start = var.swarm_nfs_private_network.allocation_pool_start
end = var.swarm_nfs_private_network.allocation_pool_end
@ -44,46 +44,46 @@ resource "openstack_networking_subnet_v2" "swarm_nfs_subnet" {
# Security groups
#
resource "openstack_networking_secgroup_v2" "swarm_internal_traffic" {
name = "swarm_internal_docker_traffic"
delete_default_rules = "true"
description = "Traffic between the Docker Swarm nodes"
name = "swarm_internal_docker_traffic"
delete_default_rules = "true"
description = "Traffic between the Docker Swarm nodes"
}
resource "openstack_networking_secgroup_rule_v2" "everything_udp" {
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
description = "UDP traffic between Swarm nodes"
direction = "ingress"
ethertype = "IPv4"
protocol = "udp"
remote_ip_prefix = var.main_private_subnet.cidr
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
description = "UDP traffic between Swarm nodes"
direction = "ingress"
ethertype = "IPv4"
protocol = "udp"
remote_ip_prefix = var.main_private_subnet.cidr
}
resource "openstack_networking_secgroup_rule_v2" "everything_tcp" {
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
description = "TCP traffic between Swarm nodes"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = var.main_private_subnet.cidr
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
description = "TCP traffic between Swarm nodes"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = var.main_private_subnet.cidr
}
resource "openstack_networking_secgroup_v2" "swarm_nfs_traffic" {
name = "docker_swarm_nfs"
delete_default_rules = "true"
description = "Traffic between Docker Swarm and the NFS service"
name = "docker_swarm_nfs"
delete_default_rules = "true"
description = "Traffic between Docker Swarm and the NFS service"
}
resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_udp" {
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
description = "UDP traffic"
direction = "ingress"
ethertype = "IPv4"
protocol = "udp"
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
description = "UDP traffic"
direction = "ingress"
ethertype = "IPv4"
protocol = "udp"
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
}
resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_tcp" {
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
description = "TCP traffic"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
description = "TCP traffic"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
}
#
@ -91,12 +91,12 @@ resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_tcp" {
#
# Instance
resource "openstack_compute_instance_v2" "docker_swarm_managers" {
count = var.docker_swarm_data.mgr_count
name = format("%s-%02d", var.docker_swarm_data.mgr_name, count.index+1)
count = var.docker_swarm_data.mgr_count
name = format("%s-%02d", var.docker_swarm_data.mgr_name, count.index + 1)
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.docker_swarm_data.mgr_flavor
key_pair = var.ssh_key_file.name
security_groups = [openstack_networking_secgroup_v2.default.name,openstack_networking_secgroup_v2.swarm_internal_traffic.name]
flavor_name = var.docker_swarm_data.mgr_flavor
key_pair = var.ssh_key_file.name
security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_internal_traffic.name]
scheduler_hints {
group = openstack_compute_servergroup_v2.swarm_masters.id
}
@ -118,25 +118,25 @@ resource "openstack_compute_instance_v2" "docker_swarm_managers" {
}
network {
name = var.main_private_network.name
fixed_ip_v4 = var.swarm_managers_ip.*[count.index]
name = var.main_private_network.name
fixed_ip_v4 = var.swarm_managers_ip.* [count.index]
}
network {
name = var.swarm_nfs_private_network.network_name
}
user_data = "${file("${var.ubuntu2204_data_file}")}"
depends_on = [ openstack_networking_subnet_v2.swarm_nfs_subnet ]
user_data = file("${var.ubuntu2204_data_file}")
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
}
# Swarm worker nodes
resource "openstack_compute_instance_v2" "docker_swarm_workers" {
count = var.docker_swarm_data.worker_count
name = format("%s-%02d", var.docker_swarm_data.worker_name, count.index+1)
count = var.docker_swarm_data.worker_count
name = format("%s-%02d", var.docker_swarm_data.worker_name, count.index + 1)
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.docker_swarm_data.worker_flavor
key_pair = var.ssh_key_file.name
security_groups = [openstack_networking_secgroup_v2.default.name,openstack_networking_secgroup_v2.swarm_internal_traffic.name]
flavor_name = var.docker_swarm_data.worker_flavor
key_pair = var.ssh_key_file.name
security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_internal_traffic.name]
scheduler_hints {
group = openstack_compute_servergroup_v2.swarm_workers.id
}
@ -164,8 +164,8 @@ resource "openstack_compute_instance_v2" "docker_swarm_workers" {
name = var.swarm_nfs_private_network.network_name
}
user_data = "${file("${var.ubuntu2204_data_file}")}"
depends_on = [ openstack_networking_subnet_v2.swarm_nfs_subnet ]
user_data = file("${var.ubuntu2204_data_file}")
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
}
# NFS server
@ -177,11 +177,11 @@ resource "openstack_blockstorage_volume_v3" "swarm_nfs_data_vol" {
# Instance
resource "openstack_compute_instance_v2" "swarm_nfs_server" {
name = var.docker_swarm_data.nfs_server_name
name = var.docker_swarm_data.nfs_server_name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.docker_swarm_data.nfs_server_flavor
key_pair = var.ssh_key_file.name
security_groups = [openstack_networking_secgroup_v2.default.name,openstack_networking_secgroup_v2.swarm_nfs_traffic.name]
flavor_name = var.docker_swarm_data.nfs_server_flavor
key_pair = var.ssh_key_file.name
security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_nfs_traffic.name]
block_device {
uuid = var.ubuntu_2204.uuid
source_type = "image"
@ -195,20 +195,20 @@ resource "openstack_compute_instance_v2" "swarm_nfs_server" {
name = var.main_private_network.name
}
network {
name = var.swarm_nfs_private_network.network_name
name = var.swarm_nfs_private_network.network_name
fixed_ip_v4 = var.swarm_nfs_private_network.server_ip
}
user_data = "${file("${var.ubuntu2204_data_file}")}"
depends_on = [ openstack_networking_subnet_v2.swarm_nfs_subnet ]
user_data = file("${var.ubuntu2204_data_file}")
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
}
# Attach the additional volume
resource "openstack_compute_volume_attach_v2" "swarm_nfs_data_attach_vol" {
instance_id = openstack_compute_instance_v2.swarm_nfs_server.id
volume_id = openstack_blockstorage_volume_v3.swarm_nfs_data_vol.id
device = var.docker_swarm_data.nfs_server_data_disk_device
depends_on = [openstack_compute_instance_v2.swarm_nfs_server]
device = var.docker_swarm_data.nfs_server_data_disk_device
depends_on = [openstack_compute_instance_v2.swarm_nfs_server]
}
#
@ -216,33 +216,33 @@ resource "openstack_compute_volume_attach_v2" "swarm_nfs_data_attach_vol" {
#
# Swarm load balancer. L4, backed by Octavia
resource "openstack_lb_loadbalancer_v2" "swarm_lb" {
vip_subnet_id = var.main_private_subnet_id
name = var.octavia_swarm_data.swarm_lb_name
description = var.octavia_swarm_data.swarm_lb_description
flavor_id = var.octavia_swarm_data.octavia_flavor_id
vip_address = var.octavia_swarm_data.swarm_octavia_main_ip
loadbalancer_provider = "amphora"
vip_subnet_id = var.main_private_subnet_id
name = var.octavia_swarm_data.swarm_lb_name
description = var.octavia_swarm_data.swarm_lb_description
flavor_id = var.octavia_swarm_data.octavia_flavor_id
vip_address = var.octavia_swarm_data.swarm_octavia_main_ip
loadbalancer_provider = "amphora"
}
# Allocate a floating IP
resource "openstack_networking_floatingip_v2" "swarm_lb_ip" {
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
# dns_name = "main-lb"
# dns_domain = var.dns_zone.zone_name
description = var.octavia_swarm_data.swarm_lb_description
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
# dns_name = "main-lb"
# dns_domain = var.dns_zone.zone_name
description = var.octavia_swarm_data.swarm_lb_description
}
resource "openstack_networking_floatingip_associate_v2" "swarm_lb" {
floating_ip = openstack_networking_floatingip_v2.swarm_lb_ip.address
port_id = openstack_lb_loadbalancer_v2.swarm_lb.vip_port_id
floating_ip = openstack_networking_floatingip_v2.swarm_lb_ip.address
port_id = openstack_lb_loadbalancer_v2.swarm_lb.vip_port_id
}
locals {
swarm_recordset_name = "${var.octavia_swarm_data.swarm_lb_hostname}.${var.dns_zone.zone_name}"
swarm_recordset_name = "${var.octavia_swarm_data.swarm_lb_hostname}.${var.dns_zone.zone_name}"
portainer_recordset_name = "portainer.${var.dns_zone.zone_name}"
ccp_recordset_name = "ccp.${var.dns_zone.zone_name}"
cdn_recordset_name = "cdn.${var.dns_zone.zone_name}"
ccp_recordset_name = "ccp.${var.dns_zone.zone_name}"
cdn_recordset_name = "cdn.${var.dns_zone.zone_name}"
conductor_recordset_name = "conductor.${var.dns_zone.zone_name}"
}
@ -293,215 +293,215 @@ resource "openstack_dns_recordset_v2" "conductor_dns_recordset" {
# Main HAPROXY stats listener
resource "openstack_lb_listener_v2" "swarm_haproxy_stats_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 8880
description = "Listener for the stats of the Docker Swarm HAPROXY instances"
name = "swarm_haproxy_stats_listener"
allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr,var.ssh_sources.d4s_vpn_2_cidr,var.ssh_sources.s2i2s_vpn_1_cidr,var.ssh_sources.s2i2s_vpn_2_cidr]
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 8880
description = "Listener for the stats of the Docker Swarm HAPROXY instances"
name = "swarm_haproxy_stats_listener"
allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
}
resource "openstack_lb_pool_v2" "swarm_haproxy_stats_pool" {
listener_id = openstack_lb_listener_v2.swarm_haproxy_stats_listener.id
protocol = "TCP"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-stats"
description = "Pool for the stats of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
listener_id = openstack_lb_listener_v2.swarm_haproxy_stats_listener.id
protocol = "TCP"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-stats"
description = "Pool for the stats of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
}
resource "openstack_lb_members_v2" "swarm_haproxy_stats_pool_members" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 8880
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 8880
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 8880
}
pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 8880
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 8880
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 8880
}
}
resource "openstack_lb_monitor_v2" "swarm_haproxy_stats_monitor" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
name = "swarm_haproxy_stats_monitor"
type = "TCP"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
name = "swarm_haproxy_stats_monitor"
type = "TCP"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# HAPROXY HTTP
resource "openstack_lb_listener_v2" "swarm_haproxy_http_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 80
description = "HTTP listener of the Docker Swarm HAPROXY instances"
name = "swarm_haproxy_http_listener"
admin_state_up = true
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 80
description = "HTTP listener of the Docker Swarm HAPROXY instances"
name = "swarm_haproxy_http_listener"
admin_state_up = true
}
resource "openstack_lb_pool_v2" "swarm_haproxy_http_pool" {
listener_id = openstack_lb_listener_v2.swarm_haproxy_http_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-http"
description = "Pool for the HTTP listener of the Docker Swarm HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
listener_id = openstack_lb_listener_v2.swarm_haproxy_http_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-http"
description = "Pool for the HTTP listener of the Docker Swarm HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
}
resource "openstack_lb_members_v2" "swarm_haproxy_http_pool_members" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 80
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 80
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 80
}
pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 80
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 80
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 80
}
}
resource "openstack_lb_monitor_v2" "swarm_haproxy_http_monitor" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
name = "swarm_haproxy_http_monitor"
type = "HTTP"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
name = "swarm_haproxy_http_monitor"
type = "HTTP"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# HAPROXY HTTPS
resource "openstack_lb_listener_v2" "swarm_haproxy_https_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 443
description = "HTTPS listener of the main HAPROXY instances"
name = "swarm_haproxy_https_listener"
admin_state_up = true
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 443
description = "HTTPS listener of the main HAPROXY instances"
name = "swarm_haproxy_https_listener"
admin_state_up = true
}
resource "openstack_lb_pool_v2" "swarm_haproxy_https_pool" {
listener_id = openstack_lb_listener_v2.swarm_haproxy_https_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-https"
description = "Pool for the HTTPS listener of the Docker Swarm HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
listener_id = openstack_lb_listener_v2.swarm_haproxy_https_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-https"
description = "Pool for the HTTPS listener of the Docker Swarm HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
}
resource "openstack_lb_members_v2" "swarm_haproxy_https_pool_members" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 443
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 443
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 443
}
pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 443
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 443
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 443
}
}
resource "openstack_lb_monitor_v2" "swarm_haproxy_https_monitor" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
name = "swarm_haproxy_https_monitor"
type = "HTTPS"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
name = "swarm_haproxy_https_monitor"
type = "HTTPS"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# HAPROXY HTTP on port 8080
resource "openstack_lb_listener_v2" "swarm_haproxy_8080_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 8080
description = "HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
name = "swarm_haproxy_8080_listener"
admin_state_up = true
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 8080
description = "HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
name = "swarm_haproxy_8080_listener"
admin_state_up = true
}
resource "openstack_lb_pool_v2" "swarm_haproxy_8080_pool" {
listener_id = openstack_lb_listener_v2.swarm_haproxy_8080_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-http-8080"
description = "Pool for the HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
listener_id = openstack_lb_listener_v2.swarm_haproxy_8080_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-http-8080"
description = "Pool for the HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
}
resource "openstack_lb_members_v2" "swarm_haproxy_8080_pool_members" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 8080
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 8080
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 8080
}
pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 8080
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 8080
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 8080
}
}
resource "openstack_lb_monitor_v2" "swarm_haproxy_8080_monitor" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
name = "swarm_haproxy_8080_monitor"
type = "HTTP"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
name = "swarm_haproxy_8080_monitor"
type = "HTTP"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
output "swarm_loadbalancer_ip" {

View File

@ -1,57 +1,57 @@
# Sizing, naming and addressing defaults for the Docker Swarm cluster.
# The original block contained every map entry twice (old unaligned + new
# aligned copies from a rendered diff); duplicate keys are deduped here.
# NOTE(review): map(string) coerces the numeric counts/sizes to strings —
# confirm all consumers expect string values.
variable "docker_swarm_data" {
  type = map(string)
  default = {
    mgr_name                    = "swarm-mgr"
    mgr1_ip                     = "10.1.40.31"
    mgr1_cidr                   = "10.1.40.31/32"
    mgr2_ip                     = "10.1.40.32"
    mgr2_cidr                   = "10.1.40.32/32"
    mgr3_ip                     = "10.1.40.33"
    mgr3_cidr                   = "10.1.40.33/32"
    mgr_count                   = 3
    mgr_flavor                  = "m1.large"
    mgr_data_disk_size          = 100
    worker_name                 = "swarm-worker"
    worker_count                = 5
    worker_flavor               = "m1.xlarge"
    worker_data_disk_size       = 100
    nfs_server_name             = "swarm-nfs-server"
    nfs_server_flavor           = "m1.medium"
    nfs_server_data_disk_name   = "Swarm NFS server data Disk"
    nfs_server_data_disk_size   = 100
    nfs_server_data_disk_device = "/dev/vdb"
  }
}
# Fixed private addresses of the Swarm manager nodes; index N is manager N+1.
# These must stay in sync with mgr1_ip..mgr3_ip in var.docker_swarm_data.
# The original block declared `type` twice (diff residue); deduped here.
variable "swarm_managers_ip" {
  type    = list(string)
  default = ["10.1.40.31", "10.1.40.32", "10.1.40.33"]
}
# Octavia (amphora) load-balancer settings for the production Swarm cluster.
# The original block repeated most keys (swarm_lb_name three times) and had
# one closing brace too many, which is invalid HCL; both are fixed here.
variable "octavia_swarm_data" {
  type = map(string)
  default = {
    swarm_lb_name           = "d4s-production-cloud-swarm-l4"
    swarm_lb_description    = "L4 balancer that serves the D4Science production Docker Swarm cluster"
    swarm_lb_hostname       = "swarm-lb"
    octavia_flavor          = "octavia_amphora-mvcpu-ha"
    octavia_flavor_id       = "394988b5-6603-4a1e-a939-8e177c6681c7"
    swarm_octavia_main_ip   = "10.1.40.30"
    swarm_octavia_main_cidr = "10.1.40.30/32"
  }
}
# Private network dedicated to NFS traffic between Swarm nodes and the NFS
# server. The original block listed every default twice with a stray early
# closing brace (diff residue); deduped here.
# NOTE(review): server_cidr's host part (.5) does not match server_ip (.10) —
# confirm which address is intended before relying on server_cidr.
variable "swarm_nfs_private_network" {
  type = map(string)
  default = {
    network_name          = "swarm-nfs-net"
    network_description   = "Network used by the swarm nodes and the NFS service"
    network_cidr          = "192.168.4.0/23"
    allocation_pool_start = "192.168.4.100"
    allocation_pool_end   = "192.168.5.254"
    server_ip             = "192.168.4.10"
    server_cidr           = "192.168.4.5/23"
  }
}

View File

@ -1,8 +1,17 @@
# Generic martgears_service instance
# Generic smartgears_service instance
module "ssh_settings" {
source = "../../modules/ssh-key-ref"
}
module "common_variables" {
source = "../../modules/common_variables"
}
resource "openstack_compute_instance_v2" "smartgears_service" {
for_each = var.smartgears_service_instances_map
name = each.value.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
availability_zone_hints = module.common_variables.availability_zone_no_gpu_name
flavor_name = each.value.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = each.value.security_groups
@ -22,5 +31,5 @@ resource "openstack_compute_instance_v2" "smartgears_service" {
}
}
user_data = file("${var.ubuntu1804_data_file}")
user_data = file("${module.common_variables.ubuntu1804_data_file}")
}

View File

@ -0,0 +1 @@
../common_variables/outputs.tf

View File

@ -4,7 +4,7 @@ variable "geoserver_basic" {
default = {
name = "geoserver"
description = "Geoserver instance"
flavor = "c1.medium"
flavor = "m1.medium"
}
}

View File

@ -0,0 +1,15 @@
# Defaults for the Liferay cluster: server-group policy, node naming,
# instance count, flavor, and boot volume size.
# NOTE(review): map(string) coerces vm_count and boot_vol_size to strings —
# confirm consumers expect that.
variable "liferay_data" {
type = map(string)
default = {
# soft-anti-affinity: spread VMs across hosts when possible, but still
# schedule if they must share a host.
affinity_policy = "soft-anti-affinity"
srv_name = "lr62"
vm_count = 1
vm_flavor = "m1.large"
boot_vol_size = 30
}
}
# Fixed private IPv4 addresses for the Liferay nodes, indexed by instance
# count. Empty by default — deployments must supply one address per node
# (the instance and secgroup-rule resources index into this list).
variable "liferay_ip_addrs" {
type = list(string)
default = []
}

View File

@ -0,0 +1,59 @@
#
# Liferay nodes
#
#
# Security group
#
# Security group for intra-cluster Liferay traffic; rules are added per node
# by traffic_between_liferay_nodes. Default rules are dropped so only the
# explicitly declared flows are allowed.
resource "openstack_networking_secgroup_v2" "liferay_cluster_traffic" {
name = "liferay_cluster_traffic"
delete_default_rules = "true"
description = "Traffic between the Liferay cluster nodes"
}
# One ingress rule per Liferay node: allow TCP (any port) from that node's
# /32 so cluster members can reach each other.
resource "openstack_networking_secgroup_rule_v2" "traffic_between_liferay_nodes" {
  count             = var.liferay_data.vm_count
  security_group_id = openstack_networking_secgroup_v2.liferay_cluster_traffic.id
  description       = "Traffic between liferay nodes"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  # Direct list indexing + format() replaces the original
  # join("/", [element(var.liferay_ip_addrs.*, count.index), "32"]) —
  # same CIDR string, without the legacy splat/element/join combination.
  remote_ip_prefix  = format("%s/32", var.liferay_ip_addrs[count.index])
}
#
# Server group
#
# Server group applied to the Liferay instances via scheduler_hints;
# the policy comes from var.liferay_data.affinity_policy
# ("soft-anti-affinity" by default).
resource "openstack_compute_servergroup_v2" "liferay" {
name = "liferay"
policies = [var.liferay_data.affinity_policy]
}
# Instance(s)
# Liferay node(s): Ubuntu 18.04 boot-from-volume instances attached to the
# main private network (fixed IP per node) and the shared PostgreSQL network.
resource "openstack_compute_instance_v2" "liferay" {
  count                   = var.liferay_data.vm_count
  name                    = format("%s-%02d", var.liferay_data.srv_name, count.index + 1)
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.liferay_data.vm_flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [var.default_security_group_name, openstack_networking_secgroup_v2.liferay_cluster_traffic.name, "traffic_from_the_main_load_balancers", "restricted_web_service"]
  scheduler_hints {
    group = openstack_compute_servergroup_v2.liferay.id
  }
  block_device {
    uuid                  = var.ubuntu_1804.uuid
    source_type           = "image"
    volume_size           = var.liferay_data.boot_vol_size
    boot_index            = 0
    destination_type      = "volume"
    # Keep the boot volume on instance destroy so data survives a rebuild.
    delete_on_termination = false
  }
  network {
    name = var.main_private_network.name
    # Direct list indexing replaces the legacy-splat form
    # var.liferay_ip_addrs.*[count.index], which is not valid HCL2 indexing.
    # NOTE(review): liferay_ip_addrs defaults to [] while vm_count defaults
    # to 1 — a deployment must supply one address per node or plan will fail.
    fixed_ip_v4 = var.liferay_ip_addrs[count.index]
  }
  network {
    name = var.shared_postgresql_server_data.network_name
  }
  user_data = file("${var.ubuntu1804_data_file}")
}

View File

@ -0,0 +1 @@
../common_variables/outputs.tf

View File

@ -0,0 +1,11 @@
# Define required providers
terraform {
# Terraform 0.14+ is needed for the required_providers syntax used below.
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
# Pin to the 1.53.x series; allow patch upgrades only.
version = "~> 1.53.0"
}
}
}

View File

@ -0,0 +1 @@
../common_variables/variables.tf

View File

@ -1,153 +1,4 @@
# Define required providers
# terraform {
# required_version = ">= 0.14.0"
# required_providers {
# openstack = {
# source = "terraform-provider-openstack/openstack"
# version = "~> 1.53.0"
# }
# }
# }
#
# module "common_variables" {
# source = "../../modules/common_variables"
# }
#
# Server group
#
# resource "openstack_compute_servergroup_v2" "orientdb_cluster" {
# name = "orientdb_cluster"
# policies = ["soft-anti-affinity"]
# }
# #
# # Network for the cluster traffic
# #
# resource "openstack_networking_network_v2" "orientdb_network" {
# name = var.orientdb_net.network_name
# admin_state_up = "true"
# external = "false"
# description = var.orientdb_net.network_description
# mtu = module.common_variables.mtu_size_value
# port_security_enabled = true
# shared = false
# region = module.common_variables.main_region_name
# }
# # Subnet
# resource "openstack_networking_subnet_v2" "orientdb_subnet" {
# name = "orientdb-subnet"
# description = "Subnet used by the OrientDB service"
# network_id = openstack_networking_network_v2.orientdb_network.id
# cidr = var.orientdb_net.network_cidr
# dns_nameservers = module.common_variables.resolvers_ip
# ip_version = 4
# enable_dhcp = true
# no_gateway = true
# allocation_pool {
# start = var.orientdb_net.allocation_pool_start
# end = var.orientdb_net.allocation_pool_end
# }
# }
# #
# # Security groups
# #
# # Between OrientDB nodes
# resource "openstack_networking_secgroup_v2" "orientdb_internal_traffic" {
# name = "orientdb_internal_docker_traffic"
# delete_default_rules = "true"
# description = "Traffic between the OrientDB nodes"
# }
# resource "openstack_networking_secgroup_rule_v2" "everything_udp" {
# count = var.orientdb_nodes_count
# security_group_id = openstack_networking_secgroup_v2.orientdb_internal_traffic.id
# description = "UDP traffic between OrientDB nodes"
# direction = "ingress"
# ethertype = "IPv4"
# protocol = "udp"
# remote_ip_prefix = var.orientdb_ip.*[count.index]/32
# }
# resource "openstack_networking_secgroup_rule_v2" "everything_tcp" {
# count = var.orientdb_nodes_count
# security_group_id = openstack_networking_secgroup_v2.orientdb_internal_traffic.id
# description = "TCP traffic between OrientDB nodes"
# direction = "ingress"
# ethertype = "IPv4"
# protocol = "tcp"
# remote_ip_prefix = var.orientdb_ip.*[count.index]/32
# }
# resource "openstack_networking_secgroup_v2" "access_to_orientdb" {
# name = "access_to_orientdb"
# delete_default_rules = "true"
# description = "Clients that talk to the OrientDB service"
# }
# resource "openstack_networking_secgroup_rule_v2" "access_to_orient_udp" {
# security_group_id = openstack_networking_secgroup_v2.access_to_orientdb.id
# description = "UDP traffic"
# direction = "ingress"
# ethertype = "IPv4"
# protocol = "udp"
# remote_ip_prefix = openstack_networking_subnet_v2.orientdb_subnet.cidr
# }
# resource "openstack_networking_secgroup_rule_v2" "access_to_orient_tcp" {
# security_group_id = openstack_networking_secgroup_v2.access_to_orientdb.id
# description = "TCP traffic"
# direction = "ingress"
# ethertype = "IPv4"
# protocol = "tcp"
# remote_ip_prefix = openstack_networking_subnet_v2.orientdb_subnet.cidr
# }
# #
# # OrientDB
# #
# # Instance
# resource "openstack_compute_instance_v2" "orientdb_servers" {
# count = local.orientdb_nodes_count
# name = format("%s-%02d", var.orientdb_data.node_name, count.index+1)
# availability_zone_hints = module.common_variables.availability_zone_no_gpu_name
# flavor_name = var.orientdb_data.node_flavor
# key_pair = module.common_variables.ssh_key_file_config
# security_groups = [openstack_networking_secgroup_v2.default.name,openstack_networking_secgroup_v2.orientdb_internal_traffic.name]
# scheduler_hints {
# group = openstack_compute_servergroup_v2.orientdb_cluster.id
# }
# block_device {
# uuid = module.ubuntu2204.uuid
# source_type = "image"
# volume_size = 10
# boot_index = 0
# destination_type = "volume"
# delete_on_termination = false
# }
# block_device {
# source_type = "blank"
# volume_size = var.orientdb_data.node_data_disk_size
# boot_index = -1
# destination_type = "volume"
# delete_on_termination = false
# }
# network {
# name = var.main_private_network.name
# }
# network {
# name = var.orientdb_net.network_name
# fixed_ip_v4 = var.orientdb_ip.*[count.index]
# }
# user_data = "${file("${module.common_variables.ubuntu2204_datafile}")}"
# depends_on = [ openstack_networking_subnet_v2.orientdb_subnet ]
# }
# locals {
# orientdb_nodes_count = 3
# }
#
# Not using modules here
# OrientDB and OrientDB for the smart executors
#
resource "openstack_compute_servergroup_v2" "orientdb_cluster" {
name = "orientdb_cluster"
@ -256,8 +107,8 @@ resource "openstack_networking_secgroup_rule_v2" "access_to_orient_from_haproxy"
for_each = toset( [var.basic_services_ip.haproxy_l7_1_cidr, var.basic_services_ip.haproxy_l7_2_cidr])
security_group_id = openstack_networking_secgroup_v2.access_to_orientdb.id
description = "TCP traffic from the load balancers"
port_range_min = 2424
port_range_max = 2424
port_range_min = 2480
port_range_max = 2480
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
@ -269,12 +120,12 @@ resource "openstack_networking_secgroup_rule_v2" "access_to_orient_from_haproxy"
resource "openstack_networking_secgroup_v2" "access_to_orientdb_se" {
name = "access_to_orientdb_se"
delete_default_rules = "true"
description = "Clients that talk to the OrientDB service"
description = "Clients that talk to the OrientDB SE service"
}
resource "openstack_networking_secgroup_rule_v2" "access_to_orient_se_from_clients" {
for_each = toset([var.basic_services_ip.ssh_jump_cidr, openstack_networking_subnet_v2.orientdb_se_subnet.cidr])
security_group_id = openstack_networking_secgroup_v2.access_to_orientdb_se.id
description = "TCP traffic from the resource registries and the SSH jump server"
description = "TCP traffic from the smart executors and the SSH jump server"
port_range_min = 2424
port_range_max = 2490
direction = "ingress"
@ -286,8 +137,8 @@ resource "openstack_networking_secgroup_rule_v2" "access_to_orient_se_from_hapro
for_each = toset( [var.basic_services_ip.haproxy_l7_1_cidr, var.basic_services_ip.haproxy_l7_2_cidr])
security_group_id = openstack_networking_secgroup_v2.access_to_orientdb_se.id
description = "TCP traffic from the load balancers"
port_range_min = 2424
port_range_max = 2424
port_range_min = 2480
port_range_max = 2480
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
@ -299,11 +150,11 @@ resource "openstack_networking_secgroup_rule_v2" "access_to_orient_se_from_hapro
#
# Instances used by the resource registry
resource "openstack_compute_instance_v2" "orientdb_servers" {
count = local.orientdb_nodes_count
count = var.orientdb_nodes_count
name = format("%s-%02d", var.orientdb_data.node_name, count.index+1)
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.orientdb_node_flavor
key_pair = var.ssh_key_file.name
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name,openstack_networking_secgroup_v2.orientdb_internal_traffic.name,openstack_networking_secgroup_v2.access_to_orientdb.name]
scheduler_hints {
group = openstack_compute_servergroup_v2.orientdb_cluster.id
@ -342,7 +193,7 @@ resource "openstack_compute_instance_v2" "orientdb_se_server" {
name = "orientdb-se"
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.orientdb_se_node_flavor
key_pair = var.ssh_key_file.name
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name,openstack_networking_secgroup_v2.access_to_orientdb_se.name]
block_device {
uuid = var.ubuntu_2204.uuid
@ -373,7 +224,3 @@ resource "openstack_compute_instance_v2" "orientdb_se_server" {
depends_on = [ openstack_networking_subnet_v2.orientdb_se_subnet ]
}
locals {
orientdb_nodes_count = var.orientdb_nodes_count
}

View File

@ -0,0 +1 @@
../common_variables/outputs.tf

View File

@ -0,0 +1,11 @@
# Define required providers
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}

View File

@ -35,3 +35,34 @@ variable "orientdb_se_ip" {
# CIDR for the OrientDB Smart Executor subnet. Empty default — presumably
# overridden per environment in a tfvars file; confirm against the caller.
variable "orientdb_se_cidr" {
  default = ""
}
# Flat map describing the OrientDB cluster network: name, description,
# CIDR and the DHCP allocation pool boundaries.
variable "orientdb_net" {
  type = map(string)
  default = {
    network_name = "orientdb-net"
    network_description = "Network used by the OrientDB cluster and to access the service"
    network_cidr = "192.168.10.0/24"
    allocation_pool_start = "192.168.10.11"
    allocation_pool_end = "192.168.10.254"
  }
}
# Flat map describing the network used by OrientDB for the Smart
# Executor: name, description, CIDR and DHCP allocation pool boundaries.
variable "orientdb_se_net" {
  type = map(string)
  default = {
    network_name = "orientdb-se-net"
    network_description = "Network used by the OrientDB for Smart Executor"
    network_cidr = "192.168.12.0/24"
    allocation_pool_start = "192.168.12.11"
    allocation_pool_end = "192.168.12.254"
  }
}
# Name of the security group granting access to the OrientDB SE service.
variable "orientdb_se_secgroup" {
  default = "access_to_orientdb_se"
}
# Name of the security group for the PostgreSQL service — NOTE(review):
# defined here but not referenced in the visible resources; verify usage.
variable "postgresql_secgroup" {
  default = "PostgreSQL service"
}

View File

@ -0,0 +1 @@
../common_variables/variables.tf

Some files were not shown because too many files have changed in this diff Show More