infrastructure-as-code/openstack-tf/d4s-preprod/variables/preprod.auto.tfvars

default_security_group_name = "default_for_all"
# Provided in the output of the project setup
main_private_network_id = "23fd8a99-d551-4ada-8d3a-9859542ebb8c"
main_private_subnet_id  = "cd77a2fd-4a36-4254-b1d0-70b3874c6d04"
dns_zone_id             = "c1a4b4bc-f167-4387-855d-38f0f99ca05c"
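
# The declarations matching the assignments in this file live in the module's
# variables.tf. A minimal sketch of what they are assumed to look like (types
# inferred from the values here, not taken from the actual module):
#
# variable "default_security_group_name" {
#   type = string
# }
#
# variable "main_private_network_id" {
#   type = string
# }
#
# ...and similarly for the other top-level names assigned below.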
octavia_information = {
  main_lb_name        = "d4s-pre-cloud-l4-load-balancer"
  main_lb_description = "Main L4 load balancer for the D4Science PRE production"
  swarm_lb_name       = "d4s-pre-cloud-l4-swarm-load-balancer"
  octavia_flavor      = "octavia_amphora-mvcpu-ha"
  octavia_flavor_id   = "394988b5-6603-4a1e-a939-8e177c6681c7"
  main_lb_hostname    = "main-lb"
  # The following values are not available when the module runs, so they have
  # to be retrieved afterwards with the command
  # openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
  # This means that the execution will fail
  octavia_vrrp_ip_1 = "10.1.34.232/32"
  octavia_vrrp_ip_2 = "10.1.33.229/32"
}
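
# Hypothetical usage sketch (not part of the original file): once the VRRP
# addresses above are known, they are typically consumed by security group
# rules along these lines; the resource labels are illustrative only.
#
# resource "openstack_networking_secgroup_rule_v2" "octavia_vrrp_1" {
#   direction         = "ingress"
#   ethertype         = "IPv4"
#   remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_1
#   security_group_id = openstack_networking_secgroup_v2.default.id
# }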
os_project_data = {
  id = "6fdc02e2827b405dad99f34698659742"
}
dns_zone = {
  zone_name   = "cloud-pre.d4science.org."
  email       = "postmaster@isti.cnr.it"
  description = "DNS primary zone for the d4s-pre-cloud project"
  ttl         = 8600
  id          = "c1a4b4bc-f167-4387-855d-38f0f99ca05c"
}
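
# Hypothetical usage sketch: a zone described by these values would normally
# be managed with the openstack_dns_zone_v2 resource, roughly as below (the
# resource label "primary" is illustrative).
#
# resource "openstack_dns_zone_v2" "primary" {
#   name        = var.dns_zone.zone_name
#   email       = var.dns_zone.email
#   description = var.dns_zone.description
#   ttl         = var.dns_zone.ttl
#   type        = "PRIMARY"
# }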
main_private_network = {
  name        = "d4s-pre-cloud-main"
  description = "D4Science Preprod private network (use this as the main network)"
}
main_private_subnet = {
  name             = "d4s-pre-cloud-main-subnet"
  description      = "D4Science Preprod main private subnet"
  cidr             = "10.1.32.0/22"
  gateway_ip       = "10.1.32.1"
  allocation_start = "10.1.32.100"
  allocation_end   = "10.1.35.254"
}
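
# Hypothetical usage sketch: these values map onto an
# openstack_networking_subnet_v2 resource, roughly as below (the resource
# label "main" is illustrative).
#
# resource "openstack_networking_subnet_v2" "main" {
#   name        = var.main_private_subnet.name
#   description = var.main_private_subnet.description
#   network_id  = var.main_private_network_id
#   cidr        = var.main_private_subnet.cidr
#   gateway_ip  = var.main_private_subnet.gateway_ip
#   ip_version  = 4
#   allocation_pool {
#     start = var.main_private_subnet.allocation_start
#     end   = var.main_private_subnet.allocation_end
#   }
# }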
external_router = {
  name        = "d4s-pre-cloud-external-router"
  description = "D4Science Preprod main router"
  id          = "cc26064a-bb08-4c0b-929f-d0cb39f934a3"
}
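
# Hypothetical usage sketch: since the router already exists (its id is set
# above), the module would typically reference it through a data source
# rather than create it; the label "external" is illustrative.
#
# data "openstack_networking_router_v2" "external" {
#   router_id = var.external_router.id
# }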
basic_services_ip = {
  ca                = "10.1.32.4"
  ca_cidr           = "10.1.32.4/32"
  ssh_jump          = "10.1.32.5"
  ssh_jump_cidr     = "10.1.32.5/32"
  prometheus        = "10.1.32.10"
  prometheus_cidr   = "10.1.32.10/32"
  haproxy_l7_1      = "10.1.32.11"
  haproxy_l7_1_cidr = "10.1.32.11/32"
  haproxy_l7_2      = "10.1.32.12"
  haproxy_l7_2_cidr = "10.1.32.12/32"
  octavia_main      = "10.1.32.20"
  octavia_main_cidr = "10.1.32.20/32"
}
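
# Hypothetical usage sketch: addresses like these are usually pinned to their
# instances by creating a Neutron port with a fixed IP, e.g. for the SSH jump
# host (the resource label is illustrative).
#
# resource "openstack_networking_port_v2" "ssh_jump" {
#   network_id = var.main_private_network_id
#   fixed_ip {
#     subnet_id  = var.main_private_subnet_id
#     ip_address = var.basic_services_ip.ssh_jump
#   }
# }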
main_haproxy_l7_ip = ["10.1.32.11", "10.1.32.12"]
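
# Hypothetical usage sketch: a list like the one above can feed a round-robin
# A record through openstack_dns_recordset_v2 (the record name
# "main-haproxy-l7" is illustrative).
#
# resource "openstack_dns_recordset_v2" "haproxy_l7" {
#   zone_id = var.dns_zone_id
#   name    = "main-haproxy-l7.${var.dns_zone.zone_name}"
#   type    = "A"
#   ttl     = var.dns_zone.ttl
#   records = var.main_haproxy_l7_ip
# }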
# docker_swarm_data = {
#   mgr_name                    = "swarm-mgr"
#   mgr1_ip                     = "10.1.32.31"
#   mgr1_cidr                   = "10.1.32.31/32"
#   mgr2_ip                     = "10.1.32.32"
#   mgr2_cidr                   = "10.1.32.32/32"
#   mgr3_ip                     = "10.1.32.33"
#   mgr3_cidr                   = "10.1.32.33/32"
#   mgr_count                   = 3
#   mgr_flavor                  = "m1.large"
#   mgr_data_disk_size          = 100
#   worker_name                 = "swarm-worker"
#   worker_count                = 3
#   worker_flavor               = "m1.large"
#   worker_data_disk_size       = 200
#   nfs_server_name             = "swarm-nfs-server"
#   nfs_server_flavor           = "m1.medium"
#   nfs_server_data_disk_name   = "Swarm NFS server data Disk"
#   nfs_server_data_disk_size   = 100
#   nfs_server_data_disk_device = "/dev/vdb"
# }
# swarm_managers_ip = ["10.1.32.31", "10.1.32.32", "10.1.32.33"]