default_security_group_name = "default_for_all"
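# A minimal sketch of how the module might consume this name; the resource
# and its description are assumptions, not the module's actual code:
#
# resource "openstack_networking_secgroup_v2" "default" {
#   name        = var.default_security_group_name
#   description = "Security group applied to every instance of the project"
# }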
shared_postgresql_server_data = {
  name                  = "shared-postgresql-server"
  flavor                = "m1.large"
  vol_data_name         = "shared-postgresql-data"
  vol_data_size         = "300"
  vol_data_device       = "/dev/vdb"
  vol_backup_name       = "shared-postgresql-backup-data"
  vol_backup_size       = "100"
  vol_backup_device     = "/dev/vdc"
  network_name          = "postgresql-srv-net"
  network_description   = "Network used to communicate with the shared postgresql service"
  network_cidr          = "192.168.0.0/22"
  allocation_pool_start = "192.168.0.100"
  allocation_pool_end   = "192.168.3.254"
  server_ip             = "192.168.0.5"
  server_cidr           = "192.168.0.5/22"
}
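# A minimal sketch of the matching declaration the module could carry in its
# variables.tf; map(string) fits because every value above is a string, though
# an object({...}) type with one attribute per key would be stricter. This is
# an assumption, not the module's actual definition:
#
# variable "shared_postgresql_server_data" {
#   type = map(string)
# }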
# Provided in the output of the project setup
main_private_network_id = "020df98d-ae72-452a-b376-3b6dc289acac"
main_private_subnet_id  = "5d7b83ad-e058-4a3a-bfd8-d20ba6d42e1a"
dns_zone_id             = "74135b34-1a9c-4c01-8cf0-22450a5660c4"
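# These IDs could also be resolved at plan time with data sources instead of
# being pasted from the setup output; a sketch, with hypothetical names:
#
# data "openstack_networking_network_v2" "main_private" {
#   name = "main-private-network" # hypothetical network name
# }
#
# data "openstack_dns_zone_v2" "primary" {
#   name = "example.org." # hypothetical zone name
# }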
octavia_information = {
  main_lb_name        = "d4s-production-cloud-l4-load-balancer"
  main_lb_description = "Main L4 load balancer for the D4Science production"
  swarm_lb_name       = "d4s-production-cloud-l4-swarm-load-balancer"
  octavia_flavor      = "octavia_amphora-mvcpu-ha"
  octavia_flavor_id   = "394988b5-6603-4a1e-a939-8e177c6681c7"
  main_lb_hostname    = "main-lb"
  # The following values aren't available when the module first runs, so the
  # first execution will fail. Retrieve them afterwards with
  #   openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
  # and fill them in here.
  octavia_vrrp_ip_1 = "10.1.42.119/32"
  octavia_vrrp_ip_2 = "10.1.42.188/32"
}
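# A sketch of how the VRRP addresses might be consumed, e.g. to let the
# amphorae reach the backend instances; the rule and the security group
# reference are assumptions, not the module's actual code:
#
# resource "openstack_networking_secgroup_rule_v2" "traffic_from_main_lb" {
#   description       = "Allow traffic from the first amphora VRRP address"
#   direction         = "ingress"
#   ethertype         = "IPv4"
#   remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_1
#   security_group_id = openstack_networking_secgroup_v2.default.id # hypothetical
# }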
docker_swarm_data = {
  mgr_name                    = "swarm-mgr"
  mgr1_ip                     = "10.1.40.31"
  mgr1_cidr                   = "10.1.40.31/32"
  mgr2_ip                     = "10.1.40.32"
  mgr2_cidr                   = "10.1.40.32/32"
  mgr3_ip                     = "10.1.40.33"
  mgr3_cidr                   = "10.1.40.33/32"
  mgr_count                   = 3
  mgr_flavor                  = "m1.large"
  mgr_data_disk_size          = 100
  worker_name                 = "swarm-worker"
  worker_count                = 8
  worker_flavor               = "m1.xxl"
  worker_data_disk_size       = 200
  nfs_server_name             = "swarm-nfs-server"
  nfs_server_flavor           = "m1.medium"
  nfs_server_data_disk_name   = "Swarm NFS server data Disk"
  nfs_server_data_disk_size   = 200
  nfs_server_data_disk_device = "/dev/vdb"
}
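# A sketch of how mgr_count and mgr_flavor might drive the creation of the
# manager instances; the resource name and image are hypothetical:
#
# resource "openstack_compute_instance_v2" "swarm_manager" {
#   count       = var.docker_swarm_data.mgr_count
#   name        = "${var.docker_swarm_data.mgr_name}-${count.index + 1}"
#   flavor_name = var.docker_swarm_data.mgr_flavor
#   image_name  = "Ubuntu-22.04" # hypothetical image
# }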
# Keep in sync with the mgr*_ip values in docker_swarm_data above
swarm_managers_ip = ["10.1.40.31", "10.1.40.32", "10.1.40.33"]
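# A sketch of iterating over this list, e.g. for one security group rule per
# manager; all names here are hypothetical:
#
# resource "openstack_networking_secgroup_rule_v2" "from_swarm_managers" {
#   for_each          = toset(var.swarm_managers_ip)
#   direction         = "ingress"
#   ethertype         = "IPv4"
#   remote_ip_prefix  = "${each.value}/32"
#   security_group_id = openstack_networking_secgroup_v2.default.id # hypothetical
# }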