Pre production setup.
This commit is contained in:
parent
da199a5be2
commit
58225078f7
|
@ -1,3 +1,4 @@
|
|||
#
|
||||
# This is the security group that should be added to every instance
|
||||
resource "openstack_networking_secgroup_v2" "default" {
|
||||
name = "default_for_all"
|
||||
|
@ -42,7 +43,8 @@ resource "openstack_networking_secgroup_rule_v2" "prometheus-node" {
|
|||
remote_ip_prefix = var.basic_services_ip.prometheus_cidr
|
||||
}
|
||||
|
||||
# SSH access to the jump proxy
|
||||
#
|
||||
# SSH access to the jump proxy. Used by the jump proxy VM only
|
||||
resource "openstack_networking_secgroup_v2" "access_to_the_jump_proxy" {
|
||||
name = "ssh_access_to_the_jump_node"
|
||||
delete_default_rules = "true"
|
||||
|
@ -156,6 +158,7 @@ resource "openstack_networking_secgroup_rule_v2" "shell_443" {
|
|||
}
|
||||
|
||||
# Traffic from the main HAPROXY load balancers
|
||||
# Use on the web services that are exposed through the main HAPROXY
|
||||
resource "openstack_networking_secgroup_v2" "traffic_from_main_haproxy" {
|
||||
name = "traffic_from_the_main_load_balancers"
|
||||
delete_default_rules = "true"
|
||||
|
@ -250,6 +253,7 @@ resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8888" {
|
|||
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
|
||||
}
|
||||
|
||||
# Security group that exposes web services directly. A floating IP is required.
|
||||
resource "openstack_networking_secgroup_v2" "public_web" {
|
||||
name = "public_web_service"
|
||||
delete_default_rules = "true"
|
||||
|
@ -278,6 +282,7 @@ resource "openstack_networking_secgroup_rule_v2" "public_https" {
|
|||
remote_ip_prefix = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
# HTTP and HTTPS access through the VPN nodes. Floating IP is required
|
||||
resource "openstack_networking_secgroup_v2" "restricted_web" {
|
||||
name = "restricted_web_service"
|
||||
delete_default_rules = "true"
|
||||
|
@ -393,3 +398,20 @@ resource "openstack_networking_secgroup_rule_v2" "https_from_shell_d4s" {
|
|||
port_range_max = 443
|
||||
remote_ip_prefix = var.ssh_sources.shell_d4s_cidr
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_v2" "prometheus_access_from_grafana" {
|
||||
name = "prometheus_access_from_grafana"
|
||||
delete_default_rules = "true"
|
||||
description = "The public grafana server must be able to get data from Prometheus"
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "grafana_d4s" {
|
||||
security_group_id = openstack_networking_secgroup_v2.prometheus_access_from_grafana.id
|
||||
description = "Allow HTTPS from grafana.d4science.org"
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 443
|
||||
port_range_max = 443
|
||||
remote_ip_prefix = var.prometheus_server_data.public_grafana_server_cidr
|
||||
}
|
|
@ -0,0 +1,4 @@
|
|||
# SSH key pair injected into every VM at first boot.
# var.ssh_key_file.name is the keypair name, var.ssh_key_file.file the
# private key path; the matching public key is read from "<file>.pub".
resource "openstack_compute_keypair_v2" "initial_ssh_key" {
  name = var.ssh_key_file.name
  # Plain file() call: interpolation-only "${...}" wrappers are deprecated
  # since Terraform 0.12.
  public_key = file("${var.ssh_key_file.file}.pub")
}
|
|
@ -0,0 +1,186 @@
|
|||
# Main L4 load balancer, implemented by Octavia (amphora provider).
resource "openstack_lb_loadbalancer_v2" "main_lb" {
  name                  = var.octavia_information.main_lb_name
  description           = var.octavia_information.main_lb_description
  vip_network_id        = var.main_private_network.id
  vip_address           = var.basic_services_ip.octavia_main
  flavor_id             = var.octavia_information.octavia_flavor_id
  loadbalancer_provider = "amphora"
}

# Public floating IP for the load balancer.
resource "openstack_networking_floatingip_v2" "main_lb_ip" {
  pool        = var.floating_ip_pools.main_public_ip_pool
  description = var.octavia_information.main_lb_description
  # The DNS association does not work because of a bug in the OpenStack API
  # dns_name   = "main-lb"
  # dns_domain = var.dns_zone.zone_name
}

# Bind the floating IP to the load balancer VIP port.
resource "openstack_networking_floatingip_associate_v2" "main_lb" {
  floating_ip = openstack_networking_floatingip_v2.main_lb_ip.address
  port_id     = openstack_lb_loadbalancer_v2.main_lb.vip_port_id
}

locals {
  # FQDN of the load balancer inside the managed DNS zone.
  recordset_name = "${var.octavia_information.main_lb_hostname}.${var.dns_zone.zone_name}"
}

# A record pointing the LB hostname at its floating IP.
resource "openstack_dns_recordset_v2" "main_lb_dns_recordset" {
  zone_id     = var.dns_zone.id
  name        = local.recordset_name
  description = "Public IP address of the main load balancer"
  type        = "A"
  # NOTE(review): 8600 seconds looks like a typo for 86400 (1 day) — confirm
  ttl     = 8600
  records = [openstack_networking_floatingip_v2.main_lb_ip.address]
}
|
||||
|
||||
# HAPROXY stats endpoint (port 8880): listener, pool, members, monitor.
resource "openstack_lb_listener_v2" "main_haproxy_stats_listener" {
  name            = "main_haproxy_stats_listener"
  description     = "Listener for the stats of the main HAPROXY instances"
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  protocol        = "TCP"
  protocol_port   = 8880
  # Stats are reachable from the VPN endpoints only.
  allowed_cidrs = [
    var.ssh_sources.d4s_vpn_1_cidr,
    var.ssh_sources.d4s_vpn_2_cidr,
    var.ssh_sources.s2i2s_vpn_1_cidr,
    var.ssh_sources.s2i2s_vpn_2_cidr,
  ]
}

resource "openstack_lb_pool_v2" "main_haproxy_stats_pool" {
  name        = "main-haproxy-lb-stats"
  description = "Pool for the stats of the main HAPROXY instances"
  listener_id = openstack_lb_listener_v2.main_haproxy_stats_listener.id
  protocol    = "TCP"
  lb_method   = "LEAST_CONNECTIONS"

  # Keep a client on the same backend across requests.
  persistence {
    type = "SOURCE_IP"
  }
}

resource "openstack_lb_members_v2" "main_haproxy_stats_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id

  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 8880
  }
  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 8880
  }
}

resource "openstack_lb_monitor_v2" "main_haproxy_stats_monitor" {
  name           = "main_haproxy_stats_monitor"
  pool_id        = openstack_lb_pool_v2.main_haproxy_stats_pool.id
  type           = "TCP"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
|
||||
|
||||
# HAPROXY HTTP traffic (port 80): listener, pool, members, monitor.
resource "openstack_lb_listener_v2" "main_haproxy_http_listener" {
  name            = "main_haproxy_http_listener"
  description     = "HTTP listener of the main HAPROXY instances"
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  protocol        = "TCP"
  protocol_port   = 80
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "main_haproxy_http_pool" {
  name        = "main-haproxy-lb-http"
  description = "Pool for the HTTP listener of the main HAPROXY instances"
  listener_id = openstack_lb_listener_v2.main_haproxy_http_listener.id
  # PROXY protocol v2 towards the backends, so HAPROXY sees the real
  # client address.
  protocol       = "PROXYV2"
  lb_method      = "LEAST_CONNECTIONS"
  admin_state_up = true

  persistence {
    type = "SOURCE_IP"
  }
}

resource "openstack_lb_members_v2" "main_haproxy_http_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id

  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 80
  }
  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 80
  }
}

resource "openstack_lb_monitor_v2" "main_haproxy_http_monitor" {
  name           = "main_haproxy_http_monitor"
  pool_id        = openstack_lb_pool_v2.main_haproxy_http_pool.id
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
|
||||
|
||||
# HAPROXY HTTPS traffic (port 443): listener, pool, members, monitor.
resource "openstack_lb_listener_v2" "main_haproxy_https_listener" {
  name            = "main_haproxy_https_listener"
  description     = "HTTPS listener of the main HAPROXY instances"
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  protocol        = "TCP"
  protocol_port   = 443
  admin_state_up  = true
}

resource "openstack_lb_pool_v2" "main_haproxy_https_pool" {
  name        = "main-haproxy-lb-https"
  description = "Pool for the HTTPS listener of the main HAPROXY instances"
  listener_id = openstack_lb_listener_v2.main_haproxy_https_listener.id
  # PROXY protocol v2 towards the backends, so HAPROXY sees the real
  # client address.
  protocol       = "PROXYV2"
  lb_method      = "LEAST_CONNECTIONS"
  admin_state_up = true

  persistence {
    type = "SOURCE_IP"
  }
}

resource "openstack_lb_members_v2" "main_haproxy_https_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id

  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 443
  }
  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 443
  }
}

resource "openstack_lb_monitor_v2" "main_haproxy_https_monitor" {
  name           = "main_haproxy_https_monitor"
  pool_id        = openstack_lb_pool_v2.main_haproxy_https_pool.id
  type           = "HTTPS"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
|
||||
|
||||
# Expose the LB VIP so callers of this module can reference it.
output "main_loadbalancer_ip" {
  description = "Main Load balancer IP address"
  value       = openstack_lb_loadbalancer_v2.main_lb.vip_address
}
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
# VM used as SSH jump proxy. A floating IP is required.
resource "openstack_compute_instance_v2" "ssh_jump_proxy" {
  name                    = var.ssh_jump_proxy.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.ssh_jump_proxy.flavor
  key_pair                = openstack_compute_keypair_v2.initial_ssh_key.name
  security_groups = [
    openstack_networking_secgroup_v2.default.name,
    openstack_networking_secgroup_v2.access_to_the_jump_proxy.name,
  ]

  # Boot from a 30 GB volume created from the Ubuntu 22.04 image; the
  # volume survives instance deletion.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    destination_type      = "volume"
    volume_size           = 30
    boot_index            = 0
    delete_on_termination = false
  }

  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.basic_services_ip.ssh_jump
  }

  # Cloud-init script. Plain file() call: interpolation-only "${...}"
  # wrappers are deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}
|
||||
|
||||
# Floating IP and DNS record for the jump proxy.
resource "openstack_networking_floatingip_v2" "ssh_jump_proxy_ip" {
  pool        = var.floating_ip_pools.main_public_ip_pool
  description = "SSH Proxy Jump Server"
  # The DNS association does not work because of a bug in the OpenStack API
}

resource "openstack_compute_floatingip_associate_v2" "ssh_jump_proxy" {
  floating_ip = openstack_networking_floatingip_v2.ssh_jump_proxy_ip.address
  instance_id = openstack_compute_instance_v2.ssh_jump_proxy.id
}

locals {
  # FQDN of the jump proxy inside the managed DNS zone.
  ssh_recordset_name = "${var.ssh_jump_proxy.name}.${var.dns_zone.zone_name}"
}

resource "openstack_dns_recordset_v2" "ssh_jump_proxy_recordset" {
  zone_id     = var.dns_zone.id
  name        = local.ssh_recordset_name
  description = "Public IP address of the SSH Proxy Jump server"
  type        = "A"
  # NOTE(review): 8600 seconds looks like a typo for 86400 (1 day) — confirm
  ttl     = 8600
  records = [openstack_networking_floatingip_v2.ssh_jump_proxy_ip.address]
}
|
|
@ -0,0 +1,21 @@
|
|||
# Internal CA VM. No floating IP: reachable only on the private network
# (default security group only).
resource "openstack_compute_instance_v2" "internal_ca" {
  name                    = var.internal_ca_data.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.internal_ca_data.flavor
  key_pair                = openstack_compute_keypair_v2.initial_ssh_key.name
  security_groups         = [openstack_networking_secgroup_v2.default.name]

  # Boot from a 10 GB volume created from the Ubuntu 22.04 image.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    destination_type      = "volume"
    volume_size           = 10
    boot_index            = 0
    delete_on_termination = false
  }

  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.basic_services_ip.ca
  }

  # Cloud-init script. Plain file() call: interpolation-only "${...}"
  # wrappers are deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}
|
|
@ -0,0 +1,58 @@
|
|||
# Prometheus server. A floating IP is required.
# Dedicated volume for the time-series data.
resource "openstack_blockstorage_volume_v3" "prometheus_data_vol" {
  name = var.prometheus_server_data.vol_data_name
  size = var.prometheus_server_data.vol_data_size
}
|
||||
|
||||
# Prometheus server VM. Reachable through the VPN (restricted_web) and
# from the public Grafana server.
resource "openstack_compute_instance_v2" "prometheus_server" {
  name                    = var.prometheus_server_data.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.prometheus_server_data.flavor
  key_pair                = openstack_compute_keypair_v2.initial_ssh_key.name
  security_groups = [
    openstack_networking_secgroup_v2.default.name,
    openstack_networking_secgroup_v2.restricted_web.name,
    openstack_networking_secgroup_v2.prometheus_access_from_grafana.name,
  ]

  # Boot from a 10 GB volume created from the Ubuntu 22.04 image.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    destination_type      = "volume"
    volume_size           = 10
    boot_index            = 0
    delete_on_termination = false
  }

  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.basic_services_ip.prometheus
  }

  # Cloud-init script. Plain file() call: interpolation-only "${...}"
  # wrappers are deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}
|
||||
|
||||
# Attach the data volume to the Prometheus server.
resource "openstack_compute_volume_attach_v2" "prometheus_data_attach_vol" {
  instance_id = openstack_compute_instance_v2.prometheus_server.id
  volume_id   = openstack_blockstorage_volume_v3.prometheus_data_vol.id
  device      = var.prometheus_server_data.vol_data_device
}
|
||||
|
||||
# Floating IP and DNS record for the Prometheus server.
resource "openstack_networking_floatingip_v2" "prometheus_server_ip" {
  pool        = var.floating_ip_pools.main_public_ip_pool
  description = "Prometheus server"
  # The DNS association does not work because of a bug in the OpenStack API
}

resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
  floating_ip = openstack_networking_floatingip_v2.prometheus_server_ip.address
  instance_id = openstack_compute_instance_v2.prometheus_server.id
}

locals {
  # FQDN of the Prometheus server inside the managed DNS zone.
  prometheus_recordset_name = "${var.prometheus_server_data.name}.${var.dns_zone.zone_name}"
}

resource "openstack_dns_recordset_v2" "prometheus_server_recordset" {
  zone_id     = var.dns_zone.id
  name        = local.prometheus_recordset_name
  description = "Public IP address of the Prometheus server"
  type        = "A"
  # NOTE(review): 8600 seconds looks like a typo for 86400 (1 day) — confirm
  ttl     = 8600
  records = [openstack_networking_floatingip_v2.prometheus_server_ip.address]
}
|
|
@ -0,0 +1,86 @@
|
|||
# PostgreSQL shared server
# Dedicated network: clients reach the service through this network only.
resource "openstack_networking_network_v2" "shared_postgresql_net" {
  name        = var.shared_postgresql_server_data.network_name
  description = var.shared_postgresql_server_data.network_description
  dns_domain  = var.dns_zone.zone_name
  mtu         = var.mtu_size
  region      = var.main_region
  # Boolean literals instead of the string forms "true"/"false" the
  # provider silently coerced.
  admin_state_up        = true
  external              = false
  port_security_enabled = true
  shared                = false
}
|
||||
|
||||
# Subnet of the PostgreSQL service network. No gateway: the network is
# only reachable by ports attached to it.
resource "openstack_networking_subnet_v2" "shared_postgresql_subnet" {
  name            = "shared-postgresql-subnet"
  description     = "subnet used to connect to the shared PostgreSQL service"
  network_id      = openstack_networking_network_v2.shared_postgresql_net.id
  cidr            = var.shared_postgresql_server_data.network_cidr
  dns_nameservers = var.resolvers_ip
  ip_version      = 4
  enable_dhcp     = true
  no_gateway      = true

  allocation_pool {
    start = var.shared_postgresql_server_data.allocation_pool_start
    end   = var.shared_postgresql_server_data.allocation_pool_end
  }
}
|
||||
|
||||
# Security group restricting access to the PostgreSQL port.
resource "openstack_networking_secgroup_v2" "shared_postgresql_access" {
  name                 = "access_to_the_shared_postgresql_service"
  delete_default_rules = true
  description          = "Access the shared PostgreSQL service using the dedicated network"
}

resource "openstack_networking_secgroup_rule_v2" "shared_postgresql_access_from_dedicated_subnet" {
  security_group_id = openstack_networking_secgroup_v2.shared_postgresql_access.id
  # The description used to hard-code "192.168.2.0/22", which does not
  # match var.shared_postgresql_server_data.network_cidr (192.168.0.0/22
  # by default); keep it generic so it cannot drift.
  description      = "Allow connections to port 5432 from the dedicated PostgreSQL network"
  direction        = "ingress"
  ethertype        = "IPv4"
  protocol         = "tcp"
  port_range_min   = 5432
  port_range_max   = 5432
  remote_ip_prefix = var.shared_postgresql_server_data.network_cidr
}
|
||||
|
||||
# Dedicated volume for the PostgreSQL data directory.
resource "openstack_blockstorage_volume_v3" "shared_postgresql_data_vol" {
  name = var.shared_postgresql_server_data.vol_data_name
  size = var.shared_postgresql_server_data.vol_data_size
}
|
||||
|
||||
# Shared PostgreSQL VM: attached to both the main network and the
# dedicated PostgreSQL service network.
resource "openstack_compute_instance_v2" "shared_postgresql_server" {
  name                    = var.shared_postgresql_server_data.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.shared_postgresql_server_data.flavor
  key_pair                = openstack_compute_keypair_v2.initial_ssh_key.name
  security_groups = [
    openstack_networking_secgroup_v2.default.name,
    openstack_networking_secgroup_v2.shared_postgresql_access.name,
  ]

  # Boot from a 10 GB volume created from the Ubuntu 22.04 image.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    destination_type      = "volume"
    volume_size           = 10
    boot_index            = 0
    delete_on_termination = false
  }

  network {
    name = var.main_private_network.name
  }

  network {
    name        = var.shared_postgresql_server_data.network_name
    fixed_ip_v4 = var.shared_postgresql_server_data.server_ip
  }

  # Cloud-init script. Plain file() call: interpolation-only "${...}"
  # wrappers are deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}
|
||||
|
||||
# Attach the data volume to the PostgreSQL server.
resource "openstack_compute_volume_attach_v2" "shared_postgresql_data_attach_vol" {
  instance_id = openstack_compute_instance_v2.shared_postgresql_server.id
  volume_id   = openstack_blockstorage_volume_v3.shared_postgresql_data_vol.id
  device      = var.shared_postgresql_server_data.vol_data_device
}
|
|
@ -0,0 +1,131 @@
|
|||
#
# HAPROXY L7 behind the main Octavia balancer
#
# FIXME: terraform does not return the Octavia VRRP addresses, so we have
# to find them before creating the security group that allows the traffic
# between octavia and the haproxy instances:
#
#   openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
#   5cc2354e-... octavia-lb-vrrp-72392023-... [{'ip_address': '10.1.34.232'}] ACTIVE
#   8aa4e97f-... octavia-lb-vrrp-fbfcf712-... [{'ip_address': '10.1.33.229'}] ACTIVE
#

# Security group for traffic arriving from the L4 balancer.
resource "openstack_networking_secgroup_v2" "main_lb_to_haproxy_l7" {
  name                 = "traffic_from_main_lb_to_haproxy_l7"
  delete_default_rules = true
  # Fixed grammar: "Traffic coming the" -> "Traffic coming from the".
  description = "Traffic coming from the main L4 lb directed to the haproxy l7 servers"
}
|
||||
|
||||
# Peer traffic between the two HAPROXY instances on port 10000.
resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_1_peer" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Peer traffic from haproxy l7 1 to l7 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10000
  port_range_max    = 10000
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_1_cidr
}

resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_2_peer" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Peer traffic from haproxy l7 2 to l7 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10000
  port_range_max    = 10000
  remote_ip_prefix  = var.basic_services_ip.haproxy_l7_2_cidr
}
|
||||
|
||||
# Ingress from the first Octavia amphora (VRRP IP 1), ports 80/443/8080.
resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_80" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the first main lb instance to HAPROXY l7 1 port 80"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_1
}

resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_443" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the first main lb instance to HAPROXY l7 1 port 443"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_1
}

resource "openstack_networking_secgroup_rule_v2" "lb3_1_haproxy_l7_8080" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the first main lb instance to HAPROXY l7 1 port 8080"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8080
  port_range_max    = 8080
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_1
}
|
||||
|
||||
# Ingress from the second Octavia amphora (VRRP IP 2), ports 80/443/8080.
# Copy-paste fix: these rules use octavia_vrrp_ip_2, so their descriptions
# must say "second main lb instance", not "first".
resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_80" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the second main lb instance to HAPROXY l7 2 port 80"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_2
}

resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_443" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the second main lb instance to HAPROXY l7 2 port 443"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_2
}

resource "openstack_networking_secgroup_rule_v2" "lb3_2_haproxy_l7_8080" {
  security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
  description       = "Traffic from the second main lb instance to HAPROXY l7 2 port 8080"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 8080
  port_range_max    = 8080
  remote_ip_prefix  = var.octavia_information.octavia_vrrp_ip_2
}
|
||||
|
||||
|
||||
# HAPROXY L7 instances behind the main Octavia load balancer.
resource "openstack_compute_instance_v2" "main_haproxy_l7" {
  count                   = var.haproxy_l7_data.vm_count
  name                    = format("%s-%02d", var.haproxy_l7_data.name, count.index + 1)
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.haproxy_l7_data.flavor
  key_pair                = openstack_compute_keypair_v2.initial_ssh_key.name
  security_groups = [
    openstack_networking_secgroup_v2.default.name,
    openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.name,
  ]

  # Boot from a 10 GB volume created from the Ubuntu 22.04 image.
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    destination_type      = "volume"
    volume_size           = 10
    boot_index            = 0
    delete_on_termination = false
  }

  network {
    name = var.main_private_network.name
    # Bug fix: `var.main_haproxy_l7_ip.*[count.index]` is not valid — a
    # splat on a plain list is redundant and that form does not parse;
    # index the list directly.
    fixed_ip_v4 = var.main_haproxy_l7_ip[count.index]
  }

  # Cloud-init script. Plain file() call: interpolation-only "${...}"
  # wrappers are deprecated since Terraform 0.12.
  user_data = file(var.ubuntu2204_data_file)
}
|
|
@ -28,3 +28,12 @@ variable "mtu_size" {
|
|||
type = number
|
||||
default = 8942
|
||||
}
|
||||
|
||||
# Nova availability zones. GPU flavors are only offered in the GPU zone.
variable "availability_zones_names" {
  type = map(string)
  default = {
    availability_zone_no_gpu   = "cnr-isti-nova-a"
    availability_zone_with_gpu = "cnr-isti-nova-gpu-a"
  }
}
|
|
@ -0,0 +1,109 @@
|
|||
# SSH key used for the initial provisioning of every VM.
# "file" is the private key path; the public part is read from "<file>.pub".
variable "ssh_key_file" {
  type = map(string)
  default = {
    file = "~/.ssh/id_ed25519"
    name = "adellam"
  }
}
|
||||
|
||||
# Base OS images available in the region: Glance name + UUID pairs.
variable "ubuntu_1804" {
  type = map(string)
  default = {
    name = "Ubuntu-Bionic-18.04"
    uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89"
  }
}

variable "ubuntu_2204" {
  type = map(string)
  default = {
    name = "Ubuntu-Jammy-22.04"
    uuid = "54768889-8556-4be4-a2eb-82a4d9b34627"
  }
}

variable "centos_7" {
  type = map(string)
  default = {
    name = "CentOS-7"
    uuid = "f0187a99-64f6-462a-ab5f-ef52fe62f2ca"
  }
}

variable "almalinux_9" {
  type = map(string)
  default = {
    name = "AlmaLinux-9.0-20220718"
    uuid = "541650fc-dd19-4f38-bb1d-7333ed9dd688"
  }
}
|
||||
|
||||
# Cloud-init scripts used as instance user_data, one per supported OS.
variable "ubuntu1804_data_file" {
  default = "../../openstack_vm_data_scripts/ubuntu1804.sh"
}

variable "ubuntu2204_data_file" {
  default = "../../openstack_vm_data_scripts/ubuntu2204.sh"
}

variable "el7_data_file" {
  default = "../../openstack_vm_data_scripts/el7.sh"
}
|
||||
|
||||
# SSH jump proxy VM sizing.
variable "ssh_jump_proxy" {
  type = map(string)
  default = {
    name   = "ssh-jump-proxy"
    flavor = "m2.small"
  }
}

# Internal CA VM sizing.
variable "internal_ca_data" {
  type = map(string)
  default = {
    name   = "ca"
    flavor = "m1.small"
  }
}
|
||||
|
||||
# Prometheus server: VM sizing, data volume and Grafana source address.
variable "prometheus_server_data" {
  type = map(string)
  default = {
    name                       = "prometheus"
    flavor                     = "m1.medium"
    vol_data_name              = "prometheus-data"
    vol_data_size              = "100"
    vol_data_device            = "/dev/vdb"
    public_grafana_server_cidr = "146.48.122.132/32"
  }
}
|
||||
|
||||
# Shared PostgreSQL server: VM sizing, data volume and dedicated network.
variable "shared_postgresql_server_data" {
  type = map(string)
  default = {
    name                  = "shared-postgresql-server"
    flavor                = "m1.medium"
    vol_data_name         = "shared-postgresql-data"
    vol_data_size         = "100"
    vol_data_device       = "/dev/vdb"
    network_name          = "postgresql-srv-net"
    network_description   = "Network used to communicate with the shared postgresql service"
    network_cidr          = "192.168.0.0/22"
    allocation_pool_start = "192.168.0.100"
    allocation_pool_end   = "192.168.3.254"
    server_ip             = "192.168.0.5"
    server_cidr           = "192.168.0.5/22"
  }
}
|
||||
|
||||
# HAPROXY L7 pair: naming, flavor and instance count.
variable "haproxy_l7_data" {
  type = map(string)
  default = {
    name      = "main-haproxy-l7"
    haproxy_1 = "haproxy-l7-1"
    haproxy_2 = "haproxy-l7-2"
    flavor    = "m1.medium"
    vm_count  = "2"
  }
}
|
|
@ -1,19 +0,0 @@
|
|||
variable "ssh_key_file" {
|
||||
default = "~/.ssh/id_rsa_cloud_key"
|
||||
}
|
||||
|
||||
variable "smartgears_legacy_os_image" {
|
||||
default = "Ubuntu-Bionic-18.04"
|
||||
}
|
||||
|
||||
variable "smartgears_4_os_image" {
|
||||
default = "Ubuntu-Jammy-22.04"
|
||||
}
|
||||
|
||||
variable "ubuntu-2204" {
|
||||
default = "Ubuntu-Jammy-22.04"
|
||||
}
|
||||
|
||||
variable "almalinux-9" {
|
||||
default = "AlmaLinux-9.0-20220718"
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
../../common_variables/00-terraform-provider.tf
|
|
@ -4,7 +4,7 @@ provider "openstack" {
|
|||
cloud = "d4s-dev"
|
||||
}
|
||||
|
||||
variable "dns-zone" {
|
||||
variable "dns_zone" {
|
||||
type = string
|
||||
default = "cloud-dev.d4science.org."
|
||||
}
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
../../common_variables/01-external-network-and-resolvers.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_variables/external-network-and-resolvers.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_variables/terraform-provider.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_variables/00-terraform-provider.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_variables/01-external-network-and-resolvers.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_variables/05-projects-and-users-vars.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_setups/15-security-groups.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_setups/16-ssh-keys.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_setups/20-octavia.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_setups/25-ssh-jump-proxy.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_setups/30-internal-ca.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_setups/35-prometheus.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_setups/40-postgresql.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_setups/45-haproxy.tf
|
|
@ -1,13 +1,11 @@
|
|||
# Main services
|
||||
|
||||
* Load balancer as a service (openstack), L4. Three instances of it:
|
||||
* Load balancer as a service (openstack), L4.
|
||||
|
||||
> * Main HAPROXY load balancer
|
||||
> * HAPROXY used as ingress of the swarm cluster
|
||||
|
||||
* Two VMs as HAPROXY L7 instances for the main services. The dataminers will be also served by this load balancer.
|
||||
* A shell server, with floating IP address, that will be used as a proxy to reach all the other VMs.
|
||||
* An internal CA service.
|
||||
* A Prometheus instance.
|
||||
* A Docker Swarm cluster, with a NFS server in a dedicated network
|
||||
* A PostgreSQL server instance, with a dedicated network
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
../../common_setups/docker-swarm.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_variables/external-network-and-resolvers.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_setups/haproxy.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_setups/internal-ca.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_setups/octavia.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_setups/postgresql.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_variables/projects-and-users-vars.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_setups/prometheus.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_setups/security-groups.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_setups/ssh-jump-proxy.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_variables/terraform-provider.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_variables/00-terraform-provider.tf
|
|
@ -0,0 +1 @@
|
|||
../../common_variables/01-external-network-and-resolvers.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_variables/external-network-and-resolvers.tf
|
|
@ -1 +0,0 @@
|
|||
../../common_variables/terraform-provider.tf
|
|
@ -1,4 +1,3 @@
|
|||
|
||||
# Configure the OpenStack Provider
|
||||
provider "openstack" {
|
||||
cloud = "d4s-pre"
|
||||
|
@ -11,6 +10,7 @@ variable "dns_zone" {
|
|||
email = "postmaster@isti.cnr.it"
|
||||
description = "DNS primary zone for the d4s-pre-cloud project"
|
||||
ttl = 8600
|
||||
id = "c1a4b4bc-f167-4387-855d-38f0f99ca05c"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -19,6 +19,7 @@ variable "main_private_network" {
|
|||
default = {
|
||||
name = "d4s-pre-cloud-main"
|
||||
description = "D4Science Preprod private network (use this as the main network)"
|
||||
id = "23fd8a99-d551-4ada-8d3a-9859542ebb8c"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -46,19 +47,26 @@ variable "external_router" {
|
|||
variable "basic_services_ip" {
|
||||
type = map(string)
|
||||
default = {
|
||||
haproxy_l7_1 = "10.1.32.11"
|
||||
haproxy_l7_1_cidr = "10.1.32.11/32"
|
||||
haproxy_l7_2 = "10.1.32.12"
|
||||
haproxy_l7_2_cidr = "10.1.32.12/32"
|
||||
ca = "10.1.32.4"
|
||||
ca_cidr = "10.1.32.4/32"
|
||||
ssh_jump = "10.1.32.5"
|
||||
ssh_jump_cidr = "10.1.32.5/32"
|
||||
prometheus = "10.1.32.10"
|
||||
prometheus_cidr = "10.1.32.10/32"
|
||||
ca = "10.1.32.4"
|
||||
ca_cidr = "10.1.32.4/32"
|
||||
haproxy_l7_1 = "10.1.32.11"
|
||||
haproxy_l7_1_cidr = "10.1.32.11/32"
|
||||
haproxy_l7_2 = "10.1.32.12"
|
||||
haproxy_l7_2_cidr = "10.1.32.12/32"
|
||||
octavia_main = "10.1.32.20"
|
||||
octavia_main_cidr = "10.1.32.20/32"
|
||||
}
|
||||
}
|
||||
|
||||
variable "main_haproxy_l7_ip" {
|
||||
type = list(string)
|
||||
default = ["10.1.32.11", "10.1.32.12"]
|
||||
|
||||
}
|
||||
variable "ssh_sources" {
|
||||
type = map(string)
|
||||
default = {
|
||||
|
@ -70,3 +78,20 @@ variable "ssh_sources" {
|
|||
infrascience_net_cidr = "146.48.122.0/23"
|
||||
}
|
||||
}
|
||||
|
||||
variable "octavia_information" {
|
||||
type = map(string)
|
||||
default = {
|
||||
main_lb_name = "d4s-pre-cloud-l4-load-balancer"
|
||||
main_lb_description = "Main L4 load balancer for the D4Science preproduction"
|
||||
swarm_lb_name = "d4s-pre-cloud-l4-swarm-load-balancer"
|
||||
octavia_flavor = "octavia_amphora-mvcpu-ha"
|
||||
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
|
||||
main_lb_hostname = "main-lb"
|
||||
# The following aren't available when the module runs so we have to get them with the command
|
||||
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
|
||||
# This means that the execution will fail
|
||||
octavia_vrrp_ip_1 = "10.1.34.232"
|
||||
octavia_vrrp_ip_2 = "10.1.33.229"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,22 @@
|
|||
#!/bin/bash
|
||||
|
||||
yum -y install python policycoreutils-python
|
||||
|
||||
/sbin/useradd --system --home-dir /srv/ansible -m --shell /bin/bash -c "Used for the Ansible provisioning tasks" ansible
|
||||
|
||||
# SSH keys of users authorized to execute ansible playbooks.
|
||||
# The ones in the example belong to Andrea Dell'Amico and Tommaso Piccioli.
|
||||
# Feel free to add yours if you are entitled to run the ansible provisioning on that server
|
||||
|
||||
mkdir /srv/ansible/.ssh
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAzcHuDU7PgJwz34AsVG0E2+ZRx17ZKW1uDEGABNk3Z60/c9LTwWKPj6kcIRy6RzFJI5X+IgPJnYouXVmJsIWjVL8IRk8fP1ffJC6Fyf6H7+fCxu/Wwed5OoOCvKeZ0bEmJ1tlXFM6+EnxKqLCvz3fsNy8e4WKMnpS1hT8K6YB7PMjt60S3wOaxds1Lv4NmmgnfGM5uZFYrZCx1/GJCzNSh7AEEEUIVQ1B8xmXbet7whNiwDmiOnXSlt38dkIYT8kNMuRCj/r9wPr7FmoUCOFzUVXTcnuYagKyURrZ8QDyHbK6XQLYXgvCz/lWoErGFbDqpmBHHyvKSeLPxYfJpWJ70w== tom@tom" > /srv/ansible/.ssh/authorized_keys
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ9n6B+J5S7NPnwjejPC2WrvcRzC07WPnAoQ7ZHZ0Mv9JakyWItswzI3Drz/zI0mCamyuye+9dWz9v/ZRwUfBobVyXuptRaZIwxlMC/KsTZofpp3RHOBTteZ4/VM0VhEeiOHu+GuzNE0fRB2gsusWeMMae2cq4TjVAOMcQmJX496L703Smc14gFrP8y/P9jbC5HquuVnPR29PsW4mHidPmjdKkO7QmDfFAj44pEUGeInYOJe708C03NCpsjHw8AVdAJ6Pf16EOdDH+z8D6CByVO3s8UT0HJ85BRoIy6254/hmYLzyd/eRnCXHS/dke+ivrlA3XxG4+DmqjuJR/Jpfx adellam@semovente" >> /srv/ansible/.ssh/authorized_keys
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvkwppFE+K5MjKqtkGJN63wkcwaqZG4HkgPqMSWrXmCfDPJ3FxjDHV9aQRJYVKZObc9+SsFc9IYXwB2A8FI0XwPkCH2hfFKDVNO4TktO/SrM+4tXbEfEDWX/PduBQLootYaMEVj++p2+s/mxVnxTAMzsR4txC9tkWR4JO4VJ2cpZfM8po4p1wA4YteW6Oiv0PqUEsLtPtBHGuCgovo8WS+qxcxpeBBnewEssgis2dzDSqx5HUmaOETAxxEHflapHWQLum0JjvXsG5jlf9jL44XJPkcHXAYk3gnhtyM0moJpUya+GX7+ttfWWvwxs0tYNDXNMRn91r1hMLWmas4D+T/Q== rcirillo@rcirillo-cnr" >> /srv/ansible/.ssh/authorized_keys
|
||||
/bin/chown -R ansible:ansible /srv/ansible
|
||||
/bin/chmod 700 /srv/ansible/.ssh
|
||||
mkdir -p /etc/sudoers.d
|
||||
echo "ansible ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/ansible-user
|
||||
/bin/chmod 600 /etc/sudoers.d/ansible-user
|
||||
|
||||
semanage fcontext -a -e /home /srv/ansible ; restorecon -vR /srv/ansible
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
#!/bin/bash
|
||||
|
||||
apt-get update -y
|
||||
# Not needed anymore on Ubuntu 20.04+
|
||||
apt-get install -y python
|
||||
|
||||
/usr/sbin/adduser --system --home /srv/ansible --shell /bin/bash --gecos "Used for the Ansible provisioning tasks" --group ansible
|
||||
|
||||
# SSH keys of users authorized to execute ansible playbooks.
|
||||
# The ones in the example belong to Andrea Dell'Amico and Tommaso Piccioli.
|
||||
# Feel free to add yours if you are entitled to run the ansible provisioning on that server
|
||||
|
||||
mkdir /srv/ansible/.ssh
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAzcHuDU7PgJwz34AsVG0E2+ZRx17ZKW1uDEGABNk3Z60/c9LTwWKPj6kcIRy6RzFJI5X+IgPJnYouXVmJsIWjVL8IRk8fP1ffJC6Fyf6H7+fCxu/Wwed5OoOCvKeZ0bEmJ1tlXFM6+EnxKqLCvz3fsNy8e4WKMnpS1hT8K6YB7PMjt60S3wOaxds1Lv4NmmgnfGM5uZFYrZCx1/GJCzNSh7AEEEUIVQ1B8xmXbet7whNiwDmiOnXSlt38dkIYT8kNMuRCj/r9wPr7FmoUCOFzUVXTcnuYagKyURrZ8QDyHbK6XQLYXgvCz/lWoErGFbDqpmBHHyvKSeLPxYfJpWJ70w== tom@tom" > /srv/ansible/.ssh/authorized_keys
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ9n6B+J5S7NPnwjejPC2WrvcRzC07WPnAoQ7ZHZ0Mv9JakyWItswzI3Drz/zI0mCamyuye+9dWz9v/ZRwUfBobVyXuptRaZIwxlMC/KsTZofpp3RHOBTteZ4/VM0VhEeiOHu+GuzNE0fRB2gsusWeMMae2cq4TjVAOMcQmJX496L703Smc14gFrP8y/P9jbC5HquuVnPR29PsW4mHidPmjdKkO7QmDfFAj44pEUGeInYOJe708C03NCpsjHw8AVdAJ6Pf16EOdDH+z8D6CByVO3s8UT0HJ85BRoIy6254/hmYLzyd/eRnCXHS/dke+ivrlA3XxG4+DmqjuJR/Jpfx adellam@semovente" >> /srv/ansible/.ssh/authorized_keys
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvkwppFE+K5MjKqtkGJN63wkcwaqZG4HkgPqMSWrXmCfDPJ3FxjDHV9aQRJYVKZObc9+SsFc9IYXwB2A8FI0XwPkCH2hfFKDVNO4TktO/SrM+4tXbEfEDWX/PduBQLootYaMEVj++p2+s/mxVnxTAMzsR4txC9tkWR4JO4VJ2cpZfM8po4p1wA4YteW6Oiv0PqUEsLtPtBHGuCgovo8WS+qxcxpeBBnewEssgis2dzDSqx5HUmaOETAxxEHflapHWQLum0JjvXsG5jlf9jL44XJPkcHXAYk3gnhtyM0moJpUya+GX7+ttfWWvwxs0tYNDXNMRn91r1hMLWmas4D+T/Q== rcirillo@rcirillo-cnr" >> /srv/ansible/.ssh/authorized_keys
|
||||
/bin/chown -R ansible:ansible /srv/ansible
|
||||
/bin/chmod 700 /srv/ansible/.ssh
|
||||
mkdir -p /etc/sudoers.d
|
||||
echo "ansible ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/ansible-user
|
||||
/bin/chmod 600 /etc/sudoers.d/ansible-user
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
|
||||
|
||||
/usr/sbin/adduser --system --home /srv/ansible --shell /bin/bash --gecos "Used for the Ansible provisioning tasks" --group ansible
|
||||
|
||||
# SSH keys of users authorized to execute ansible playbooks.
|
||||
# The ones in the example belong to Andrea Dell'Amico and Tommaso Piccioli.
|
||||
# Feel free to add yours if you are entitled to run the ansible provisioning on that server
|
||||
|
||||
mkdir /srv/ansible/.ssh
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAzcHuDU7PgJwz34AsVG0E2+ZRx17ZKW1uDEGABNk3Z60/c9LTwWKPj6kcIRy6RzFJI5X+IgPJnYouXVmJsIWjVL8IRk8fP1ffJC6Fyf6H7+fCxu/Wwed5OoOCvKeZ0bEmJ1tlXFM6+EnxKqLCvz3fsNy8e4WKMnpS1hT8K6YB7PMjt60S3wOaxds1Lv4NmmgnfGM5uZFYrZCx1/GJCzNSh7AEEEUIVQ1B8xmXbet7whNiwDmiOnXSlt38dkIYT8kNMuRCj/r9wPr7FmoUCOFzUVXTcnuYagKyURrZ8QDyHbK6XQLYXgvCz/lWoErGFbDqpmBHHyvKSeLPxYfJpWJ70w== tom@tom" > /srv/ansible/.ssh/authorized_keys
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ9n6B+J5S7NPnwjejPC2WrvcRzC07WPnAoQ7ZHZ0Mv9JakyWItswzI3Drz/zI0mCamyuye+9dWz9v/ZRwUfBobVyXuptRaZIwxlMC/KsTZofpp3RHOBTteZ4/VM0VhEeiOHu+GuzNE0fRB2gsusWeMMae2cq4TjVAOMcQmJX496L703Smc14gFrP8y/P9jbC5HquuVnPR29PsW4mHidPmjdKkO7QmDfFAj44pEUGeInYOJe708C03NCpsjHw8AVdAJ6Pf16EOdDH+z8D6CByVO3s8UT0HJ85BRoIy6254/hmYLzyd/eRnCXHS/dke+ivrlA3XxG4+DmqjuJR/Jpfx adellam@semovente" >> /srv/ansible/.ssh/authorized_keys
|
||||
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvkwppFE+K5MjKqtkGJN63wkcwaqZG4HkgPqMSWrXmCfDPJ3FxjDHV9aQRJYVKZObc9+SsFc9IYXwB2A8FI0XwPkCH2hfFKDVNO4TktO/SrM+4tXbEfEDWX/PduBQLootYaMEVj++p2+s/mxVnxTAMzsR4txC9tkWR4JO4VJ2cpZfM8po4p1wA4YteW6Oiv0PqUEsLtPtBHGuCgovo8WS+qxcxpeBBnewEssgis2dzDSqx5HUmaOETAxxEHflapHWQLum0JjvXsG5jlf9jL44XJPkcHXAYk3gnhtyM0moJpUya+GX7+ttfWWvwxs0tYNDXNMRn91r1hMLWmas4D+T/Q== rcirillo@rcirillo-cnr" >> /srv/ansible/.ssh/authorized_keys
|
||||
/bin/chown -R ansible:ansible /srv/ansible
|
||||
/bin/chmod 700 /srv/ansible/.ssh
|
||||
mkdir -p /etc/sudoers.d
|
||||
echo "ansible ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/ansible-user
|
||||
/bin/chmod 600 /etc/sudoers.d/ansible-user
|
||||
|
Loading…
Reference in New Issue