Fix AWS SG issues, add all server addresses to TLS SAN (#108)

* fix SG issues, add all server addresses to TLS SAN

* add distro- and Rancher-specific port rules

* add Rancher-related ports to the private SG
Iramis Valentin 2025-10-03 17:40:01 -04:00 committed by GitHub
parent 790bd050cc
commit ea95c35c53
5 changed files with 187 additions and 13 deletions

View File

@ -22,6 +22,18 @@ locals {
secondary_private_subnet_id = (local.create_vpc && var.secondary_availability_zone != null) ? aws_subnet.secondary_private[0].id : (!local.create_vpc && var.secondary_availability_zone != null) ? data.aws_subnet.secondary_private[0].id : null
create_vpc = var.existing_vpc_name == null
myip = "${chomp(data.http.myip.response_body)}/32"
}
data "http" "myip" {
url = "https://ipv4.icanhazip.com"
lifecycle {
postcondition {
condition = contains([200], self.status_code)
error_message = "Status code invalid"
}
}
}
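Editor's note: this lookup populates local.myip, which several of the new private-SG rules below use as their source CIDR, and the postcondition aborts the plan if the lookup does not return HTTP 200. A variant that first retries transient failures is sketched below; the retry block is an assumption (it requires the hashicorp/http provider, 3.2 or newer) and is not part of this commit.

# Sketch only, not in this commit: retry the IP lookup before the postcondition fails the plan.
# Assumes the hashicorp/http provider >= 3.2 for the retry block.
data "http" "myip" {
  url = "https://ipv4.icanhazip.com"
  retry {
    attempts     = 3
    min_delay_ms = 500
  }
  lifecycle {
    postcondition {
      condition     = self.status_code == 200
      error_message = "ipv4.icanhazip.com returned a non-200 status; cannot derive local.myip"
    }
  }
}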
resource "aws_internet_gateway" "main" {
@ -192,8 +204,8 @@ resource "aws_security_group" "ssh_ipv4" {
resource "aws_vpc_security_group_ingress_rule" "prefix_ipv4" {
count = var.ssh_prefix_list != null ? 1 : 0
description = "SSH access for Approved Prefix List Public IPv4s"
ip_protocol = "-1"
description = "Full access for Approved Prefix List Public IPv4s"
ip_protocol = "-1" # semantically equivalent to all ports
prefix_list_id = data.aws_ec2_managed_prefix_list.this[0].id
security_group_id = aws_security_group.ssh_ipv4.id
}
@ -209,7 +221,7 @@ resource "aws_vpc_security_group_ingress_rule" "vpc_ssh" {
resource "aws_vpc_security_group_ingress_rule" "vpc_ssh_cidrs" {
for_each = toset([
"3.0.0.0/8", "52.0.0.0/8", "13.0.0.0/8", "18.0.0.0/8",
"3.0.0.0/8", "52.0.0.0/8", "13.0.0.0/8", "18.0.0.0/8", "54.0.0.0/8", local.myip
])
description = "SSH from Approved CIDR range (${each.value})"
from_port = 22
@ -228,6 +240,79 @@ resource "aws_vpc_security_group_ingress_rule" "public_https" {
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "public_rancher_webhook" {
description = "Allow all traffic to Rancher webhook"
security_group_id = aws_security_group.public.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 8443
to_port = 8443
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "public_tcp_weave" {
description = "Allow all traffic to Weave port"
security_group_id = aws_security_group.public.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 6783
to_port = 6783
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "public_udp_weave" {
description = "Allow all UDP traffic for Weave"
security_group_id = aws_security_group.public.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 6783
to_port = 6784
ip_protocol = "udp"
}
resource "aws_vpc_security_group_ingress_rule" "public_k8s" {
description = "Allow all traffic to k8s API port"
security_group_id = aws_security_group.public.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 6443
to_port = 6443
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "public_rke2" {
description = "Allow all traffic for RKE2 node registration"
security_group_id = aws_security_group.public.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 9345
to_port = 9345
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "public_probes" {
description = "Allow all traffic for liveness/readiness probes, monitoring, kubelet, scheduler, controller-manager, proxy"
security_group_id = aws_security_group.public.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 9099
to_port = 10260
ip_protocol = "tcp"
}
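For reference, the 9099-10260 range opened here spans several well-known node and control-plane ports; the mapping below is a documentation-only sketch (not part of this commit, names are illustrative, port numbers follow upstream Kubernetes and RKE2 defaults).

# Reference sketch, not in this commit: notable ports inside the 9099-10260 range above.
locals {
  probe_range_ports = {
    canal_felix_health      = 9099  # canal/calico health checks
    kubelet_api             = 10250
    kube_proxy_healthz      = 10256
    kube_controller_manager = 10257 # secure port
    kube_scheduler          = 10259 # secure port
  }
}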
resource "aws_vpc_security_group_ingress_rule" "public_tcp_nodeports" {
description = "Allow all TCP traffic for Kubernetes NodePorts"
security_group_id = aws_security_group.public.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 30000
to_port = 32767
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "public_udp_nodeports" {
description = "Allow all UDP traffic for Kubernetes NodePorts"
security_group_id = aws_security_group.public.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 30000
to_port = 32767
ip_protocol = "udp"
}
resource "aws_vpc_security_group_ingress_rule" "public_vpc_cidr" {
description = "Allow all traffic from VPC CIDR"
security_group_id = aws_security_group.public.id
@ -249,6 +334,87 @@ resource "aws_vpc_security_group_egress_rule" "public_traffic_ipv4" {
ip_protocol = "-1" # semantically equivalent to all ports
}
resource "aws_vpc_security_group_ingress_rule" "private_https" {
description = "Allow HTTPS from all sources"
security_group_id = aws_security_group.private.id
cidr_ipv4 = "0.0.0.0/0"
from_port = 443
to_port = 443
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "private_rancher_webhook" {
description = "Allow traffic from this machine to Rancher webhook"
security_group_id = aws_security_group.private.id
cidr_ipv4 = local.myip
from_port = 8443
to_port = 8443
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "private_tcp_weave" {
description = "Allow traffic from this machine to Weave port"
security_group_id = aws_security_group.private.id
cidr_ipv4 = local.myip
from_port = 6783
to_port = 6783
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "private_udp_weave" {
description = "Allow UDP traffic from this machine for Weave"
security_group_id = aws_security_group.private.id
cidr_ipv4 = local.myip
from_port = 6783
to_port = 6784
ip_protocol = "udp"
}
resource "aws_vpc_security_group_ingress_rule" "private_k8s" {
description = "Allow traffic from this machine to k8s API port"
security_group_id = aws_security_group.private.id
cidr_ipv4 = local.myip
from_port = 6443
to_port = 6443
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "private_rke2" {
description = "Allow traffic from this machine for RKE2 node registration"
security_group_id = aws_security_group.private.id
cidr_ipv4 = local.myip
from_port = 9345
to_port = 9345
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "private_probes" {
description = "Allow traffic from this machine for liveness/readiness probes, monitoring, kubelet, scheduler, controller-manager, proxy"
security_group_id = aws_security_group.private.id
cidr_ipv4 = local.myip
from_port = 9099
to_port = 10260
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "private_tcp_nodeports" {
description = "Allow TCP traffic from this machine for Kubernetes NodePorts"
security_group_id = aws_security_group.private.id
cidr_ipv4 = local.myip
from_port = 30000
to_port = 32767
ip_protocol = "tcp"
}
resource "aws_vpc_security_group_ingress_rule" "private_udp_nodeports" {
description = "Allow UDP traffic from this machine for Kubernetes NodePorts"
security_group_id = aws_security_group.private.id
cidr_ipv4 = local.myip
from_port = 30000
to_port = 32767
ip_protocol = "udp"
}
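Editor's note: the per-port ingress rules above (and their public-SG counterparts) all share the same shape; the TCP ones could be collapsed into a single for_each resource while keeping the same ports and the local.myip source, as in the sketch below. The resource and local names here are illustrative and not part of this commit.

# Sketch only, not in this commit: collapse the per-port admin TCP rules into one resource.
locals {
  admin_tcp_ports = {
    rancher_webhook = { from = 8443, to = 8443 }
    weave           = { from = 6783, to = 6783 }
    k8s_api         = { from = 6443, to = 6443 }
    rke2_supervisor = { from = 9345, to = 9345 }
    probes          = { from = 9099, to = 10260 }
    nodeports       = { from = 30000, to = 32767 }
  }
}

resource "aws_vpc_security_group_ingress_rule" "private_admin_tcp" {
  for_each          = local.admin_tcp_ports
  description       = "Allow TCP ${each.key} from this machine"
  security_group_id = aws_security_group.private.id
  cidr_ipv4         = local.myip
  from_port         = each.value.from
  to_port           = each.value.to
  ip_protocol       = "tcp"
}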
resource "aws_vpc_security_group_ingress_rule" "private_vpc_cidr" {
description = "Allow all traffic from VPC CIDR"
security_group_id = aws_security_group.private.id
@ -326,4 +492,10 @@ module "bastion" {
ssh_bastion_host : null
ssh_bastion_user : null
}
depends_on = [
aws_nat_gateway.nat,
aws_route_table_association.public,
aws_route_table_association.private
]
}

View File

@ -23,14 +23,14 @@ resource "null_resource" "host_configuration" {
depends_on = [aws_instance.instance]
connection {
- host = var.network_config.ssh_bastion_host == null ? aws_instance.instance.public_dns : aws_instance.instance.private_dns
+ host = var.public ? aws_instance.instance.public_dns : aws_instance.instance.private_dns
private_key = file(var.ssh_private_key_path)
user = var.ssh_user
bastion_host = var.network_config.ssh_bastion_host
bastion_user = var.network_config.ssh_bastion_user
bastion_private_key = file(var.ssh_private_key_path)
timeout = "240s"
timeout = "60s"
}
provisioner "file" {

View File

@ -72,7 +72,7 @@ resource "ssh_sensitive_resource" "first_server_installation" {
content = templatefile("${path.module}/install_k3s.sh", {
get_k3s_path = local.get_k3s_path
distro_version = var.distro_version,
- sans = concat([module.server_nodes[0].private_name], var.sans)
+ sans = local.sans
exec = "server"
token = null
server_url = null
@ -128,7 +128,7 @@ resource "ssh_resource" "additional_server_installation" {
content = templatefile("${path.module}/install_k3s.sh", {
get_k3s_path = local.get_k3s_path
distro_version = var.distro_version,
- sans = [module.server_nodes[count.index + 1].private_name]
+ sans = local.sans
exec = "server"
token = ssh_sensitive_resource.first_server_installation[0].result
server_url = "https://${module.server_nodes[0].private_name}:6443"
@ -176,7 +176,7 @@ resource "ssh_resource" "agent_installation" {
content = templatefile("${path.module}/install_k3s.sh", {
get_k3s_path = local.get_k3s_path
distro_version = var.distro_version,
- sans = [module.agent_nodes[count.index].private_name]
+ sans = concat([module.agent_nodes[count.index].private_name], local.sans)
exec = "agent"
token = ssh_sensitive_resource.first_server_installation[0].result
server_url = "https://${module.server_nodes[0].private_name}:6443"
@ -207,10 +207,11 @@ resource "ssh_resource" "agent_installation" {
]
}
locals {
get_k3s_path = "/tmp/get_k3s.sh"
local_kubernetes_api_url = var.create_tunnels ? "https://${var.sans[0]}:${var.local_kubernetes_api_port}" : "https://${module.server_nodes[0].public_name}:6443"
public_sans = concat(module.server_nodes[*].public_name, module.server_nodes[*].public_ip)
sans = distinct(concat(var.sans, var.public ? local.public_sans : [], module.server_nodes[*].private_ip, module.server_nodes[*].private_name))
}
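Editor's note: both the k3s and rke2 modules now derive the full SAN set in local.sans, so exposing it makes it easier to check the issued API server certificate after apply. A minimal sketch of such an output follows; the name computed_tls_sans is hypothetical and not part of this commit.

# Sketch only, not in this commit: expose the computed SAN list for verification.
output "computed_tls_sans" {
  description = "Hostnames and IPs expected as SANs on the Kubernetes API server certificate"
  value       = local.sans
}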
resource "local_file" "kubeconfig" {

View File

@ -72,7 +72,7 @@ resource "ssh_sensitive_resource" "first_server_installation" {
content = templatefile("${path.module}/install_rke2.sh", {
get_rke2_path = local.get_rke2_path
distro_version = var.distro_version,
- sans = concat([module.server_nodes[0].private_name], var.sans)
+ sans = local.sans
type = "server"
token = null
server_url = null
@ -126,7 +126,7 @@ resource "ssh_resource" "additional_server_installation" {
content = templatefile("${path.module}/install_rke2.sh", {
get_rke2_path = local.get_rke2_path
distro_version = var.distro_version,
- sans = [module.server_nodes[count.index + 1].private_name]
+ sans = local.sans
type = "server"
token = ssh_sensitive_resource.first_server_installation[0].result
server_url = "https://${module.server_nodes[0].private_name}:9345"
@ -172,7 +172,7 @@ resource "ssh_resource" "agent_installation" {
content = templatefile("${path.module}/install_rke2.sh", {
get_rke2_path = local.get_rke2_path
distro_version = var.distro_version,
- sans = [module.agent_nodes[count.index].private_name]
+ sans = concat([module.agent_nodes[count.index].private_name], local.sans)
type = "agent"
token = ssh_sensitive_resource.first_server_installation[0].result
server_url = "https://${module.server_nodes[0].private_name}:9345"
@ -204,6 +204,8 @@ resource "ssh_resource" "agent_installation" {
locals {
get_rke2_path = "/tmp/get_rke2.sh"
local_kubernetes_api_url = var.create_tunnels ? "https://${var.sans[0]}:${var.local_kubernetes_api_port}" : "https://${module.server_nodes[0].public_name}:6443"
public_sans = concat(module.server_nodes[*].public_name, module.server_nodes[*].public_ip)
sans = distinct(concat(var.sans, var.public ? local.public_sans : [], module.server_nodes[*].private_ip, module.server_nodes[*].private_name))
}
resource "local_file" "kubeconfig" {

View File

@ -64,7 +64,6 @@ module "tester_cluster" {
node_module_variables = var.tester_cluster.node_module_variables
}
module "downstream_clusters" {
- count = length(local.downstream_clusters)
source = "../../${var.downstream_cluster_distro_module}"