From 577df0addf4ce1bdbc50ba34899d28c2ff9bb371 Mon Sep 17 00:00:00 2001
From: Peter Rifel
Date: Sun, 28 Feb 2021 12:34:54 -0600
Subject: [PATCH 1/2] Add support for defining a transit gateway for a default route

I opted to just use a *string for the TGW ID in the route task rather
than a full TGW task because kops will never be creating TGWs itself;
they will always be defined by ID.
---
 docs/cluster_spec.md                   | 12 +++++++++
 pkg/apis/kops/cluster.go               |  4 ++-
 pkg/apis/kops/validation/validation.go |  4 +--
 pkg/model/network.go                   |  8 ++++--
 upup/pkg/fi/cloudup/awstasks/route.go  | 36 +++++++++++++++++++-------
 5 files changed, 50 insertions(+), 14 deletions(-)
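For context on the model wiring in this patch: a minimal, runnable sketch of the prefix dispatch that NetworkModelBuilder.Build gains, showing why a "tgw-" egress value can be carried as a plain *string. The egressTarget helper is hypothetical (the real builder constructs tasks inline), and the NAT/EIP/instance branches are elided.

```go
package main

import (
	"fmt"
	"strings"
)

// egressTarget is a hypothetical helper mirroring the dispatch in
// NetworkModelBuilder.Build: egress values are matched by ID prefix,
// and a "tgw-" value is passed through as a plain *string because
// kOps only ever references existing transit gateways by ID.
func egressTarget(egress string) (*string, error) {
	switch {
	case strings.HasPrefix(egress, "nat-"),
		strings.HasPrefix(egress, "eipalloc-"),
		strings.HasPrefix(egress, "i-"):
		return nil, nil // handled by the NAT/EIP/instance branches, elided here
	case strings.HasPrefix(egress, "tgw-"):
		return &egress, nil
	case egress == "External":
		return nil, nil // egress is managed outside of kOps
	default:
		return nil, fmt.Errorf("unsupported egress value: %q", egress)
	}
}

func main() {
	tgwID, err := egressTarget("tgw-0123456789abcdef0")
	fmt.Println(*tgwID, err) // tgw-0123456789abcdef0 <nil>
}
```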
diff --git a/docs/cluster_spec.md b/docs/cluster_spec.md
index f4d648db73..a19a1d5a9c 100644
--- a/docs/cluster_spec.md
+++ b/docs/cluster_spec.md
@@ -304,6 +304,18 @@ spec:
     zone: us-east-1a
 ```
 
+Specifying an existing AWS Transit Gateway is also supported as of kOps 1.20.0:
+
+```yaml
+spec:
+  subnets:
+  - cidr: 10.20.64.0/21
+    name: us-east-1a
+    egress: tgw-0123456789abcdef0
+    type: Private
+    zone: us-east-1a
+```
+
 In the case that you don't use NAT gateways or internet gateways, kOps 1.12.0 introduced the "External" flag for egress to force kOps to ignore egress for the subnet. This can be useful when other tools are used to manage egress for the subnet such as virtual private gateways. Please note that your cluster may need to have access to the internet upon creation, so egress must be available upon initializing a cluster. This is intended for use when egress is managed external to kOps, typically with an existing cluster.
 
 ```yaml
diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go
index 9b19d1b094..4d8a8ea570 100644
--- a/pkg/apis/kops/cluster.go
+++ b/pkg/apis/kops/cluster.go
@@ -574,8 +574,10 @@ const (
 	EgressNatGateway = "nat"
 	// EgressElasticIP means that egress configuration is using a NAT Gateway with an existing Elastic IP
 	EgressElasticIP = "eipalloc"
-	// EgressElasticIP means that egress configuration is using an existing NAT Instance
+	// EgressNatInstance means that egress configuration is using an existing NAT Instance
 	EgressNatInstance = "i"
+	// EgressTransitGateway means that egress configuration is using a Transit Gateway
+	EgressTransitGateway = "tgw"
 	// EgressExternal means that egress configuration is done externally (preconfigured)
 	EgressExternal = "External"
 )
diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go
index b2b22c3b5b..c281eeaa80 100644
--- a/pkg/apis/kops/validation/validation.go
+++ b/pkg/apis/kops/validation/validation.go
@@ -359,9 +359,9 @@ func validateSubnet(subnet *kops.ClusterSubnetSpec, fieldPath *field.Path) field
 
 	if subnet.Egress != "" {
 		egressType := strings.Split(subnet.Egress, "-")[0]
-		if egressType != kops.EgressNatGateway && egressType != kops.EgressElasticIP && egressType != kops.EgressNatInstance && egressType != kops.EgressExternal {
+		if egressType != kops.EgressNatGateway && egressType != kops.EgressElasticIP && egressType != kops.EgressNatInstance && egressType != kops.EgressExternal && egressType != kops.EgressTransitGateway {
 			allErrs = append(allErrs, field.Invalid(fieldPath.Child("egress"), subnet.Egress,
-				"egress must be of type NAT Gateway, NAT Gateway with existing ElasticIP, NAT EC2 Instance or External"))
+				"egress must be of type NAT Gateway, NAT Gateway with existing ElasticIP, NAT EC2 Instance, Transit Gateway, or External"))
 		}
 		if subnet.Egress != kops.EgressExternal && subnet.Type != "Private" {
 			allErrs = append(allErrs, field.Forbidden(fieldPath.Child("egress"), "egress can only be specified for private subnets"))
diff --git a/pkg/model/network.go b/pkg/model/network.go
index a4c2ed0299..0382546ce9 100644
--- a/pkg/model/network.go
+++ b/pkg/model/network.go
@@ -303,6 +303,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 		}
 
 		var ngw *awstasks.NatGateway
+		var tgwID *string
 		var in *awstasks.Instance
 		if egress != "" {
 			if strings.HasPrefix(egress, "nat-") {
@@ -353,7 +354,8 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 				}
 
 				c.AddTask(in)
-
+			} else if strings.HasPrefix(egress, "tgw-") {
+				tgwID = &egress
 			} else if egress == "External" {
 				// Nothing to do here
 			} else {
@@ -439,7 +441,9 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 				Lifecycle:  b.Lifecycle,
 				CIDR:       s("0.0.0.0/0"),
 				RouteTable: rt,
-				NatGateway: ngw,
+				// Only one of these will be non-nil
+				NatGateway:       ngw,
+				TransitGatewayID: tgwID,
 			}
 		}
 		c.AddTask(r)
diff --git a/upup/pkg/fi/cloudup/awstasks/route.go b/upup/pkg/fi/cloudup/awstasks/route.go
index 192b60c1ff..43303f275a 100644
--- a/upup/pkg/fi/cloudup/awstasks/route.go
+++ b/upup/pkg/fi/cloudup/awstasks/route.go
@@ -37,10 +37,11 @@ type Route struct {
 	Instance *Instance
 	CIDR     *string
 
-	// Either an InternetGateway or a NAT Gateway
+	// Exactly one of the fields below
 	// MUST be provided.
-	InternetGateway *InternetGateway
-	NatGateway      *NatGateway
+	InternetGateway  *InternetGateway
+	NatGateway       *NatGateway
+	TransitGatewayID *string
 }
 
 func (e *Route) Find(c *fi.Context) (*Route, error) {
@@ -88,12 +89,16 @@ func (e *Route) Find(c *fi.Context) (*Route, error) {
 		if r.InstanceId != nil {
 			actual.Instance = &Instance{ID: r.InstanceId}
 		}
+		if r.TransitGatewayId != nil {
+			actual.TransitGatewayID = r.TransitGatewayId
+		}
 
 		if aws.StringValue(r.State) == "blackhole" {
 			klog.V(2).Infof("found route is a blackhole route")
 			// These should be nil anyway, but just in case...
 			actual.Instance = nil
 			actual.InternetGateway = nil
+			actual.TransitGatewayID = nil
 		}
 
 		// Prevent spurious changes
@@ -130,11 +135,14 @@ func (s *Route) CheckChanges(a, e, changes *Route) error {
 		if e.NatGateway != nil {
 			targetCount++
 		}
+		if e.TransitGatewayID != nil {
+			targetCount++
+		}
 		if targetCount == 0 {
-			return fmt.Errorf("InternetGateway or Instance or NatGateway is required")
+			return fmt.Errorf("InternetGateway, Instance, NatGateway, or TransitGateway is required")
 		}
 		if targetCount != 1 {
-			return fmt.Errorf("Cannot set more than 1 InternetGateway or Instance or NatGateway")
+			return fmt.Errorf("Cannot set more than 1 InternetGateway, Instance, NatGateway, or TransitGateway")
 		}
 	}
 
@@ -155,12 +163,14 @@ func (_ *Route) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Route) error {
 		request.RouteTableId = checkNotNil(e.RouteTable.ID)
 		request.DestinationCidrBlock = checkNotNil(e.CIDR)
 
-		if e.InternetGateway == nil && e.NatGateway == nil {
+		if e.InternetGateway == nil && e.NatGateway == nil && e.TransitGatewayID == nil {
 			return fmt.Errorf("missing target for route")
 		} else if e.InternetGateway != nil {
 			request.GatewayId = checkNotNil(e.InternetGateway.ID)
 		} else if e.NatGateway != nil {
 			request.NatGatewayId = checkNotNil(e.NatGateway.ID)
+		} else if e.TransitGatewayID != nil {
+			request.TransitGatewayId = e.TransitGatewayID
 		}
 
 		if e.Instance != nil {
@@ -188,12 +198,14 @@ func (_ *Route) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Route) error {
 		request.RouteTableId = checkNotNil(e.RouteTable.ID)
 		request.DestinationCidrBlock = checkNotNil(e.CIDR)
 
-		if e.InternetGateway == nil && e.NatGateway == nil {
+		if e.InternetGateway == nil && e.NatGateway == nil && e.TransitGatewayID == nil {
 			return fmt.Errorf("missing target for route")
 		} else if e.InternetGateway != nil {
 			request.GatewayId = checkNotNil(e.InternetGateway.ID)
 		} else if e.NatGateway != nil {
 			request.NatGatewayId = checkNotNil(e.NatGateway.ID)
+		} else if e.TransitGatewayID != nil {
+			request.TransitGatewayId = e.TransitGatewayID
 		}
 
 		if e.Instance != nil {
@@ -228,6 +240,7 @@ type terraformRoute struct {
 	CIDR              *string            `json:"destination_cidr_block,omitempty" cty:"destination_cidr_block"`
 	InternetGatewayID *terraform.Literal `json:"gateway_id,omitempty" cty:"gateway_id"`
 	NATGatewayID      *terraform.Literal `json:"nat_gateway_id,omitempty" cty:"nat_gateway_id"`
+	TransitGatewayID  *string            `json:"transit_gateway_id,omitempty" cty:"transit_gateway_id"`
 	InstanceID        *terraform.Literal `json:"instance_id,omitempty" cty:"instance_id"`
 }
 
@@ -237,12 +250,14 @@ func (_ *Route) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Rou
 		RouteTableID: e.RouteTable.TerraformLink(),
 	}
 
-	if e.InternetGateway == nil && e.NatGateway == nil {
+	if e.InternetGateway == nil && e.NatGateway == nil && e.TransitGatewayID == nil {
 		return fmt.Errorf("missing target for route")
 	} else if e.InternetGateway != nil {
 		tf.InternetGatewayID = e.InternetGateway.TerraformLink()
 	} else if e.NatGateway != nil {
 		tf.NATGatewayID = e.NatGateway.TerraformLink()
+	} else if e.TransitGatewayID != nil {
+		tf.TransitGatewayID = e.TransitGatewayID
 	}
 
 	if e.Instance != nil {
@@ -260,6 +275,7 @@ type cloudformationRoute struct {
 	CIDR              *string                 `json:"DestinationCidrBlock,omitempty"`
 	InternetGatewayID *cloudformation.Literal `json:"GatewayId,omitempty"`
 	NATGatewayID      *cloudformation.Literal `json:"NatGatewayId,omitempty"`
+	TransitGatewayID  *string                 `json:"TransitGatewayId,omitempty"`
 	InstanceID        *cloudformation.Literal `json:"InstanceId,omitempty"`
 }
 
@@ -269,12 +285,14 @@ func (_ *Route) RenderCloudformation(t *cloudformation.CloudformationTarget, a,
 		RouteTableID: e.RouteTable.CloudformationLink(),
 	}
 
-	if e.InternetGateway == nil && e.NatGateway == nil {
+	if e.InternetGateway == nil && e.NatGateway == nil && e.TransitGatewayID == nil {
 		return fmt.Errorf("missing target for route")
 	} else if e.InternetGateway != nil {
 		tf.InternetGatewayID = e.InternetGateway.CloudformationLink()
 	} else if e.NatGateway != nil {
 		tf.NATGatewayID = e.NatGateway.CloudformationLink()
+	} else if e.TransitGatewayID != nil {
+		tf.TransitGatewayID = e.TransitGatewayID
 	}
 
 	if e.Instance != nil {
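A runnable distillation of the one-of-four rule that Route.CheckChanges enforces after this patch. The empty struct types are stand-ins for the awstasks types; only the counting logic mirrors the task code above.

```go
package main

import "fmt"

// Stand-ins for the awstasks types; only presence/absence matters here.
type (
	InternetGateway struct{}
	NatGateway      struct{}
	Instance        struct{}
)

type Route struct {
	Instance         *Instance
	InternetGateway  *InternetGateway
	NatGateway       *NatGateway
	TransitGatewayID *string
}

// checkTarget mirrors the targetCount logic in Route.CheckChanges:
// exactly one of the four route targets must be set.
func checkTarget(e *Route) error {
	targetCount := 0
	if e.InternetGateway != nil {
		targetCount++
	}
	if e.Instance != nil {
		targetCount++
	}
	if e.NatGateway != nil {
		targetCount++
	}
	if e.TransitGatewayID != nil {
		targetCount++
	}
	if targetCount == 0 {
		return fmt.Errorf("InternetGateway, Instance, NatGateway, or TransitGateway is required")
	}
	if targetCount != 1 {
		return fmt.Errorf("cannot set more than 1 InternetGateway, Instance, NatGateway, or TransitGateway")
	}
	return nil
}

func main() {
	tgw := "tgw-0123456789abcdef0"
	fmt.Println(checkTarget(&Route{TransitGatewayID: &tgw}))                            // <nil>
	fmt.Println(checkTarget(&Route{NatGateway: &NatGateway{}, TransitGatewayID: &tgw})) // error
}
```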
From 2ebd448602c8f218ec941908e1b5a923092c8b8a Mon Sep 17 00:00:00 2001
From: Peter Rifel
Date: Sun, 28 Feb 2021 12:36:00 -0600
Subject: [PATCH 2/2] Add integration test for transit gateway support

---
 cloudmock/aws/mockec2/routetable.go         |   1 +
 .../complex/cloudformation.json             | 146 ++++++++++++++++++
 .../complex/in-legacy-v1alpha2.yaml         |   9 ++
 .../update_cluster/complex/in-v1alpha2.yaml |   9 ++
 .../update_cluster/complex/kubernetes.tf    | 103 ++++++++++--
 5 files changed, 253 insertions(+), 15 deletions(-)
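The cloudmock change below is a single line; for context, this sketch shows the behavior it enables, using the real aws-sdk-go types: CreateRoute now records TransitGatewayId on the stored route, so the task's Find can read it back during the integration test. The resource IDs are arbitrary test values.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	request := &ec2.CreateRouteInput{
		RouteTableId:         aws.String("rtb-0123456789abcdef0"), // arbitrary test ID
		DestinationCidrBlock: aws.String("0.0.0.0/0"),
		TransitGatewayId:     aws.String("tgw-123456"),
	}

	// What the mock now stores: TransitGatewayId is carried over
	// alongside the other possible route targets.
	route := &ec2.Route{
		DestinationCidrBlock: request.DestinationCidrBlock,
		TransitGatewayId:     request.TransitGatewayId,
	}

	fmt.Println(aws.StringValue(route.TransitGatewayId)) // tgw-123456
}
```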
diff --git a/cloudmock/aws/mockec2/routetable.go b/cloudmock/aws/mockec2/routetable.go
index 4d2b6b7653..e42c9e6826 100644
--- a/cloudmock/aws/mockec2/routetable.go
+++ b/cloudmock/aws/mockec2/routetable.go
@@ -164,6 +164,7 @@ func (m *MockEC2) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOu
 		InstanceId:             request.InstanceId,
 		NatGatewayId:           request.NatGatewayId,
 		NetworkInterfaceId:     request.NetworkInterfaceId,
+		TransitGatewayId:       request.TransitGatewayId,
 		VpcPeeringConnectionId: request.VpcPeeringConnectionId,
 	}
 
diff --git a/tests/integration/update_cluster/complex/cloudformation.json b/tests/integration/update_cluster/complex/cloudformation.json
index 2ee5e35d52..a2c84b7374 100644
--- a/tests/integration/update_cluster/complex/cloudformation.json
+++ b/tests/integration/update_cluster/complex/cloudformation.json
@@ -596,6 +596,50 @@
         ]
       }
     },
+    "AWSEC2RouteTableprivateustest1acomplexexamplecom": {
+      "Type": "AWS::EC2::RouteTable",
+      "Properties": {
+        "VpcId": {
+          "Ref": "AWSEC2VPCcomplexexamplecom"
+        },
+        "Tags": [
+          {
+            "Key": "KubernetesCluster",
+            "Value": "complex.example.com"
+          },
+          {
+            "Key": "Name",
+            "Value": "private-us-test-1a.complex.example.com"
+          },
+          {
+            "Key": "Owner",
+            "Value": "John Doe"
+          },
+          {
+            "Key": "foo/bar",
+            "Value": "fib+baz"
+          },
+          {
+            "Key": "kubernetes.io/cluster/complex.example.com",
+            "Value": "owned"
+          },
+          {
+            "Key": "kubernetes.io/kops/role",
+            "Value": "private-us-test-1a"
+          }
+        ]
+      }
+    },
+    "AWSEC2Routeprivateustest1a00000": {
+      "Type": "AWS::EC2::Route",
+      "Properties": {
+        "RouteTableId": {
+          "Ref": "AWSEC2RouteTableprivateustest1acomplexexamplecom"
+        },
+        "DestinationCidrBlock": "0.0.0.0/0",
+        "TransitGatewayId": "tgw-123456"
+      }
+    },
     "AWSEC2SecurityGroupEgressfrommasterscomplexexamplecomegressall0to000000": {
       "Type": "AWS::EC2::SecurityGroupEgress",
       "Properties": {
@@ -1018,6 +1062,28 @@
         ]
       }
     },
+    "AWSEC2SubnetRouteTableAssociationprivateuseast1aprivatecomplexexamplecom": {
+      "Type": "AWS::EC2::SubnetRouteTableAssociation",
+      "Properties": {
+        "SubnetId": {
+          "Ref": "AWSEC2Subnetuseast1aprivatecomplexexamplecom"
+        },
+        "RouteTableId": {
+          "Ref": "AWSEC2RouteTableprivateustest1acomplexexamplecom"
+        }
+      }
+    },
+    "AWSEC2SubnetRouteTableAssociationuseast1autilitycomplexexamplecom": {
+      "Type": "AWS::EC2::SubnetRouteTableAssociation",
+      "Properties": {
+        "SubnetId": {
+          "Ref": "AWSEC2Subnetuseast1autilitycomplexexamplecom"
+        },
+        "RouteTableId": {
+          "Ref": "AWSEC2RouteTablecomplexexamplecom"
+        }
+      }
+    },
     "AWSEC2SubnetRouteTableAssociationustest1acomplexexamplecom": {
       "Type": "AWS::EC2::SubnetRouteTableAssociation",
       "Properties": {
@@ -1029,6 +1095,86 @@
         }
       }
     },
+    "AWSEC2Subnetuseast1aprivatecomplexexamplecom": {
+      "Type": "AWS::EC2::Subnet",
+      "Properties": {
+        "VpcId": {
+          "Ref": "AWSEC2VPCcomplexexamplecom"
+        },
+        "CidrBlock": "172.20.64.0/19",
+        "AvailabilityZone": "us-test-1a",
+        "Tags": [
+          {
+            "Key": "KubernetesCluster",
+            "Value": "complex.example.com"
+          },
+          {
+            "Key": "Name",
+            "Value": "us-east-1a-private.complex.example.com"
+          },
+          {
+            "Key": "Owner",
+            "Value": "John Doe"
+          },
+          {
+            "Key": "SubnetType",
+            "Value": "Private"
+          },
+          {
+            "Key": "foo/bar",
+            "Value": "fib+baz"
+          },
+          {
+            "Key": "kubernetes.io/cluster/complex.example.com",
+            "Value": "owned"
+          },
+          {
+            "Key": "kubernetes.io/role/internal-elb",
+            "Value": "1"
+          }
+        ]
+      }
+    },
+    "AWSEC2Subnetuseast1autilitycomplexexamplecom": {
+      "Type": "AWS::EC2::Subnet",
+      "Properties": {
+        "VpcId": {
+          "Ref": "AWSEC2VPCcomplexexamplecom"
+        },
+        "CidrBlock": "172.20.96.0/19",
+        "AvailabilityZone": "us-test-1a",
+        "Tags": [
+          {
+            "Key": "KubernetesCluster",
+            "Value": "complex.example.com"
+          },
+          {
+            "Key": "Name",
+            "Value": "us-east-1a-utility.complex.example.com"
+          },
+          {
+            "Key": "Owner",
+            "Value": "John Doe"
+          },
+          {
+            "Key": "SubnetType",
+            "Value": "Utility"
+          },
+          {
+            "Key": "foo/bar",
+            "Value": "fib+baz"
+          },
+          {
+            "Key": "kubernetes.io/cluster/complex.example.com",
+            "Value": "owned"
+          },
+          {
+            "Key": "kubernetes.io/role/elb",
+            "Value": "1"
+          }
+        ]
+      }
+    },
    "AWSEC2Subnetustest1acomplexexamplecom": {
       "Type": "AWS::EC2::Subnet",
       "Properties": {
"AWSEC2RouteTablecomplexexamplecom" + } + } + }, "AWSEC2SubnetRouteTableAssociationustest1acomplexexamplecom": { "Type": "AWS::EC2::SubnetRouteTableAssociation", "Properties": { @@ -1029,6 +1095,86 @@ } } }, + "AWSEC2Subnetuseast1aprivatecomplexexamplecom": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "AWSEC2VPCcomplexexamplecom" + }, + "CidrBlock": "172.20.64.0/19", + "AvailabilityZone": "us-test-1a", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "complex.example.com" + }, + { + "Key": "Name", + "Value": "us-east-1a-private.complex.example.com" + }, + { + "Key": "Owner", + "Value": "John Doe" + }, + { + "Key": "SubnetType", + "Value": "Private" + }, + { + "Key": "foo/bar", + "Value": "fib+baz" + }, + { + "Key": "kubernetes.io/cluster/complex.example.com", + "Value": "owned" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "AWSEC2Subnetuseast1autilitycomplexexamplecom": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "AWSEC2VPCcomplexexamplecom" + }, + "CidrBlock": "172.20.96.0/19", + "AvailabilityZone": "us-test-1a", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "complex.example.com" + }, + { + "Key": "Name", + "Value": "us-east-1a-utility.complex.example.com" + }, + { + "Key": "Owner", + "Value": "John Doe" + }, + { + "Key": "SubnetType", + "Value": "Utility" + }, + { + "Key": "foo/bar", + "Value": "fib+baz" + }, + { + "Key": "kubernetes.io/cluster/complex.example.com", + "Value": "owned" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, "AWSEC2Subnetustest1acomplexexamplecom": { "Type": "AWS::EC2::Subnet", "Properties": { diff --git a/tests/integration/update_cluster/complex/in-legacy-v1alpha2.yaml b/tests/integration/update_cluster/complex/in-legacy-v1alpha2.yaml index a39d22e007..38367fcc36 100644 --- a/tests/integration/update_cluster/complex/in-legacy-v1alpha2.yaml +++ b/tests/integration/update_cluster/complex/in-legacy-v1alpha2.yaml @@ -71,6 +71,15 @@ spec: name: us-test-1a type: Public zone: us-test-1a + - cidr: 172.20.64.0/19 + name: us-east-1a-private + type: Private + zone: us-test-1a + egress: tgw-123456 + - cidr: 172.20.96.0/19 + name: us-east-1a-utility + type: Utility + zone: us-test-1a --- diff --git a/tests/integration/update_cluster/complex/in-v1alpha2.yaml b/tests/integration/update_cluster/complex/in-v1alpha2.yaml index f1e80a900e..f89bea9915 100644 --- a/tests/integration/update_cluster/complex/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/complex/in-v1alpha2.yaml @@ -71,6 +71,15 @@ spec: name: us-test-1a type: Public zone: us-test-1a + - cidr: 172.20.64.0/19 + name: us-east-1a-private + type: Private + zone: us-test-1a + egress: tgw-123456 + - cidr: 172.20.96.0/19 + name: us-east-1a-utility + type: Utility + zone: us-test-1a --- diff --git a/tests/integration/update_cluster/complex/kubernetes.tf b/tests/integration/update_cluster/complex/kubernetes.tf index 53b552d1d8..7f6869a805 100644 --- a/tests/integration/update_cluster/complex/kubernetes.tf +++ b/tests/integration/update_cluster/complex/kubernetes.tf @@ -1,19 +1,22 @@ locals { - cluster_name = "complex.example.com" - master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-complex-example-com.id] - master_security_group_ids = [aws_security_group.masters-complex-example-com.id, "sg-exampleid5", "sg-exampleid6"] - masters_role_arn = aws_iam_role.masters-complex-example-com.arn - masters_role_name = aws_iam_role.masters-complex-example-com.name - 
diff --git a/tests/integration/update_cluster/complex/kubernetes.tf b/tests/integration/update_cluster/complex/kubernetes.tf
index 53b552d1d8..7f6869a805 100644
--- a/tests/integration/update_cluster/complex/kubernetes.tf
+++ b/tests/integration/update_cluster/complex/kubernetes.tf
@@ -1,19 +1,22 @@
 locals {
-  cluster_name                 = "complex.example.com"
-  master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-complex-example-com.id]
-  master_security_group_ids    = [aws_security_group.masters-complex-example-com.id, "sg-exampleid5", "sg-exampleid6"]
-  masters_role_arn             = aws_iam_role.masters-complex-example-com.arn
-  masters_role_name            = aws_iam_role.masters-complex-example-com.name
-  node_autoscaling_group_ids   = [aws_autoscaling_group.nodes-complex-example-com.id]
-  node_security_group_ids      = [aws_security_group.nodes-complex-example-com.id, "sg-exampleid3", "sg-exampleid4"]
-  node_subnet_ids              = [aws_subnet.us-test-1a-complex-example-com.id]
-  nodes_role_arn               = aws_iam_role.nodes-complex-example-com.arn
-  nodes_role_name              = aws_iam_role.nodes-complex-example-com.name
-  region                       = "us-test-1"
-  route_table_public_id        = aws_route_table.complex-example-com.id
-  subnet_us-test-1a_id         = aws_subnet.us-test-1a-complex-example-com.id
-  vpc_cidr_block               = aws_vpc.complex-example-com.cidr_block
-  vpc_id                       = aws_vpc.complex-example-com.id
+  cluster_name                      = "complex.example.com"
+  master_autoscaling_group_ids      = [aws_autoscaling_group.master-us-test-1a-masters-complex-example-com.id]
+  master_security_group_ids         = [aws_security_group.masters-complex-example-com.id, "sg-exampleid5", "sg-exampleid6"]
+  masters_role_arn                  = aws_iam_role.masters-complex-example-com.arn
+  masters_role_name                 = aws_iam_role.masters-complex-example-com.name
+  node_autoscaling_group_ids        = [aws_autoscaling_group.nodes-complex-example-com.id]
+  node_security_group_ids           = [aws_security_group.nodes-complex-example-com.id, "sg-exampleid3", "sg-exampleid4"]
+  node_subnet_ids                   = [aws_subnet.us-test-1a-complex-example-com.id]
+  nodes_role_arn                    = aws_iam_role.nodes-complex-example-com.arn
+  nodes_role_name                   = aws_iam_role.nodes-complex-example-com.name
+  region                            = "us-test-1"
+  route_table_private-us-test-1a_id = aws_route_table.private-us-test-1a-complex-example-com.id
+  route_table_public_id             = aws_route_table.complex-example-com.id
+  subnet_us-east-1a-private_id      = aws_subnet.us-east-1a-private-complex-example-com.id
+  subnet_us-east-1a-utility_id      = aws_subnet.us-east-1a-utility-complex-example-com.id
+  subnet_us-test-1a_id              = aws_subnet.us-test-1a-complex-example-com.id
+  vpc_cidr_block                    = aws_vpc.complex-example-com.cidr_block
+  vpc_id                            = aws_vpc.complex-example-com.id
 }
 
 output "cluster_name" {
@@ -60,10 +63,22 @@ output "region" {
   value = "us-test-1"
 }
 
+output "route_table_private-us-test-1a_id" {
+  value = aws_route_table.private-us-test-1a-complex-example-com.id
+}
+
 output "route_table_public_id" {
   value = aws_route_table.complex-example-com.id
 }
 
+output "subnet_us-east-1a-private_id" {
+  value = aws_subnet.us-east-1a-private-complex-example-com.id
+}
+
+output "subnet_us-east-1a-utility_id" {
+  value = aws_subnet.us-east-1a-utility-complex-example-com.id
+}
+
 output "subnet_us-test-1a_id" {
   value = aws_subnet.us-test-1a-complex-example-com.id
 }
"Name" = "private-us-test-1a.complex.example.com" + "Owner" = "John Doe" + "foo/bar" = "fib+baz" + "kubernetes.io/cluster/complex.example.com" = "owned" + "kubernetes.io/kops/role" = "private-us-test-1a" + } + vpc_id = aws_vpc.complex-example-com.id +} + resource "aws_route" "route-0-0-0-0--0" { destination_cidr_block = "0.0.0.0/0" gateway_id = aws_internet_gateway.complex-example-com.id route_table_id = aws_route_table.complex-example-com.id } +resource "aws_route" "route-private-us-test-1a-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + route_table_id = aws_route_table.private-us-test-1a-complex-example-com.id + transit_gateway_id = "tgw-123456" +} + resource "aws_security_group_rule" "from-1-1-1-0--24-ingress-tcp-443to443-masters-complex-example-com" { cidr_blocks = ["1.1.1.0/24"] from_port = 443 @@ -860,6 +903,36 @@ resource "aws_security_group" "nodes-complex-example-com" { vpc_id = aws_vpc.complex-example-com.id } +resource "aws_subnet" "us-east-1a-private-complex-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.64.0/19" + tags = { + "KubernetesCluster" = "complex.example.com" + "Name" = "us-east-1a-private.complex.example.com" + "Owner" = "John Doe" + "SubnetType" = "Private" + "foo/bar" = "fib+baz" + "kubernetes.io/cluster/complex.example.com" = "owned" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.complex-example-com.id +} + +resource "aws_subnet" "us-east-1a-utility-complex-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.96.0/19" + tags = { + "KubernetesCluster" = "complex.example.com" + "Name" = "us-east-1a-utility.complex.example.com" + "Owner" = "John Doe" + "SubnetType" = "Utility" + "foo/bar" = "fib+baz" + "kubernetes.io/cluster/complex.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + } + vpc_id = aws_vpc.complex-example-com.id +} + resource "aws_subnet" "us-test-1a-complex-example-com" { availability_zone = "us-test-1a" cidr_block = "172.20.32.0/19"