Merge pull request #5979 from a8m/master

terraform: Fix resource formatting for IPv6 CIDRs
This commit is contained in:
k8s-ci-robot 2018-10-20 12:15:52 -07:00 committed by GitHub
commit ad25ff7812
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 581 additions and 4 deletions

View File

@@ -57,6 +57,11 @@ func TestMinimal(t *testing.T) {
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true, nil)
}
// TestRestrictAccess runs the test on a simple SG configuration with restricted
// SSH and API access, similar to:
//   kops create cluster restrictaccess.example.com --ssh-access=$(IPS) --admin-access=$(IPS)
// (a single master is used here; the access lists include an IPv6 CIDR to
// exercise Terraform name sanitization of ":" characters)
func TestRestrictAccess(t *testing.T) {
runTestAWS(t, "restrictaccess.example.com", "restrict_access", "v1alpha2", false, 1, true, nil)
}
// TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
func TestHA(t *testing.T) {
runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true, nil)

View File

@@ -90,7 +90,7 @@ resource "aws_autoscaling_attachment" "extlb-my-other-elb-master-us-test-1a" {
autoscaling_group_name = "${aws_autoscaling_group.master-us-test-1a-masters-externallb-example-com.id}"
}
-resource "aws_autoscaling_attachment" "exttg-aws:my-tg--0123456789abcdef-master-us-test-1a" {
+resource "aws_autoscaling_attachment" "exttg-aws_my-tg--0123456789abcdef-master-us-test-1a" {
alb_target_group_arn = "aws:arn:elasticloadbalancing:us-test-1a:123456789012:targetgroup/my-tg/0123456789abcdef"
autoscaling_group_name = "${aws_autoscaling_group.master-us-test-1a-masters-externallb-example-com.id}"
}

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@@ -0,0 +1,79 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: restrictaccess.example.com
spec:
kubernetesApiAccess:
- 1.1.1.1/0
- 2001:0:85a3::/40
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/restrictaccess.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
kubernetesVersion: v1.8.0
masterInternalName: api.internal.restrictaccess.example.com
masterPublicName: api.restrictaccess.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 1.1.1.1/0
- 2001:0:85a3::/40
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: restrictaccess.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: restrictaccess.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a

View File

@@ -0,0 +1,494 @@
locals = {
cluster_name = "restrictaccess.example.com"
master_autoscaling_group_ids = ["${aws_autoscaling_group.master-us-test-1a-masters-restrictaccess-example-com.id}"]
master_security_group_ids = ["${aws_security_group.masters-restrictaccess-example-com.id}"]
masters_role_arn = "${aws_iam_role.masters-restrictaccess-example-com.arn}"
masters_role_name = "${aws_iam_role.masters-restrictaccess-example-com.name}"
node_autoscaling_group_ids = ["${aws_autoscaling_group.nodes-restrictaccess-example-com.id}"]
node_security_group_ids = ["${aws_security_group.nodes-restrictaccess-example-com.id}"]
node_subnet_ids = ["${aws_subnet.us-test-1a-restrictaccess-example-com.id}"]
nodes_role_arn = "${aws_iam_role.nodes-restrictaccess-example-com.arn}"
nodes_role_name = "${aws_iam_role.nodes-restrictaccess-example-com.name}"
region = "us-test-1"
route_table_public_id = "${aws_route_table.restrictaccess-example-com.id}"
subnet_us-test-1a_id = "${aws_subnet.us-test-1a-restrictaccess-example-com.id}"
vpc_cidr_block = "${aws_vpc.restrictaccess-example-com.cidr_block}"
vpc_id = "${aws_vpc.restrictaccess-example-com.id}"
}
output "cluster_name" {
value = "restrictaccess.example.com"
}
output "master_autoscaling_group_ids" {
value = ["${aws_autoscaling_group.master-us-test-1a-masters-restrictaccess-example-com.id}"]
}
output "master_security_group_ids" {
value = ["${aws_security_group.masters-restrictaccess-example-com.id}"]
}
output "masters_role_arn" {
value = "${aws_iam_role.masters-restrictaccess-example-com.arn}"
}
output "masters_role_name" {
value = "${aws_iam_role.masters-restrictaccess-example-com.name}"
}
output "node_autoscaling_group_ids" {
value = ["${aws_autoscaling_group.nodes-restrictaccess-example-com.id}"]
}
output "node_security_group_ids" {
value = ["${aws_security_group.nodes-restrictaccess-example-com.id}"]
}
output "node_subnet_ids" {
value = ["${aws_subnet.us-test-1a-restrictaccess-example-com.id}"]
}
output "nodes_role_arn" {
value = "${aws_iam_role.nodes-restrictaccess-example-com.arn}"
}
output "nodes_role_name" {
value = "${aws_iam_role.nodes-restrictaccess-example-com.name}"
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = "${aws_route_table.restrictaccess-example-com.id}"
}
output "subnet_us-test-1a_id" {
value = "${aws_subnet.us-test-1a-restrictaccess-example-com.id}"
}
output "vpc_cidr_block" {
value = "${aws_vpc.restrictaccess-example-com.cidr_block}"
}
output "vpc_id" {
value = "${aws_vpc.restrictaccess-example-com.id}"
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-restrictaccess-example-com" {
name = "master-us-test-1a.masters.restrictaccess.example.com"
launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-restrictaccess-example-com.id}"
max_size = 1
min_size = 1
vpc_zone_identifier = ["${aws_subnet.us-test-1a-restrictaccess-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "restrictaccess.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "master-us-test-1a.masters.restrictaccess.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/master"
value = "1"
propagate_at_launch = true
}
metrics_granularity = "1Minute"
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
}
resource "aws_autoscaling_group" "nodes-restrictaccess-example-com" {
name = "nodes.restrictaccess.example.com"
launch_configuration = "${aws_launch_configuration.nodes-restrictaccess-example-com.id}"
max_size = 2
min_size = 2
vpc_zone_identifier = ["${aws_subnet.us-test-1a-restrictaccess-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "restrictaccess.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "nodes.restrictaccess.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/node"
value = "1"
propagate_at_launch = true
}
metrics_granularity = "1Minute"
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-restrictaccess-example-com" {
availability_zone = "us-test-1a"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "us-test-1a.etcd-events.restrictaccess.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-restrictaccess-example-com" {
availability_zone = "us-test-1a"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "us-test-1a.etcd-main.restrictaccess.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "masters-restrictaccess-example-com" {
name = "masters.restrictaccess.example.com"
role = "${aws_iam_role.masters-restrictaccess-example-com.name}"
}
resource "aws_iam_instance_profile" "nodes-restrictaccess-example-com" {
name = "nodes.restrictaccess.example.com"
role = "${aws_iam_role.nodes-restrictaccess-example-com.name}"
}
resource "aws_iam_role" "masters-restrictaccess-example-com" {
name = "masters.restrictaccess.example.com"
assume_role_policy = "${file("${path.module}/data/aws_iam_role_masters.restrictaccess.example.com_policy")}"
}
resource "aws_iam_role" "nodes-restrictaccess-example-com" {
name = "nodes.restrictaccess.example.com"
assume_role_policy = "${file("${path.module}/data/aws_iam_role_nodes.restrictaccess.example.com_policy")}"
}
resource "aws_iam_role_policy" "masters-restrictaccess-example-com" {
name = "masters.restrictaccess.example.com"
role = "${aws_iam_role.masters-restrictaccess-example-com.name}"
policy = "${file("${path.module}/data/aws_iam_role_policy_masters.restrictaccess.example.com_policy")}"
}
resource "aws_iam_role_policy" "nodes-restrictaccess-example-com" {
name = "nodes.restrictaccess.example.com"
role = "${aws_iam_role.nodes-restrictaccess-example-com.name}"
policy = "${file("${path.module}/data/aws_iam_role_policy_nodes.restrictaccess.example.com_policy")}"
}
resource "aws_internet_gateway" "restrictaccess-example-com" {
vpc_id = "${aws_vpc.restrictaccess-example-com.id}"
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
resource "aws_key_pair" "kubernetes-restrictaccess-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.restrictaccess.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.restrictaccess.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")}"
}
resource "aws_launch_configuration" "master-us-test-1a-masters-restrictaccess-example-com" {
name_prefix = "master-us-test-1a.masters.restrictaccess.example.com-"
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = "${aws_key_pair.kubernetes-restrictaccess-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.masters-restrictaccess-example-com.id}"
security_groups = ["${aws_security_group.masters-restrictaccess-example-com.id}"]
associate_public_ip_address = true
user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.restrictaccess.example.com_user_data")}"
root_block_device = {
volume_type = "gp2"
volume_size = 64
delete_on_termination = true
}
ephemeral_block_device = {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
lifecycle = {
create_before_destroy = true
}
enable_monitoring = false
}
resource "aws_launch_configuration" "nodes-restrictaccess-example-com" {
name_prefix = "nodes.restrictaccess.example.com-"
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = "${aws_key_pair.kubernetes-restrictaccess-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.nodes-restrictaccess-example-com.id}"
security_groups = ["${aws_security_group.nodes-restrictaccess-example-com.id}"]
associate_public_ip_address = true
user_data = "${file("${path.module}/data/aws_launch_configuration_nodes.restrictaccess.example.com_user_data")}"
root_block_device = {
volume_type = "gp2"
volume_size = 128
delete_on_termination = true
}
lifecycle = {
create_before_destroy = true
}
enable_monitoring = false
}
resource "aws_route" "0-0-0-0--0" {
route_table_id = "${aws_route_table.restrictaccess-example-com.id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.restrictaccess-example-com.id}"
}
resource "aws_route_table" "restrictaccess-example-com" {
vpc_id = "${aws_vpc.restrictaccess-example-com.id}"
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
}
resource "aws_route_table_association" "us-test-1a-restrictaccess-example-com" {
subnet_id = "${aws_subnet.us-test-1a-restrictaccess-example-com.id}"
route_table_id = "${aws_route_table.restrictaccess-example-com.id}"
}
resource "aws_security_group" "masters-restrictaccess-example-com" {
name = "masters.restrictaccess.example.com"
vpc_id = "${aws_vpc.restrictaccess-example-com.id}"
description = "Security group for masters"
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "masters.restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
resource "aws_security_group" "nodes-restrictaccess-example-com" {
name = "nodes.restrictaccess.example.com"
vpc_id = "${aws_vpc.restrictaccess-example-com.id}"
description = "Security group for nodes"
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "nodes.restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
resource "aws_security_group_rule" "all-master-to-master" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
source_security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "all-master-to-node" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
source_security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "all-node-to-node" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "https-external-to-master-1-1-1-1--0" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["1.1.1.1/0"]
}
resource "aws_security_group_rule" "https-external-to-master-2001_0_85a3__--40" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["2001:0:85a3::/40"]
}
resource "aws_security_group_rule" "master-egress" {
type = "egress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "node-egress" {
type = "egress"
security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
from_port = 2382
to_port = 4000
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
from_port = 4003
to_port = 65535
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
from_port = 1
to_port = 65535
protocol = "udp"
}
resource "aws_security_group_rule" "ssh-external-to-master-1-1-1-1--0" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["1.1.1.1/0"]
}
resource "aws_security_group_rule" "ssh-external-to-master-2001_0_85a3__--40" {
type = "ingress"
security_group_id = "${aws_security_group.masters-restrictaccess-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["2001:0:85a3::/40"]
}
resource "aws_security_group_rule" "ssh-external-to-node-1-1-1-1--0" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["1.1.1.1/0"]
}
resource "aws_security_group_rule" "ssh-external-to-node-2001_0_85a3__--40" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-restrictaccess-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["2001:0:85a3::/40"]
}
resource "aws_subnet" "us-test-1a-restrictaccess-example-com" {
vpc_id = "${aws_vpc.restrictaccess-example-com.id}"
cidr_block = "172.20.32.0/19"
availability_zone = "us-test-1a"
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "us-test-1a.restrictaccess.example.com"
SubnetType = "Public"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
}
resource "aws_vpc" "restrictaccess-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options" "restrictaccess-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
KubernetesCluster = "restrictaccess.example.com"
Name = "restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options_association" "restrictaccess-example-com" {
vpc_id = "${aws_vpc.restrictaccess-example-com.id}"
dhcp_options_id = "${aws_vpc_dhcp_options.restrictaccess-example-com.id}"
}
terraform = {
required_version = ">= 0.9.3"
}

View File

@@ -82,9 +82,7 @@ type terraformOutputVariable struct {
// tfSanitize converts a name into a valid Terraform identifier.
// A TF name can't have dots in it (if we want to refer to it from a literal),
// so we replace them; "/" and ":" (e.g. from ARNs or IPv6 CIDRs) are likewise
// invalid in TF identifiers and are mapped to "--" and "_" respectively.
func tfSanitize(name string) string {
	// A single Replacer applies all substitutions in one pass over the string,
	// replacing the previous chain of strings.Replace calls.
	return strings.NewReplacer(".", "-", "/", "--", ":", "_").Replace(name)
}
func (t *TerraformTarget) AddFile(resourceType string, resourceName string, key string, r fi.Resource) (*Literal, error) {