Merge pull request #1935 from justinsb/terraform_variable_output

Output variables from terraform, for reuse in a module
This commit is contained in:
Justin Santa Barbara 2017-02-24 09:06:20 -05:00 committed by GitHub
commit bf2edddb8d
18 changed files with 1170 additions and 21 deletions

View File

@@ -45,9 +45,15 @@ import (
// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimal(t *testing.T) {
runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha0", false)
runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha1", false)
runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha2", false)
runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha0", false, 1)
runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha1", false, 1)
runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha2", false, 1)
}
// TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
func TestHA(t *testing.T) {
runTest(t, "ha.example.com", "../../tests/integration/ha", "v1alpha1", false, 3)
runTest(t, "ha.example.com", "../../tests/integration/ha", "v1alpha2", false, 3)
}
// TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
@@ -59,34 +65,34 @@ func TestMinimalCloudformation(t *testing.T) {
// TestMinimal_141 runs the test on a configuration from 1.4.1 release
func TestMinimal_141(t *testing.T) {
runTest(t, "minimal-141.example.com", "../../tests/integration/minimal-141", "v1alpha0", false)
runTest(t, "minimal-141.example.com", "../../tests/integration/minimal-141", "v1alpha0", false, 1)
}
// TestPrivateWeave runs the test on a configuration with private topology, weave networking
func TestPrivateWeave(t *testing.T) {
runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha1", true)
runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha2", true)
runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha1", true, 1)
runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha2", true, 1)
}
// TestPrivateFlannel runs the test on a configuration with private topology, flannel networking
func TestPrivateFlannel(t *testing.T) {
runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha1", true)
runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha2", true)
runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha1", true, 1)
runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha2", true, 1)
}
// TestPrivateCalico runs the test on a configuration with private topology, calico networking
func TestPrivateCalico(t *testing.T) {
runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha1", true)
runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha2", true)
runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha1", true, 1)
runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha2", true, 1)
}
// TestPrivateCanal runs the test on a configuration with private topology, canal networking
func TestPrivateCanal(t *testing.T) {
runTest(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha1", true)
runTest(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha2", true)
runTest(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha1", true, 1)
runTest(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha2", true, 1)
}
func runTest(t *testing.T, clusterName string, srcDir string, version string, private bool) {
func runTest(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
var stdout bytes.Buffer
inputYAML := "in-" + version + ".yaml"
@@ -194,10 +200,15 @@ func runTest(t *testing.T, clusterName string, srcDir string, version string, pr
"aws_iam_role_policy_masters." + clusterName + "_policy",
"aws_iam_role_policy_nodes." + clusterName + "_policy",
"aws_key_pair_kubernetes." + clusterName + "-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key",
"aws_launch_configuration_master-us-test-1a.masters." + clusterName + "_user_data",
"aws_launch_configuration_nodes." + clusterName + "_user_data",
}
for i := 0; i < zones; i++ {
zone := "us-test-1" + string([]byte{byte('a') + byte(i)})
s := "aws_launch_configuration_master-" + zone + ".masters." + clusterName + "_user_data"
expectedFilenames = append(expectedFilenames, s)
}
if private {
expectedFilenames = append(expectedFilenames, []string{
"aws_iam_role_bastions." + clusterName + "_policy",

View File

@@ -18,6 +18,7 @@ package model
import (
"fmt"
"net"
"strings"
"github.com/blang/semver"
@@ -26,7 +27,7 @@ import (
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/model/components"
"net"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
)
type KopsModelContext struct {
@@ -148,15 +149,15 @@ func (m *KopsModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) (ma
// The system tags take priority because the cluster likely breaks without them...
if ig.Spec.Role == kops.InstanceGroupRoleMaster {
labels["k8s.io/role/master"] = "1"
labels[awstasks.CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleMaster))] = "1"
}
if ig.Spec.Role == kops.InstanceGroupRoleNode {
labels["k8s.io/role/node"] = "1"
labels[awstasks.CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleNode))] = "1"
}
if ig.Spec.Role == kops.InstanceGroupRoleBastion {
labels["k8s.io/role/bastion"] = "1"
labels[awstasks.CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleBastion))] = "1"
}
return labels, nil

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@@ -0,0 +1,122 @@
apiVersion: kops/v1alpha1
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: ha.example.com
spec:
adminAccess:
- 0.0.0.0/0
api:
dns: {}
channel: stable
cloudProvider: aws
configBase: memfs://tests/ha.example.com
etcdClusters:
- etcdMembers:
- name: a
zone: us-test-1a
- name: b
zone: us-test-1b
- name: c
zone: us-test-1c
name: main
- etcdMembers:
- name: a
zone: us-test-1a
- name: b
zone: us-test-1b
- name: c
zone: us-test-1c
name: events
kubernetesVersion: v1.4.8
masterPublicName: api.ha.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
topology:
dns:
type: Public
masters: public
nodes: public
zones:
- cidr: 172.20.32.0/19
name: us-test-1a
- cidr: 172.20.64.0/19
name: us-test-1b
- cidr: 172.20.96.0/19
name: us-test-1c
---
apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: ha.example.com
name: master-us-test-1a
spec:
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
zones:
- us-test-1a
---
apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: ha.example.com
name: master-us-test-1b
spec:
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
zones:
- us-test-1b
---
apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: ha.example.com
name: master-us-test-1c
spec:
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
zones:
- us-test-1c
---
apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: ha.example.com
name: nodes
spec:
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
zones:
- us-test-1a
- us-test-1b
- us-test-1c

View File

@@ -0,0 +1,130 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: ha.example.com
spec:
api:
dns: {}
channel: stable
cloudProvider: aws
configBase: memfs://tests/ha.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: a
- instanceGroup: master-us-test-1b
name: b
- instanceGroup: master-us-test-1c
name: c
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: a
- instanceGroup: master-us-test-1b
name: b
- instanceGroup: master-us-test-1c
name: c
name: events
kubernetesApiAccess:
- 0.0.0.0/0
kubernetesVersion: v1.4.8
masterPublicName: api.ha.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
- cidr: 172.20.64.0/19
name: us-test-1b
type: Public
zone: us-test-1b
- cidr: 172.20.96.0/19
name: us-test-1c
type: Public
zone: us-test-1c
topology:
dns:
type: Public
masters: public
nodes: public
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: ha.example.com
name: master-us-test-1a
spec:
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: ha.example.com
name: master-us-test-1b
spec:
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1b
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: ha.example.com
name: master-us-test-1c
spec:
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1c
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: ha.example.com
name: nodes
spec:
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
- us-test-1b
- us-test-1c

View File

@@ -0,0 +1,557 @@
output "cluster_name" {
value = "ha.example.com"
}
output "master_security_group_ids" {
value = ["${aws_security_group.masters-ha-example-com.id}"]
}
output "node_security_group_ids" {
value = ["${aws_security_group.nodes-ha-example-com.id}"]
}
output "node_subnet_ids" {
value = ["${aws_subnet.us-test-1a-ha-example-com.id}", "${aws_subnet.us-test-1b-ha-example-com.id}", "${aws_subnet.us-test-1c-ha-example-com.id}"]
}
output "region" {
value = "us-test-1"
}
output "vpc_id" {
value = "${aws_vpc.ha-example-com.id}"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-ha-example-com" {
name = "master-us-test-1a.masters.ha.example.com"
launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-ha-example-com.id}"
max_size = 1
min_size = 1
vpc_zone_identifier = ["${aws_subnet.us-test-1a-ha-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "ha.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "master-us-test-1a.masters.ha.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/master"
value = "1"
propagate_at_launch = true
}
}
resource "aws_autoscaling_group" "master-us-test-1b-masters-ha-example-com" {
name = "master-us-test-1b.masters.ha.example.com"
launch_configuration = "${aws_launch_configuration.master-us-test-1b-masters-ha-example-com.id}"
max_size = 1
min_size = 1
vpc_zone_identifier = ["${aws_subnet.us-test-1b-ha-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "ha.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "master-us-test-1b.masters.ha.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/master"
value = "1"
propagate_at_launch = true
}
}
resource "aws_autoscaling_group" "master-us-test-1c-masters-ha-example-com" {
name = "master-us-test-1c.masters.ha.example.com"
launch_configuration = "${aws_launch_configuration.master-us-test-1c-masters-ha-example-com.id}"
max_size = 1
min_size = 1
vpc_zone_identifier = ["${aws_subnet.us-test-1c-ha-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "ha.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "master-us-test-1c.masters.ha.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/master"
value = "1"
propagate_at_launch = true
}
}
resource "aws_autoscaling_group" "nodes-ha-example-com" {
name = "nodes.ha.example.com"
launch_configuration = "${aws_launch_configuration.nodes-ha-example-com.id}"
max_size = 2
min_size = 2
vpc_zone_identifier = ["${aws_subnet.us-test-1a-ha-example-com.id}", "${aws_subnet.us-test-1b-ha-example-com.id}", "${aws_subnet.us-test-1c-ha-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "ha.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "nodes.ha.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/node"
value = "1"
propagate_at_launch = true
}
}
resource "aws_ebs_volume" "a-etcd-events-ha-example-com" {
availability_zone = "us-test-1a"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "ha.example.com"
Name = "a.etcd-events.ha.example.com"
"k8s.io/etcd/events" = "a/a,b,c"
"k8s.io/role/master" = "1"
}
}
resource "aws_ebs_volume" "a-etcd-main-ha-example-com" {
availability_zone = "us-test-1a"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "ha.example.com"
Name = "a.etcd-main.ha.example.com"
"k8s.io/etcd/main" = "a/a,b,c"
"k8s.io/role/master" = "1"
}
}
resource "aws_ebs_volume" "b-etcd-events-ha-example-com" {
availability_zone = "us-test-1b"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "ha.example.com"
Name = "b.etcd-events.ha.example.com"
"k8s.io/etcd/events" = "b/a,b,c"
"k8s.io/role/master" = "1"
}
}
resource "aws_ebs_volume" "b-etcd-main-ha-example-com" {
availability_zone = "us-test-1b"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "ha.example.com"
Name = "b.etcd-main.ha.example.com"
"k8s.io/etcd/main" = "b/a,b,c"
"k8s.io/role/master" = "1"
}
}
resource "aws_ebs_volume" "c-etcd-events-ha-example-com" {
availability_zone = "us-test-1c"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "ha.example.com"
Name = "c.etcd-events.ha.example.com"
"k8s.io/etcd/events" = "c/a,b,c"
"k8s.io/role/master" = "1"
}
}
resource "aws_ebs_volume" "c-etcd-main-ha-example-com" {
availability_zone = "us-test-1c"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "ha.example.com"
Name = "c.etcd-main.ha.example.com"
"k8s.io/etcd/main" = "c/a,b,c"
"k8s.io/role/master" = "1"
}
}
resource "aws_iam_instance_profile" "masters-ha-example-com" {
name = "masters.ha.example.com"
roles = ["${aws_iam_role.masters-ha-example-com.name}"]
}
resource "aws_iam_instance_profile" "nodes-ha-example-com" {
name = "nodes.ha.example.com"
roles = ["${aws_iam_role.nodes-ha-example-com.name}"]
}
resource "aws_iam_role" "masters-ha-example-com" {
name = "masters.ha.example.com"
assume_role_policy = "${file("${path.module}/data/aws_iam_role_masters.ha.example.com_policy")}"
}
resource "aws_iam_role" "nodes-ha-example-com" {
name = "nodes.ha.example.com"
assume_role_policy = "${file("${path.module}/data/aws_iam_role_nodes.ha.example.com_policy")}"
}
resource "aws_iam_role_policy" "masters-ha-example-com" {
name = "masters.ha.example.com"
role = "${aws_iam_role.masters-ha-example-com.name}"
policy = "${file("${path.module}/data/aws_iam_role_policy_masters.ha.example.com_policy")}"
}
resource "aws_iam_role_policy" "nodes-ha-example-com" {
name = "nodes.ha.example.com"
role = "${aws_iam_role.nodes-ha-example-com.name}"
policy = "${file("${path.module}/data/aws_iam_role_policy_nodes.ha.example.com_policy")}"
}
resource "aws_internet_gateway" "ha-example-com" {
vpc_id = "${aws_vpc.ha-example-com.id}"
tags = {
KubernetesCluster = "ha.example.com"
Name = "ha.example.com"
}
}
resource "aws_key_pair" "kubernetes-ha-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.ha.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.ha.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")}"
}
resource "aws_launch_configuration" "master-us-test-1a-masters-ha-example-com" {
name_prefix = "master-us-test-1a.masters.ha.example.com-"
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = "${aws_key_pair.kubernetes-ha-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.masters-ha-example-com.id}"
security_groups = ["${aws_security_group.masters-ha-example-com.id}"]
associate_public_ip_address = true
user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.ha.example.com_user_data")}"
root_block_device = {
volume_type = "gp2"
volume_size = 20
delete_on_termination = true
}
ephemeral_block_device = {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
lifecycle = {
create_before_destroy = true
}
}
resource "aws_launch_configuration" "master-us-test-1b-masters-ha-example-com" {
name_prefix = "master-us-test-1b.masters.ha.example.com-"
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = "${aws_key_pair.kubernetes-ha-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.masters-ha-example-com.id}"
security_groups = ["${aws_security_group.masters-ha-example-com.id}"]
associate_public_ip_address = true
user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1b.masters.ha.example.com_user_data")}"
root_block_device = {
volume_type = "gp2"
volume_size = 20
delete_on_termination = true
}
ephemeral_block_device = {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
lifecycle = {
create_before_destroy = true
}
}
resource "aws_launch_configuration" "master-us-test-1c-masters-ha-example-com" {
name_prefix = "master-us-test-1c.masters.ha.example.com-"
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = "${aws_key_pair.kubernetes-ha-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.masters-ha-example-com.id}"
security_groups = ["${aws_security_group.masters-ha-example-com.id}"]
associate_public_ip_address = true
user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1c.masters.ha.example.com_user_data")}"
root_block_device = {
volume_type = "gp2"
volume_size = 20
delete_on_termination = true
}
ephemeral_block_device = {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
lifecycle = {
create_before_destroy = true
}
}
resource "aws_launch_configuration" "nodes-ha-example-com" {
name_prefix = "nodes.ha.example.com-"
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = "${aws_key_pair.kubernetes-ha-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.nodes-ha-example-com.id}"
security_groups = ["${aws_security_group.nodes-ha-example-com.id}"]
associate_public_ip_address = true
user_data = "${file("${path.module}/data/aws_launch_configuration_nodes.ha.example.com_user_data")}"
root_block_device = {
volume_type = "gp2"
volume_size = 20
delete_on_termination = true
}
lifecycle = {
create_before_destroy = true
}
}
resource "aws_route" "0-0-0-0--0" {
route_table_id = "${aws_route_table.ha-example-com.id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.ha-example-com.id}"
}
resource "aws_route_table" "ha-example-com" {
vpc_id = "${aws_vpc.ha-example-com.id}"
tags = {
KubernetesCluster = "ha.example.com"
Name = "ha.example.com"
}
}
resource "aws_route_table_association" "us-test-1a-ha-example-com" {
subnet_id = "${aws_subnet.us-test-1a-ha-example-com.id}"
route_table_id = "${aws_route_table.ha-example-com.id}"
}
resource "aws_route_table_association" "us-test-1b-ha-example-com" {
subnet_id = "${aws_subnet.us-test-1b-ha-example-com.id}"
route_table_id = "${aws_route_table.ha-example-com.id}"
}
resource "aws_route_table_association" "us-test-1c-ha-example-com" {
subnet_id = "${aws_subnet.us-test-1c-ha-example-com.id}"
route_table_id = "${aws_route_table.ha-example-com.id}"
}
resource "aws_security_group" "masters-ha-example-com" {
name = "masters.ha.example.com"
vpc_id = "${aws_vpc.ha-example-com.id}"
description = "Security group for masters"
tags = {
KubernetesCluster = "ha.example.com"
Name = "masters.ha.example.com"
}
}
resource "aws_security_group" "nodes-ha-example-com" {
name = "nodes.ha.example.com"
vpc_id = "${aws_vpc.ha-example-com.id}"
description = "Security group for nodes"
tags = {
KubernetesCluster = "ha.example.com"
Name = "nodes.ha.example.com"
}
}
resource "aws_security_group_rule" "all-master-to-master" {
type = "ingress"
security_group_id = "${aws_security_group.masters-ha-example-com.id}"
source_security_group_id = "${aws_security_group.masters-ha-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "all-master-to-node" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
source_security_group_id = "${aws_security_group.masters-ha-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "all-node-to-node" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "https-external-to-master-0-0-0-0--0" {
type = "ingress"
security_group_id = "${aws_security_group.masters-ha-example-com.id}"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "master-egress" {
type = "egress"
security_group_id = "${aws_security_group.masters-ha-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "node-egress" {
type = "egress"
security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "node-to-master-tcp-4194" {
type = "ingress"
security_group_id = "${aws_security_group.masters-ha-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
from_port = 4194
to_port = 4194
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-443" {
type = "ingress"
security_group_id = "${aws_security_group.masters-ha-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
from_port = 443
to_port = 443
protocol = "tcp"
}
resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
type = "ingress"
security_group_id = "${aws_security_group.masters-ha-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "ssh-external-to-node-0-0-0-0--0" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_subnet" "us-test-1a-ha-example-com" {
vpc_id = "${aws_vpc.ha-example-com.id}"
cidr_block = "172.20.32.0/19"
availability_zone = "us-test-1a"
tags = {
KubernetesCluster = "ha.example.com"
Name = "us-test-1a.ha.example.com"
}
}
resource "aws_subnet" "us-test-1b-ha-example-com" {
vpc_id = "${aws_vpc.ha-example-com.id}"
cidr_block = "172.20.64.0/19"
availability_zone = "us-test-1b"
tags = {
KubernetesCluster = "ha.example.com"
Name = "us-test-1b.ha.example.com"
}
}
resource "aws_subnet" "us-test-1c-ha-example-com" {
vpc_id = "${aws_vpc.ha-example-com.id}"
cidr_block = "172.20.96.0/19"
availability_zone = "us-test-1c"
tags = {
KubernetesCluster = "ha.example.com"
Name = "us-test-1c.ha.example.com"
}
}
resource "aws_vpc" "ha-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
KubernetesCluster = "ha.example.com"
Name = "ha.example.com"
}
}
resource "aws_vpc_dhcp_options" "ha-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
KubernetesCluster = "ha.example.com"
Name = "ha.example.com"
}
}
resource "aws_vpc_dhcp_options_association" "ha-example-com" {
vpc_id = "${aws_vpc.ha-example-com.id}"
dhcp_options_id = "${aws_vpc_dhcp_options.ha-example-com.id}"
}

View File

@@ -1,3 +1,27 @@
output "cluster_name" {
value = "minimal-141.example.com"
}
output "master_security_group_ids" {
value = ["${aws_security_group.masters-minimal-141-example-com.id}"]
}
output "node_security_group_ids" {
value = ["${aws_security_group.nodes-minimal-141-example-com.id}"]
}
output "node_subnet_ids" {
value = ["${aws_subnet.us-test-1a-minimal-141-example-com.id}"]
}
output "region" {
value = "us-test-1"
}
output "vpc_id" {
value = "${aws_vpc.minimal-141-example-com.id}"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-141-example-com" {
name = "master-us-test-1a.masters.minimal-141.example.com"
launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-minimal-141-example-com.id}"

View File

@@ -1,3 +1,27 @@
output "cluster_name" {
value = "minimal.example.com"
}
output "master_security_group_ids" {
value = ["${aws_security_group.masters-minimal-example-com.id}"]
}
output "node_security_group_ids" {
value = ["${aws_security_group.nodes-minimal-example-com.id}"]
}
output "node_subnet_ids" {
value = ["${aws_subnet.us-test-1a-minimal-example-com.id}"]
}
output "region" {
value = "us-test-1"
}
output "vpc_id" {
value = "${aws_vpc.minimal-example-com.id}"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" {
name = "master-us-test-1a.masters.minimal.example.com"
launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-minimal-example-com.id}"

View File

@@ -1,3 +1,31 @@
output "bastion_security_group_ids" {
value = ["${aws_security_group.bastion-privatecalico-example-com.id}"]
}
output "cluster_name" {
value = "privatecalico.example.com"
}
output "master_security_group_ids" {
value = ["${aws_security_group.masters-privatecalico-example-com.id}"]
}
output "node_security_group_ids" {
value = ["${aws_security_group.nodes-privatecalico-example-com.id}"]
}
output "node_subnet_ids" {
value = ["${aws_subnet.us-test-1a-privatecalico-example-com.id}"]
}
output "region" {
value = "us-test-1"
}
output "vpc_id" {
value = "${aws_vpc.privatecalico-example-com.id}"
}
resource "aws_autoscaling_attachment" "bastion-privatecalico-example-com" {
elb = "${aws_elb.bastion-privatecalico-example-com.id}"
autoscaling_group_name = "${aws_autoscaling_group.bastion-privatecalico-example-com.id}"

View File

@@ -1,3 +1,31 @@
output "bastion_security_group_ids" {
value = ["${aws_security_group.bastion-privatecanal-example-com.id}"]
}
output "cluster_name" {
value = "privatecanal.example.com"
}
output "master_security_group_ids" {
value = ["${aws_security_group.masters-privatecanal-example-com.id}"]
}
output "node_security_group_ids" {
value = ["${aws_security_group.nodes-privatecanal-example-com.id}"]
}
output "node_subnet_ids" {
value = ["${aws_subnet.us-test-1a-privatecanal-example-com.id}"]
}
output "region" {
value = "us-test-1"
}
output "vpc_id" {
value = "${aws_vpc.privatecanal-example-com.id}"
}
resource "aws_autoscaling_attachment" "bastion-privatecanal-example-com" {
elb = "${aws_elb.bastion-privatecanal-example-com.id}"
autoscaling_group_name = "${aws_autoscaling_group.bastion-privatecanal-example-com.id}"

View File

@@ -1,3 +1,31 @@
output "bastion_security_group_ids" {
value = ["${aws_security_group.bastion-privateflannel-example-com.id}"]
}
output "cluster_name" {
value = "privateflannel.example.com"
}
output "master_security_group_ids" {
value = ["${aws_security_group.masters-privateflannel-example-com.id}"]
}
output "node_security_group_ids" {
value = ["${aws_security_group.nodes-privateflannel-example-com.id}"]
}
output "node_subnet_ids" {
value = ["${aws_subnet.us-test-1a-privateflannel-example-com.id}"]
}
output "region" {
value = "us-test-1"
}
output "vpc_id" {
value = "${aws_vpc.privateflannel-example-com.id}"
}
resource "aws_autoscaling_attachment" "bastion-privateflannel-example-com" {
elb = "${aws_elb.bastion-privateflannel-example-com.id}"
autoscaling_group_name = "${aws_autoscaling_group.bastion-privateflannel-example-com.id}"

View File

@@ -1,3 +1,31 @@
output "bastion_security_group_ids" {
value = ["${aws_security_group.bastion-privateweave-example-com.id}"]
}
output "cluster_name" {
value = "privateweave.example.com"
}
output "master_security_group_ids" {
value = ["${aws_security_group.masters-privateweave-example-com.id}"]
}
output "node_security_group_ids" {
value = ["${aws_security_group.nodes-privateweave-example-com.id}"]
}
output "node_subnet_ids" {
value = ["${aws_subnet.us-test-1a-privateweave-example-com.id}"]
}
output "region" {
value = "us-test-1"
}
output "vpc_id" {
value = "${aws_vpc.privateweave-example-com.id}"
}
resource "aws_autoscaling_attachment" "bastion-privateweave-example-com" {
elb = "${aws_elb.bastion-privateweave-example-com.id}"
autoscaling_group_name = "${aws_autoscaling_group.bastion-privateweave-example-com.id}"

View File

@@ -547,7 +547,24 @@ func (c *ApplyClusterCmd) Run() error {
case TargetTerraform:
checkExisting = false
outDir := c.OutDir
target = terraform.NewTerraformTarget(cloud, region, project, outDir)
tf := terraform.NewTerraformTarget(cloud, region, project, outDir)
// We include a few "util" variables in the TF output
if err := tf.AddOutputVariable("region", terraform.LiteralFromStringValue(region)); err != nil {
return err
}
if project != "" {
if err := tf.AddOutputVariable("project", terraform.LiteralFromStringValue(project)); err != nil {
return err
}
}
if err := tf.AddOutputVariable("cluster_name", terraform.LiteralFromStringValue(cluster.ObjectMeta.Name)); err != nil {
return err
}
target = tf
// Can cause conflicts with terraform management
shouldPrecreateDNS = false

View File

@@ -32,6 +32,8 @@ import (
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
)
const CloudTagInstanceGroupRolePrefix = "k8s.io/role/"
//go:generate fitask -type=AutoscalingGroup
type AutoscalingGroup struct {
Name *string
@@ -290,6 +292,34 @@ func (_ *AutoscalingGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, c
})
}
if e.LaunchConfiguration != nil {
// Create TF output variable with security group ids
// This is in the launch configuration, but the ASG has the information about the instance group type
role := ""
for k := range e.Tags {
if strings.HasPrefix(k, CloudTagInstanceGroupRolePrefix) {
suffix := strings.TrimPrefix(k, CloudTagInstanceGroupRolePrefix)
if role != "" && role != suffix {
return fmt.Errorf("Found multiple role tags: %q vs %q", role, suffix)
}
role = suffix
}
}
if role != "" {
for _, sg := range e.LaunchConfiguration.SecurityGroups {
t.AddOutputVariableArray(role+"_security_group_ids", sg.TerraformLink())
}
}
if role == "node" {
for _, s := range e.Subnets {
t.AddOutputVariableArray(role+"_subnet_ids", s.TerraformLink())
}
}
}
return t.RenderResource("aws_autoscaling_group", *e.Name, tf)
}

View File

@ -197,7 +197,7 @@ func (_ *Subnet) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Su
shared := fi.BoolValue(e.Shared)
if shared {
// Not terraform owned / managed
return nil
return t.AddOutputVariableArray("subnet_ids", terraform.LiteralFromStringValue(*e.ID))
}
tf := &terraformSubnet{

View File

@ -201,6 +201,10 @@ type terraformVPC struct {
func (_ *VPC) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *VPC) error {
cloud := t.Cloud.(awsup.AWSCloud)
if err := t.AddOutputVariable("vpc_id", e.TerraformLink()); err != nil {
return err
}
shared := fi.BoolValue(e.Shared)
if shared {
// Not terraform owned / managed

View File

@ -16,7 +16,10 @@ limitations under the License.
package terraform
import "encoding/json"
import (
"encoding/json"
"sort"
)
type Literal struct {
value string
@ -46,3 +49,43 @@ func LiteralProperty(resourceType, resourceName, prop string) *Literal {
func LiteralFromStringValue(s string) *Literal {
return &Literal{value: s}
}
// literalWithJSON pairs a Literal with its JSON encoding; the encoding
// acts as a deterministic sort (and de-duplication) key.
type literalWithJSON struct {
	literal *Literal
	key     string
}

// byKey implements sort.Interface over literalWithJSON entries,
// ordering them by their JSON-encoded key.
type byKey []*literalWithJSON

func (s byKey) Len() int           { return len(s) }
func (s byKey) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byKey) Less(i, j int) bool { return s[i].key < s[j].key }
// sortLiterals returns the literals sorted by their JSON encodings,
// giving the generated terraform output a deterministic order.  If
// dedup is true, literals with identical JSON encodings are collapsed
// to a single entry.  An error is returned if a literal cannot be
// JSON-encoded.
func sortLiterals(v []*Literal, dedup bool) ([]*Literal, error) {
	// Pair each literal with its JSON encoding, which serves as the sort key.
	// Pre-size the slice: the final length is known up front.
	proxies := make([]*literalWithJSON, 0, len(v))
	for _, l := range v {
		k, err := json.Marshal(l)
		if err != nil {
			return nil, err
		}
		proxies = append(proxies, &literalWithJSON{
			literal: l,
			key:     string(k),
		})
	}

	sort.Sort(byKey(proxies))

	// Keep sorted nil when there is no input, so callers that JSON-encode
	// the result see the same output as before.
	var sorted []*Literal
	for i, p := range proxies {
		// After sorting, equal keys are adjacent; skip repeats when deduping.
		if dedup && i != 0 && proxies[i-1].key == p.key {
			continue
		}
		sorted = append(sorted, p.literal)
	}
	return sorted, nil
}

View File

@ -34,12 +34,16 @@ type TerraformTarget struct {
Region string
Project string
ClusterName string
outDir string
// mutex protects the following items (resources & files)
mutex sync.Mutex
// resources is a list of TF items that should be created
resources []*terraformResource
// outputs is a list of our TF output variables
outputs map[string]*terraformOutputVariable
// files is a map of TF resource files that should be created
files map[string][]byte
}
@ -49,8 +53,10 @@ func NewTerraformTarget(cloud fi.Cloud, region, project string, outDir string) *
Cloud: cloud,
Region: region,
Project: project,
outDir: outDir,
files: make(map[string][]byte),
outputs: make(map[string]*terraformOutputVariable),
}
}
@ -62,6 +68,12 @@ type terraformResource struct {
Item interface{}
}
// terraformOutputVariable is a single terraform output variable that will
// be emitted in the generated configuration.  Exactly one of Value or
// ValueArray is populated: Value for a scalar output (AddOutputVariable),
// ValueArray for a list output (AddOutputVariableArray).
type terraformOutputVariable struct {
	// Key is the (unsanitized) name of the output variable
	Key string
	// Value holds the value of a scalar output variable
	Value *Literal
	// ValueArray holds the values of a list output variable
	ValueArray []*Literal
}
// A TF name can't have dots in it (if we want to refer to it from a literal),
// so we replace them
func tfSanitize(name string) string {
@ -108,6 +120,42 @@ func (t *TerraformTarget) RenderResource(resourceType string, resourceName strin
return nil
}
// AddOutputVariable registers a scalar terraform output variable under
// the given key.  It returns an error if a variable with the same key
// has already been registered.
func (t *TerraformTarget) AddOutputVariable(key string, literal *Literal) error {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	if _, found := t.outputs[key]; found {
		return fmt.Errorf("duplicate variable: %q", key)
	}

	t.outputs[key] = &terraformOutputVariable{
		Key:   key,
		Value: literal,
	}
	return nil
}
// AddOutputVariableArray appends a value to the list-valued terraform
// output variable under the given key, creating the variable on first
// use.  It returns an error if the key is already registered as a
// scalar output.
func (t *TerraformTarget) AddOutputVariableArray(key string, literal *Literal) error {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	v, found := t.outputs[key]
	if !found {
		v = &terraformOutputVariable{Key: key}
		t.outputs[key] = v
	}

	// A key cannot be both a scalar and a list output.
	if v.Value != nil {
		return fmt.Errorf("variable %q is both an array and a scalar", key)
	}

	v.ValueArray = append(v.ValueArray, literal)
	return nil
}
func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error {
resourcesByType := make(map[string]map[string]interface{})
@ -139,11 +187,36 @@ func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error {
providersByName["aws"] = providerAWS
}
outputVariables := make(map[string]interface{})
for _, v := range t.outputs {
tfName := tfSanitize(v.Key)
if outputVariables[tfName] != nil {
return fmt.Errorf("duplicate variable found: %s", tfName)
}
tfVar := make(map[string]interface{})
if v.Value != nil {
tfVar["value"] = v.Value
} else {
dedup := true
sorted, err := sortLiterals(v.ValueArray, dedup)
if err != nil {
return fmt.Errorf("error sorting literals: %v", err)
}
tfVar["value"] = sorted
}
outputVariables[tfName] = tfVar
}
data := make(map[string]interface{})
data["resource"] = resourcesByType
if len(providersByName) != 0 {
data["provider"] = providersByName
}
if len(outputVariables) != 0 {
data["output"] = outputVariables
}
jsonBytes, err := json.MarshalIndent(data, "", " ")
if err != nil {