mirror of https://github.com/kubernetes/kops.git
Merge pull request #4171 from rifelpet/rifelpet_iam
Add support for external IAM Instance Profiles
commit 1b29b54d5b
@@ -52,15 +52,15 @@ const updateClusterTestBase = "../../tests/integration/update_cluster/"

 // TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
 func TestMinimal(t *testing.T) {
-	runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1)
-	runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1)
-	runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1)
+	runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true)
+	runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true)
+	runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true)
 }

 // TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
 func TestHA(t *testing.T) {
-	runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3)
-	runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3)
+	runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true)
+	runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3, true)
 }

 // TestHighAvailabilityGCE runs the test on a simple HA GCE configuration, similar to kops create cluster ha-gce.example.com
@@ -71,86 +71,97 @@ func TestHighAvailabilityGCE(t *testing.T) {

 // TestComplex runs the test on a more complex configuration, intended to hit more of the edge cases
 func TestComplex(t *testing.T) {
-	runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1)
+	runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1, true)
 }

 // TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
 func TestMinimalCloudformation(t *testing.T) {
-	runTestCloudformation(t, "minimal.example.com", "minimal-cloudformation", "v1alpha2", false)
+	runTestCloudformation(t, "minimal.example.com", "minimal-cloudformation", "v1alpha2", false, nil)
 }

+// TestExistingIAMCloudformation runs the test with existing IAM instance profiles, similar to kops create cluster minimal.example.com --zones us-west-1a
+func TestExistingIAMCloudformation(t *testing.T) {
+	lifecycleOverrides := []string{"IAMRole=ExistsAndWarnIfChanges", "IAMRolePolicy=ExistsAndWarnIfChanges", "IAMInstanceProfileRole=ExistsAndWarnIfChanges"}
+	runTestCloudformation(t, "minimal.example.com", "existing_iam_cloudformation", "v1alpha2", false, lifecycleOverrides)
+}
+
 // TestAdditionalUserData runs the test on passing additional user-data to an instance at bootstrap.
 func TestAdditionalUserData(t *testing.T) {
-	runTestCloudformation(t, "additionaluserdata.example.com", "additional_user-data", "v1alpha2", false)
+	runTestCloudformation(t, "additionaluserdata.example.com", "additional_user-data", "v1alpha2", false, nil)
 }

 // TestBastionAdditionalUserData runs the test on passing additional user-data to a bastion instance group
 func TestBastionAdditionalUserData(t *testing.T) {
-	runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1)
+	runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1, true)
 }

 // TestMinimal_141 runs the test on a configuration from 1.4.1 release
 func TestMinimal_141(t *testing.T) {
-	runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1)
+	runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1, true)
 }

 // TestPrivateWeave runs the test on a configuration with private topology, weave networking
 func TestPrivateWeave(t *testing.T) {
-	runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1)
-	runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1)
+	runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1, true)
+	runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1, true)
 }

 // TestPrivateFlannel runs the test on a configuration with private topology, flannel networking
 func TestPrivateFlannel(t *testing.T) {
-	runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha1", true, 1)
-	runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1)
+	runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha1", true, 1, true)
+	runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1, true)
 }

 // TestPrivateCalico runs the test on a configuration with private topology, calico networking
 func TestPrivateCalico(t *testing.T) {
-	runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1)
-	runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1)
+	runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1, true)
+	runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1, true)
 }

 // TestPrivateCanal runs the test on a configuration with private topology, canal networking
 func TestPrivateCanal(t *testing.T) {
-	runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1)
-	runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha2", true, 1)
+	runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1, true)
+	runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha2", true, 1, true)
 }

 // TestPrivateKopeio runs the test on a configuration with private topology, kopeio networking
 func TestPrivateKopeio(t *testing.T) {
-	runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1)
+	runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1, true)
 }

 // TestPrivateSharedSubnet runs the test on a configuration with private topology & shared subnets
 func TestPrivateSharedSubnet(t *testing.T) {
-	runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1)
+	runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1, true)
 }

 // TestPrivateDns1 runs the test on a configuration with private topology, private dns
 func TestPrivateDns1(t *testing.T) {
-	runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1)
+	runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1, true)
 }

 // TestPrivateDns2 runs the test on a configuration with private topology, private dns, extant vpc
 func TestPrivateDns2(t *testing.T) {
-	runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1)
+	runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1, true)
 }

 // TestSharedSubnet runs the test on a configuration with a shared subnet (and VPC)
 func TestSharedSubnet(t *testing.T) {
-	runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1)
+	runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1, true)
 }

 // TestSharedVPC runs the test on a configuration with a shared VPC
 func TestSharedVPC(t *testing.T) {
-	runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1)
+	runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1, true)
 }

+// TestExistingIAM runs the test on a configuration with existing IAM instance profiles
+func TestExistingIAM(t *testing.T) {
+	runTestAWS(t, "existing-iam.example.com", "existing_iam", "v1alpha2", false, 3, false)
+}
+
 // TestAdditionalCIDR runs the test on a configuration with a shared VPC
 func TestAdditionalCIDR(t *testing.T) {
-	runTestCloudformation(t, "additionalcidr.example.com", "additional_cidr", "v1alpha2", false)
+	runTestCloudformation(t, "additionalcidr.example.com", "additional_cidr", "v1alpha2", false, nil)
 }

 // TestPhaseNetwork tests the output of tf for the network phase
@@ -171,7 +182,7 @@ func TestPhaseCluster(t *testing.T) {
 	runTestPhase(t, "lifecyclephases.example.com", "lifecycle_phases", "v1alpha2", true, 1, cloudup.PhaseCluster)
 }

-func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName string, srcDir string, version string, private bool, zones int, expectedDataFilenames []string, tfFileName string, phase *cloudup.Phase) {
+func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName string, srcDir string, version string, private bool, zones int, expectedDataFilenames []string, tfFileName string, phase *cloudup.Phase, lifecycleOverrides []string) {
 	var stdout bytes.Buffer

 	srcDir = updateClusterTestBase + srcDir
@@ -223,6 +234,8 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri
 	// We don't test it here, and it adds a dependency on kubectl
 	options.CreateKubecfg = false

+	options.LifecycleOverrides = lifecycleOverrides
+
 	_, err := RunUpdateCluster(factory, clusterName, &stdout, options)
 	if err != nil {
 		t.Fatalf("error running update cluster %q: %v", clusterName, err)
@@ -334,7 +347,7 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri
 	}
 }

-func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
+func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool) {
 	h := testutils.NewIntegrationTestHarness(t)
 	defer h.Close()

@@ -342,10 +355,6 @@ func runTestAWS(t *testing.T, clusterName string, srcDir string, version string,
 	h.SetupMockAWS()

 	expectedFilenames := []string{
-		"aws_iam_role_masters." + clusterName + "_policy",
-		"aws_iam_role_nodes." + clusterName + "_policy",
-		"aws_iam_role_policy_masters." + clusterName + "_policy",
-		"aws_iam_role_policy_nodes." + clusterName + "_policy",
 		"aws_key_pair_kubernetes." + clusterName + "-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key",
 		"aws_launch_configuration_nodes." + clusterName + "_user_data",
 	}
@@ -356,20 +365,35 @@ func runTestAWS(t *testing.T, clusterName string, srcDir string, version string,
 		expectedFilenames = append(expectedFilenames, s)
 	}

-	if private {
-		expectedFilenames = append(expectedFilenames, []string{
-			"aws_iam_role_bastions." + clusterName + "_policy",
-			"aws_iam_role_policy_bastions." + clusterName + "_policy",
-
-			// bastions usually don't have any userdata
-			// "aws_launch_configuration_bastions." + clusterName + "_user_data",
-		}...)
-	}
+	lifecycleOverrides := []string{}
+	if !expectPolicies {
+		lifecycleOverrides = append(lifecycleOverrides, "IAMRole=Ignore")
+		lifecycleOverrides = append(lifecycleOverrides, "IAMRolePolicy=Ignore")
+		lifecycleOverrides = append(lifecycleOverrides, "IAMInstanceProfileRole=Ignore")
+	}
+
+	if expectPolicies {
+		expectedFilenames = append(expectedFilenames, []string{
+			"aws_iam_role_masters." + clusterName + "_policy",
+			"aws_iam_role_nodes." + clusterName + "_policy",
+			"aws_iam_role_policy_masters." + clusterName + "_policy",
+			"aws_iam_role_policy_nodes." + clusterName + "_policy",
+		}...)
+		if private {
+			expectedFilenames = append(expectedFilenames, []string{
+				"aws_iam_role_bastions." + clusterName + "_policy",
+				"aws_iam_role_policy_bastions." + clusterName + "_policy",
+
+				// bastions usually don't have any userdata
+				// "aws_launch_configuration_bastions." + clusterName + "_user_data",
+			}...)
+		}
+	}
 	// Special case that tests a bastion with user-data
 	if srcDir == "bastionadditional_user-data" {
 		expectedFilenames = append(expectedFilenames, "aws_launch_configuration_bastion."+clusterName+"_user_data")
 	}
-	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil)
+	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil, lifecycleOverrides)
 }

 func runTestPhase(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, phase cloudup.Phase) {
@@ -415,7 +439,7 @@ func runTestPhase(t *testing.T, clusterName string, srcDir string, version strin
 		}
 	}

-	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, &phase)
+	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, &phase, nil)
 }

 func runTestGCE(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
@@ -442,10 +466,10 @@ func runTestGCE(t *testing.T, clusterName string, srcDir string, version string,
 		expectedFilenames = append(expectedFilenames, prefix+"ssh-keys")
 	}

-	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil)
+	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil, nil)
 }

-func runTestCloudformation(t *testing.T, clusterName string, srcDir string, version string, private bool) {
+func runTestCloudformation(t *testing.T, clusterName string, srcDir string, version string, private bool, lifecycleOverrides []string) {
 	srcDir = updateClusterTestBase + srcDir
 	var stdout bytes.Buffer

@@ -494,6 +518,7 @@ func runTestCloudformation(t *testing.T, clusterName string, srcDir string, vers

 	// We don't test it here, and it adds a dependency on kubectl
 	options.CreateKubecfg = false
+	options.LifecycleOverrides = lifecycleOverrides

 	_, err := RunUpdateCluster(factory, clusterName, &stdout, options)
 	if err != nil {
@@ -1,6 +1,6 @@
 # IAM Roles

-Two IAM roles are created for the cluster: one for the masters, and one for the nodes.
+By default Kops creates two IAM roles for the cluster: one for the masters, and one for the nodes.

 > Please note that currently all Pods running on your cluster have access to the instance IAM role.
 > Consider using projects such as [kube2iam](https://github.com/jtblin/kube2iam) to prevent that.
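An aside on the warning quoted above (an illustrative sketch, not part of this commit): any pod can read the node's role from the EC2 metadata service, which is exactly what tools like kube2iam intercept. A minimal probe, run from inside a pod on an AWS node:

```go
// Illustrative sketch only (not part of this commit): the EC2 metadata service
// answers from inside a pod, so the node's instance-profile credentials are
// visible to the pod unless something like kube2iam intercepts the request.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Listing this path returns the name of the IAM role attached to the node.
	resp, err := http.Get("http://169.254.169.254/latest/meta-data/iam/security-credentials/")
	if err != nil {
		fmt.Println("metadata service not reachable:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("IAM role visible from this pod: %s\n", body)
}
```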
@@ -66,6 +66,7 @@ to add DynamoDB and Elasticsearch permissions to your nodes.
 Edit your cluster via `kops edit cluster ${CLUSTER_NAME}` and add the following to the spec:
+
 ```
 spec:
   additionalPolicies:
     node: |
       [
@@ -121,6 +122,7 @@ kops update cluster ${CLUSTER_NAME} --yes
 You can have an additional policy for each kops role (node, master, bastion). For instance, if you wanted to apply one set of additional permissions to the master instances, and another to the nodes, you could do the following:
+
 ```
 spec:
   additionalPolicies:
     node: |
       [
@@ -139,3 +141,47 @@ You can have an additional policy for each kops role (node, master, bastion). Fo
     }
   ]
 ```
+
+## Use existing AWS Instance Profiles
+
+Rather than having Kops create and manage IAM roles and instance profiles, it is possible to use an existing instance profile. This is useful in organizations where security policies prevent tools from creating their own IAM roles and policies.
+Kops will still output any differences in the IAM Inline Policy for each IAM Role.
+This is convenient for determining policy changes that need to be made when upgrading Kops.
+**Using IAM Managed Policies will not output these differences; it is up to the user to track expected changes to policies.**
+
+*NOTE: Currently Kops only supports using existing instance profiles for every instance group in the cluster, not a mix of existing and managed instance profiles.
+This is due to the lifecycle overrides being used to prevent creation of the IAM-related resources.*
+
+To do this, get a list of instance group names for the cluster:
+
+```
+kops get ig --name ${CLUSTER_NAME}
+```
+
+And update every instance group's spec with the desired instance profile ARNs:
+
+```
+kops edit ig --name ${CLUSTER_NAME} ${INSTANCE_GROUP_NAME}
+```
+
+Adding the following `iam` section to the spec:
+
+```yaml
+spec:
+  iam:
+    profile: arn:aws:iam::1234567890108:instance-profile/kops-custom-node-role
+```
+
+Now run a cluster update to create the new launch configuration, using [lifecycle overrides](./cli/kops_update_cluster.md#options) to prevent IAM-related resources from being created:
+
+```
+kops update cluster ${CLUSTER_NAME} --yes --lifecycle-overrides IAMRole=ExistsAndWarnIfChanges,IAMRolePolicy=ExistsAndWarnIfChanges,IAMInstanceProfileRole=ExistsAndWarnIfChanges
+```
+
+*Every time `kops update cluster` is run, it must include the above `--lifecycle-overrides` unless a non-`security` phase is specified.*
+
+Finally, perform a rolling update in order to replace EC2 instances in the ASG with the new launch configuration:
+
+```
+kops rolling-update cluster ${CLUSTER_NAME} --yes
+```
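One aside on the "non-`security` phase" note above (an editorial illustration, not part of this commit): the IAM tasks belong to the `security` phase, so a run scoped to another phase can omit the overrides. Assuming the `--phase` flag described in the linked `kops update cluster` options, that exception would look like:

```
# Hypothetical example: a non-security phase does not evaluate the IAM tasks,
# so --lifecycle-overrides can be dropped for this run.
kops update cluster ${CLUSTER_NAME} --yes --phase=cluster
```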
@@ -118,6 +118,8 @@ type InstanceGroupSpec struct {
 	SuspendProcesses []string `json:"suspendProcesses,omitempty"`
 	// DetailedInstanceMonitoring defines if detailed-monitoring is enabled (AWS only)
 	DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"`
+	// IAMProfileSpec defines the identity of the cloud group iam profile (AWS only).
+	IAM *IAMProfileSpec `json:"iam,omitempty"`
 }

 // UserData defines a user-data section
@@ -130,6 +132,14 @@ type UserData struct {
 	Content string `json:"content,omitempty"`
 }

+// IAMProfileSpec is the AWS IAM Profile to attach to instances in this instance
+// group. Specify the ARN for the IAM instance profile (AWS only).
+type IAMProfileSpec struct {
+	// Profile is the AWS IAM Profile to attach to instances in this instance group.
+	// Specify the ARN for the IAM instance profile. (AWS only)
+	Profile *string `json:"profile,omitempty"`
+}
+
 // PerformAssignmentsInstanceGroups populates InstanceGroups with default values
 func PerformAssignmentsInstanceGroups(groups []*InstanceGroup) error {
 	names := map[string]bool{}
@@ -98,6 +98,16 @@ type InstanceGroupSpec struct {
 	SuspendProcesses []string `json:"suspendProcesses,omitempty"`
 	// DetailedInstanceMonitoring defines if detailed-monitoring is enabled (AWS only)
 	DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"`
+	// IAMProfileSpec defines the identity of the cloud group iam profile (AWS only).
+	IAM *IAMProfileSpec `json:"iam,omitempty"`
 }

+// IAMProfileSpec is the AWS IAM Profile to attach to instances in this instance
+// group. Specify the ARN for the IAM instance profile (AWS only).
+type IAMProfileSpec struct {
+	// Profile of the cloud group iam profile. In aws this is the arn
+	// for the iam instance profile
+	Profile *string `json:"profile,omitempty"`
+}
+
 // UserData defines a user-data section
@@ -101,6 +101,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
 		Convert_kops_HeptioAuthenticationSpec_To_v1alpha1_HeptioAuthenticationSpec,
 		Convert_v1alpha1_HookSpec_To_kops_HookSpec,
 		Convert_kops_HookSpec_To_v1alpha1_HookSpec,
+		Convert_v1alpha1_IAMProfileSpec_To_kops_IAMProfileSpec,
+		Convert_kops_IAMProfileSpec_To_v1alpha1_IAMProfileSpec,
 		Convert_v1alpha1_IAMSpec_To_kops_IAMSpec,
 		Convert_kops_IAMSpec_To_v1alpha1_IAMSpec,
 		Convert_v1alpha1_InstanceGroup_To_kops_InstanceGroup,
@@ -1705,6 +1707,26 @@ func Convert_kops_HookSpec_To_v1alpha1_HookSpec(in *kops.HookSpec, out *HookSpec
 	return autoConvert_kops_HookSpec_To_v1alpha1_HookSpec(in, out, s)
 }

+func autoConvert_v1alpha1_IAMProfileSpec_To_kops_IAMProfileSpec(in *IAMProfileSpec, out *kops.IAMProfileSpec, s conversion.Scope) error {
+	out.Profile = in.Profile
+	return nil
+}
+
+// Convert_v1alpha1_IAMProfileSpec_To_kops_IAMProfileSpec is an autogenerated conversion function.
+func Convert_v1alpha1_IAMProfileSpec_To_kops_IAMProfileSpec(in *IAMProfileSpec, out *kops.IAMProfileSpec, s conversion.Scope) error {
+	return autoConvert_v1alpha1_IAMProfileSpec_To_kops_IAMProfileSpec(in, out, s)
+}
+
+func autoConvert_kops_IAMProfileSpec_To_v1alpha1_IAMProfileSpec(in *kops.IAMProfileSpec, out *IAMProfileSpec, s conversion.Scope) error {
+	out.Profile = in.Profile
+	return nil
+}
+
+// Convert_kops_IAMProfileSpec_To_v1alpha1_IAMProfileSpec is an autogenerated conversion function.
+func Convert_kops_IAMProfileSpec_To_v1alpha1_IAMProfileSpec(in *kops.IAMProfileSpec, out *IAMProfileSpec, s conversion.Scope) error {
+	return autoConvert_kops_IAMProfileSpec_To_v1alpha1_IAMProfileSpec(in, out, s)
+}
+
 func autoConvert_v1alpha1_IAMSpec_To_kops_IAMSpec(in *IAMSpec, out *kops.IAMSpec, s conversion.Scope) error {
 	out.Legacy = in.Legacy
 	out.AllowContainerRegistry = in.AllowContainerRegistry
@@ -1857,6 +1879,15 @@ func autoConvert_v1alpha1_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
 	out.Zones = in.Zones
 	out.SuspendProcesses = in.SuspendProcesses
 	out.DetailedInstanceMonitoring = in.DetailedInstanceMonitoring
+	if in.IAM != nil {
+		in, out := &in.IAM, &out.IAM
+		*out = new(kops.IAMProfileSpec)
+		if err := Convert_v1alpha1_IAMProfileSpec_To_kops_IAMProfileSpec(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.IAM = nil
+	}
 	return nil
 }

@@ -1923,6 +1954,15 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha1_InstanceGroupSpec(in *kops.I
 	}
 	out.SuspendProcesses = in.SuspendProcesses
 	out.DetailedInstanceMonitoring = in.DetailedInstanceMonitoring
+	if in.IAM != nil {
+		in, out := &in.IAM, &out.IAM
+		*out = new(IAMProfileSpec)
+		if err := Convert_kops_IAMProfileSpec_To_v1alpha1_IAMProfileSpec(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.IAM = nil
+	}
 	return nil
 }
@@ -1452,6 +1452,31 @@ func (in *HookSpec) DeepCopy() *HookSpec {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IAMProfileSpec) DeepCopyInto(out *IAMProfileSpec) {
+	*out = *in
+	if in.Profile != nil {
+		in, out := &in.Profile, &out.Profile
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(string)
+			**out = **in
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMProfileSpec.
+func (in *IAMProfileSpec) DeepCopy() *IAMProfileSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IAMProfileSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *IAMSpec) DeepCopyInto(out *IAMSpec) {
 	*out = *in
@@ -1674,6 +1699,15 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
 			**out = **in
 		}
 	}
+	if in.IAM != nil {
+		in, out := &in.IAM, &out.IAM
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(IAMProfileSpec)
+			(*in).DeepCopyInto(*out)
+		}
+	}
 	return
 }
@@ -107,6 +107,8 @@ type InstanceGroupSpec struct {
 	SuspendProcesses []string `json:"suspendProcesses,omitempty"`
 	// DetailedInstanceMonitoring defines if detailed-monitoring is enabled (AWS only)
 	DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"`
+	// IAMProfileSpec defines the identity of the cloud group iam profile (AWS only).
+	IAM *IAMProfileSpec `json:"iam,omitempty"`
 }

 // UserData defines a user-data section
@@ -118,3 +120,11 @@ type UserData struct {
 	// Content is the user-data content
 	Content string `json:"content,omitempty"`
 }
+
+// IAMProfileSpec is the AWS IAM Profile to attach to instances in this instance
+// group. Specify the ARN for the IAM instance profile (AWS only).
+type IAMProfileSpec struct {
+	// Profile of the cloud group iam profile. In aws this is the arn
+	// for the iam instance profile
+	Profile *string `json:"profile,omitempty"`
+}
@@ -105,6 +105,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
 		Convert_kops_HeptioAuthenticationSpec_To_v1alpha2_HeptioAuthenticationSpec,
 		Convert_v1alpha2_HookSpec_To_kops_HookSpec,
 		Convert_kops_HookSpec_To_v1alpha2_HookSpec,
+		Convert_v1alpha2_IAMProfileSpec_To_kops_IAMProfileSpec,
+		Convert_kops_IAMProfileSpec_To_v1alpha2_IAMProfileSpec,
 		Convert_v1alpha2_IAMSpec_To_kops_IAMSpec,
 		Convert_kops_IAMSpec_To_v1alpha2_IAMSpec,
 		Convert_v1alpha2_InstanceGroup_To_kops_InstanceGroup,
@@ -1816,6 +1818,26 @@ func Convert_kops_HookSpec_To_v1alpha2_HookSpec(in *kops.HookSpec, out *HookSpec
 	return autoConvert_kops_HookSpec_To_v1alpha2_HookSpec(in, out, s)
 }

+func autoConvert_v1alpha2_IAMProfileSpec_To_kops_IAMProfileSpec(in *IAMProfileSpec, out *kops.IAMProfileSpec, s conversion.Scope) error {
+	out.Profile = in.Profile
+	return nil
+}
+
+// Convert_v1alpha2_IAMProfileSpec_To_kops_IAMProfileSpec is an autogenerated conversion function.
+func Convert_v1alpha2_IAMProfileSpec_To_kops_IAMProfileSpec(in *IAMProfileSpec, out *kops.IAMProfileSpec, s conversion.Scope) error {
+	return autoConvert_v1alpha2_IAMProfileSpec_To_kops_IAMProfileSpec(in, out, s)
+}
+
+func autoConvert_kops_IAMProfileSpec_To_v1alpha2_IAMProfileSpec(in *kops.IAMProfileSpec, out *IAMProfileSpec, s conversion.Scope) error {
+	out.Profile = in.Profile
+	return nil
+}
+
+// Convert_kops_IAMProfileSpec_To_v1alpha2_IAMProfileSpec is an autogenerated conversion function.
+func Convert_kops_IAMProfileSpec_To_v1alpha2_IAMProfileSpec(in *kops.IAMProfileSpec, out *IAMProfileSpec, s conversion.Scope) error {
+	return autoConvert_kops_IAMProfileSpec_To_v1alpha2_IAMProfileSpec(in, out, s)
+}
+
 func autoConvert_v1alpha2_IAMSpec_To_kops_IAMSpec(in *IAMSpec, out *kops.IAMSpec, s conversion.Scope) error {
 	out.Legacy = in.Legacy
 	out.AllowContainerRegistry = in.AllowContainerRegistry
@@ -1969,6 +1991,15 @@ func autoConvert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
 	}
 	out.SuspendProcesses = in.SuspendProcesses
 	out.DetailedInstanceMonitoring = in.DetailedInstanceMonitoring
+	if in.IAM != nil {
+		in, out := &in.IAM, &out.IAM
+		*out = new(kops.IAMProfileSpec)
+		if err := Convert_v1alpha2_IAMProfileSpec_To_kops_IAMProfileSpec(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.IAM = nil
+	}
 	return nil
 }

@@ -2040,6 +2071,15 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha2_InstanceGroupSpec(in *kops.I
 	}
 	out.SuspendProcesses = in.SuspendProcesses
 	out.DetailedInstanceMonitoring = in.DetailedInstanceMonitoring
+	if in.IAM != nil {
+		in, out := &in.IAM, &out.IAM
+		*out = new(IAMProfileSpec)
+		if err := Convert_kops_IAMProfileSpec_To_v1alpha2_IAMProfileSpec(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.IAM = nil
+	}
 	return nil
 }
@@ -1419,6 +1419,31 @@ func (in *HookSpec) DeepCopy() *HookSpec {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IAMProfileSpec) DeepCopyInto(out *IAMProfileSpec) {
+	*out = *in
+	if in.Profile != nil {
+		in, out := &in.Profile, &out.Profile
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(string)
+			**out = **in
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMProfileSpec.
+func (in *IAMProfileSpec) DeepCopy() *IAMProfileSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IAMProfileSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *IAMSpec) DeepCopyInto(out *IAMSpec) {
 	*out = *in
@@ -1646,6 +1671,15 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
 			**out = **in
 		}
 	}
+	if in.IAM != nil {
+		in, out := &in.IAM, &out.IAM
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(IAMProfileSpec)
+			(*in).DeepCopyInto(*out)
+		}
+	}
 	return
 }
@@ -39,6 +39,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/apis/kops:go_default_library",
+        "//upup/pkg/fi:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
@@ -18,6 +18,7 @@ package validation

 import (
 	"fmt"
+	"regexp"

 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/kops/pkg/apis/kops"
@@ -86,6 +87,10 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) error {
 		}
 	}

+	if err := validateInstanceProfile(g.Spec.IAM, field.NewPath("iam")); err != nil {
+		return err
+	}
+
 	return nil
 }

@@ -165,3 +170,18 @@ func validateExtraUserData(userData *kops.UserData) error {

 	return nil
 }
+
+// format is arn:aws:iam::123456789012:instance-profile/S3Access
+var validARN = regexp.MustCompile(`^arn:aws:iam::\d+:instance-profile\/\S+$`)
+
+// validateInstanceProfile checks the String values for the AuthProfile
+func validateInstanceProfile(v *kops.IAMProfileSpec, fldPath *field.Path) *field.Error {
+	if v != nil && v.Profile != nil {
+		arn := *v.Profile
+		if !validARN.MatchString(arn) {
+			return field.Invalid(fldPath.Child("Profile"), arn,
+				"Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole")
+		}
+	}
+	return nil
+}
@@ -21,7 +21,9 @@ import (
 	"testing"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/upup/pkg/fi"
 )

 func TestDefaultTaintsEnforcedBefore160(t *testing.T) {
@@ -64,3 +66,64 @@ func TestDefaultTaintsEnforcedBefore160(t *testing.T) {
 		}
 	}
 }
+
+func s(v string) *string {
+	return fi.String(v)
+}
+func TestValidateInstanceProfile(t *testing.T) {
+	grid := []struct {
+		Input          *kops.IAMProfileSpec
+		ExpectedErrors []string
+		ExpectedDetail string
+	}{
+		{
+			Input: &kops.IAMProfileSpec{
+				Profile: s("arn:aws:iam::123456789012:instance-profile/S3Access"),
+			},
+		},
+		{
+			Input: &kops.IAMProfileSpec{
+				Profile: s("arn:aws:iam::123456789012:instance-profile/has/path/S3Access"),
+			},
+		},
+		{
+			Input: &kops.IAMProfileSpec{
+				Profile: s("42"),
+			},
+			ExpectedErrors: []string{"Invalid value::IAMProfile.Profile"},
+			ExpectedDetail: "Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole",
+		},
+		{
+			Input: &kops.IAMProfileSpec{
+				Profile: s("arn:aws:iam::123456789012:group/division_abc/subdivision_xyz/product_A/Developers"),
+			},
+			ExpectedErrors: []string{"Invalid value::IAMProfile.Profile"},
+			ExpectedDetail: "Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole",
+		},
+	}
+
+	for _, g := range grid {
+		err := validateInstanceProfile(g.Input, field.NewPath("IAMProfile"))
+		allErrs := field.ErrorList{}
+		if err != nil {
+			allErrs = append(allErrs, err)
+		}
+		testErrors(t, g.Input, allErrs, g.ExpectedErrors)
+
+		if g.ExpectedDetail != "" {
+			found := false
+			for _, err := range allErrs {
+				if err.Detail == g.ExpectedDetail {
+					found = true
+				}
+			}
+			if !found {
+				for _, err := range allErrs {
+					t.Logf("found detail: %q", err.Detail)
+				}
+
+				t.Errorf("did not find expected error %q", g.ExpectedDetail)
+			}
+		}
+	}
+}
@@ -1582,6 +1582,31 @@ func (in *HookSpec) DeepCopy() *HookSpec {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IAMProfileSpec) DeepCopyInto(out *IAMProfileSpec) {
+	*out = *in
+	if in.Profile != nil {
+		in, out := &in.Profile, &out.Profile
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(string)
+			**out = **in
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMProfileSpec.
+func (in *IAMProfileSpec) DeepCopy() *IAMProfileSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IAMProfileSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *IAMSpec) DeepCopyInto(out *IAMSpec) {
 	*out = *in
@@ -1809,6 +1834,15 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
 			**out = **in
 		}
 	}
+	if in.IAM != nil {
+		in, out := &in.IAM, &out.IAM
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(IAMProfileSpec)
+			(*in).DeepCopyInto(*out)
+		}
+	}
 	return
 }
@@ -71,6 +71,11 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
 			volumeIops = DefaultVolumeIops
 		}

+		link, err := b.LinkToIAMInstanceProfile(ig)
+		if err != nil {
+			return fmt.Errorf("unable to find iam profile link for instance group %q: %v", ig.ObjectMeta.Name, err)
+		}
+
 		t := &awstasks.LaunchConfiguration{
 			Name:      s(name),
 			Lifecycle: b.Lifecycle,
@@ -78,7 +83,7 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
 			SecurityGroups: []*awstasks.SecurityGroup{
 				b.LinkToSecurityGroup(ig.Spec.Role),
 			},
-			IAMInstanceProfile: b.LinkToIAMInstanceProfile(ig),
+			IAMInstanceProfile: link,
 			ImageID:            s(ig.Spec.Image),
 			InstanceType:       s(ig.Spec.MachineType),
 			InstanceMonitoring: ig.Spec.DetailedInstanceMonitoring,
@@ -51,24 +51,50 @@ const RolePolicyTemplate = `{
 }`

 func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
-	// Collect the roles in use
-	var roles []kops.InstanceGroupRole
+	// Collect managed Instance Group roles
+	managedRoles := make(map[kops.InstanceGroupRole]bool)
+
+	// Collect Instance Profile ARNs and their associated Instance Group roles
+	sharedProfileARNsToIGRole := make(map[string]kops.InstanceGroupRole)
 	for _, ig := range b.InstanceGroups {
-		found := false
-		for _, r := range roles {
-			if r == ig.Spec.Role {
-				found = true
+		if ig.Spec.IAM != nil && ig.Spec.IAM.Profile != nil {
+			specProfile := fi.StringValue(ig.Spec.IAM.Profile)
+			if matchingRole, ok := sharedProfileARNsToIGRole[specProfile]; ok {
+				if matchingRole != ig.Spec.Role {
+					return fmt.Errorf("Found IAM instance profile assigned to multiple Instance Group roles %v and %v: %v",
+						ig.Spec.Role, sharedProfileARNsToIGRole[specProfile], specProfile)
+				}
+			} else {
+				sharedProfileARNsToIGRole[specProfile] = ig.Spec.Role
 			}
-		}
-		if !found {
-			roles = append(roles, ig.Spec.Role)
+		} else {
+			managedRoles[ig.Spec.Role] = true
 		}
 	}

-	// Generate IAM objects etc for each role
-	for _, role := range roles {
-		name := b.IAMName(role)
+	// Generate IAM tasks for each shared role
+	for profileARN, igRole := range sharedProfileARNsToIGRole {
+		iamName, err := findCustomAuthNameFromArn(profileARN)
+		if err != nil {
+			return fmt.Errorf("unable to parse instance profile name from arn %q: %v", profileARN, err)
+		}
+		err = b.buildIAMTasks(igRole, iamName, c, true)
+	}
+
+	// Generate IAM tasks for each managed role
+	for igRole := range managedRoles {
+		iamName := b.IAMName(igRole)
+		err := b.buildIAMTasks(igRole, iamName, c, false)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *IAMModelBuilder) buildIAMTasks(igRole kops.InstanceGroupRole, iamName string, c *fi.ModelBuilderContext, shared bool) error {
+	{ // To minimize diff for easier code review
 		var iamRole *awstasks.IAMRole
 		{
 			rolePolicy, err := b.buildAWSIAMRolePolicy()
@@ -77,11 +103,11 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
 			}

 			iamRole = &awstasks.IAMRole{
-				Name:      s(name),
+				Name:      s(iamName),
 				Lifecycle: b.Lifecycle,

 				RolePolicyDocument: fi.WrapResource(rolePolicy),
-				ExportWithID:       s(strings.ToLower(string(role)) + "s"),
+				ExportWithID:       s(strings.ToLower(string(igRole)) + "s"),
 			}
 			c.AddTask(iamRole)

@@ -91,7 +117,7 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
 			iamPolicy := &iam.PolicyResource{
 				Builder: &iam.PolicyBuilder{
 					Cluster: b.Cluster,
-					Role:    role,
+					Role:    igRole,
 					Region:  b.Region,
 				},
 			}
@@ -108,7 +134,7 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
 			}

 			t := &awstasks.IAMRolePolicy{
-				Name:      s(name),
+				Name:      s(iamName),
 				Lifecycle: b.Lifecycle,

 				Role: iamRole,
@@ -120,15 +146,16 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
 		var iamInstanceProfile *awstasks.IAMInstanceProfile
 		{
 			iamInstanceProfile = &awstasks.IAMInstanceProfile{
-				Name:      s(name),
+				Name:      s(iamName),
 				Lifecycle: b.Lifecycle,
+				Shared:    fi.Bool(shared),
 			}
 			c.AddTask(iamInstanceProfile)
 		}

 		{
 			iamInstanceProfileRole := &awstasks.IAMInstanceProfileRole{
-				Name:      s(name),
+				Name:      s(iamName),
 				Lifecycle: b.Lifecycle,

 				InstanceProfile: iamInstanceProfile,
@@ -141,12 +168,13 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
 		{
 			additionalPolicy := ""
 			if b.Cluster.Spec.AdditionalPolicies != nil {
-				roleAsString := reflect.ValueOf(role).String()
+				roleAsString := reflect.ValueOf(igRole).String()
 				additionalPolicies := *(b.Cluster.Spec.AdditionalPolicies)

 				additionalPolicy = additionalPolicies[strings.ToLower(roleAsString)]
 			}

-			additionalPolicyName := "additional." + name
+			additionalPolicyName := "additional." + iamName

 			t := &awstasks.IAMRolePolicy{
 				Name: s(additionalPolicyName),
@@ -177,7 +205,6 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
 			c.AddTask(t)
 		}
 	}
-
 	return nil
 }
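To make the grouping rule in the hunk above easier to follow, here is a standalone sketch (hypothetical names, not kops code) of the same bookkeeping: instance groups that set `iam.profile` are bucketed by ARN, one ARN may back only a single role, and groups without a profile fall through to the kops-managed roles.

```go
// Standalone illustration of the shared-vs-managed split implemented by Build.
package main

import "fmt"

type igRole string

type instanceGroup struct {
	name    string
	role    igRole
	profile string // empty means kops should manage the role itself
}

func main() {
	igs := []instanceGroup{
		{"master-us-test-1a", "Master", "arn:aws:iam::123456789012:instance-profile/custom-master"},
		{"nodes", "Node", "arn:aws:iam::123456789012:instance-profile/custom-node"},
		{"bastions", "Bastion", ""},
	}

	sharedProfileARNsToRole := map[string]igRole{}
	managedRoles := map[igRole]bool{}
	for _, ig := range igs {
		if ig.profile != "" {
			if existing, ok := sharedProfileARNsToRole[ig.profile]; ok && existing != ig.role {
				// Mirrors the error Build returns when one ARN spans two roles.
				fmt.Printf("profile %s assigned to multiple roles: %s and %s\n", ig.profile, existing, ig.role)
				return
			}
			sharedProfileARNsToRole[ig.profile] = ig.role
		} else {
			managedRoles[ig.role] = true
		}
	}
	fmt.Println("shared (tasks built with shared=true):", sharedProfileARNsToRole)
	fmt.Println("managed (tasks built with shared=false):", managedRoles)
}
```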
@@ -18,11 +18,13 @@ package model

 import (
 	"fmt"
+	"regexp"

 	"github.com/golang/glog"

 	"k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/pki"
+	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
 )

@@ -99,6 +101,7 @@ func (b *KopsModelContext) NameForDNSZone() string {
 	return name
 }

+// IAMName determines the name of the IAM Role and Instance Profile to use for the InstanceGroup
 func (b *KopsModelContext) IAMName(role kops.InstanceGroupRole) string {
 	switch role {
 	case kops.InstanceGroupRoleMaster:
@@ -114,9 +117,28 @@ func (b *KopsModelContext) IAMName(role kops.InstanceGroupRole) string {
 	}
 }

-func (b *KopsModelContext) LinkToIAMInstanceProfile(ig *kops.InstanceGroup) *awstasks.IAMInstanceProfile {
+var roleNamRegExp = regexp.MustCompile(`([^/]+$)`)
+
+// findCustomAuthNameFromArn parses the name of an instance profile from the arn
+func findCustomAuthNameFromArn(arn string) (string, error) {
+	if arn == "" {
+		return "", fmt.Errorf("unable to parse role arn as it is not set")
+	}
+	rs := roleNamRegExp.FindStringSubmatch(arn)
+	if len(rs) >= 2 {
+		return rs[1], nil
+	}
+
+	return "", fmt.Errorf("unable to parse role arn %q", arn)
+}
+
+func (b *KopsModelContext) LinkToIAMInstanceProfile(ig *kops.InstanceGroup) (*awstasks.IAMInstanceProfile, error) {
+	if ig.Spec.IAM != nil && ig.Spec.IAM.Profile != nil {
+		name, err := findCustomAuthNameFromArn(fi.StringValue(ig.Spec.IAM.Profile))
+		return &awstasks.IAMInstanceProfile{Name: &name}, err
+	}
 	name := b.IAMName(ig.Spec.Role)
-	return &awstasks.IAMInstanceProfile{Name: &name}
+	return &awstasks.IAMInstanceProfile{Name: &name}, nil
 }

 // SSHKeyName computes a unique SSH key name, combining the cluster name and the SSH public key fingerprint.
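For illustration (a standalone sketch, not the kops code itself), the helper above amounts to keeping the last `/`-separated segment of the instance profile ARN; the `has/path/S3Access` case from the validation tests shows why only the final segment survives:

```go
// Standalone sketch of what findCustomAuthNameFromArn does.
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as roleNamRegExp in the hunk above.
var nameRE = regexp.MustCompile(`([^/]+$)`)

func main() {
	for _, arn := range []string{
		"arn:aws:iam::123456789012:instance-profile/kops-custom-node-role",
		// A profile with a path, as exercised by the validation tests:
		"arn:aws:iam::123456789012:instance-profile/has/path/S3Access",
	} {
		if rs := nameRE.FindStringSubmatch(arn); len(rs) >= 2 {
			fmt.Println(rs[1]) // "kops-custom-node-role", then "S3Access"
		}
	}
}
```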
@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==
@@ -0,0 +1,132 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
  creationTimestamp: 2017-01-01T00:00:00Z
  name: existing-iam.example.com
spec:
  api:
    dns: {}
  channel: stable
  cloudProvider: aws
  configBase: memfs://tests/existing-iam.example.com
  etcdClusters:
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: a
    name: main
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: a
    name: events
  kubernetesApiAccess:
  - 0.0.0.0/0
  kubernetesVersion: v1.6.4
  masterPublicName: api.existing-iam.example.com
  networkCIDR: 172.20.0.0/16
  networking:
    kubenet: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  roleCustomIamRoles:
    Master: foo
    Node: bar
  sshAccess:
  - 0.0.0.0/0
  subnets:
  - cidr: 172.20.32.0/19
    name: us-test-1a
    type: Public
    zone: us-test-1a
  - cidr: 172.20.64.0/19
    name: us-test-1b
    type: Public
    zone: us-test-1b
  - cidr: 172.20.96.0/19
    name: us-test-1c
    type: Public
    zone: us-test-1c
  topology:
    dns:
      type: Public
    masters: public
    nodes: public

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: 2017-01-01T00:00:00Z
  labels:
    kops.k8s.io/cluster: existing-iam.example.com
  name: master-us-test-1a
spec:
  iam:
    profile: arn:aws:iam::4222917490108:instance-profile/kops-custom-master-role
  image: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  subnets:
  - us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: 2017-01-01T00:00:00Z
  labels:
    kops.k8s.io/cluster: existing-iam.example.com
  name: master-us-test-1b
spec:
  iam:
    profile: arn:aws:iam::4222917490108:instance-profile/kops-custom-master-role
  image: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  subnets:
  - us-test-1b

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: 2017-01-01T00:00:00Z
  labels:
    kops.k8s.io/cluster: existing-iam.example.com
  name: master-us-test-1c
spec:
  iam:
    profile: arn:aws:iam::4222917490108:instance-profile/kops-custom-master-role
  image: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  subnets:
  - us-test-1c

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: 2017-01-01T00:00:00Z
  labels:
    kops.k8s.io/cluster: existing-iam.example.com
  name: nodes
spec:
  iam:
    profile: arn:aws:iam::422917490108:instance-profile/kops-custom-node-role
  image: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
  machineType: t2.medium
  maxSize: 2
  minSize: 2
  role: Node
  subnets:
  - us-test-1a
@@ -0,0 +1,533 @@
output "cluster_name" {
  value = "existing-iam.example.com"
}

output "master_security_group_ids" {
  value = ["${aws_security_group.masters-existing-iam-example-com.id}"]
}

output "node_security_group_ids" {
  value = ["${aws_security_group.nodes-existing-iam-example-com.id}"]
}

output "node_subnet_ids" {
  value = ["${aws_subnet.us-test-1a-existing-iam-example-com.id}"]
}

output "region" {
  value = "us-test-1"
}

output "vpc_id" {
  value = "${aws_vpc.existing-iam-example-com.id}"
}

provider "aws" {
  region = "us-test-1"
}

resource "aws_autoscaling_group" "master-us-test-1a-masters-existing-iam-example-com" {
  name                 = "master-us-test-1a.masters.existing-iam.example.com"
  launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-existing-iam-example-com.id}"
  max_size             = 1
  min_size             = 1
  vpc_zone_identifier  = ["${aws_subnet.us-test-1a-existing-iam-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "existing-iam.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "master-us-test-1a.masters.existing-iam.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/master"
    value               = "1"
    propagate_at_launch = true
  }

  metrics_granularity = "1Minute"
  enabled_metrics     = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
}

resource "aws_autoscaling_group" "master-us-test-1b-masters-existing-iam-example-com" {
  name                 = "master-us-test-1b.masters.existing-iam.example.com"
  launch_configuration = "${aws_launch_configuration.master-us-test-1b-masters-existing-iam-example-com.id}"
  max_size             = 1
  min_size             = 1
  vpc_zone_identifier  = ["${aws_subnet.us-test-1b-existing-iam-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "existing-iam.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "master-us-test-1b.masters.existing-iam.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/master"
    value               = "1"
    propagate_at_launch = true
  }

  metrics_granularity = "1Minute"
  enabled_metrics     = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
}

resource "aws_autoscaling_group" "master-us-test-1c-masters-existing-iam-example-com" {
  name                 = "master-us-test-1c.masters.existing-iam.example.com"
  launch_configuration = "${aws_launch_configuration.master-us-test-1c-masters-existing-iam-example-com.id}"
  max_size             = 1
  min_size             = 1
  vpc_zone_identifier  = ["${aws_subnet.us-test-1c-existing-iam-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "existing-iam.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "master-us-test-1c.masters.existing-iam.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/master"
    value               = "1"
    propagate_at_launch = true
  }

  metrics_granularity = "1Minute"
  enabled_metrics     = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
}

resource "aws_autoscaling_group" "nodes-existing-iam-example-com" {
  name                 = "nodes.existing-iam.example.com"
  launch_configuration = "${aws_launch_configuration.nodes-existing-iam-example-com.id}"
  max_size             = 2
  min_size             = 2
  vpc_zone_identifier  = ["${aws_subnet.us-test-1a-existing-iam-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "existing-iam.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "nodes.existing-iam.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/node"
    value               = "1"
    propagate_at_launch = true
  }

  metrics_granularity = "1Minute"
  enabled_metrics     = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
}

resource "aws_ebs_volume" "a-etcd-events-existing-iam-example-com" {
  availability_zone = "us-test-1a"
  size              = 20
  type              = "gp2"
  encrypted         = false

  tags = {
    KubernetesCluster                                = "existing-iam.example.com"
    Name                                             = "a.etcd-events.existing-iam.example.com"
    "k8s.io/etcd/events"                             = "a/a"
    "k8s.io/role/master"                             = "1"
    "kubernetes.io/cluster/existing-iam.example.com" = "owned"
  }
}

resource "aws_ebs_volume" "a-etcd-main-existing-iam-example-com" {
  availability_zone = "us-test-1a"
  size              = 20
  type              = "gp2"
  encrypted         = false

  tags = {
    KubernetesCluster                                = "existing-iam.example.com"
    Name                                             = "a.etcd-main.existing-iam.example.com"
    "k8s.io/etcd/main"                               = "a/a"
    "k8s.io/role/master"                             = "1"
    "kubernetes.io/cluster/existing-iam.example.com" = "owned"
  }
}

resource "aws_internet_gateway" "existing-iam-example-com" {
  vpc_id = "${aws_vpc.existing-iam-example-com.id}"

  tags = {
    KubernetesCluster                                = "existing-iam.example.com"
    Name                                             = "existing-iam.example.com"
    "kubernetes.io/cluster/existing-iam.example.com" = "owned"
  }
}

resource "aws_key_pair" "kubernetes-existing-iam-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
  key_name   = "kubernetes.existing-iam.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
  public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.existing-iam.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")}"
}

resource "aws_launch_configuration" "master-us-test-1a-masters-existing-iam-example-com" {
  name_prefix                 = "master-us-test-1a.masters.existing-iam.example.com-"
  image_id                    = "ami-15000000"
  instance_type               = "m3.medium"
  key_name                    = "${aws_key_pair.kubernetes-existing-iam-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "kops-custom-master-role"
  security_groups             = ["${aws_security_group.masters-existing-iam-example-com.id}"]
  associate_public_ip_address = true
  user_data                   = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.existing-iam.example.com_user_data")}"

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 64
    delete_on_termination = true
  }

  ephemeral_block_device = {
    device_name  = "/dev/sdc"
    virtual_name = "ephemeral0"
  }

  lifecycle = {
    create_before_destroy = true
  }

  enable_monitoring = false
}

resource "aws_launch_configuration" "master-us-test-1b-masters-existing-iam-example-com" {
  name_prefix                 = "master-us-test-1b.masters.existing-iam.example.com-"
  image_id                    = "ami-15000000"
  instance_type               = "m3.medium"
  key_name                    = "${aws_key_pair.kubernetes-existing-iam-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "kops-custom-master-role"
  security_groups             = ["${aws_security_group.masters-existing-iam-example-com.id}"]
  associate_public_ip_address = true
  user_data                   = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1b.masters.existing-iam.example.com_user_data")}"

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 64
    delete_on_termination = true
  }

  ephemeral_block_device = {
    device_name  = "/dev/sdc"
    virtual_name = "ephemeral0"
  }

  lifecycle = {
    create_before_destroy = true
  }

  enable_monitoring = false
}

resource "aws_launch_configuration" "master-us-test-1c-masters-existing-iam-example-com" {
  name_prefix                 = "master-us-test-1c.masters.existing-iam.example.com-"
  image_id                    = "ami-15000000"
  instance_type               = "m3.medium"
  key_name                    = "${aws_key_pair.kubernetes-existing-iam-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "kops-custom-master-role"
  security_groups             = ["${aws_security_group.masters-existing-iam-example-com.id}"]
  associate_public_ip_address = true
  user_data                   = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1c.masters.existing-iam.example.com_user_data")}"

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 64
    delete_on_termination = true
  }

  ephemeral_block_device = {
    device_name  = "/dev/sdc"
    virtual_name = "ephemeral0"
  }

  lifecycle = {
    create_before_destroy = true
  }

  enable_monitoring = false
}

resource "aws_launch_configuration" "nodes-existing-iam-example-com" {
  name_prefix                 = "nodes.existing-iam.example.com-"
  image_id                    = "ami-15000000"
  instance_type               = "t2.medium"
  key_name                    = "${aws_key_pair.kubernetes-existing-iam-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "kops-custom-node-role"
  security_groups             = ["${aws_security_group.nodes-existing-iam-example-com.id}"]
  associate_public_ip_address = true
  user_data                   = "${file("${path.module}/data/aws_launch_configuration_nodes.existing-iam.example.com_user_data")}"

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 128
    delete_on_termination = true
  }

  lifecycle = {
    create_before_destroy = true
  }

  enable_monitoring = false
}

resource "aws_route" "0-0-0-0--0" {
  route_table_id         = "${aws_route_table.existing-iam-example-com.id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = "${aws_internet_gateway.existing-iam-example-com.id}"
}

resource "aws_route_table" "existing-iam-example-com" {
  vpc_id = "${aws_vpc.existing-iam-example-com.id}"

  tags = {
    KubernetesCluster                                = "existing-iam.example.com"
    Name                                             = "existing-iam.example.com"
    "kubernetes.io/cluster/existing-iam.example.com" = "owned"
    "kubernetes.io/kops/role"                        = "public"
  }
}

resource "aws_route_table_association" "us-test-1a-existing-iam-example-com" {
  subnet_id      = "${aws_subnet.us-test-1a-existing-iam-example-com.id}"
  route_table_id = "${aws_route_table.existing-iam-example-com.id}"
}

resource "aws_route_table_association" "us-test-1b-existing-iam-example-com" {
  subnet_id      = "${aws_subnet.us-test-1b-existing-iam-example-com.id}"
  route_table_id = "${aws_route_table.existing-iam-example-com.id}"
}

resource "aws_route_table_association" "us-test-1c-existing-iam-example-com" {
  subnet_id      = "${aws_subnet.us-test-1c-existing-iam-example-com.id}"
  route_table_id = "${aws_route_table.existing-iam-example-com.id}"
}

resource "aws_security_group" "masters-existing-iam-example-com" {
  name        = "masters.existing-iam.example.com"
  vpc_id      = "${aws_vpc.existing-iam-example-com.id}"
  description = "Security group for masters"

  tags = {
    KubernetesCluster                                = "existing-iam.example.com"
    Name                                             = "masters.existing-iam.example.com"
    "kubernetes.io/cluster/existing-iam.example.com" = "owned"
  }
}

resource "aws_security_group" "nodes-existing-iam-example-com" {
  name        = "nodes.existing-iam.example.com"
  vpc_id      = "${aws_vpc.existing-iam-example-com.id}"
  description = "Security group for nodes"
|
||||
|
||||
tags = {
|
||||
KubernetesCluster = "existing-iam.example.com"
|
||||
Name = "nodes.existing-iam.example.com"
|
||||
"kubernetes.io/cluster/existing-iam.example.com" = "owned"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "all-master-to-master" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
source_security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "all-master-to-node" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
source_security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "all-node-to-node" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
source_security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "https-external-to-master-0-0-0-0--0" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
from_port = 443
|
||||
to_port = 443
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "master-egress" {
|
||||
type = "egress"
|
||||
security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "node-egress" {
|
||||
type = "egress"
|
||||
security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
source_security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
from_port = 1
|
||||
to_port = 2379
|
||||
protocol = "tcp"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
source_security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
from_port = 2382
|
||||
to_port = 4000
|
||||
protocol = "tcp"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
source_security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
from_port = 4003
|
||||
to_port = 65535
|
||||
protocol = "tcp"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
source_security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
from_port = 1
|
||||
to_port = 65535
|
||||
protocol = "udp"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.masters-existing-iam-example-com.id}"
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "ssh-external-to-node-0-0-0-0--0" {
|
||||
type = "ingress"
|
||||
security_group_id = "${aws_security_group.nodes-existing-iam-example-com.id}"
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
resource "aws_subnet" "us-test-1a-existing-iam-example-com" {
|
||||
vpc_id = "${aws_vpc.existing-iam-example-com.id}"
|
||||
cidr_block = "172.20.32.0/19"
|
||||
availability_zone = "us-test-1a"
|
||||
|
||||
tags = {
|
||||
KubernetesCluster = "existing-iam.example.com"
|
||||
Name = "us-test-1a.existing-iam.example.com"
|
||||
SubnetType = "Public"
|
||||
"kubernetes.io/cluster/existing-iam.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_subnet" "us-test-1b-existing-iam-example-com" {
|
||||
vpc_id = "${aws_vpc.existing-iam-example-com.id}"
|
||||
cidr_block = "172.20.64.0/19"
|
||||
availability_zone = "us-test-1b"
|
||||
|
||||
tags = {
|
||||
KubernetesCluster = "existing-iam.example.com"
|
||||
Name = "us-test-1b.existing-iam.example.com"
|
||||
SubnetType = "Public"
|
||||
"kubernetes.io/cluster/existing-iam.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_subnet" "us-test-1c-existing-iam-example-com" {
|
||||
vpc_id = "${aws_vpc.existing-iam-example-com.id}"
|
||||
cidr_block = "172.20.96.0/19"
|
||||
availability_zone = "us-test-1c"
|
||||
|
||||
tags = {
|
||||
KubernetesCluster = "existing-iam.example.com"
|
||||
Name = "us-test-1c.existing-iam.example.com"
|
||||
SubnetType = "Public"
|
||||
"kubernetes.io/cluster/existing-iam.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_vpc" "existing-iam-example-com" {
|
||||
cidr_block = "172.20.0.0/16"
|
||||
enable_dns_hostnames = true
|
||||
enable_dns_support = true
|
||||
|
||||
tags = {
|
||||
KubernetesCluster = "existing-iam.example.com"
|
||||
Name = "existing-iam.example.com"
|
||||
"kubernetes.io/cluster/existing-iam.example.com" = "owned"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_vpc_dhcp_options" "existing-iam-example-com" {
|
||||
domain_name = "us-test-1.compute.internal"
|
||||
domain_name_servers = ["AmazonProvidedDNS"]
|
||||
|
||||
tags = {
|
||||
KubernetesCluster = "existing-iam.example.com"
|
||||
Name = "existing-iam.example.com"
|
||||
"kubernetes.io/cluster/existing-iam.example.com" = "owned"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_vpc_dhcp_options_association" "existing-iam-example-com" {
|
||||
vpc_id = "${aws_vpc.existing-iam-example-com.id}"
|
||||
dhcp_options_id = "${aws_vpc_dhcp_options.existing-iam-example-com.id}"
|
||||
}
|
||||
|
||||
terraform = {
|
||||
required_version = ">= 0.9.3"
|
||||
}
|
|
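
Note that the launch configurations above attach the pre-existing instance profiles kops-custom-master-role and kops-custom-node-role instead of IAM resources created by kops. As a minimal sketch of the InstanceGroup manifest that would select such an external profile (assuming the spec.iam.profile field this change introduces; the account ID below is a placeholder, not from the fixtures):

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  labels:
    kops.k8s.io/cluster: existing-iam.example.com
  name: master-us-test-1a
spec:
  # Assumption: kops attaches this existing profile ARN as-is rather than
  # creating its own IAMRole/IAMRolePolicy/IAMInstanceProfile resources.
  iam:
    profile: arn:aws:iam::123456789012:instance-profile/kops-custom-master-role
  machineType: m3.medium
  role: Master
  subnets:
  - us-test-1a
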
@@ -0,0 +1,591 @@
{
  "Resources": {
    "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": {
      "Type": "AWS::AutoScaling::AutoScalingGroup",
      "Properties": {
        "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com",
        "LaunchConfigurationName": {
          "Ref": "AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexamplecom"
        },
        "MaxSize": 1,
        "MinSize": 1,
        "VPCZoneIdentifier": [
          {
            "Ref": "AWSEC2Subnetustest1aminimalexamplecom"
          }
        ],
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com",
            "PropagateAtLaunch": true
          },
          {
            "Key": "Name",
            "Value": "master-us-test-1a.masters.minimal.example.com",
            "PropagateAtLaunch": true
          },
          {
            "Key": "k8s.io/role/master",
            "Value": "1",
            "PropagateAtLaunch": true
          }
        ],
        "MetricsCollection": [
          {
            "Granularity": "1Minute",
            "Metrics": [
              "GroupDesiredCapacity",
              "GroupInServiceInstances",
              "GroupMaxSize",
              "GroupMinSize",
              "GroupPendingInstances",
              "GroupStandbyInstances",
              "GroupTerminatingInstances",
              "GroupTotalInstances"
            ]
          }
        ]
      }
    },
    "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": {
      "Type": "AWS::AutoScaling::AutoScalingGroup",
      "Properties": {
        "AutoScalingGroupName": "nodes.minimal.example.com",
        "LaunchConfigurationName": {
          "Ref": "AWSAutoScalingLaunchConfigurationnodesminimalexamplecom"
        },
        "MaxSize": 2,
        "MinSize": 2,
        "VPCZoneIdentifier": [
          {
            "Ref": "AWSEC2Subnetustest1aminimalexamplecom"
          }
        ],
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com",
            "PropagateAtLaunch": true
          },
          {
            "Key": "Name",
            "Value": "nodes.minimal.example.com",
            "PropagateAtLaunch": true
          },
          {
            "Key": "k8s.io/role/node",
            "Value": "1",
            "PropagateAtLaunch": true
          }
        ],
        "MetricsCollection": [
          {
            "Granularity": "1Minute",
            "Metrics": [
              "GroupDesiredCapacity",
              "GroupInServiceInstances",
              "GroupMaxSize",
              "GroupMinSize",
              "GroupPendingInstances",
              "GroupStandbyInstances",
              "GroupTerminatingInstances",
              "GroupTotalInstances"
            ]
          }
        ]
      }
    },
    "AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexamplecom": {
      "Type": "AWS::AutoScaling::LaunchConfiguration",
      "Properties": {
        "AssociatePublicIpAddress": true,
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/xvda",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": 64,
              "DeleteOnTermination": true
            }
          },
          {
            "DeviceName": "/dev/sdc",
            "VirtualName": "ephemeral0"
          }
        ],
        "IamInstanceProfile": "kops-custom-master-role",
        "ImageId": "ami-12345678",
        "InstanceType": "m3.medium",
        "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57",
        "SecurityGroups": [
          {
            "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
          }
        ],
        "UserData": "extracted",
        "InstanceMonitoring": false
      }
    },
    "AWSAutoScalingLaunchConfigurationnodesminimalexamplecom": {
      "Type": "AWS::AutoScaling::LaunchConfiguration",
      "Properties": {
        "AssociatePublicIpAddress": true,
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/xvda",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": 128,
              "DeleteOnTermination": true
            }
          }
        ],
        "IamInstanceProfile": "kops-custom-node-role",
        "ImageId": "ami-12345678",
        "InstanceType": "t2.medium",
        "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57",
        "SecurityGroups": [
          {
            "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
          }
        ],
        "UserData": "extracted",
        "InstanceMonitoring": false
      }
    },
    "AWSEC2DHCPOptionsminimalexamplecom": {
      "Type": "AWS::EC2::DHCPOptions",
      "Properties": {
        "DomainName": "us-test-1.compute.internal",
        "DomainNameServers": [
          "AmazonProvidedDNS"
        ],
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "minimal.example.com"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          }
        ]
      }
    },
    "AWSEC2InternetGatewayminimalexamplecom": {
      "Type": "AWS::EC2::InternetGateway",
      "Properties": {
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "minimal.example.com"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          }
        ]
      }
    },
    "AWSEC2Route00000": {
      "Type": "AWS::EC2::Route",
      "Properties": {
        "RouteTableId": {
          "Ref": "AWSEC2RouteTableminimalexamplecom"
        },
        "DestinationCidrBlock": "0.0.0.0/0",
        "GatewayId": {
          "Ref": "AWSEC2InternetGatewayminimalexamplecom"
        }
      }
    },
    "AWSEC2RouteTableminimalexamplecom": {
      "Type": "AWS::EC2::RouteTable",
      "Properties": {
        "VpcId": {
          "Ref": "AWSEC2VPCminimalexamplecom"
        },
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "minimal.example.com"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          },
          {
            "Key": "kubernetes.io/kops/role",
            "Value": "public"
          }
        ]
      }
    },
    "AWSEC2SecurityGroupEgressmasteregress": {
      "Type": "AWS::EC2::SecurityGroupEgress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "FromPort": 0,
        "ToPort": 0,
        "IpProtocol": "-1",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "AWSEC2SecurityGroupEgressnodeegress": {
      "Type": "AWS::EC2::SecurityGroupEgress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "FromPort": 0,
        "ToPort": 0,
        "IpProtocol": "-1",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "AWSEC2SecurityGroupIngressallmastertomaster": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "SourceSecurityGroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "FromPort": 0,
        "ToPort": 0,
        "IpProtocol": "-1"
      }
    },
    "AWSEC2SecurityGroupIngressallmastertonode": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "SourceSecurityGroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "FromPort": 0,
        "ToPort": 0,
        "IpProtocol": "-1"
      }
    },
    "AWSEC2SecurityGroupIngressallnodetonode": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "SourceSecurityGroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "FromPort": 0,
        "ToPort": 0,
        "IpProtocol": "-1"
      }
    },
    "AWSEC2SecurityGroupIngresshttpsexternaltomaster00000": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "FromPort": 443,
        "ToPort": 443,
        "IpProtocol": "tcp",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "AWSEC2SecurityGroupIngressnodetomastertcp12379": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "SourceSecurityGroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "FromPort": 1,
        "ToPort": 2379,
        "IpProtocol": "tcp"
      }
    },
    "AWSEC2SecurityGroupIngressnodetomastertcp23824000": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "SourceSecurityGroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "FromPort": 2382,
        "ToPort": 4000,
        "IpProtocol": "tcp"
      }
    },
    "AWSEC2SecurityGroupIngressnodetomastertcp400365535": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "SourceSecurityGroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "FromPort": 4003,
        "ToPort": 65535,
        "IpProtocol": "tcp"
      }
    },
    "AWSEC2SecurityGroupIngressnodetomasterudp165535": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "SourceSecurityGroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "FromPort": 1,
        "ToPort": 65535,
        "IpProtocol": "udp"
      }
    },
    "AWSEC2SecurityGroupIngresssshexternaltomaster00000": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
        },
        "FromPort": 22,
        "ToPort": 22,
        "IpProtocol": "tcp",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "AWSEC2SecurityGroupIngresssshexternaltonode00000": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
        "FromPort": 22,
        "ToPort": 22,
        "IpProtocol": "tcp",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "AWSEC2SecurityGroupmastersminimalexamplecom": {
      "Type": "AWS::EC2::SecurityGroup",
      "Properties": {
        "VpcId": {
          "Ref": "AWSEC2VPCminimalexamplecom"
        },
        "GroupDescription": "Security group for masters",
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "masters.minimal.example.com"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          }
        ]
      }
    },
    "AWSEC2SecurityGroupnodesminimalexamplecom": {
      "Type": "AWS::EC2::SecurityGroup",
      "Properties": {
        "VpcId": {
          "Ref": "AWSEC2VPCminimalexamplecom"
        },
        "GroupDescription": "Security group for nodes",
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "nodes.minimal.example.com"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          }
        ]
      }
    },
    "AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": {
      "Type": "AWS::EC2::SubnetRouteTableAssociation",
      "Properties": {
        "SubnetId": {
          "Ref": "AWSEC2Subnetustest1aminimalexamplecom"
        },
        "RouteTableId": {
          "Ref": "AWSEC2RouteTableminimalexamplecom"
        }
      }
    },
    "AWSEC2Subnetustest1aminimalexamplecom": {
      "Type": "AWS::EC2::Subnet",
      "Properties": {
        "VpcId": {
          "Ref": "AWSEC2VPCminimalexamplecom"
        },
        "CidrBlock": "172.20.32.0/19",
        "AvailabilityZone": "us-test-1a",
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "us-test-1a.minimal.example.com"
          },
          {
            "Key": "SubnetType",
            "Value": "Public"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          },
          {
            "Key": "kubernetes.io/role/elb",
            "Value": "1"
          }
        ]
      }
    },
    "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": {
      "Type": "AWS::EC2::VPCDHCPOptionsAssociation",
      "Properties": {
        "VpcId": {
          "Ref": "AWSEC2VPCminimalexamplecom"
        },
        "DhcpOptionsId": {
          "Ref": "AWSEC2DHCPOptionsminimalexamplecom"
        }
      }
    },
    "AWSEC2VPCGatewayAttachmentminimalexamplecom": {
      "Type": "AWS::EC2::VPCGatewayAttachment",
      "Properties": {
        "VpcId": {
          "Ref": "AWSEC2VPCminimalexamplecom"
        },
        "InternetGatewayId": {
          "Ref": "AWSEC2InternetGatewayminimalexamplecom"
        }
      }
    },
    "AWSEC2VPCminimalexamplecom": {
      "Type": "AWS::EC2::VPC",
      "Properties": {
        "CidrBlock": "172.20.0.0/16",
        "EnableDnsHostnames": true,
        "EnableDnsSupport": true,
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "minimal.example.com"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          }
        ]
      }
    },
    "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": {
      "Type": "AWS::EC2::Volume",
      "Properties": {
        "AvailabilityZone": "us-test-1a",
        "Size": 20,
        "VolumeType": "gp2",
        "Encrypted": false,
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "us-test-1a.etcd-events.minimal.example.com"
          },
          {
            "Key": "k8s.io/etcd/events",
            "Value": "us-test-1a/us-test-1a"
          },
          {
            "Key": "k8s.io/role/master",
            "Value": "1"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          }
        ]
      }
    },
    "AWSEC2Volumeustest1aetcdmainminimalexamplecom": {
      "Type": "AWS::EC2::Volume",
      "Properties": {
        "AvailabilityZone": "us-test-1a",
        "Size": 20,
        "VolumeType": "gp2",
        "Encrypted": false,
        "Tags": [
          {
            "Key": "KubernetesCluster",
            "Value": "minimal.example.com"
          },
          {
            "Key": "Name",
            "Value": "us-test-1a.etcd-main.minimal.example.com"
          },
          {
            "Key": "k8s.io/etcd/main",
            "Value": "us-test-1a/us-test-1a"
          },
          {
            "Key": "k8s.io/role/master",
            "Value": "1"
          },
          {
            "Key": "kubernetes.io/cluster/minimal.example.com",
            "Value": "owned"
          }
        ]
      }
    }
  }
}
@@ -0,0 +1,482 @@
Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexamplecom.Properties.UserData: |
  #!/bin/bash
  # Copyright 2016 The Kubernetes Authors All rights reserved.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
  # You may obtain a copy of the License at
  #
  #     http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.

  set -o errexit
  set -o nounset
  set -o pipefail

  NODEUP_URL=https://kubeupv2.s3.amazonaws.com/kops/1.8.1/linux/amd64/nodeup
  NODEUP_HASH=bb41724c37d15ab7e039e06230e742b9b38d0808

  export AWS_REGION=us-test-1




  function ensure-install-dir() {
    INSTALL_DIR="/var/cache/kubernetes-install"
    # On ContainerOS, we install to /var/lib/toolbox install (because of noexec)
    if [[ -d /var/lib/toolbox ]]; then
      INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
    fi
    mkdir -p ${INSTALL_DIR}
    cd ${INSTALL_DIR}
  }

  # Retry a download until we get it. Takes a hash and a set of URLs.
  #
  # $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
  # $2+ are the URLs to download.
  download-or-bust() {
    local -r hash="$1"
    shift 1

    urls=( $* )
    while true; do
      for url in "${urls[@]}"; do
        local file="${url##*/}"
        rm -f "${file}"

        if [[ $(which curl) ]]; then
          if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
            echo "== Failed to curl ${url}. Retrying. =="
            break
          fi
        elif [[ $(which wget ) ]]; then
          if ! wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10 "${url}"; then
            echo "== Failed to wget ${url}. Retrying. =="
            break
          fi
        else
          echo "== Could not find curl or wget. Retrying. =="
          break
        fi

        if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
        else
          if [[ -n "${hash}" ]]; then
            echo "== Downloaded ${url} (SHA1 = ${hash}) =="
          else
            echo "== Downloaded ${url} =="
          fi
          return
        fi
      done

      echo "All downloads failed; sleeping before retrying"
      sleep 60
    done
  }

  validate-hash() {
    local -r file="$1"
    local -r expected="$2"
    local actual

    actual=$(sha1sum ${file} | awk '{ print $1 }') || true
    if [[ "${actual}" != "${expected}" ]]; then
      echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
      return 1
    fi
  }

  function split-commas() {
    echo $1 | tr "," "\n"
  }

  function try-download-release() {
    # TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
    # optimization.

    local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
    local -r nodeup_filename="${nodeup_urls[0]##*/}"
    if [[ -n "${NODEUP_HASH:-}" ]]; then
      local -r nodeup_hash="${NODEUP_HASH}"
    else
      # TODO: Remove?
      echo "Downloading sha1 (not found in env)"
      download-or-bust "" "${nodeup_urls[@]/%/.sha1}"
      local -r nodeup_hash=$(cat "${nodeup_filename}.sha1")
    fi

    echo "Downloading nodeup (${nodeup_urls[@]})"
    download-or-bust "${nodeup_hash}" "${nodeup_urls[@]}"

    chmod +x nodeup
  }

  function download-release() {
    # In case of failure checking integrity of release, retry.
    until try-download-release; do
      sleep 15
      echo "Couldn't download release. Retrying..."
    done

    echo "Running nodeup"
    # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
    ( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
  }

  ####################################################################################

  /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"

  echo "== nodeup node config starting =="
  ensure-install-dir

  cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
  cloudConfig: null
  docker:
    ipMasq: false
    ipTables: false
    logLevel: warn
    storage: overlay,aufs
    version: 1.11.2
  encryptionConfig: null
  etcdClusters:
    events:
      image: gcr.io/google_containers/etcd:2.2.1
      version: 2.2.1
    main:
      image: gcr.io/google_containers/etcd:2.2.1
      version: 2.2.1
  kubeAPIServer:
    address: 127.0.0.1
    admissionControl:
    - NamespaceLifecycle
    - LimitRanger
    - ServiceAccount
    - PersistentVolumeLabel
    - DefaultStorageClass
    - ResourceQuota
    allowPrivileged: true
    apiServerCount: 1
    authorizationMode: AlwaysAllow
    cloudProvider: aws
    etcdServers:
    - http://127.0.0.1:4001
    etcdServersOverrides:
    - /events#http://127.0.0.1:4002
    image: gcr.io/google_containers/kube-apiserver:v1.4.12
    insecurePort: 8080
    logLevel: 2
    securePort: 443
    serviceClusterIPRange: 100.64.0.0/13
    storageBackend: etcd2
  kubeControllerManager:
    allocateNodeCIDRs: true
    attachDetachReconcileSyncPeriod: 1m0s
    cloudProvider: aws
    clusterCIDR: 100.96.0.0/11
    clusterName: minimal.example.com
    configureCloudRoutes: true
    image: gcr.io/google_containers/kube-controller-manager:v1.4.12
    leaderElection:
      leaderElect: true
    logLevel: 2
    master: 127.0.0.1:8080
  kubeProxy:
    clusterCIDR: 100.96.0.0/11
    cpuRequest: 100m
    hostnameOverride: '@aws'
    image: gcr.io/google_containers/kube-proxy:v1.4.12
    logLevel: 2
  kubeScheduler:
    image: gcr.io/google_containers/kube-scheduler:v1.4.12
    leaderElection:
      leaderElect: true
    logLevel: 2
    master: http://127.0.0.1:8080
  kubelet:
    allowPrivileged: true
    apiServers: https://api.internal.minimal.example.com
    babysitDaemons: true
    cgroupRoot: docker
    cloudProvider: aws
    clusterDNS: 100.64.0.10
    clusterDomain: cluster.local
    enableDebuggingHandlers: true
    evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
    hostnameOverride: '@aws'
    logLevel: 2
    networkPluginMTU: 9001
    networkPluginName: kubenet
    nonMasqueradeCIDR: 100.64.0.0/10
    podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
    podManifestPath: /etc/kubernetes/manifests
    reconcileCIDR: true
  masterKubelet:
    allowPrivileged: true
    apiServers: http://127.0.0.1:8080
    babysitDaemons: true
    cgroupRoot: docker
    cloudProvider: aws
    clusterDNS: 100.64.0.10
    clusterDomain: cluster.local
    enableDebuggingHandlers: true
    evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
    hostnameOverride: '@aws'
    logLevel: 2
    networkPluginMTU: 9001
    networkPluginName: kubenet
    nonMasqueradeCIDR: 100.64.0.0/10
    podCIDR: 10.123.45.0/28
    podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
    podManifestPath: /etc/kubernetes/manifests
    reconcileCIDR: true
    registerSchedulable: false

  __EOF_CLUSTER_SPEC

  cat > ig_spec.yaml << '__EOF_IG_SPEC'
  kubelet: null
  nodeLabels: null
  suspendProcesses: null
  taints: null

  __EOF_IG_SPEC

  cat > kube_env.yaml << '__EOF_KUBE_ENV'
  Assets:
  - c4871c7315817ee114f5c554a58da8ebc54f08c3@https://storage.googleapis.com/kubernetes-release/release/v1.4.12/bin/linux/amd64/kubelet
  - d9fdb6b37597d371ef853cde76170f38a553aa78@https://storage.googleapis.com/kubernetes-release/release/v1.4.12/bin/linux/amd64/kubectl
  - 19d49f7b2b99cd2493d5ae0ace896c64e289ccbb@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
  - 42b15a0a0a56531750bde3c7b08d0cf27c170c48@https://kubeupv2.s3.amazonaws.com/kops/1.8.1/linux/amd64/utils.tar.gz
  ClusterName: minimal.example.com
  ConfigBase: memfs://clusters.example.com/minimal.example.com
  InstanceGroupName: master-us-test-1a
  Tags:
  - _automatic_upgrades
  - _aws
  - _kubernetes_master
  channels:
  - memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
  protokubeImage:
    hash: 0b1f26208f8f6cc02468368706d0236670fec8a2
    name: protokube:1.8.1
    source: https://kubeupv2.s3.amazonaws.com/kops/1.8.1/images/protokube.tar.gz

  __EOF_KUBE_ENV

  download-release
  echo "== nodeup node config done =="
Resources.AWSAutoScalingLaunchConfigurationnodesminimalexamplecom.Properties.UserData: |
  #!/bin/bash
  # Copyright 2016 The Kubernetes Authors All rights reserved.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
  # You may obtain a copy of the License at
  #
  #     http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.

  set -o errexit
  set -o nounset
  set -o pipefail

  NODEUP_URL=https://kubeupv2.s3.amazonaws.com/kops/1.8.1/linux/amd64/nodeup
  NODEUP_HASH=bb41724c37d15ab7e039e06230e742b9b38d0808

  export AWS_REGION=us-test-1




  function ensure-install-dir() {
    INSTALL_DIR="/var/cache/kubernetes-install"
    # On ContainerOS, we install to /var/lib/toolbox install (because of noexec)
    if [[ -d /var/lib/toolbox ]]; then
      INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
    fi
    mkdir -p ${INSTALL_DIR}
    cd ${INSTALL_DIR}
  }

  # Retry a download until we get it. Takes a hash and a set of URLs.
  #
  # $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
  # $2+ are the URLs to download.
  download-or-bust() {
    local -r hash="$1"
    shift 1

    urls=( $* )
    while true; do
      for url in "${urls[@]}"; do
        local file="${url##*/}"
        rm -f "${file}"

        if [[ $(which curl) ]]; then
          if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
            echo "== Failed to curl ${url}. Retrying. =="
            break
          fi
        elif [[ $(which wget ) ]]; then
          if ! wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10 "${url}"; then
            echo "== Failed to wget ${url}. Retrying. =="
            break
          fi
        else
          echo "== Could not find curl or wget. Retrying. =="
          break
        fi

        if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
        else
          if [[ -n "${hash}" ]]; then
            echo "== Downloaded ${url} (SHA1 = ${hash}) =="
          else
            echo "== Downloaded ${url} =="
          fi
          return
        fi
      done

      echo "All downloads failed; sleeping before retrying"
      sleep 60
    done
  }

  validate-hash() {
    local -r file="$1"
    local -r expected="$2"
    local actual

    actual=$(sha1sum ${file} | awk '{ print $1 }') || true
    if [[ "${actual}" != "${expected}" ]]; then
      echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
      return 1
    fi
  }

  function split-commas() {
    echo $1 | tr "," "\n"
  }

  function try-download-release() {
    # TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
    # optimization.

    local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
    local -r nodeup_filename="${nodeup_urls[0]##*/}"
    if [[ -n "${NODEUP_HASH:-}" ]]; then
      local -r nodeup_hash="${NODEUP_HASH}"
    else
      # TODO: Remove?
      echo "Downloading sha1 (not found in env)"
      download-or-bust "" "${nodeup_urls[@]/%/.sha1}"
      local -r nodeup_hash=$(cat "${nodeup_filename}.sha1")
    fi

    echo "Downloading nodeup (${nodeup_urls[@]})"
    download-or-bust "${nodeup_hash}" "${nodeup_urls[@]}"

    chmod +x nodeup
  }

  function download-release() {
    # In case of failure checking integrity of release, retry.
    until try-download-release; do
      sleep 15
      echo "Couldn't download release. Retrying..."
    done

    echo "Running nodeup"
    # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
    ( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
  }

  ####################################################################################

  /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"

  echo "== nodeup node config starting =="
  ensure-install-dir

  cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
  cloudConfig: null
  docker:
    ipMasq: false
    ipTables: false
    logLevel: warn
    storage: overlay,aufs
    version: 1.11.2
  kubeProxy:
    clusterCIDR: 100.96.0.0/11
    cpuRequest: 100m
    hostnameOverride: '@aws'
    image: gcr.io/google_containers/kube-proxy:v1.4.12
    logLevel: 2
  kubelet:
    allowPrivileged: true
    apiServers: https://api.internal.minimal.example.com
    babysitDaemons: true
    cgroupRoot: docker
    cloudProvider: aws
    clusterDNS: 100.64.0.10
    clusterDomain: cluster.local
    enableDebuggingHandlers: true
    evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
    hostnameOverride: '@aws'
    logLevel: 2
    networkPluginMTU: 9001
    networkPluginName: kubenet
    nonMasqueradeCIDR: 100.64.0.0/10
    podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
    podManifestPath: /etc/kubernetes/manifests
    reconcileCIDR: true

  __EOF_CLUSTER_SPEC

  cat > ig_spec.yaml << '__EOF_IG_SPEC'
  kubelet: null
  nodeLabels: null
  suspendProcesses: null
  taints: null

  __EOF_IG_SPEC

  cat > kube_env.yaml << '__EOF_KUBE_ENV'
  Assets:
  - c4871c7315817ee114f5c554a58da8ebc54f08c3@https://storage.googleapis.com/kubernetes-release/release/v1.4.12/bin/linux/amd64/kubelet
  - d9fdb6b37597d371ef853cde76170f38a553aa78@https://storage.googleapis.com/kubernetes-release/release/v1.4.12/bin/linux/amd64/kubectl
  - 19d49f7b2b99cd2493d5ae0ace896c64e289ccbb@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
  - 42b15a0a0a56531750bde3c7b08d0cf27c170c48@https://kubeupv2.s3.amazonaws.com/kops/1.8.1/linux/amd64/utils.tar.gz
  ClusterName: minimal.example.com
  ConfigBase: memfs://clusters.example.com/minimal.example.com
  InstanceGroupName: nodes
  Tags:
  - _automatic_upgrades
  - _aws
  channels:
  - memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
  protokubeImage:
    hash: 0b1f26208f8f6cc02468368706d0236670fec8a2
    name: protokube:1.8.1
    source: https://kubeupv2.s3.amazonaws.com/kops/1.8.1/images/protokube.tar.gz

  __EOF_KUBE_ENV

  download-release
  echo "== nodeup node config done =="
@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQChnQFuZwHH2dE8HBIaKlSL87xZQqo7pejSCLx4hctII+Dy2Zj4wv54PUDwhVFDl6DjWGwEuyTpppD8eGBE97UNYHAB/5fGRNKj0Bumu6eVsmHWXZV0t3kI+dcnkF04dXdxdFW2Z+RGdhMhgjS0Go87P9jEfNQIBBO6J6B8d6voeHJRLJp/p7r+qAc+oAAjdP1aYlUxHCGc+W/gGfZPFiuNmg1W/QCpATSFXkziIspHM+tFm6ygo7SRc5ArA1+XPfKztzHGNukSjANm+Mz+WXlj4EP/sHWwrm4jzjdIFC07K/2hsqsjYhWBjlYzZDJhovguueqS0+Dmhxca2/3zzASt thunderdome@aws
@@ -0,0 +1,296 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

NODEUP_URL=https://kubeupv2.s3.amazonaws.com/kops/1.8.0/linux/amd64/nodeup
NODEUP_HASH=02185512f78dc9d15a8c10774c4cb11f67e4bc20


export AWS_REGION=us-west-2




function ensure-install-dir() {
  INSTALL_DIR="/var/cache/kubernetes-install"
  # On ContainerOS, we install to /var/lib/toolbox install (because of noexec)
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
  fi
  mkdir -p ${INSTALL_DIR}
  cd ${INSTALL_DIR}
}

# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
  local -r hash="$1"
  shift 1

  urls=( $* )
  while true; do
    for url in "${urls[@]}"; do
      local file="${url##*/}"
      rm -f "${file}"

      if [[ $(which curl) ]]; then
        if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
          echo "== Failed to curl ${url}. Retrying. =="
          break
        fi
      elif [[ $(which wget ) ]]; then
        if ! wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10 "${url}"; then
          echo "== Failed to wget ${url}. Retrying. =="
          break
        fi
      else
        echo "== Could not find curl or wget. Retrying. =="
        break
      fi

      if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
        echo "== Hash validation of ${url} failed. Retrying. =="
      else
        if [[ -n "${hash}" ]]; then
          echo "== Downloaded ${url} (SHA1 = ${hash}) =="
        else
          echo "== Downloaded ${url} =="
        fi
        return
      fi
    done

    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}

validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual

  actual=$(sha1sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}

function split-commas() {
  echo $1 | tr "," "\n"
}

function try-download-release() {
  # TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
  # optimization.

  local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
  local -r nodeup_filename="${nodeup_urls[0]##*/}"
  if [[ -n "${NODEUP_HASH:-}" ]]; then
    local -r nodeup_hash="${NODEUP_HASH}"
  else
    # TODO: Remove?
    echo "Downloading sha1 (not found in env)"
    download-or-bust "" "${nodeup_urls[@]/%/.sha1}"
    local -r nodeup_hash=$(cat "${nodeup_filename}.sha1")
  fi

  echo "Downloading nodeup (${nodeup_urls[@]})"
  download-or-bust "${nodeup_hash}" "${nodeup_urls[@]}"

  chmod +x nodeup
}

function download-release() {
  # In case of failure checking integrity of release, retry.
  until try-download-release; do
    sleep 15
    echo "Couldn't download release. Retrying..."
  done

  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
}

####################################################################################

/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"

echo "== nodeup node config starting =="
ensure-install-dir

cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
docker:
  bridge: ""
  ipMasq: false
  ipTables: false
  logDriver: json-file
  logLevel: warn
  logOpt:
  - max-size=10m
  - max-file=5
  storage: overlay,aufs
  version: 1.13.1
encryptionConfig: null
etcdClusters:
  events:
    version: 2.2.1
  main:
    version: 2.2.1
kubeAPIServer:
  address: 127.0.0.1
  admissionControl:
  - Initializers
  - NamespaceLifecycle
  - LimitRanger
  - ServiceAccount
  - PersistentVolumeLabel
  - DefaultStorageClass
  - DefaultTolerationSeconds
  - NodeRestriction
  - Priority
  - ResourceQuota
  allowPrivileged: true
  anonymousAuth: false
  apiServerCount: 1
  authorizationMode: RBAC
  cloudProvider: aws
  etcdServers:
  - http://127.0.0.1:4001
  etcdServersOverrides:
  - /events#http://127.0.0.1:4002
  image: gcr.io/google_containers/kube-apiserver:v1.8.4
  insecurePort: 8080
  kubeletPreferredAddressTypes:
  - InternalIP
  - Hostname
  - ExternalIP
  logLevel: 2
  requestheaderAllowedNames:
  - aggregator
  requestheaderExtraHeaderPrefixes:
  - X-Remote-Extra-
  requestheaderGroupHeaders:
  - X-Remote-Group
  requestheaderUsernameHeaders:
  - X-Remote-User
  securePort: 443
  serviceClusterIPRange: 100.64.0.0/13
  storageBackend: etcd2
kubeControllerManager:
  allocateNodeCIDRs: true
  attachDetachReconcileSyncPeriod: 1m0s
  cloudProvider: aws
  clusterCIDR: 100.96.0.0/11
  clusterName: k8s-iam.us-west-2.td.priv
  configureCloudRoutes: false
  image: gcr.io/google_containers/kube-controller-manager:v1.8.4
  leaderElection:
    leaderElect: true
  logLevel: 2
  useServiceAccountCredentials: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  featureGates: null
  hostnameOverride: '@aws'
  image: gcr.io/google_containers/kube-proxy:v1.8.4
  logLevel: 2
kubeScheduler:
  image: gcr.io/google_containers/kube-scheduler:v1.8.4
  leaderElection:
    leaderElect: true
  logLevel: 2
kubelet:
  allowPrivileged: true
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    ExperimentalCriticalPodAnnotation: "true"
  hostnameOverride: '@aws'
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  nonMasqueradeCIDR: 100.64.0.0/10
  podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
  podManifestPath: /etc/kubernetes/manifests
  requireKubeconfig: true
masterKubelet:
  allowPrivileged: true
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    ExperimentalCriticalPodAnnotation: "true"
  hostnameOverride: '@aws'
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  nonMasqueradeCIDR: 100.64.0.0/10
  podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
  podManifestPath: /etc/kubernetes/manifests
  registerSchedulable: false
  requireKubeconfig: true

__EOF_CLUSTER_SPEC

cat > ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels:
  kops.k8s.io/instancegroup: master-us-west-2a
taints: null

__EOF_IG_SPEC

cat > kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- 125993c220d1a9b5b60ad20a867a0e7cda63e64c@https://storage.googleapis.com/kubernetes-release/release/v1.8.4/bin/linux/amd64/kubelet
- 8e2314db816b9b4465c5f713c1152cb0603db15e@https://storage.googleapis.com/kubernetes-release/release/v1.8.4/bin/linux/amd64/kubectl
- 1d9788b0f5420e1a219aad2cb8681823fc515e7c@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz
- f62360d3351bed837ae3ffcdee65e9d57511695a@https://kubeupv2.s3.amazonaws.com/kops/1.8.0/linux/amd64/utils.tar.gz
ClusterName: k8s-iam.us-west-2.td.priv
ConfigBase: s3://tune-k8s-kops-test/k8s-iam.us-west-2.td.priv
InstanceGroupName: master-us-west-2a
Tags:
- _automatic_upgrades
- _aws
- _kubernetes_master
- _networking_cni
channels:
- s3://tune-k8s-kops-test/k8s-iam.us-west-2.td.priv/addons/bootstrap-channel.yaml
protokubeImage:
  hash: 1b972e92520b3cafd576893ae3daeafdd1bc9ffd
  name: protokube:1.8.0
  source: https://kubeupv2.s3.amazonaws.com/kops/1.8.0/images/protokube.tar.gz

__EOF_KUBE_ENV

download-release
echo "== nodeup node config done =="
@ -0,0 +1,213 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
NODEUP_URL=https://kubeupv2.s3.amazonaws.com/kops/1.8.1/linux/amd64/nodeup
|
||||
NODEUP_HASH=02185512f78dc9d15a8c10774c4cb11f67e4bc20
|
||||
|
||||
|
||||
export AWS_REGION=us-west-2
|
||||
|
||||
|
||||
|
||||
|
||||
function ensure-install-dir() {
|
||||
INSTALL_DIR="/var/cache/kubernetes-install"
|
||||
# On ContainerOS, we install to /var/lib/toolbox install (because of noexec)
|
||||
if [[ -d /var/lib/toolbox ]]; then
|
||||
INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
|
||||
fi
|
||||
mkdir -p ${INSTALL_DIR}
|
||||
cd ${INSTALL_DIR}
|
||||
}
|
||||
|
||||
# Retry a download until we get it. Takes a hash and a set of URLs.
|
||||
#
|
||||
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
|
||||
# $2+ are the URLs to download.
|
||||
download-or-bust() {
|
||||
local -r hash="$1"
|
||||
shift 1
|
||||
|
||||
urls=( $* )
|
||||
while true; do
|
||||
for url in "${urls[@]}"; do
|
||||
local file="${url##*/}"
|
||||
rm -f "${file}"
|
||||
|
||||
if [[ $(which curl) ]]; then
|
||||
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
|
||||
echo "== Failed to curl ${url}. Retrying. =="
|
||||
break
|
||||
fi
|
||||
elif [[ $(which wget ) ]]; then
|
||||
if ! wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10 "${url}"; then
|
||||
echo "== Failed to wget ${url}. Retrying. =="
|
||||
break
|
||||
fi
|
||||
else
|
||||
echo "== Could not find curl or wget. Retrying. =="
|
||||
break
|
||||
fi
|
||||
|
||||
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
|
||||
echo "== Hash validation of ${url} failed. Retrying. =="
|
||||
else
|
||||
if [[ -n "${hash}" ]]; then
|
||||
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
|
||||
else
|
||||
echo "== Downloaded ${url} =="
|
||||
fi
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
echo "All downloads failed; sleeping before retrying"
|
||||
sleep 60
|
||||
done
|
||||
}
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual

  actual=$(sha1sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}

function split-commas() {
  echo $1 | tr "," "\n"
}

function try-download-release() {
  # TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
  # optimization.

  local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
  local -r nodeup_filename="${nodeup_urls[0]##*/}"
  if [[ -n "${NODEUP_HASH:-}" ]]; then
    local -r nodeup_hash="${NODEUP_HASH}"
  else
    # TODO: Remove?
    echo "Downloading sha1 (not found in env)"
    download-or-bust "" "${nodeup_urls[@]/%/.sha1}"
    local -r nodeup_hash=$(cat "${nodeup_filename}.sha1")
  fi

  echo "Downloading nodeup (${nodeup_urls[@]})"
  download-or-bust "${nodeup_hash}" "${nodeup_urls[@]}"

  chmod +x nodeup
}

function download-release() {
  # In case of failure checking integrity of release, retry.
  until try-download-release; do
    sleep 15
    echo "Couldn't download release. Retrying..."
  done

  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
}

####################################################################################

/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"

echo "== nodeup node config starting =="
ensure-install-dir

cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
docker:
  bridge: ""
  ipMasq: false
  ipTables: false
  logDriver: json-file
  logLevel: warn
  logOpt:
  - max-size=10m
  - max-file=5
  storage: overlay,aufs
  version: 1.13.1
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  featureGates: null
  hostnameOverride: '@aws'
  image: gcr.io/google_containers/kube-proxy:v1.8.4
  logLevel: 2
kubelet:
  allowPrivileged: true
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    ExperimentalCriticalPodAnnotation: "true"
  hostnameOverride: '@aws'
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  nonMasqueradeCIDR: 100.64.0.0/10
  podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
  podManifestPath: /etc/kubernetes/manifests
  requireKubeconfig: true

__EOF_CLUSTER_SPEC

cat > ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels:
  kops.k8s.io/instancegroup: nodes
taints: null

__EOF_IG_SPEC

cat > kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- 125993c220d1a9b5b60ad20a867a0e7cda63e64c@https://storage.googleapis.com/kubernetes-release/release/v1.8.4/bin/linux/amd64/kubelet
- 8e2314db816b9b4465c5f713c1152cb0603db15e@https://storage.googleapis.com/kubernetes-release/release/v1.8.4/bin/linux/amd64/kubectl
- 1d9788b0f5420e1a219aad2cb8681823fc515e7c@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz
- f62360d3351bed837ae3ffcdee65e9d57511695a@https://kubeupv2.s3.amazonaws.com/kops/1.8.0/linux/amd64/utils.tar.gz
ClusterName: k8s-iam.us-west-2.td.priv
ConfigBase: s3://tune-k8s-kops-test/k8s-iam.us-west-2.td.priv
InstanceGroupName: nodes
Tags:
- _automatic_upgrades
- _aws
- _networking_cni
channels:
- s3://tune-k8s-kops-test/k8s-iam.us-west-2.td.priv/addons/bootstrap-channel.yaml
protokubeImage:
  hash: 1b972e92520b3cafd576893ae3daeafdd1bc9ffd
  name: protokube:1.8.0
  source: https://kubeupv2.s3.amazonaws.com/kops/1.8.0/images/protokube.tar.gz

__EOF_KUBE_ENV

download-release
echo "== nodeup node config done =="

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

@@ -0,0 +1,81 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
  creationTimestamp: "2016-12-10T22:42:27Z"
  name: minimal.example.com
spec:
  kubernetesApiAccess:
  - 0.0.0.0/0
  channel: stable
  cloudProvider: aws
  configBase: memfs://clusters.example.com/minimal.example.com
  etcdClusters:
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    name: main
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    name: events
  kubernetesVersion: v1.4.12
  masterInternalName: api.internal.minimal.example.com
  masterPublicName: api.minimal.example.com
  networkCIDR: 172.20.0.0/16
  networking:
    kubenet: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  sshAccess:
  - 0.0.0.0/0
  topology:
    masters: public
    nodes: public
  subnets:
  - cidr: 172.20.32.0/19
    name: us-test-1a
    type: Public
    zone: us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-10T22:42:28Z"
  name: nodes
  labels:
    kops.k8s.io/cluster: minimal.example.com
spec:
  associatePublicIp: true
  iam:
    profile: arn:aws:iam::422917490108:instance-profile/kops-custom-node-role
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.medium
  maxSize: 2
  minSize: 2
  role: Node
  subnets:
  - us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-10T22:42:28Z"
  name: master-us-test-1a
  labels:
    kops.k8s.io/cluster: minimal.example.com
spec:
  associatePublicIp: true
  iam:
    profile: arn:aws:iam::422917490108:instance-profile/kops-custom-master-role
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  subnets:
  - us-test-1a

@@ -0,0 +1,684 @@
output "cluster_name" {
  value = "k8s-iam.us-west-2.td.priv"
}

output "master_security_group_ids" {
  value = ["${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"]
}

output "masters_role_arn" {
  value = "${aws_iam_role.masters-k8s-iam-us-west-2-td-priv.arn}"
}

output "masters_role_name" {
  value = "${aws_iam_role.masters-k8s-iam-us-west-2-td-priv.name}"
}

output "node_security_group_ids" {
  value = ["${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"]
}

output "node_subnet_ids" {
  value = ["${aws_subnet.us-west-2a-k8s-iam-us-west-2-td-priv.id}", "${aws_subnet.us-west-2b-k8s-iam-us-west-2-td-priv.id}", "${aws_subnet.us-west-2c-k8s-iam-us-west-2-td-priv.id}"]
}

output "nodes_role_arn" {
  value = "${aws_iam_role.nodes-k8s-iam-us-west-2-td-priv.arn}"
}

output "nodes_role_name" {
  value = "${aws_iam_role.nodes-k8s-iam-us-west-2-td-priv.name}"
}

output "region" {
  value = "us-west-2"
}

output "vpc_id" {
  value = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
}

provider "aws" {
  region = "us-west-2"
}

resource "aws_autoscaling_attachment" "master-us-west-2a-masters-k8s-iam-us-west-2-td-priv" {
  elb = "${aws_elb.api-k8s-iam-us-west-2-td-priv.id}"
  autoscaling_group_name = "${aws_autoscaling_group.master-us-west-2a-masters-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_autoscaling_group" "master-us-west-2a-masters-k8s-iam-us-west-2-td-priv" {
  name = "master-us-west-2a.masters.k8s-iam.us-west-2.td.priv"
  launch_configuration = "${aws_launch_configuration.master-us-west-2a-masters-k8s-iam-us-west-2-td-priv.id}"
  max_size = 1
  min_size = 1
  vpc_zone_identifier = ["${aws_subnet.us-west-2a-k8s-iam-us-west-2-td-priv.id}"]

  tag = {
    key = "KubernetesCluster"
    value = "k8s-iam.us-west-2.td.priv"
    propagate_at_launch = true
  }

  tag = {
    key = "Name"
    value = "master-us-west-2a.masters.k8s-iam.us-west-2.td.priv"
    propagate_at_launch = true
  }

  tag = {
    key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup"
    value = "master-us-west-2a"
    propagate_at_launch = true
  }

  tag = {
    key = "k8s.io/role/master"
    value = "1"
    propagate_at_launch = true
  }

  tag = {
    key = "terraform"
    value = "true"
    propagate_at_launch = true
  }
}

resource "aws_autoscaling_group" "nodes-k8s-iam-us-west-2-td-priv" {
  name = "nodes.k8s-iam.us-west-2.td.priv"
  launch_configuration = "${aws_launch_configuration.nodes-k8s-iam-us-west-2-td-priv.id}"
  max_size = 2
  min_size = 2
  vpc_zone_identifier = ["${aws_subnet.us-west-2a-k8s-iam-us-west-2-td-priv.id}", "${aws_subnet.us-west-2b-k8s-iam-us-west-2-td-priv.id}", "${aws_subnet.us-west-2c-k8s-iam-us-west-2-td-priv.id}"]

  tag = {
    key = "KubernetesCluster"
    value = "k8s-iam.us-west-2.td.priv"
    propagate_at_launch = true
  }

  tag = {
    key = "Name"
    value = "nodes.k8s-iam.us-west-2.td.priv"
    propagate_at_launch = true
  }

  tag = {
    key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup"
    value = "nodes"
    propagate_at_launch = true
  }

  tag = {
    key = "k8s.io/role/node"
    value = "1"
    propagate_at_launch = true
  }

  tag = {
    key = "terraform"
    value = "true"
    propagate_at_launch = true
  }
}

resource "aws_ebs_volume" "a-etcd-events-k8s-iam-us-west-2-td-priv" {
  availability_zone = "us-west-2a"
  size = 20
  type = "gp2"
  encrypted = false

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "a.etcd-events.k8s-iam.us-west-2.td.priv"
    "k8s.io/etcd/events" = "a/a"
    "k8s.io/role/master" = "1"
    terraform = "true"
  }
}

resource "aws_ebs_volume" "a-etcd-main-k8s-iam-us-west-2-td-priv" {
  availability_zone = "us-west-2a"
  size = 20
  type = "gp2"
  encrypted = false

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "a.etcd-main.k8s-iam.us-west-2.td.priv"
    "k8s.io/etcd/main" = "a/a"
    "k8s.io/role/master" = "1"
    terraform = "true"
  }
}

resource "aws_eip" "us-west-2a-k8s-iam-us-west-2-td-priv" {
  vpc = true
}

resource "aws_eip" "us-west-2b-k8s-iam-us-west-2-td-priv" {
  vpc = true
}

resource "aws_eip" "us-west-2c-k8s-iam-us-west-2-td-priv" {
  vpc = true
}

resource "aws_elb" "api-k8s-iam-us-west-2-td-priv" {
  name = "api-k8s-iam-us-west-2-td--a7fd54"

  listener = {
    instance_port = 443
    instance_protocol = "TCP"
    lb_port = 443
    lb_protocol = "TCP"
  }

  security_groups = ["${aws_security_group.api-elb-k8s-iam-us-west-2-td-priv.id}"]
  subnets = ["${aws_subnet.us-west-2a-k8s-iam-us-west-2-td-priv.id}", "${aws_subnet.us-west-2b-k8s-iam-us-west-2-td-priv.id}", "${aws_subnet.us-west-2c-k8s-iam-us-west-2-td-priv.id}"]
  internal = true

  health_check = {
    target = "SSL:443"
    healthy_threshold = 2
    unhealthy_threshold = 2
    interval = 10
    timeout = 5
  }

  idle_timeout = 300

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "api.k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_iam_instance_profile" "masters-k8s-iam-us-west-2-td-priv" {
  name = "masters.k8s-iam.us-west-2.td.priv"
  role = "${aws_iam_role.masters-k8s-iam-us-west-2-td-priv.name}"
}

resource "aws_iam_instance_profile" "nodes-k8s-iam-us-west-2-td-priv" {
  name = "nodes.k8s-iam.us-west-2.td.priv"
  role = "${aws_iam_role.nodes-k8s-iam-us-west-2-td-priv.name}"
}

resource "aws_iam_role" "masters-k8s-iam-us-west-2-td-priv" {
  name = "masters.k8s-iam.us-west-2.td.priv"
  assume_role_policy = "${file("${path.module}/data/aws_iam_role_masters.k8s-iam.us-west-2.td.priv_policy")}"
}

resource "aws_iam_role" "nodes-k8s-iam-us-west-2-td-priv" {
  name = "nodes.k8s-iam.us-west-2.td.priv"
  assume_role_policy = "${file("${path.module}/data/aws_iam_role_nodes.k8s-iam.us-west-2.td.priv_policy")}"
}

resource "aws_iam_role_policy" "masters-k8s-iam-us-west-2-td-priv" {
  name = "masters.k8s-iam.us-west-2.td.priv"
  role = "${aws_iam_role.masters-k8s-iam-us-west-2-td-priv.name}"
  policy = "${file("${path.module}/data/aws_iam_role_policy_masters.k8s-iam.us-west-2.td.priv_policy")}"
}

resource "aws_iam_role_policy" "nodes-k8s-iam-us-west-2-td-priv" {
  name = "nodes.k8s-iam.us-west-2.td.priv"
  role = "${aws_iam_role.nodes-k8s-iam-us-west-2-td-priv.name}"
  policy = "${file("${path.module}/data/aws_iam_role_policy_nodes.k8s-iam.us-west-2.td.priv_policy")}"
}

resource "aws_internet_gateway" "k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_key_pair" "kubernetes-k8s-iam-us-west-2-td-priv-ad4e821eea9c965ed12a95b3bde99ed3" {
  key_name = "kubernetes.k8s-iam.us-west-2.td.priv-ad:4e:82:1e:ea:9c:96:5e:d1:2a:95:b3:bd:e9:9e:d3"
  public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.k8s-iam.us-west-2.td.priv-ad4e821eea9c965ed12a95b3bde99ed3_public_key")}"
}

resource "aws_launch_configuration" "master-us-west-2a-masters-k8s-iam-us-west-2-td-priv" {
  name_prefix = "master-us-west-2a.masters.k8s-iam.us-west-2.td.priv-"
  image_id = "ami-06a57e7e"
  instance_type = "t2.medium"
  key_name = "${aws_key_pair.kubernetes-k8s-iam-us-west-2-td-priv-ad4e821eea9c965ed12a95b3bde99ed3.id}"
  iam_instance_profile = "${aws_iam_instance_profile.masters-k8s-iam-us-west-2-td-priv.id}"
  security_groups = ["${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"]
  associate_public_ip_address = false
  user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-west-2a.masters.k8s-iam.us-west-2.td.priv_user_data")}"

  root_block_device = {
    volume_type = "gp2"
    volume_size = 64
    delete_on_termination = true
  }

  lifecycle = {
    create_before_destroy = true
  }
}

resource "aws_launch_configuration" "nodes-k8s-iam-us-west-2-td-priv" {
  name_prefix = "nodes.k8s-iam.us-west-2.td.priv-"
  image_id = "ami-06a57e7e"
  instance_type = "t2.medium"
  key_name = "${aws_key_pair.kubernetes-k8s-iam-us-west-2-td-priv-ad4e821eea9c965ed12a95b3bde99ed3.id}"
  iam_instance_profile = "${aws_iam_instance_profile.nodes-k8s-iam-us-west-2-td-priv.id}"
  security_groups = ["${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"]
  associate_public_ip_address = false
  user_data = "${file("${path.module}/data/aws_launch_configuration_nodes.k8s-iam.us-west-2.td.priv_user_data")}"

  root_block_device = {
    volume_type = "gp2"
    volume_size = 128
    delete_on_termination = true
  }

  lifecycle = {
    create_before_destroy = true
  }
}

resource "aws_nat_gateway" "us-west-2a-k8s-iam-us-west-2-td-priv" {
  allocation_id = "${aws_eip.us-west-2a-k8s-iam-us-west-2-td-priv.id}"
  subnet_id = "${aws_subnet.utility-us-west-2a-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_nat_gateway" "us-west-2b-k8s-iam-us-west-2-td-priv" {
  allocation_id = "${aws_eip.us-west-2b-k8s-iam-us-west-2-td-priv.id}"
  subnet_id = "${aws_subnet.utility-us-west-2b-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_nat_gateway" "us-west-2c-k8s-iam-us-west-2-td-priv" {
  allocation_id = "${aws_eip.us-west-2c-k8s-iam-us-west-2-td-priv.id}"
  subnet_id = "${aws_subnet.utility-us-west-2c-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route" "0-0-0-0--0" {
  route_table_id = "${aws_route_table.k8s-iam-us-west-2-td-priv.id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id = "${aws_internet_gateway.k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route" "private-us-west-2a-0-0-0-0--0" {
  route_table_id = "${aws_route_table.private-us-west-2a-k8s-iam-us-west-2-td-priv.id}"
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id = "${aws_nat_gateway.us-west-2a-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route" "private-us-west-2b-0-0-0-0--0" {
  route_table_id = "${aws_route_table.private-us-west-2b-k8s-iam-us-west-2-td-priv.id}"
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id = "${aws_nat_gateway.us-west-2b-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route" "private-us-west-2c-0-0-0-0--0" {
  route_table_id = "${aws_route_table.private-us-west-2c-k8s-iam-us-west-2-td-priv.id}"
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id = "${aws_nat_gateway.us-west-2c-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route53_record" "api-k8s-iam-us-west-2-td-priv" {
  name = "api.k8s-iam.us-west-2.td.priv"
  type = "A"

  alias = {
    name = "${aws_elb.api-k8s-iam-us-west-2-td-priv.dns_name}"
    zone_id = "${aws_elb.api-k8s-iam-us-west-2-td-priv.zone_id}"
    evaluate_target_health = false
  }

  zone_id = "/hostedzone/Z1WJ08IMPUI44S"
}

resource "aws_route53_zone_association" "us-west-2-td-priv" {
  zone_id = "/hostedzone/Z1WJ08IMPUI44S"
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route_table" "k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_route_table" "private-us-west-2a-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "private-us-west-2a.k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_route_table" "private-us-west-2b-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "private-us-west-2b.k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_route_table" "private-us-west-2c-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "private-us-west-2c.k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_route_table_association" "private-us-west-2a-k8s-iam-us-west-2-td-priv" {
  subnet_id = "${aws_subnet.us-west-2a-k8s-iam-us-west-2-td-priv.id}"
  route_table_id = "${aws_route_table.private-us-west-2a-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route_table_association" "private-us-west-2b-k8s-iam-us-west-2-td-priv" {
  subnet_id = "${aws_subnet.us-west-2b-k8s-iam-us-west-2-td-priv.id}"
  route_table_id = "${aws_route_table.private-us-west-2b-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route_table_association" "private-us-west-2c-k8s-iam-us-west-2-td-priv" {
  subnet_id = "${aws_subnet.us-west-2c-k8s-iam-us-west-2-td-priv.id}"
  route_table_id = "${aws_route_table.private-us-west-2c-k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route_table_association" "utility-us-west-2a-k8s-iam-us-west-2-td-priv" {
  subnet_id = "${aws_subnet.utility-us-west-2a-k8s-iam-us-west-2-td-priv.id}"
  route_table_id = "${aws_route_table.k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route_table_association" "utility-us-west-2b-k8s-iam-us-west-2-td-priv" {
  subnet_id = "${aws_subnet.utility-us-west-2b-k8s-iam-us-west-2-td-priv.id}"
  route_table_id = "${aws_route_table.k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_route_table_association" "utility-us-west-2c-k8s-iam-us-west-2-td-priv" {
  subnet_id = "${aws_subnet.utility-us-west-2c-k8s-iam-us-west-2-td-priv.id}"
  route_table_id = "${aws_route_table.k8s-iam-us-west-2-td-priv.id}"
}

resource "aws_security_group" "api-elb-k8s-iam-us-west-2-td-priv" {
  name = "api-elb.k8s-iam.us-west-2.td.priv"
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  description = "Security group for api ELB"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "api-elb.k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_security_group" "masters-k8s-iam-us-west-2-td-priv" {
  name = "masters.k8s-iam.us-west-2.td.priv"
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  description = "Security group for masters"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "masters.k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_security_group" "nodes-k8s-iam-us-west-2-td-priv" {
  name = "nodes.k8s-iam.us-west-2.td.priv"
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  description = "Security group for nodes"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "nodes.k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_security_group_rule" "all-master-to-master" {
  type = "ingress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  from_port = 0
  to_port = 0
  protocol = "-1"
}

resource "aws_security_group_rule" "all-master-to-node" {
  type = "ingress"
  security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  from_port = 0
  to_port = 0
  protocol = "-1"
}

resource "aws_security_group_rule" "all-node-to-node" {
  type = "ingress"
  security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  from_port = 0
  to_port = 0
  protocol = "-1"
}

resource "aws_security_group_rule" "api-elb-egress" {
  type = "egress"
  security_group_id = "${aws_security_group.api-elb-k8s-iam-us-west-2-td-priv.id}"
  from_port = 0
  to_port = 0
  protocol = "-1"
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
  type = "ingress"
  security_group_id = "${aws_security_group.api-elb-k8s-iam-us-west-2-td-priv.id}"
  from_port = 443
  to_port = 443
  protocol = "tcp"
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "https-elb-to-master" {
  type = "ingress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.api-elb-k8s-iam-us-west-2-td-priv.id}"
  from_port = 443
  to_port = 443
  protocol = "tcp"
}

resource "aws_security_group_rule" "master-egress" {
  type = "egress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  from_port = 0
  to_port = 0
  protocol = "-1"
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-egress" {
  type = "egress"
  security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  from_port = 0
  to_port = 0
  protocol = "-1"
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-protocol-ipip" {
  type = "ingress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  from_port = 0
  to_port = 65535
  protocol = "4"
}

resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
  type = "ingress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  from_port = 1
  to_port = 2379
  protocol = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-2382-4001" {
  type = "ingress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  from_port = 2382
  to_port = 4001
  protocol = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
  type = "ingress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  from_port = 4003
  to_port = 65535
  protocol = "tcp"
}

resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
  type = "ingress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  source_security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  from_port = 1
  to_port = 65535
  protocol = "udp"
}

resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
  type = "ingress"
  security_group_id = "${aws_security_group.masters-k8s-iam-us-west-2-td-priv.id}"
  from_port = 22
  to_port = 22
  protocol = "tcp"
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "ssh-external-to-node-0-0-0-0--0" {
  type = "ingress"
  security_group_id = "${aws_security_group.nodes-k8s-iam-us-west-2-td-priv.id}"
  from_port = 22
  to_port = 22
  protocol = "tcp"
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_subnet" "us-west-2a-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  cidr_block = "10.203.32.0/19"
  availability_zone = "us-west-2a"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "us-west-2a.k8s-iam.us-west-2.td.priv"
    "kubernetes.io/cluster/k8s-iam.us-west-2.td.priv" = "owned"
    "kubernetes.io/role/internal-elb" = "1"
  }
}

resource "aws_subnet" "us-west-2b-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  cidr_block = "10.203.64.0/19"
  availability_zone = "us-west-2b"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "us-west-2b.k8s-iam.us-west-2.td.priv"
    "kubernetes.io/cluster/k8s-iam.us-west-2.td.priv" = "owned"
    "kubernetes.io/role/internal-elb" = "1"
  }
}

resource "aws_subnet" "us-west-2c-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  cidr_block = "10.203.96.0/19"
  availability_zone = "us-west-2c"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "us-west-2c.k8s-iam.us-west-2.td.priv"
    "kubernetes.io/cluster/k8s-iam.us-west-2.td.priv" = "owned"
    "kubernetes.io/role/internal-elb" = "1"
  }
}

resource "aws_subnet" "utility-us-west-2a-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  cidr_block = "10.203.0.0/22"
  availability_zone = "us-west-2a"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "utility-us-west-2a.k8s-iam.us-west-2.td.priv"
    "kubernetes.io/cluster/k8s-iam.us-west-2.td.priv" = "owned"
    "kubernetes.io/role/elb" = "1"
  }
}

resource "aws_subnet" "utility-us-west-2b-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  cidr_block = "10.203.4.0/22"
  availability_zone = "us-west-2b"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "utility-us-west-2b.k8s-iam.us-west-2.td.priv"
    "kubernetes.io/cluster/k8s-iam.us-west-2.td.priv" = "owned"
    "kubernetes.io/role/elb" = "1"
  }
}

resource "aws_subnet" "utility-us-west-2c-k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  cidr_block = "10.203.8.0/22"
  availability_zone = "us-west-2c"

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "utility-us-west-2c.k8s-iam.us-west-2.td.priv"
    "kubernetes.io/cluster/k8s-iam.us-west-2.td.priv" = "owned"
    "kubernetes.io/role/elb" = "1"
  }
}

resource "aws_vpc" "k8s-iam-us-west-2-td-priv" {
  cidr_block = "10.203.0.0/16"
  enable_dns_hostnames = true
  enable_dns_support = true

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "k8s-iam.us-west-2.td.priv"
    "kubernetes.io/cluster/k8s-iam.us-west-2.td.priv" = "owned"
  }
}

resource "aws_vpc_dhcp_options" "k8s-iam-us-west-2-td-priv" {
  domain_name = "us-west-2.compute.internal"
  domain_name_servers = ["AmazonProvidedDNS"]

  tags = {
    KubernetesCluster = "k8s-iam.us-west-2.td.priv"
    Name = "k8s-iam.us-west-2.td.priv"
  }
}

resource "aws_vpc_dhcp_options_association" "k8s-iam-us-west-2-td-priv" {
  vpc_id = "${aws_vpc.k8s-iam-us-west-2-td-priv.id}"
  dhcp_options_id = "${aws_vpc_dhcp_options.k8s-iam-us-west-2-td-priv.id}"
}

terraform = {
  required_version = ">= 0.9.3"
}

@@ -35,7 +35,8 @@ type IAMInstanceProfile struct {
 	Name      *string
 	Lifecycle *fi.Lifecycle

-	ID *string
+	ID     *string
+	Shared *bool
 }

 var _ fi.CompareWithID = &IAMInstanceProfile{}

@@ -85,6 +86,7 @@ func (e *IAMInstanceProfile) Find(c *fi.Context) (*IAMInstanceProfile, error) {

 	// Avoid spurious changes
 	actual.Lifecycle = e.Lifecycle
+	actual.Shared = e.Shared

 	return actual, nil
 }

@@ -95,7 +97,7 @@ func (e *IAMInstanceProfile) Run(c *fi.Context) error {

 func (s *IAMInstanceProfile) CheckChanges(a, e, changes *IAMInstanceProfile) error {
 	if a != nil {
-		if fi.StringValue(e.Name) == "" {
+		if fi.StringValue(e.Name) == "" && !fi.BoolValue(e.Shared) {
 			return fi.RequiredField("Name")
 		}
 	}

@@ -103,7 +105,11 @@ func (s *IAMInstanceProfile) CheckChanges(a, e, changes *IAMInstanceProfile) err
 }

 func (_ *IAMInstanceProfile) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *IAMInstanceProfile) error {
-	if a == nil {
+	if fi.BoolValue(e.Shared) {
+		if a == nil {
+			return fmt.Errorf("instance role profile with id %q not found", fi.StringValue(e.ID))
+		}
+	} else if a == nil {
 		glog.V(2).Infof("Creating IAMInstanceProfile with Name:%q", *e.Name)

 		request := &iam.CreateInstanceProfileInput{

@@ -154,6 +160,9 @@ func (_ *IAMInstanceProfile) RenderTerraform(t *terraform.TerraformTarget, a, e,
 }

 func (e *IAMInstanceProfile) TerraformLink() *terraform.Literal {
+	if fi.BoolValue(e.Shared) {
+		return terraform.LiteralFromStringValue(fi.StringValue(e.Name))
+	}
 	return terraform.LiteralProperty("aws_iam_instance_profile", *e.Name, "id")
 }

@@ -163,5 +172,8 @@ func (_ *IAMInstanceProfile) RenderCloudformation(t *cloudformation.Cloudformati
 }

 func (e *IAMInstanceProfile) CloudformationLink() *cloudformation.Literal {
-	return cloudformation.Ref("AWS::IAM::InstanceProfile", *e.Name)
+	if fi.BoolValue(e.Shared) {
+		return cloudformation.LiteralString(fi.StringValue(e.Name))
+	}
+	return cloudformation.Ref("AWS::IAM::InstanceProfile", fi.StringValue(e.Name))
 }
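
Taken together, these hunks make Shared gate both validation and rendering: a shared profile must already exist (kops will not create it), and links render as the bare profile name instead of a reference to a kops-managed resource. A minimal sketch of the two paths (not code from this commit; it assumes kops' in-tree package layout and the existing fi.String/fi.Bool helpers):

package main

import (
	"fmt"

	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
)

func main() {
	// Owned profile: TerraformLink returns a reference to the
	// aws_iam_instance_profile resource that kops manages.
	owned := &awstasks.IAMInstanceProfile{
		Name: fi.String("nodes.k8s-iam.us-west-2.td.priv"),
	}
	fmt.Printf("owned: %v\n", owned.TerraformLink())

	// Shared (pre-existing) profile: TerraformLink returns the name as a
	// plain literal, and no aws_iam_instance_profile resource is emitted.
	shared := &awstasks.IAMInstanceProfile{
		Name:   fi.String("kops-custom-node-role"),
		Shared: fi.Bool(true),
	}
	fmt.Printf("shared: %v\n", shared.TerraformLink())
}

With Shared unset, TerraformLink() still returns the aws_iam_instance_profile property reference, so existing clusters render unchanged.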