diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go index 6b01672fe0..279a06809c 100644 --- a/cmd/kops/integration_test.go +++ b/cmd/kops/integration_test.go @@ -116,9 +116,10 @@ func (i *integrationTest) withPrivate() *integrationTest { // withServiceAccountRoles indicates we expect to assign an IAM role for a ServiceAccount (instead of just using the node roles) func (i *integrationTest) withServiceAccountRole(sa string, inlinePolicy bool) *integrationTest { - i.expectServiceAccountRolePolicies = append(i.expectServiceAccountRolePolicies, fmt.Sprintf("aws_iam_role_%s.sa.%s_policy", sa, i.clusterName)) + role := truncate.TruncateString(sa+".sa."+i.clusterName, truncate.TruncateStringOptions{MaxLength: iam.MaxLengthIAMRoleName, AlwaysAddHash: false}) + i.expectServiceAccountRolePolicies = append(i.expectServiceAccountRolePolicies, fmt.Sprintf("aws_iam_role_%s_policy", role)) if inlinePolicy { - i.expectServiceAccountRolePolicies = append(i.expectServiceAccountRolePolicies, fmt.Sprintf("aws_iam_role_policy_%s.sa.%s_policy", sa, i.clusterName)) + i.expectServiceAccountRolePolicies = append(i.expectServiceAccountRolePolicies, fmt.Sprintf("aws_iam_role_policy_%s_policy", role)) } return i } @@ -620,10 +621,10 @@ func TestManyAddons(t *testing.T) { "certmanager.io-k8s-1.16", "cluster-autoscaler.addons.k8s.io-k8s-1.15", "networking.amazon-vpc-routed-eni-k8s-1.16", - "node-termination-handler.aws-k8s-1.11", "snapshot-controller.addons.k8s.io-k8s-1.20", metricsServerAddon, dnsControllerAddon). + withNTH(). runTestTerraformAWS(t) } @@ -642,12 +643,12 @@ func TestManyAddonsCCMIRSA(t *testing.T) { "certmanager.io-k8s-1.16", "cluster-autoscaler.addons.k8s.io-k8s-1.15", "networking.amazon-vpc-routed-eni-k8s-1.16", - "node-termination-handler.aws-k8s-1.11", "snapshot-controller.addons.k8s.io-k8s-1.20", "aws-cloud-controller.addons.k8s.io-k8s-1.18", metricsServerAddon, dnsControllerAddon, ). + withNTH(). runTestTerraformAWS(t) } @@ -666,13 +667,13 @@ func TestManyAddonsCCMIRSA23(t *testing.T) { "certmanager.io-k8s-1.16", "cluster-autoscaler.addons.k8s.io-k8s-1.15", "networking.amazon-vpc-routed-eni-k8s-1.16", - "node-termination-handler.aws-k8s-1.11", "snapshot-controller.addons.k8s.io-k8s-1.20", "aws-cloud-controller.addons.k8s.io-k8s-1.18", leaderElectionAddon, metricsServerAddon, dnsControllerAddon, ). + withNTH(). runTestTerraformAWS(t) } @@ -691,13 +692,13 @@ func TestManyAddonsCCMIRSA24(t *testing.T) { "certmanager.io-k8s-1.16", "cluster-autoscaler.addons.k8s.io-k8s-1.15", "networking.amazon-vpc-routed-eni-k8s-1.16", - "node-termination-handler.aws-k8s-1.11", "snapshot-controller.addons.k8s.io-k8s-1.20", "aws-cloud-controller.addons.k8s.io-k8s-1.18", leaderElectionAddon, metricsServerAddon, dnsControllerAddon, ). + withNTH(). runTestTerraformAWS(t) } @@ -716,13 +717,13 @@ func TestManyAddonsCCMIRSA25(t *testing.T) { "certmanager.io-k8s-1.16", "cluster-autoscaler.addons.k8s.io-k8s-1.15", "networking.amazon-vpc-routed-eni-k8s-1.16", - "node-termination-handler.aws-k8s-1.11", "snapshot-controller.addons.k8s.io-k8s-1.20", "aws-cloud-controller.addons.k8s.io-k8s-1.18", leaderElectionAddon, metricsServerAddon, dnsControllerAddon, ). + withNTH(). 
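Note on the role-name expectation above: `<serviceAccount>.sa.<clusterName>` can exceed the AWS IAM role-name limit, so the test now builds the expected Terraform resource name through `truncate.TruncateString`. A minimal sketch of that behavior, assuming `iam.MaxLengthIAMRoleName` is the 64-character AWS limit and that a short hash suffix is appended only when the name is shortened — the `truncateString` helper below is a hypothetical stand-in for illustration, not kops' implementation:

    // Hypothetical stand-in for kops' truncate.TruncateString; assumes a
    // 64-character limit (AWS IAM role names) and a short hash suffix that
    // is appended only when the input has to be shortened.
    package main

    import (
    	"crypto/sha256"
    	"fmt"
    )

    const maxLengthIAMRoleName = 64 // assumed value of iam.MaxLengthIAMRoleName

    func truncateString(s string, maxLength int) string {
    	if len(s) <= maxLength {
    		return s
    	}
    	hash := fmt.Sprintf("%x", sha256.Sum256([]byte(s)))[:6]
    	return s[:maxLength-len(hash)-1] + "-" + hash
    }

    func main() {
    	role := truncateString("aws-node-termination-handler.kube-system.sa.nthimdsprocessor.longclustername.example.com", maxLengthIAMRoleName)
    	fmt.Println(len(role) <= maxLengthIAMRoleName) // true: always fits the limit
    	fmt.Printf("aws_iam_role_%s_policy\n", role)   // shape of the expected Terraform key
    }
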
runTestTerraformAWS(t) } @@ -742,12 +743,12 @@ func TestManyAddonsCCMIRSA26(t *testing.T) { "certmanager.io-k8s-1.16", "cluster-autoscaler.addons.k8s.io-k8s-1.15", "networking.amazon-vpc-routed-eni-k8s-1.16", - "node-termination-handler.aws-k8s-1.11", "snapshot-controller.addons.k8s.io-k8s-1.20", "aws-cloud-controller.addons.k8s.io-k8s-1.18", metricsServerAddon, dnsControllerAddon, ). + withNTH(). runTestTerraformAWS(t) } @@ -773,12 +774,12 @@ func TestCCM(t *testing.T) { "certmanager.io-k8s-1.16", "cluster-autoscaler.addons.k8s.io-k8s-1.15", "networking.amazon-vpc-routed-eni-k8s-1.16", - "node-termination-handler.aws-k8s-1.11", "snapshot-controller.addons.k8s.io-k8s-1.20", "aws-cloud-controller.addons.k8s.io-k8s-1.18", dnsControllerAddon, metricsServerAddon, ). + withNTH(). runTestTerraformAWS(t) } @@ -934,11 +935,26 @@ func TestAPIServerNodes(t *testing.T) { runTestTerraformAWS(t) } -// TestNTHQueueProcessor tests the output for resources required by NTH Queue Processor mode -func TestNTHQueueProcessor(t *testing.T) { - newIntegrationTest("nthsqsresources.longclustername.example.com", "nth_sqs_resources"). - withNTH(). - withAddons(dnsControllerAddon). +// TestNTHIMDSProcessor tests the output for resources required by NTH IMDS Processor mode +func TestNTHIMDSProcessor(t *testing.T) { + newIntegrationTest("nthimdsprocessor.longclustername.example.com", "nth-imds-processor"). + withAddons( + dnsControllerAddon, + "node-termination-handler.aws-k8s-1.11", + ). + runTestTerraformAWS(t) +} + +// TestNTHIMDSProcessorIRSA tests the output for resources required by NTH IMDS Processor mode with IRSA +func TestNTHIMDSProcessorIRSA(t *testing.T) { + newIntegrationTest("nthimdsprocessor.longclustername.example.com", "nth-imds-processor-irsa"). + withOIDCDiscovery(). + withServiceAccountRole("dns-controller.kube-system", true). + withServiceAccountRole("aws-node-termination-handler.kube-system", true). + withAddons( + dnsControllerAddon, + "node-termination-handler.aws-k8s-1.11", + ). runTestTerraformAWS(t) } diff --git a/cmd/kops/lifecycle_integration_test.go b/cmd/kops/lifecycle_integration_test.go index a47ace9ae9..78a0cfa80d 100644 --- a/cmd/kops/lifecycle_integration_test.go +++ b/cmd/kops/lifecycle_integration_test.go @@ -171,12 +171,12 @@ func TestLifecyclePrivateSharedIP(t *testing.T) { }) } -// TestLifecycleNodeTerminationHandlerQueueProcessor runs the test on a cluster with requisite resources for NTH Queue Processor -func TestLifecycleNodeTerminationHandlerQueueProcessor(t *testing.T) { +// TestLifecycleManyAddons runs the test on a cluster with requisite resources for NTH Queue Processor and other addons. +func TestLifecycleManyAddons(t *testing.T) { runLifecycleTestAWS(&LifecycleTestOptions{ t: t, - SrcDir: "nth_sqs_resources", - ClusterName: "nthsqsresources.longclustername.example.com", + SrcDir: "many-addons", + ClusterName: "minimal.example.com", }) } diff --git a/docs/addons.md b/docs/addons.md index 9610993db1..f65e6677da 100644 --- a/docs/addons.md +++ b/docs/addons.md @@ -250,7 +250,7 @@ spec: {{ kops_feature_table(kops_added_default='1.21') }} -If `enableSQSTerminationDraining` is true Node Termination Handler will operate in Queue Processor mode. In addition to the events mentioned above, Queue Processor mode allows Node Termination Handler to take care of ASG Scale-In, AZ-Rebalance, Unhealthy Instances, EC2 Instance Termination via the API or Console, and more. kOps will provision the necessary infrastructure: an SQS queue, EventBridge rules, and ASG Lifecycle hooks. 
`managedASGTag` can be configured with Queue Processor mode to distinguish resource ownership between multiple clusters. +If `enableSQSTerminationDraining` is not set to `false`, Node Termination Handler will operate in Queue Processor mode. In addition to the events mentioned above, Queue Processor mode allows Node Termination Handler to take care of ASG Scale-In, AZ-Rebalance, Unhealthy Instances, EC2 Instance Termination via the API or Console, and more. kOps will provision the necessary infrastructure: an SQS queue, EventBridge rules, and ASG Lifecycle hooks. `managedASGTag` can be configured with Queue Processor mode to distinguish resource ownership between multiple clusters. The kOps CLI requires additional IAM permissions to manage the requisite EventBridge rules and SQS queue: diff --git a/docs/releases/1.26-NOTES.md b/docs/releases/1.26-NOTES.md index 628711a770..16717f520d 100644 --- a/docs/releases/1.26-NOTES.md +++ b/docs/releases/1.26-NOTES.md @@ -29,6 +29,8 @@ with "control-plane-". The names of groups for existing clusters are unchanged. * New clusters can more easily be configured to use Cilium in ENI mode by setting `--networking=cilium-eni`. +* Node Termination Handler now defaults to Queue Processor mode. + ## GCP * The default instance type is now `e2-medium` for control-plane and worker nodes, and `e2-micro` for bastions. diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index 80d09ff87e..d93b3517d3 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -5346,7 +5346,7 @@ spec: enableSQSTerminationDraining: description: 'EnableSQSTerminationDraining enables queue-processor mode which drains nodes when an SQS termination event is received. - Default: false' + Default: true' type: boolean enableScheduledEventDraining: description: 'EnableScheduledEventDraining makes node termination diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index 0ea59883a3..fa459c1414 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -941,7 +941,7 @@ type NodeTerminationHandlerConfig struct { EnablePrometheusMetrics *bool `json:"prometheusEnable,omitempty"` // EnableSQSTerminationDraining enables queue-processor mode which drains nodes when an SQS termination event is received. - // Default: false + // Default: true EnableSQSTerminationDraining *bool `json:"enableSQSTerminationDraining,omitempty"` // ExcludeFromLoadBalancers makes node termination handler will mark for exclusion from load balancers before node are cordoned. @@ -963,6 +963,10 @@ type NodeTerminationHandlerConfig struct { Version *string `json:"version,omitempty"` } +func (n *NodeTerminationHandlerConfig) IsQueueMode() bool { + return n != nil && n.Enabled != nil && *n.Enabled && (n.EnableSQSTerminationDraining == nil || *n.EnableSQSTerminationDraining) +} + // NodeProblemDetector determines the node problem detector configuration. type NodeProblemDetectorConfig struct { // Enabled enables the NodeProblemDetector. diff --git a/pkg/apis/kops/v1alpha2/componentconfig.go b/pkg/apis/kops/v1alpha2/componentconfig.go index cf5213a6fb..2cdaf350ca 100644 --- a/pkg/apis/kops/v1alpha2/componentconfig.go +++ b/pkg/apis/kops/v1alpha2/componentconfig.go @@ -967,7 +967,7 @@ type NodeTerminationHandlerConfig struct { EnablePrometheusMetrics *bool `json:"prometheusEnable,omitempty"` // EnableSQSTerminationDraining enables queue-processor mode which drains nodes when an SQS termination event is received. 
- // Default: false + // Default: true EnableSQSTerminationDraining *bool `json:"enableSQSTerminationDraining,omitempty"` // ExcludeFromLoadBalancers makes node termination handler will mark for exclusion from load balancers before node are cordoned. diff --git a/pkg/apis/kops/v1alpha3/componentconfig.go b/pkg/apis/kops/v1alpha3/componentconfig.go index c14031fea5..9e181bb83d 100644 --- a/pkg/apis/kops/v1alpha3/componentconfig.go +++ b/pkg/apis/kops/v1alpha3/componentconfig.go @@ -938,7 +938,7 @@ type NodeTerminationHandlerConfig struct { EnablePrometheusMetrics *bool `json:"prometheusEnable,omitempty"` // EnableSQSTerminationDraining enables queue-processor mode which drains nodes when an SQS termination event is received. - // Default: false + // Default: true EnableSQSTerminationDraining *bool `json:"enableSQSTerminationDraining,omitempty"` // ManagedASGTag is the tag used to determine which nodes NTH can take action on diff --git a/pkg/model/components/nodeterminationhandler.go b/pkg/model/components/nodeterminationhandler.go index 91ae448f18..8d4b8a0922 100644 --- a/pkg/model/components/nodeterminationhandler.go +++ b/pkg/model/components/nodeterminationhandler.go @@ -56,10 +56,6 @@ func (b *NodeTerminationHandlerOptionsBuilder) BuildOptions(o interface{}) error nth.EnablePrometheusMetrics = fi.PtrTo(false) } - if nth.EnableSQSTerminationDraining == nil { - nth.EnableSQSTerminationDraining = fi.PtrTo(false) - } - if nth.ExcludeFromLoadBalancers == nil { nth.ExcludeFromLoadBalancers = fi.PtrTo(true) } diff --git a/pkg/model/context.go b/pkg/model/context.go index f3c28332ea..c108cb2db9 100644 --- a/pkg/model/context.go +++ b/pkg/model/context.go @@ -149,7 +149,7 @@ func (b *KopsModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) (ma // Apply NTH Labels nth := b.Cluster.Spec.NodeTerminationHandler - if nth != nil && fi.ValueOf(nth.Enabled) && fi.ValueOf(nth.EnableSQSTerminationDraining) { + if nth.IsQueueMode() { labels[fi.ValueOf(nth.ManagedASGTag)] = "" } diff --git a/pkg/model/iam/iam_builder.go b/pkg/model/iam/iam_builder.go index 65b930669b..d17450156d 100644 --- a/pkg/model/iam/iam_builder.go +++ b/pkg/model/iam/iam_builder.go @@ -435,7 +435,7 @@ func (r *NodeRoleMaster) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) { AddClusterAutoscalerPermissions(p, useStaticInstanceList) nth := b.Cluster.Spec.NodeTerminationHandler - if nth != nil && fi.ValueOf(nth.Enabled) && fi.ValueOf(nth.EnableSQSTerminationDraining) { + if nth.IsQueueMode() { AddNodeTerminationHandlerSQSPermissions(p) } } diff --git a/tests/e2e/scenarios/addon-resource-tracking/run-test.sh b/tests/e2e/scenarios/addon-resource-tracking/run-test.sh index cb1ccf6965..3629c0a384 100755 --- a/tests/e2e/scenarios/addon-resource-tracking/run-test.sh +++ b/tests/e2e/scenarios/addon-resource-tracking/run-test.sh @@ -31,6 +31,7 @@ KOPS=$(kops-download-from-base) # Start with a cluster running nodeTerminationHandler ARGS="--override=cluster.spec.nodeTerminationHandler.enabled=true" +ARGS="${ARGS} --override=cluster.spec.nodeTerminationHandler.enableSQSTerminationDraining=false" ${KUBETEST2} \ --up \ diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-ASGLifecycle_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern similarity index 100% rename from 
tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-ASGLifecycle_event_pattern rename to tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-InstanceScheduledChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-InstanceScheduledChange_event_pattern rename to tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-InstanceStateChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-InstanceStateChange_event_pattern rename to tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-RebalanceRecommendation_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-RebalanceRecommendation_event_pattern rename to tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-SpotInterruption_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-SpotInterruption_event_pattern rename to tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_cluster-completed.spec_content index 4bd8666606..43e1475d7b 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_cluster-completed.spec_content @@ -213,7 +213,6 @@ spec: cpuRequest: 50m enableRebalanceDraining: false enableRebalanceMonitoring: false - enableSQSTerminationDraining: false 
enableScheduledEventDraining: false enableSpotInterruptionDraining: true enabled: true diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_minimal.example.com-addons-bootstrap_content index 9c4259b282..4cad615f71 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_minimal.example.com-addons-bootstrap_content @@ -62,7 +62,7 @@ spec: version: 9.99.0 - id: k8s-1.11 manifest: node-termination-handler.aws/k8s-1.11.yaml - manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720 + manifestHash: 24d22c723a0179f85aacb0b9f9390234829da92e7da424efc72de0b03b1fb707 name: node-termination-handler.aws prune: kinds: @@ -77,17 +77,19 @@ spec: - group: apps kind: DaemonSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: apps kind: Deployment labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: apps kind: StatefulSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - group: policy kind: PodDisruptionBudget labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content index 92bc9e84c4..1c97c8ddcf 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -99,12 +99,12 @@ subjects: --- apiVersion: apps/v1 -kind: DaemonSet +kind: Deployment metadata: creationTimestamp: null labels: addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/managed-by: kops app.kubernetes.io/name: aws-node-termination-handler @@ -115,6 +115,7 @@ metadata: name: aws-node-termination-handler namespace: kube-system spec: + replicas: 2 selector: matchLabels: app.kubernetes.io/instance: aws-node-termination-handler @@ -124,22 +125,14 @@ spec: metadata: creationTimestamp: null labels: - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/name: aws-node-termination-handler k8s-app: aws-node-termination-handler kops.k8s.io/managed-by: kops + kops.k8s.io/nth-mode: sqs kubernetes.io/os: linux spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: eks.amazonaws.com/compute-type - 
operator: NotIn - values: - - fargate containers: - env: - name: NODE_NAME @@ -155,7 +148,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: ENABLE_PROBES_SERVER - value: "false" + value: "true" - name: PROBES_SERVER_PORT value: "8080" - name: PROBES_SERVER_ENDPOINT @@ -170,8 +163,12 @@ spec: value: "false" - name: PROMETHEUS_SERVER_PORT value: "9092" - - name: METADATA_TRIES - value: "3" + - name: CHECK_TAG_BEFORE_DRAINING + value: "true" + - name: MANAGED_TAG + value: aws-node-termination-handler/managed + - name: USE_PROVIDER_ID + value: "true" - name: DRY_RUN value: "false" - name: CORDON_ONLY @@ -190,6 +187,8 @@ spec: value: "120" - name: EMIT_KUBERNETES_EVENTS value: "true" + - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS + value: "-1" - name: ENABLE_SPOT_INTERRUPTION_DRAINING value: "true" - name: ENABLE_SCHEDULED_EVENT_DRAINING @@ -199,16 +198,31 @@ spec: - name: ENABLE_REBALANCE_DRAINING value: "false" - name: ENABLE_SQS_TERMINATION_DRAINING - value: "false" - - name: UPTIME_FROM_FILE - value: /proc/uptime + value: "true" + - name: QUEUE_URL + value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth + - name: WORKERS + value: "10" - name: AWS_ROLE_ARN value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com - name: AWS_WEB_IDENTITY_TOKEN_FILE value: /var/run/secrets/amazonaws.com/token image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 name: aws-node-termination-handler + ports: + - containerPort: 8080 + name: liveness-probe + protocol: TCP + - containerPort: 9092 + name: metrics + protocol: TCP resources: requests: cpu: 50m @@ -218,27 +232,33 @@ spec: readOnlyRootFilesystem: true runAsGroup: 1000 runAsNonRoot: true + runAsUser: 1000 volumeMounts: - - mountPath: /proc/uptime - name: uptime - readOnly: true - mountPath: /var/run/secrets/amazonaws.com/ name: token-amazonaws-com readOnly: true - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical + priorityClassName: system-cluster-critical securityContext: fsGroup: 1000 serviceAccountName: aws-node-termination-handler - tolerations: - - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule volumes: - - hostPath: - path: /proc/uptime - name: uptime - name: token-amazonaws-com projected: defaultMode: 420 @@ -247,7 +267,25 @@ spec: audience: amazonaws.com expirationSeconds: 86400 path: token - updateStrategy: - rollingUpdate: - maxUnavailable: 25% - type: RollingUpdate + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + k8s-addon: 
node-termination-handler.aws + name: aws-node-termination-handler + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_sqs_queue_nthsqsresources-longclustername-example-com-nth_policy b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_sqs_queue_minimal-example-com-nth_policy similarity index 67% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_sqs_queue_nthsqsresources-longclustername-example-com-nth_policy rename to tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_sqs_queue_minimal-example-com-nth_policy index c73cbc4c57..fece0c117d 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_sqs_queue_nthsqsresources-longclustername-example-com-nth_policy +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_sqs_queue_minimal-example-com-nth_policy @@ -6,6 +6,6 @@ "Service": ["events.amazonaws.com", "sqs.amazonaws.com"] }, "Action": "sqs:SendMessage", - "Resource": "arn:aws-test:sqs:us-test-1:123456789012:nthsqsresources-longclustername-example-com-nth" + "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth" }] } diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa/in-v1alpha2.yaml index bbf1222fca..8d06cbeaa3 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/in-v1alpha2.yaml @@ -42,7 +42,6 @@ spec: amazonvpc: {} nodeTerminationHandler: enabled: true - enableSQSTerminationDraining: false nonMasqueradeCIDR: 172.20.0.0/16 serviceAccountIssuerDiscovery: discoveryStore: memfs://discovery.example.com/minimal.example.com diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa/kubernetes.tf b/tests/integration/update_cluster/many-addons-ccm-irsa/kubernetes.tf index 607dd58b07..c882c8904d 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa/kubernetes.tf +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/kubernetes.tf @@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" propagate_at_launch = true value = "master-us-test-1a.masters.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" propagate_at_launch = true @@ -257,6 +262,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { propagate_at_launch = true value = "nodes.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" propagate_at_launch = true @@ -285,6 +295,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id] } +resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = 
"master-us-test-1a-NTHLifecycleHook" +} + +resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "nodes-NTHLifecycleHook" +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern") + name = "minimal.example.com-ASGLifecycle" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-ASGLifecycle" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern") + name = "minimal.example.com-InstanceScheduledChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceScheduledChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern") + name = "minimal.example.com-InstanceStateChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceStateChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern") + name = "minimal.example.com-RebalanceRecommendation" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-RebalanceRecommendation" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern") + name = "minimal.example.com-SpotInterruption" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-SpotInterruption" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule 
= aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id +} + resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" { availability_zone = "us-test-1a" encrypted = false @@ -555,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -571,6 +673,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -585,6 +688,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -640,6 +744,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -652,6 +757,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -662,6 +768,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -1073,6 +1180,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1 type = "ingress" } +resource "aws_sqs_queue" "minimal-example-com-nth" { + message_retention_seconds = 300 + name = "minimal-example-com-nth" + policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy") + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal-example-com-nth" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + resource "aws_subnet" "us-test-1a-minimal-example-com" { availability_zone = "us-test-1a" cidr_block = "172.20.32.0/19" diff --git 
a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern new file mode 100644 index 0000000000..c8db9dbe9c --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern @@ -0,0 +1 @@ +{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern new file mode 100644 index 0000000000..fb4ea7defd --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern new file mode 100644 index 0000000000..8c2916419d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern new file mode 100644 index 0000000000..226b0ac52d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern new file mode 100644 index 0000000000..2d0e83b416 --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_cluster-completed.spec_content index d24237cf9d..799c639a1c 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_cluster-completed.spec_content 
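The five event-pattern fixtures added above pin down exactly what kOps wires from EventBridge into the NTH SQS queue: ASG lifecycle actions, scheduled health events, instance state changes, rebalance recommendations, and spot interruptions. A standalone Go check that each pattern is well-formed JSON — the patterns are copied verbatim from the fixtures, but the program itself is illustrative only, not part of kops:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// Event patterns as provisioned by kOps for NTH Queue Processor mode.
    	patterns := map[string]string{
    		"ASGLifecycle":            `{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}`,
    		"InstanceScheduledChange": `{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}`,
    		"InstanceStateChange":     `{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}`,
    		"RebalanceRecommendation": `{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]}`,
    		"SpotInterruption":        `{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}`,
    	}
    	for name, p := range patterns {
    		var v map[string]interface{}
    		if err := json.Unmarshal([]byte(p), &v); err != nil {
    			fmt.Printf("%s: invalid pattern: %v\n", name, err)
    			continue
    		}
    		fmt.Printf("%s: ok (%d top-level keys)\n", name, len(v))
    	}
    }
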
@@ -217,7 +217,6 @@ spec: cpuRequest: 50m enableRebalanceDraining: false enableRebalanceMonitoring: false - enableSQSTerminationDraining: false enableScheduledEventDraining: false enableSpotInterruptionDraining: true enabled: true diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_minimal.example.com-addons-bootstrap_content index 2f2bd3ae04..f2e5a2396c 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_minimal.example.com-addons-bootstrap_content @@ -69,7 +69,7 @@ spec: version: 9.99.0 - id: k8s-1.11 manifest: node-termination-handler.aws/k8s-1.11.yaml - manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720 + manifestHash: a1de83f808713413c322ac63634fb663cf0a4a396d0c0d66cd478086262a5035 name: node-termination-handler.aws prune: kinds: @@ -84,17 +84,19 @@ spec: - group: apps kind: DaemonSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: apps kind: Deployment labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: apps kind: StatefulSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - group: policy kind: PodDisruptionBudget labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content index 92bc9e84c4..5dda522e0f 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -99,12 +99,12 @@ subjects: --- apiVersion: apps/v1 -kind: DaemonSet +kind: Deployment metadata: creationTimestamp: null labels: addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/managed-by: kops app.kubernetes.io/name: aws-node-termination-handler @@ -115,6 +115,7 @@ metadata: name: aws-node-termination-handler namespace: kube-system spec: + replicas: 2 selector: matchLabels: app.kubernetes.io/instance: aws-node-termination-handler @@ -124,22 +125,14 @@ spec: metadata: creationTimestamp: null labels: - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/name: aws-node-termination-handler k8s-app: aws-node-termination-handler kops.k8s.io/managed-by: kops + kops.k8s.io/nth-mode: sqs kubernetes.io/os: linux spec: - 
affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: eks.amazonaws.com/compute-type - operator: NotIn - values: - - fargate containers: - env: - name: NODE_NAME @@ -155,7 +148,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: ENABLE_PROBES_SERVER - value: "false" + value: "true" - name: PROBES_SERVER_PORT value: "8080" - name: PROBES_SERVER_ENDPOINT @@ -170,8 +163,12 @@ spec: value: "false" - name: PROMETHEUS_SERVER_PORT value: "9092" - - name: METADATA_TRIES - value: "3" + - name: CHECK_TAG_BEFORE_DRAINING + value: "true" + - name: MANAGED_TAG + value: aws-node-termination-handler/managed + - name: USE_PROVIDER_ID + value: "true" - name: DRY_RUN value: "false" - name: CORDON_ONLY @@ -190,6 +187,8 @@ spec: value: "120" - name: EMIT_KUBERNETES_EVENTS value: "true" + - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS + value: "-1" - name: ENABLE_SPOT_INTERRUPTION_DRAINING value: "true" - name: ENABLE_SCHEDULED_EVENT_DRAINING @@ -199,16 +198,31 @@ spec: - name: ENABLE_REBALANCE_DRAINING value: "false" - name: ENABLE_SQS_TERMINATION_DRAINING - value: "false" - - name: UPTIME_FROM_FILE - value: /proc/uptime + value: "true" + - name: QUEUE_URL + value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth + - name: WORKERS + value: "10" - name: AWS_ROLE_ARN value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com - name: AWS_WEB_IDENTITY_TOKEN_FILE value: /var/run/secrets/amazonaws.com/token image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 name: aws-node-termination-handler + ports: + - containerPort: 8080 + name: liveness-probe + protocol: TCP + - containerPort: 9092 + name: metrics + protocol: TCP resources: requests: cpu: 50m @@ -218,27 +232,33 @@ spec: readOnlyRootFilesystem: true runAsGroup: 1000 runAsNonRoot: true + runAsUser: 1000 volumeMounts: - - mountPath: /proc/uptime - name: uptime - readOnly: true - mountPath: /var/run/secrets/amazonaws.com/ name: token-amazonaws-com readOnly: true - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical + priorityClassName: system-cluster-critical securityContext: fsGroup: 1000 serviceAccountName: aws-node-termination-handler - tolerations: - - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule volumes: - - hostPath: - path: /proc/uptime - name: uptime - name: token-amazonaws-com projected: defaultMode: 420 @@ -247,7 +267,25 @@ spec: audience: amazonaws.com expirationSeconds: 86400 path: token - updateStrategy: - rollingUpdate: - maxUnavailable: 25% - type: RollingUpdate + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + 
app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_sqs_queue_minimal-example-com-nth_policy b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_sqs_queue_minimal-example-com-nth_policy new file mode 100644 index 0000000000..fece0c117d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_sqs_queue_minimal-example-com-nth_policy @@ -0,0 +1,11 @@ +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "Service": ["events.amazonaws.com", "sqs.amazonaws.com"] + }, + "Action": "sqs:SendMessage", + "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth" + }] + } diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa23/in-v1alpha2.yaml index b57ac351e4..c40a0daca0 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa23/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/in-v1alpha2.yaml @@ -42,7 +42,6 @@ spec: amazonvpc: {} nodeTerminationHandler: enabled: true - enableSQSTerminationDraining: false nonMasqueradeCIDR: 172.20.0.0/16 serviceAccountIssuerDiscovery: discoveryStore: memfs://discovery.example.com/minimal.example.com diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/kubernetes.tf b/tests/integration/update_cluster/many-addons-ccm-irsa23/kubernetes.tf index 8373bd245d..3fc6f46802 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa23/kubernetes.tf +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/kubernetes.tf @@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" propagate_at_launch = true value = "master-us-test-1a.masters.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" propagate_at_launch = true @@ -257,6 +262,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { propagate_at_launch = true value = "nodes.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" propagate_at_launch = true @@ -285,6 +295,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id] } +resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "master-us-test-1a-NTHLifecycleHook" +} + +resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 
+ lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "nodes-NTHLifecycleHook" +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern") + name = "minimal.example.com-ASGLifecycle" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-ASGLifecycle" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern") + name = "minimal.example.com-InstanceScheduledChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceScheduledChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern") + name = "minimal.example.com-InstanceStateChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceStateChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern") + name = "minimal.example.com-RebalanceRecommendation" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-RebalanceRecommendation" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern") + name = "minimal.example.com-SpotInterruption" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-SpotInterruption" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id +} + resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" { availability_zone = "us-test-1a" encrypted = false @@ -555,6 +656,7 @@ resource 
"aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -571,6 +673,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -585,6 +688,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -640,6 +744,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -652,6 +757,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -662,6 +768,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -1081,6 +1188,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1 type = "ingress" } +resource "aws_sqs_queue" "minimal-example-com-nth" { + message_retention_seconds = 300 + name = "minimal-example-com-nth" + policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy") + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal-example-com-nth" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + resource "aws_subnet" "us-test-1a-minimal-example-com" { availability_zone = "us-test-1a" cidr_block = "172.20.32.0/19" diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern 
b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern new file mode 100644 index 0000000000..c8db9dbe9c --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern @@ -0,0 +1 @@ +{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern new file mode 100644 index 0000000000..fb4ea7defd --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern new file mode 100644 index 0000000000..8c2916419d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern new file mode 100644 index 0000000000..226b0ac52d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern new file mode 100644 index 0000000000..2d0e83b416 --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_cluster-completed.spec_content index 85d8fef259..fe7e9c75fd 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_cluster-completed.spec_content @@ -216,7 +216,6 @@ spec: cpuRequest: 50m enableRebalanceDraining: false enableRebalanceMonitoring: false - 
enableSQSTerminationDraining: false enableScheduledEventDraining: false enableSpotInterruptionDraining: true enabled: true diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_minimal.example.com-addons-bootstrap_content index 4799a239f9..c313e117f3 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_minimal.example.com-addons-bootstrap_content @@ -69,7 +69,7 @@ spec: version: 9.99.0 - id: k8s-1.11 manifest: node-termination-handler.aws/k8s-1.11.yaml - manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720 + manifestHash: a1de83f808713413c322ac63634fb663cf0a4a396d0c0d66cd478086262a5035 name: node-termination-handler.aws prune: kinds: @@ -84,17 +84,19 @@ spec: - group: apps kind: DaemonSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: apps kind: Deployment labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: apps kind: StatefulSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - group: policy kind: PodDisruptionBudget labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content index 92bc9e84c4..5dda522e0f 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -99,12 +99,12 @@ subjects: --- apiVersion: apps/v1 -kind: DaemonSet +kind: Deployment metadata: creationTimestamp: null labels: addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/managed-by: kops app.kubernetes.io/name: aws-node-termination-handler @@ -115,6 +115,7 @@ metadata: name: aws-node-termination-handler namespace: kube-system spec: + replicas: 2 selector: matchLabels: app.kubernetes.io/instance: aws-node-termination-handler @@ -124,22 +125,14 @@ spec: metadata: creationTimestamp: null labels: - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/name: aws-node-termination-handler k8s-app: aws-node-termination-handler kops.k8s.io/managed-by: kops + kops.k8s.io/nth-mode: sqs kubernetes.io/os: linux spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - 
matchExpressions: - - key: eks.amazonaws.com/compute-type - operator: NotIn - values: - - fargate containers: - env: - name: NODE_NAME @@ -155,7 +148,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: ENABLE_PROBES_SERVER - value: "false" + value: "true" - name: PROBES_SERVER_PORT value: "8080" - name: PROBES_SERVER_ENDPOINT @@ -170,8 +163,12 @@ spec: value: "false" - name: PROMETHEUS_SERVER_PORT value: "9092" - - name: METADATA_TRIES - value: "3" + - name: CHECK_TAG_BEFORE_DRAINING + value: "true" + - name: MANAGED_TAG + value: aws-node-termination-handler/managed + - name: USE_PROVIDER_ID + value: "true" - name: DRY_RUN value: "false" - name: CORDON_ONLY @@ -190,6 +187,8 @@ spec: value: "120" - name: EMIT_KUBERNETES_EVENTS value: "true" + - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS + value: "-1" - name: ENABLE_SPOT_INTERRUPTION_DRAINING value: "true" - name: ENABLE_SCHEDULED_EVENT_DRAINING @@ -199,16 +198,31 @@ spec: - name: ENABLE_REBALANCE_DRAINING value: "false" - name: ENABLE_SQS_TERMINATION_DRAINING - value: "false" - - name: UPTIME_FROM_FILE - value: /proc/uptime + value: "true" + - name: QUEUE_URL + value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth + - name: WORKERS + value: "10" - name: AWS_ROLE_ARN value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com - name: AWS_WEB_IDENTITY_TOKEN_FILE value: /var/run/secrets/amazonaws.com/token image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 name: aws-node-termination-handler + ports: + - containerPort: 8080 + name: liveness-probe + protocol: TCP + - containerPort: 9092 + name: metrics + protocol: TCP resources: requests: cpu: 50m @@ -218,27 +232,33 @@ spec: readOnlyRootFilesystem: true runAsGroup: 1000 runAsNonRoot: true + runAsUser: 1000 volumeMounts: - - mountPath: /proc/uptime - name: uptime - readOnly: true - mountPath: /var/run/secrets/amazonaws.com/ name: token-amazonaws-com readOnly: true - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical + priorityClassName: system-cluster-critical securityContext: fsGroup: 1000 serviceAccountName: aws-node-termination-handler - tolerations: - - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule volumes: - - hostPath: - path: /proc/uptime - name: uptime - name: token-amazonaws-com projected: defaultMode: 420 @@ -247,7 +267,25 @@ spec: audience: amazonaws.com expirationSeconds: 86400 path: token - updateStrategy: - rollingUpdate: - maxUnavailable: 25% - type: RollingUpdate + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: 
aws-node-termination-handler + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_sqs_queue_minimal-example-com-nth_policy b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_sqs_queue_minimal-example-com-nth_policy new file mode 100644 index 0000000000..fece0c117d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_sqs_queue_minimal-example-com-nth_policy @@ -0,0 +1,11 @@ +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "Service": ["events.amazonaws.com", "sqs.amazonaws.com"] + }, + "Action": "sqs:SendMessage", + "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth" + }] + } diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa24/in-v1alpha2.yaml index 836a4d4b0e..9251278034 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa24/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/in-v1alpha2.yaml @@ -42,7 +42,6 @@ spec: amazonvpc: {} nodeTerminationHandler: enabled: true - enableSQSTerminationDraining: false nonMasqueradeCIDR: 172.20.0.0/16 serviceAccountIssuerDiscovery: discoveryStore: memfs://discovery.example.com/minimal.example.com diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/kubernetes.tf b/tests/integration/update_cluster/many-addons-ccm-irsa24/kubernetes.tf index c76f00d056..2c89ac12f1 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa24/kubernetes.tf +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/kubernetes.tf @@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" propagate_at_launch = true value = "master-us-test-1a.masters.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" propagate_at_launch = true @@ -247,6 +252,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { propagate_at_launch = true value = "nodes.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" propagate_at_launch = true @@ -270,6 +280,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id] } +resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "master-us-test-1a-NTHLifecycleHook" +} + +resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "nodes-NTHLifecycleHook" +} + 
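> Reviewer note: the five `*_event_pattern` fixtures added above are plain EventBridge filters keyed on `source` and `detail-type` (only the InstanceScheduledChange rule additionally filters on the nested `detail` block). As a rough illustration of the matching semantics these fixtures rely on, here is a minimal Go sketch covering just the flat two-key case; the `pattern` and `event` types are hypothetical helpers for this note, not kops or AWS SDK code. Real EventBridge matching is richer (nested keys, prefix and anything-but matchers), so this only mirrors what these particular fixtures use.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pattern mirrors the shape of the fixture files above: each key lists
// the accepted values, e.g. {"source":["aws.autoscaling"],"detail-type":[...]}.
type pattern struct {
	Source     []string `json:"source"`
	DetailType []string `json:"detail-type"`
}

// event is the corresponding subset of an EventBridge event envelope.
type event struct {
	Source     string `json:"source"`
	DetailType string `json:"detail-type"`
}

// matches reports whether the event's source and detail-type are both
// listed in the pattern, which is all these flat fixtures require.
func matches(p pattern, e event) bool {
	return contains(p.Source, e.Source) && contains(p.DetailType, e.DetailType)
}

func contains(xs []string, x string) bool {
	for _, v := range xs {
		if v == x {
			return true
		}
	}
	return false
}

func main() {
	// The ASGLifecycle fixture content, verbatim.
	raw := `{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}`
	var p pattern
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	e := event{Source: "aws.autoscaling", DetailType: "EC2 Instance-terminate Lifecycle Action"}
	fmt.Println(matches(p, e)) // true
}
```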
+resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern") + name = "minimal.example.com-ASGLifecycle" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-ASGLifecycle" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern") + name = "minimal.example.com-InstanceScheduledChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceScheduledChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern") + name = "minimal.example.com-InstanceStateChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceStateChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern") + name = "minimal.example.com-RebalanceRecommendation" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-RebalanceRecommendation" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern") + name = "minimal.example.com-SpotInterruption" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-SpotInterruption" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id +} + resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" { availability_zone = "us-test-1a" encrypted = false @@ -540,6 +641,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = 
"minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -554,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -566,6 +669,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -619,6 +723,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -630,6 +735,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -639,6 +745,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -1057,6 +1164,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1 type = "ingress" } +resource "aws_sqs_queue" "minimal-example-com-nth" { + message_retention_seconds = 300 + name = "minimal-example-com-nth" + policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy") + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal-example-com-nth" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + resource "aws_subnet" "us-test-1a-minimal-example-com" { availability_zone = "us-test-1a" cidr_block = "172.20.32.0/19" diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern new file mode 100644 index 0000000000..c8db9dbe9c --- /dev/null +++ 
b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern @@ -0,0 +1 @@ +{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern new file mode 100644 index 0000000000..fb4ea7defd --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern new file mode 100644 index 0000000000..8c2916419d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern new file mode 100644 index 0000000000..226b0ac52d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern new file mode 100644 index 0000000000..2d0e83b416 --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_cluster-completed.spec_content index 84da773d54..d531d972ea 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_cluster-completed.spec_content @@ -215,7 +215,6 @@ spec: cpuRequest: 50m enableRebalanceDraining: false enableRebalanceMonitoring: false - enableSQSTerminationDraining: false enableScheduledEventDraining: false enableSpotInterruptionDraining: true enabled: true diff --git 
a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_minimal.example.com-addons-bootstrap_content index 416a497870..f3d1bf1127 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_minimal.example.com-addons-bootstrap_content @@ -69,7 +69,7 @@ spec: version: 9.99.0 - id: k8s-1.11 manifest: node-termination-handler.aws/k8s-1.11.yaml - manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720 + manifestHash: a1de83f808713413c322ac63634fb663cf0a4a396d0c0d66cd478086262a5035 name: node-termination-handler.aws prune: kinds: @@ -84,17 +84,19 @@ spec: - group: apps kind: DaemonSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: apps kind: Deployment labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: apps kind: StatefulSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - group: policy kind: PodDisruptionBudget labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content index 92bc9e84c4..5dda522e0f 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -99,12 +99,12 @@ subjects: --- apiVersion: apps/v1 -kind: DaemonSet +kind: Deployment metadata: creationTimestamp: null labels: addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/managed-by: kops app.kubernetes.io/name: aws-node-termination-handler @@ -115,6 +115,7 @@ metadata: name: aws-node-termination-handler namespace: kube-system spec: + replicas: 2 selector: matchLabels: app.kubernetes.io/instance: aws-node-termination-handler @@ -124,22 +125,14 @@ spec: metadata: creationTimestamp: null labels: - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/name: aws-node-termination-handler k8s-app: aws-node-termination-handler kops.k8s.io/managed-by: kops + kops.k8s.io/nth-mode: sqs kubernetes.io/os: linux spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: eks.amazonaws.com/compute-type - operator: NotIn - values: - - fargate containers: - env: - name: NODE_NAME @@ -155,7 
+148,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: ENABLE_PROBES_SERVER - value: "false" + value: "true" - name: PROBES_SERVER_PORT value: "8080" - name: PROBES_SERVER_ENDPOINT @@ -170,8 +163,12 @@ spec: value: "false" - name: PROMETHEUS_SERVER_PORT value: "9092" - - name: METADATA_TRIES - value: "3" + - name: CHECK_TAG_BEFORE_DRAINING + value: "true" + - name: MANAGED_TAG + value: aws-node-termination-handler/managed + - name: USE_PROVIDER_ID + value: "true" - name: DRY_RUN value: "false" - name: CORDON_ONLY @@ -190,6 +187,8 @@ spec: value: "120" - name: EMIT_KUBERNETES_EVENTS value: "true" + - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS + value: "-1" - name: ENABLE_SPOT_INTERRUPTION_DRAINING value: "true" - name: ENABLE_SCHEDULED_EVENT_DRAINING @@ -199,16 +198,31 @@ spec: - name: ENABLE_REBALANCE_DRAINING value: "false" - name: ENABLE_SQS_TERMINATION_DRAINING - value: "false" - - name: UPTIME_FROM_FILE - value: /proc/uptime + value: "true" + - name: QUEUE_URL + value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth + - name: WORKERS + value: "10" - name: AWS_ROLE_ARN value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com - name: AWS_WEB_IDENTITY_TOKEN_FILE value: /var/run/secrets/amazonaws.com/token image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 name: aws-node-termination-handler + ports: + - containerPort: 8080 + name: liveness-probe + protocol: TCP + - containerPort: 9092 + name: metrics + protocol: TCP resources: requests: cpu: 50m @@ -218,27 +232,33 @@ spec: readOnlyRootFilesystem: true runAsGroup: 1000 runAsNonRoot: true + runAsUser: 1000 volumeMounts: - - mountPath: /proc/uptime - name: uptime - readOnly: true - mountPath: /var/run/secrets/amazonaws.com/ name: token-amazonaws-com readOnly: true - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical + priorityClassName: system-cluster-critical securityContext: fsGroup: 1000 serviceAccountName: aws-node-termination-handler - tolerations: - - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule volumes: - - hostPath: - path: /proc/uptime - name: uptime - name: token-amazonaws-com projected: defaultMode: 420 @@ -247,7 +267,25 @@ spec: audience: amazonaws.com expirationSeconds: 86400 path: token - updateStrategy: - rollingUpdate: - maxUnavailable: 25% - type: RollingUpdate + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler + namespace: kube-system +spec: + 
maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_sqs_queue_minimal-example-com-nth_policy b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_sqs_queue_minimal-example-com-nth_policy new file mode 100644 index 0000000000..fece0c117d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_sqs_queue_minimal-example-com-nth_policy @@ -0,0 +1,11 @@ +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "Service": ["events.amazonaws.com", "sqs.amazonaws.com"] + }, + "Action": "sqs:SendMessage", + "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth" + }] + } diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa25/in-v1alpha2.yaml index 4532c69d3f..f3fc89fe33 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa25/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/in-v1alpha2.yaml @@ -42,7 +42,6 @@ spec: amazonvpc: {} nodeTerminationHandler: enabled: true - enableSQSTerminationDraining: false nonMasqueradeCIDR: 172.20.0.0/16 serviceAccountIssuerDiscovery: discoveryStore: memfs://discovery.example.com/minimal.example.com diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/kubernetes.tf b/tests/integration/update_cluster/many-addons-ccm-irsa25/kubernetes.tf index c76f00d056..2c89ac12f1 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa25/kubernetes.tf +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/kubernetes.tf @@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" propagate_at_launch = true value = "master-us-test-1a.masters.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" propagate_at_launch = true @@ -247,6 +252,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { propagate_at_launch = true value = "nodes.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" propagate_at_launch = true @@ -270,6 +280,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id] } +resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "master-us-test-1a-NTHLifecycleHook" +} + +resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "nodes-NTHLifecycleHook" +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" { + event_pattern = 
file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern") + name = "minimal.example.com-ASGLifecycle" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-ASGLifecycle" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern") + name = "minimal.example.com-InstanceScheduledChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceScheduledChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern") + name = "minimal.example.com-InstanceStateChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceStateChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern") + name = "minimal.example.com-RebalanceRecommendation" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-RebalanceRecommendation" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern") + name = "minimal.example.com-SpotInterruption" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-SpotInterruption" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id +} + resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" { availability_zone = "us-test-1a" encrypted = false @@ -540,6 +641,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + 
"aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -554,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -566,6 +669,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -619,6 +723,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -630,6 +735,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -639,6 +745,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -1057,6 +1164,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1 type = "ingress" } +resource "aws_sqs_queue" "minimal-example-com-nth" { + message_retention_seconds = 300 + name = "minimal-example-com-nth" + policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy") + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal-example-com-nth" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + resource "aws_subnet" "us-test-1a-minimal-example-com" { availability_zone = "us-test-1a" cidr_block = "172.20.32.0/19" diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern new file mode 100644 index 0000000000..c8db9dbe9c --- /dev/null +++ 
b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern @@ -0,0 +1 @@ +{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern new file mode 100644 index 0000000000..fb4ea7defd --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern new file mode 100644 index 0000000000..8c2916419d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern new file mode 100644 index 0000000000..226b0ac52d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern new file mode 100644 index 0000000000..2d0e83b416 --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]} diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_cluster-completed.spec_content index 3cf44594af..9400b2744f 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_cluster-completed.spec_content @@ -215,7 +215,6 @@ spec: cpuRequest: 50m enableRebalanceDraining: false enableRebalanceMonitoring: false - enableSQSTerminationDraining: false enableScheduledEventDraining: false enableSpotInterruptionDraining: true enabled: true diff --git 
a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_minimal.example.com-addons-bootstrap_content index 95dc88837b..2e1dbb0fa2 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_minimal.example.com-addons-bootstrap_content @@ -62,7 +62,7 @@ spec: version: 9.99.0 - id: k8s-1.11 manifest: node-termination-handler.aws/k8s-1.11.yaml - manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720 + manifestHash: a1de83f808713413c322ac63634fb663cf0a4a396d0c0d66cd478086262a5035 name: node-termination-handler.aws prune: kinds: @@ -77,17 +77,19 @@ spec: - group: apps kind: DaemonSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: apps kind: Deployment labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: apps kind: StatefulSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - group: policy kind: PodDisruptionBudget labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content index 92bc9e84c4..5dda522e0f 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -99,12 +99,12 @@ subjects: --- apiVersion: apps/v1 -kind: DaemonSet +kind: Deployment metadata: creationTimestamp: null labels: addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/managed-by: kops app.kubernetes.io/name: aws-node-termination-handler @@ -115,6 +115,7 @@ metadata: name: aws-node-termination-handler namespace: kube-system spec: + replicas: 2 selector: matchLabels: app.kubernetes.io/instance: aws-node-termination-handler @@ -124,22 +125,14 @@ spec: metadata: creationTimestamp: null labels: - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/name: aws-node-termination-handler k8s-app: aws-node-termination-handler kops.k8s.io/managed-by: kops + kops.k8s.io/nth-mode: sqs kubernetes.io/os: linux spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: eks.amazonaws.com/compute-type - operator: NotIn - values: - - fargate containers: - env: - name: NODE_NAME @@ -155,7 
+148,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: ENABLE_PROBES_SERVER - value: "false" + value: "true" - name: PROBES_SERVER_PORT value: "8080" - name: PROBES_SERVER_ENDPOINT @@ -170,8 +163,12 @@ spec: value: "false" - name: PROMETHEUS_SERVER_PORT value: "9092" - - name: METADATA_TRIES - value: "3" + - name: CHECK_TAG_BEFORE_DRAINING + value: "true" + - name: MANAGED_TAG + value: aws-node-termination-handler/managed + - name: USE_PROVIDER_ID + value: "true" - name: DRY_RUN value: "false" - name: CORDON_ONLY @@ -190,6 +187,8 @@ spec: value: "120" - name: EMIT_KUBERNETES_EVENTS value: "true" + - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS + value: "-1" - name: ENABLE_SPOT_INTERRUPTION_DRAINING value: "true" - name: ENABLE_SCHEDULED_EVENT_DRAINING @@ -199,16 +198,31 @@ spec: - name: ENABLE_REBALANCE_DRAINING value: "false" - name: ENABLE_SQS_TERMINATION_DRAINING - value: "false" - - name: UPTIME_FROM_FILE - value: /proc/uptime + value: "true" + - name: QUEUE_URL + value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth + - name: WORKERS + value: "10" - name: AWS_ROLE_ARN value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com - name: AWS_WEB_IDENTITY_TOKEN_FILE value: /var/run/secrets/amazonaws.com/token image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 name: aws-node-termination-handler + ports: + - containerPort: 8080 + name: liveness-probe + protocol: TCP + - containerPort: 9092 + name: metrics + protocol: TCP resources: requests: cpu: 50m @@ -218,27 +232,33 @@ spec: readOnlyRootFilesystem: true runAsGroup: 1000 runAsNonRoot: true + runAsUser: 1000 volumeMounts: - - mountPath: /proc/uptime - name: uptime - readOnly: true - mountPath: /var/run/secrets/amazonaws.com/ name: token-amazonaws-com readOnly: true - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical + priorityClassName: system-cluster-critical securityContext: fsGroup: 1000 serviceAccountName: aws-node-termination-handler - tolerations: - - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule volumes: - - hostPath: - path: /proc/uptime - name: uptime - name: token-amazonaws-com projected: defaultMode: 420 @@ -247,7 +267,25 @@ spec: audience: amazonaws.com expirationSeconds: 86400 path: token - updateStrategy: - rollingUpdate: - maxUnavailable: 25% - type: RollingUpdate + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler + namespace: kube-system +spec: + 
maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_sqs_queue_minimal-example-com-nth_policy b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_sqs_queue_minimal-example-com-nth_policy new file mode 100644 index 0000000000..fece0c117d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_sqs_queue_minimal-example-com-nth_policy @@ -0,0 +1,11 @@ +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "Service": ["events.amazonaws.com", "sqs.amazonaws.com"] + }, + "Action": "sqs:SendMessage", + "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth" + }] + } diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa26/in-v1alpha2.yaml index 106083b6b0..393eda9508 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa26/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/in-v1alpha2.yaml @@ -42,7 +42,6 @@ spec: amazonvpc: {} nodeTerminationHandler: enabled: true - enableSQSTerminationDraining: false nonMasqueradeCIDR: 172.20.0.0/16 serviceAccountIssuerDiscovery: discoveryStore: memfs://discovery.example.com/minimal.example.com diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/kubernetes.tf b/tests/integration/update_cluster/many-addons-ccm-irsa26/kubernetes.tf index c40faf21e7..2299556d7f 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa26/kubernetes.tf +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/kubernetes.tf @@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" propagate_at_launch = true value = "master-us-test-1a.masters.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" propagate_at_launch = true @@ -247,6 +252,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { propagate_at_launch = true value = "nodes.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" propagate_at_launch = true @@ -270,6 +280,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id] } +resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "master-us-test-1a-NTHLifecycleHook" +} + +resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "nodes-NTHLifecycleHook" +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" { + event_pattern = 
file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern") + name = "minimal.example.com-ASGLifecycle" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-ASGLifecycle" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern") + name = "minimal.example.com-InstanceScheduledChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceScheduledChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern") + name = "minimal.example.com-InstanceStateChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceStateChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern") + name = "minimal.example.com-RebalanceRecommendation" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-RebalanceRecommendation" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern") + name = "minimal.example.com-SpotInterruption" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-SpotInterruption" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id +} + resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" { availability_zone = "us-test-1a" encrypted = false @@ -540,6 +641,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + 
"aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -554,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -566,6 +669,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" @@ -619,6 +723,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -630,6 +735,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -639,6 +745,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" "kops.k8s.io/instancegroup" = "nodes" @@ -1049,6 +1156,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1 type = "ingress" } +resource "aws_sqs_queue" "minimal-example-com-nth" { + message_retention_seconds = 300 + name = "minimal-example-com-nth" + policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy") + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal-example-com-nth" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + resource "aws_subnet" "us-test-1a-minimal-example-com" { availability_zone = "us-test-1a" cidr_block = "172.20.32.0/19" diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern new file mode 100644 index 0000000000..c8db9dbe9c --- /dev/null +++ 
b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern @@ -0,0 +1 @@ +{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]} diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern new file mode 100644 index 0000000000..fb4ea7defd --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}} diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern new file mode 100644 index 0000000000..8c2916419d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]} diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern new file mode 100644 index 0000000000..226b0ac52d --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]} diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern new file mode 100644 index 0000000000..2d0e83b416 --- /dev/null +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]} diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_iam_role_policy_masters.minimal.example.com_policy b/tests/integration/update_cluster/many-addons-ccm/data/aws_iam_role_policy_masters.minimal.example.com_policy index 9d470ef481..4193c545d3 100644 --- a/tests/integration/update_cluster/many-addons-ccm/data/aws_iam_role_policy_masters.minimal.example.com_policy +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_iam_role_policy_masters.minimal.example.com_policy @@ -268,13 +268,16 @@ "iam:GetServerCertificate", "iam:ListServerCertificates", "kms:DescribeKey", - "kms:GenerateRandom" + "kms:GenerateRandom", + "sqs:DeleteMessage", + "sqs:ReceiveMessage" ], "Effect": "Allow", "Resource": "*" }, { "Action": [ + "autoscaling:CompleteLifecycleAction", "autoscaling:SetDesiredCapacity", "autoscaling:TerminateInstanceInAutoScalingGroup", "ec2:AttachVolume", diff --git 
a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_cluster-completed.spec_content index 34be1f90c2..fe4a40b80a 100644 --- a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_cluster-completed.spec_content @@ -212,7 +212,6 @@ spec: cpuRequest: 50m enableRebalanceDraining: false enableRebalanceMonitoring: false - enableSQSTerminationDraining: false enableScheduledEventDraining: false enableSpotInterruptionDraining: true enabled: true diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_minimal.example.com-addons-bootstrap_content index 08825c6554..94f1ff0246 100644 --- a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_minimal.example.com-addons-bootstrap_content @@ -62,7 +62,7 @@ spec: version: 9.99.0 - id: k8s-1.11 manifest: node-termination-handler.aws/k8s-1.11.yaml - manifestHash: a6ccfd21bb3ab6ffbc5d48580197c2ecbbcf3ad68043b4c068eb4cc40405fd2c + manifestHash: 0c08eb3cb6900ebc1bdd84104d498ae007983f483b71b4628a460ba48181dd81 name: node-termination-handler.aws prune: kinds: @@ -77,17 +77,19 @@ spec: - group: apps kind: DaemonSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: apps kind: Deployment labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: apps kind: StatefulSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - group: policy kind: PodDisruptionBudget labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content index 476744fd6c..d5f7249ddd 100644 --- a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -99,12 +99,12 @@ subjects: --- apiVersion: apps/v1 -kind: DaemonSet +kind: Deployment metadata: creationTimestamp: null labels: addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/managed-by: kops app.kubernetes.io/name: aws-node-termination-handler @@ -115,6 +115,7 @@ metadata: name: aws-node-termination-handler namespace: kube-system spec: + replicas: 1 selector: matchLabels: app.kubernetes.io/instance: aws-node-termination-handler @@ 
-124,11 +125,12 @@ spec: metadata: creationTimestamp: null labels: - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/name: aws-node-termination-handler k8s-app: aws-node-termination-handler kops.k8s.io/managed-by: kops + kops.k8s.io/nth-mode: sqs kubernetes.io/os: linux spec: affinity: @@ -136,10 +138,11 @@ spec: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: eks.amazonaws.com/compute-type - operator: NotIn - values: - - fargate + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists containers: - env: - name: NODE_NAME @@ -155,7 +158,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: ENABLE_PROBES_SERVER - value: "false" + value: "true" - name: PROBES_SERVER_PORT value: "8080" - name: PROBES_SERVER_ENDPOINT @@ -170,8 +173,12 @@ spec: value: "false" - name: PROMETHEUS_SERVER_PORT value: "9092" - - name: METADATA_TRIES - value: "3" + - name: CHECK_TAG_BEFORE_DRAINING + value: "true" + - name: MANAGED_TAG + value: aws-node-termination-handler/managed + - name: USE_PROVIDER_ID + value: "true" - name: DRY_RUN value: "false" - name: CORDON_ONLY @@ -190,6 +197,8 @@ spec: value: "120" - name: EMIT_KUBERNETES_EVENTS value: "true" + - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS + value: "-1" - name: ENABLE_SPOT_INTERRUPTION_DRAINING value: "true" - name: ENABLE_SCHEDULED_EVENT_DRAINING @@ -199,12 +208,27 @@ spec: - name: ENABLE_REBALANCE_DRAINING value: "false" - name: ENABLE_SQS_TERMINATION_DRAINING - value: "false" - - name: UPTIME_FROM_FILE - value: /proc/uptime + value: "true" + - name: QUEUE_URL + value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth + - name: WORKERS + value: "10" image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 name: aws-node-termination-handler + ports: + - containerPort: 8080 + name: liveness-probe + protocol: TCP + - containerPort: 9092 + name: metrics + protocol: TCP resources: requests: cpu: 50m @@ -214,25 +238,54 @@ spec: readOnlyRootFilesystem: true runAsGroup: 1000 runAsNonRoot: true - volumeMounts: - - mountPath: /proc/uptime - name: uptime - readOnly: true - dnsPolicy: ClusterFirstWithHostNet + runAsUser: 1000 hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical + nodeSelector: null + priorityClassName: system-cluster-critical securityContext: fsGroup: 1000 serviceAccountName: aws-node-termination-handler tolerations: - - operator: Exists - volumes: - - hostPath: - path: /proc/uptime - name: uptime - updateStrategy: - rollingUpdate: - maxUnavailable: 25% - type: RollingUpdate + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: 
kubernetes.io/hostname
+        whenUnsatisfiable: DoNotSchedule
+
+---
+
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  creationTimestamp: null
+  labels:
+    addon.kops.k8s.io/name: node-termination-handler.aws
+    app.kubernetes.io/instance: aws-node-termination-handler
+    app.kubernetes.io/managed-by: kops
+    app.kubernetes.io/name: aws-node-termination-handler
+    k8s-addon: node-termination-handler.aws
+  name: aws-node-termination-handler
+  namespace: kube-system
+spec:
+  maxUnavailable: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: aws-node-termination-handler
+      app.kubernetes.io/name: aws-node-termination-handler
+      kops.k8s.io/nth-mode: sqs
diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_sqs_queue_minimal-example-com-nth_policy b/tests/integration/update_cluster/many-addons-ccm/data/aws_sqs_queue_minimal-example-com-nth_policy
new file mode 100644
index 0000000000..fece0c117d
--- /dev/null
+++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_sqs_queue_minimal-example-com-nth_policy
@@ -0,0 +1,11 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [{
+    "Effect": "Allow",
+    "Principal": {
+      "Service": ["events.amazonaws.com", "sqs.amazonaws.com"]
+    },
+    "Action": "sqs:SendMessage",
+    "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
+  }]
+ }
diff --git a/tests/integration/update_cluster/many-addons-ccm/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm/in-v1alpha2.yaml
index ce2e9ae56b..4467c7658f 100644
--- a/tests/integration/update_cluster/many-addons-ccm/in-v1alpha2.yaml
+++ b/tests/integration/update_cluster/many-addons-ccm/in-v1alpha2.yaml
@@ -41,7 +41,6 @@ spec:
     amazonvpc: {}
   nodeTerminationHandler:
     enabled: true
-    enableSQSTerminationDraining: false
   nonMasqueradeCIDR: 172.20.0.0/16
   snapshotController:
     enabled: true
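The fixture change above is the crux of this diff: the `many-addons-ccm` input no longer pins `enableSQSTerminationDraining: false`, and the regenerated addon manifest renders with `ENABLE_SQS_TERMINATION_DRAINING` set to `"true"` plus the queue URL. Read together, the fixtures indicate that leaving the field unset now selects Queue Processor mode; that default is inferred from these golden files. A minimal cluster-spec sketch for opting in explicitly, with field names taken from the fixtures in this diff:

  # Sketch only: NTH in Queue Processor (SQS) mode.
  spec:
    nodeTerminationHandler:
      enabled: true
      enableSQSTerminationDraining: true
      # Instances are only drained if they carry this tag; kOps adds it to
      # the ASGs and launch templates, as the hunks below show.
      managedASGTag: aws-node-termination-handler/managed

The Terraform diff that follows covers everything kOps provisions for this mode: the per-ASG `aws-node-termination-handler/managed` tag, an `autoscaling:EC2_INSTANCE_TERMINATING` lifecycle hook per ASG, five EventBridge rules with SQS targets, and the `minimal-example-com-nth` queue itself.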
diff --git a/tests/integration/update_cluster/many-addons-ccm/kubernetes.tf b/tests/integration/update_cluster/many-addons-ccm/kubernetes.tf
index fed6d911e8..970dd8d732 100644
--- a/tests/integration/update_cluster/many-addons-ccm/kubernetes.tf
+++ b/tests/integration/update_cluster/many-addons-ccm/kubernetes.tf
@@ -117,6 +117,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com"
     propagate_at_launch = true
     value               = "master-us-test-1a.masters.minimal.example.com"
   }
+  tag {
+    key                 = "aws-node-termination-handler/managed"
+    propagate_at_launch = true
+    value               = ""
+  }
   tag {
     key                 = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
     propagate_at_launch = true
@@ -187,6 +192,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
     propagate_at_launch = true
     value               = "nodes.minimal.example.com"
   }
+  tag {
+    key                 = "aws-node-termination-handler/managed"
+    propagate_at_launch = true
+    value               = ""
+  }
   tag {
     key                 = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
     propagate_at_launch = true
@@ -215,6 +225,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
   vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
 }
 
+resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
+  autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
+  default_result         = "CONTINUE"
+  heartbeat_timeout      = 300
+  lifecycle_transition   = "autoscaling:EC2_INSTANCE_TERMINATING"
+  name                   = "master-us-test-1a-NTHLifecycleHook"
+}
+
+resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
+  autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
+  default_result         = "CONTINUE"
+  heartbeat_timeout      = 300
+  lifecycle_transition   = "autoscaling:EC2_INSTANCE_TERMINATING"
+  name                   = "nodes-NTHLifecycleHook"
+}
+
+resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
+  event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
+  name          = "minimal.example.com-ASGLifecycle"
+  tags = {
+    "KubernetesCluster"                         = "minimal.example.com"
+    "Name"                                      = "minimal.example.com-ASGLifecycle"
+    "kubernetes.io/cluster/minimal.example.com" = "owned"
+  }
+}
+
+resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
+  event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
+  name          = "minimal.example.com-InstanceScheduledChange"
+  tags = {
+    "KubernetesCluster"                         = "minimal.example.com"
+    "Name"                                      = "minimal.example.com-InstanceScheduledChange"
+    "kubernetes.io/cluster/minimal.example.com" = "owned"
+  }
+}
+
+resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
+  event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
+  name          = "minimal.example.com-InstanceStateChange"
+  tags = {
+    "KubernetesCluster"                         = "minimal.example.com"
+    "Name"                                      = "minimal.example.com-InstanceStateChange"
+    "kubernetes.io/cluster/minimal.example.com" = "owned"
+  }
+}
+
+resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" {
+  event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern")
+  name          = "minimal.example.com-RebalanceRecommendation"
+  tags = {
+    "KubernetesCluster"                         = "minimal.example.com"
+    "Name"                                      = "minimal.example.com-RebalanceRecommendation"
+    "kubernetes.io/cluster/minimal.example.com" = "owned"
+  }
+}
+
+resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
+  event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
+  name          = "minimal.example.com-SpotInterruption"
+  tags = {
+    "KubernetesCluster"                         = "minimal.example.com"
+    "Name"                                      = "minimal.example.com-SpotInterruption"
+    "kubernetes.io/cluster/minimal.example.com" = "owned"
+  }
+}
+
+resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
+  arn  = aws_sqs_queue.minimal-example-com-nth.arn
+  rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
+}
+
+resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
+  arn  = aws_sqs_queue.minimal-example-com-nth.arn
+  rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
+}
+
+resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
+  arn  = aws_sqs_queue.minimal-example-com-nth.arn
+  rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
+}
+
+resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" {
+  arn  = aws_sqs_queue.minimal-example-com-nth.arn
+  rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id
+}
+
+resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
+  arn  = aws_sqs_queue.minimal-example-com-nth.arn
+  rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
+}
+
 resource "aws_ebs_volume"
"us-test-1a-etcd-events-minimal-example-com" { availability_zone = "us-test-1a" encrypted = false @@ -366,6 +467,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -382,6 +484,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -396,6 +499,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "master-us-test-1a.masters.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" @@ -451,6 +555,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -463,6 +568,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -473,6 +579,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" { tags = { "KubernetesCluster" = "minimal.example.com" "Name" = "nodes.minimal.example.com" + "aws-node-termination-handler/managed" = "" "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" "k8s.io/role/node" = "1" @@ -866,6 +973,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1 type = "ingress" } +resource "aws_sqs_queue" "minimal-example-com-nth" { + message_retention_seconds = 300 + name = "minimal-example-com-nth" + policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy") + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal-example-com-nth" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + resource "aws_subnet" "us-test-1a-minimal-example-com" { availability_zone = "us-test-1a" cidr_block = "172.20.32.0/19" diff --git 
a/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern new file mode 100644 index 0000000000..c8db9dbe9c --- /dev/null +++ b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern @@ -0,0 +1 @@ +{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]} diff --git a/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern new file mode 100644 index 0000000000..fb4ea7defd --- /dev/null +++ b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}} diff --git a/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern new file mode 100644 index 0000000000..8c2916419d --- /dev/null +++ b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]} diff --git a/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern new file mode 100644 index 0000000000..226b0ac52d --- /dev/null +++ b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]} diff --git a/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern new file mode 100644 index 0000000000..2d0e83b416 --- /dev/null +++ b/tests/integration/update_cluster/many-addons/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]} diff --git a/tests/integration/update_cluster/many-addons/data/aws_iam_role_policy_masters.minimal.example.com_policy b/tests/integration/update_cluster/many-addons/data/aws_iam_role_policy_masters.minimal.example.com_policy index 72445655e8..2d358213c7 100644 --- a/tests/integration/update_cluster/many-addons/data/aws_iam_role_policy_masters.minimal.example.com_policy +++ b/tests/integration/update_cluster/many-addons/data/aws_iam_role_policy_masters.minimal.example.com_policy @@ -268,13 +268,16 @@ "iam:GetServerCertificate", "iam:ListServerCertificates", "kms:DescribeKey", - "kms:GenerateRandom" + "kms:GenerateRandom", + "sqs:DeleteMessage", 
+ "sqs:ReceiveMessage" ], "Effect": "Allow", "Resource": "*" }, { "Action": [ + "autoscaling:CompleteLifecycleAction", "autoscaling:SetDesiredCapacity", "autoscaling:TerminateInstanceInAutoScalingGroup", "ec2:AttachVolume", diff --git a/tests/integration/update_cluster/many-addons/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons/data/aws_s3_object_cluster-completed.spec_content index bd87568ff0..a78e46f3a3 100644 --- a/tests/integration/update_cluster/many-addons/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons/data/aws_s3_object_cluster-completed.spec_content @@ -205,7 +205,6 @@ spec: cpuRequest: 50m enableRebalanceDraining: false enableRebalanceMonitoring: false - enableSQSTerminationDraining: false enableScheduledEventDraining: false enableSpotInterruptionDraining: true enabled: true diff --git a/tests/integration/update_cluster/many-addons/data/aws_s3_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons/data/aws_s3_object_minimal.example.com-addons-bootstrap_content index 5fac835ada..ac5a465c6f 100644 --- a/tests/integration/update_cluster/many-addons/data/aws_s3_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons/data/aws_s3_object_minimal.example.com-addons-bootstrap_content @@ -62,7 +62,7 @@ spec: version: 9.99.0 - id: k8s-1.11 manifest: node-termination-handler.aws/k8s-1.11.yaml - manifestHash: a6ccfd21bb3ab6ffbc5d48580197c2ecbbcf3ad68043b4c068eb4cc40405fd2c + manifestHash: 0c08eb3cb6900ebc1bdd84104d498ae007983f483b71b4628a460ba48181dd81 name: node-termination-handler.aws prune: kinds: @@ -77,17 +77,19 @@ spec: - group: apps kind: DaemonSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: apps kind: Deployment labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: apps kind: StatefulSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - group: policy kind: PodDisruptionBudget labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops diff --git a/tests/integration/update_cluster/many-addons/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/many-addons/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content index 476744fd6c..d5f7249ddd 100644 --- a/tests/integration/update_cluster/many-addons/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content +++ b/tests/integration/update_cluster/many-addons/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -99,12 +99,12 @@ subjects: --- apiVersion: apps/v1 -kind: DaemonSet +kind: Deployment metadata: creationTimestamp: null labels: addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/managed-by: kops app.kubernetes.io/name: aws-node-termination-handler @@ 
-115,6 +115,7 @@ metadata: name: aws-node-termination-handler namespace: kube-system spec: + replicas: 1 selector: matchLabels: app.kubernetes.io/instance: aws-node-termination-handler @@ -124,11 +125,12 @@ spec: metadata: creationTimestamp: null labels: - app.kubernetes.io/component: daemonset + app.kubernetes.io/component: deployment app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/name: aws-node-termination-handler k8s-app: aws-node-termination-handler kops.k8s.io/managed-by: kops + kops.k8s.io/nth-mode: sqs kubernetes.io/os: linux spec: affinity: @@ -136,10 +138,11 @@ spec: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: eks.amazonaws.com/compute-type - operator: NotIn - values: - - fargate + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists containers: - env: - name: NODE_NAME @@ -155,7 +158,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: ENABLE_PROBES_SERVER - value: "false" + value: "true" - name: PROBES_SERVER_PORT value: "8080" - name: PROBES_SERVER_ENDPOINT @@ -170,8 +173,12 @@ spec: value: "false" - name: PROMETHEUS_SERVER_PORT value: "9092" - - name: METADATA_TRIES - value: "3" + - name: CHECK_TAG_BEFORE_DRAINING + value: "true" + - name: MANAGED_TAG + value: aws-node-termination-handler/managed + - name: USE_PROVIDER_ID + value: "true" - name: DRY_RUN value: "false" - name: CORDON_ONLY @@ -190,6 +197,8 @@ spec: value: "120" - name: EMIT_KUBERNETES_EVENTS value: "true" + - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS + value: "-1" - name: ENABLE_SPOT_INTERRUPTION_DRAINING value: "true" - name: ENABLE_SCHEDULED_EVENT_DRAINING @@ -199,12 +208,27 @@ spec: - name: ENABLE_REBALANCE_DRAINING value: "false" - name: ENABLE_SQS_TERMINATION_DRAINING - value: "false" - - name: UPTIME_FROM_FILE - value: /proc/uptime + value: "true" + - name: QUEUE_URL + value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth + - name: WORKERS + value: "10" image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 name: aws-node-termination-handler + ports: + - containerPort: 8080 + name: liveness-probe + protocol: TCP + - containerPort: 9092 + name: metrics + protocol: TCP resources: requests: cpu: 50m @@ -214,25 +238,54 @@ spec: readOnlyRootFilesystem: true runAsGroup: 1000 runAsNonRoot: true - volumeMounts: - - mountPath: /proc/uptime - name: uptime - readOnly: true - dnsPolicy: ClusterFirstWithHostNet + runAsUser: 1000 hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical + nodeSelector: null + priorityClassName: system-cluster-critical securityContext: fsGroup: 1000 serviceAccountName: aws-node-termination-handler tolerations: - - operator: Exists - volumes: - - hostPath: - path: /proc/uptime - name: uptime - updateStrategy: - rollingUpdate: - maxUnavailable: 25% - type: RollingUpdate + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - 
labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs diff --git a/tests/integration/update_cluster/many-addons/data/aws_sqs_queue_minimal-example-com-nth_policy b/tests/integration/update_cluster/many-addons/data/aws_sqs_queue_minimal-example-com-nth_policy new file mode 100644 index 0000000000..fece0c117d --- /dev/null +++ b/tests/integration/update_cluster/many-addons/data/aws_sqs_queue_minimal-example-com-nth_policy @@ -0,0 +1,11 @@ +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "Service": ["events.amazonaws.com", "sqs.amazonaws.com"] + }, + "Action": "sqs:SendMessage", + "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth" + }] + } diff --git a/tests/integration/update_cluster/many-addons/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons/in-v1alpha2.yaml index 4e5667396b..31b773b568 100644 --- a/tests/integration/update_cluster/many-addons/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons/in-v1alpha2.yaml @@ -41,7 +41,6 @@ spec: amazonvpc: {} nodeTerminationHandler: enabled: true - enableSQSTerminationDraining: false nonMasqueradeCIDR: 172.20.0.0/16 snapshotController: enabled: true diff --git a/tests/integration/update_cluster/many-addons/kubernetes.tf b/tests/integration/update_cluster/many-addons/kubernetes.tf index 5c9db12b37..d21bedc13b 100644 --- a/tests/integration/update_cluster/many-addons/kubernetes.tf +++ b/tests/integration/update_cluster/many-addons/kubernetes.tf @@ -117,6 +117,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" propagate_at_launch = true value = "master-us-test-1a.masters.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" propagate_at_launch = true @@ -187,6 +192,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { propagate_at_launch = true value = "nodes.minimal.example.com" } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } tag { key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" propagate_at_launch = true @@ -215,6 +225,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" { vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id] } +resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = 
"master-us-test-1a-NTHLifecycleHook" +} + +resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "nodes-NTHLifecycleHook" +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern") + name = "minimal.example.com-ASGLifecycle" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-ASGLifecycle" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern") + name = "minimal.example.com-InstanceScheduledChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceScheduledChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern") + name = "minimal.example.com-InstanceStateChange" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-InstanceStateChange" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern") + name = "minimal.example.com-RebalanceRecommendation" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-RebalanceRecommendation" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern") + name = "minimal.example.com-SpotInterruption" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com-SpotInterruption" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id +} + +resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" { + arn = aws_sqs_queue.minimal-example-com-nth.arn + rule 
= aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
+}
+
 resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
   availability_zone = "us-test-1a"
   encrypted         = false
@@ -366,6 +467,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
     tags = {
       "KubernetesCluster"                                                                   = "minimal.example.com"
      "Name"                                                                                = "master-us-test-1a.masters.minimal.example.com"
+      "aws-node-termination-handler/managed"                                                = ""
       "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"       = ""
       "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"                    = "master"
       "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@@ -382,6 +484,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
     tags = {
       "KubernetesCluster"                                                                   = "minimal.example.com"
       "Name"                                                                                = "master-us-test-1a.masters.minimal.example.com"
+      "aws-node-termination-handler/managed"                                                = ""
       "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"       = ""
       "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"                    = "master"
       "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@@ -396,6 +499,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
   tags = {
     "KubernetesCluster"                                                                   = "minimal.example.com"
     "Name"                                                                                = "master-us-test-1a.masters.minimal.example.com"
+    "aws-node-termination-handler/managed"                                                = ""
     "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"       = ""
     "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"                    = "master"
     "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@@ -451,6 +555,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
     tags = {
       "KubernetesCluster"                                                          = "minimal.example.com"
       "Name"                                                                       = "nodes.minimal.example.com"
+      "aws-node-termination-handler/managed"                                       = ""
       "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"           = "node"
       "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
       "k8s.io/role/node"                                                           = "1"
@@ -463,6 +568,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
     tags = {
       "KubernetesCluster"                                                          = "minimal.example.com"
       "Name"                                                                       = "nodes.minimal.example.com"
+      "aws-node-termination-handler/managed"                                       = ""
       "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"           = "node"
       "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
       "k8s.io/role/node"                                                           = "1"
@@ -473,6 +579,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
   tags = {
     "KubernetesCluster"                                                          = "minimal.example.com"
     "Name"                                                                       = "nodes.minimal.example.com"
+    "aws-node-termination-handler/managed"                                       = ""
    "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"           = "node"
     "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
     "k8s.io/role/node"                                                           = "1"
@@ -858,6 +965,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1
   type = "ingress"
 }
 
+resource "aws_sqs_queue" "minimal-example-com-nth" {
+  message_retention_seconds = 300
+  name                      = "minimal-example-com-nth"
+  policy                    = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
+  tags = {
+    "KubernetesCluster"                         = "minimal.example.com"
+    "Name"                                      = "minimal-example-com-nth"
+    "kubernetes.io/cluster/minimal.example.com" = "owned"
+  }
+}
+
 resource "aws_subnet" "us-test-1a-minimal-example-com" {
   availability_zone = "us-test-1a"
   cidr_block        = "172.20.32.0/19"
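By contrast, the `nth-imds-processor-irsa` fixtures introduced below keep Node Termination Handler in its original IMDS (DaemonSet) mode, so none of the SQS, EventBridge, or lifecycle-hook resources above are expected there; the fixture's `cluster-completed.spec` pins the flag off. A sketch of that spec fragment, with values copied from the fixture later in this diff:

  # Sketch only: NTH kept in IMDS mode; no queue resources are provisioned.
  spec:
    nodeTerminationHandler:
      enabled: true
      enableSQSTerminationDraining: false
      enableSpotInterruptionDraining: true
      enableScheduledEventDraining: false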
diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566_policy b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566_policy
new file mode 100644
index 0000000000..3266f85030
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566_policy
@@ -0,0 +1,17 @@
+{
+  "Statement": [
+    {
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "discovery.example.com/minimal.example.com:sub": "system:serviceaccount:kube-system:aws-node-termination-handler"
+        }
+      },
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com"
+      }
+    }
+  ],
+  "Version": "2012-10-17"
+}
diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer_policy b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer_policy
new file mode 100644
index 0000000000..a1f5c8e2f8
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer_policy
@@ -0,0 +1,17 @@
+{
+  "Statement": [
+    {
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "discovery.example.com/minimal.example.com:sub": "system:serviceaccount:kube-system:dns-controller"
+        }
+      },
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com"
+      }
+    }
+  ],
+  "Version": "2012-10-17"
+}
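The two trust policies above implement IRSA: each role trusts the cluster's OIDC provider and is scoped to a single ServiceAccount through the `:sub` condition. Note the role-name suffix in `nthimdsproces-vt9566`: the long `<serviceaccount>.sa.<clustername>` name is apparently truncated to fit the IAM role-name limit and given a short hash suffix. A sketch of the cluster-spec settings behind these fixtures, with values copied from the `cluster-completed.spec` content later in this diff:

  # Sketch only: IRSA wiring used by the nth-imds-processor-irsa fixtures.
  spec:
    iam:
      # per-ServiceAccount IAM roles instead of shared node roles
      useServiceAccountExternalPermissions: true
    serviceAccountIssuerDiscovery:
      discoveryStore: memfs://discovery.example.com/minimal.example.com
      enableAWSOIDCProvider: true

The permission policies that follow then grant each role only what its workload needs (Route 53 for dns-controller; SQS receive/delete and `autoscaling:CompleteLifecycleAction` for NTH).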
diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_masters.nthsqsresources.longclustername.example.com_policy b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_masters.nthimdsprocessor.longclustername.example.com_policy
similarity index 100%
rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_masters.nthsqsresources.longclustername.example.com_policy
rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_masters.nthimdsprocessor.longclustername.example.com_policy
diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_nodes.nthsqsresources.longclustername.example.com_policy b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_nodes.nthimdsprocessor.longclustername.example.com_policy
similarity index 100%
rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_nodes.nthsqsresources.longclustername.example.com_policy
rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_nodes.nthimdsprocessor.longclustername.example.com_policy
diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566_policy b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566_policy
new file mode 100644
index 0000000000..ec41720841
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566_policy
@@ -0,0 +1,26 @@
+{
+  "Statement": [
+    {
+      "Action": [
+        "autoscaling:DescribeAutoScalingInstances",
+        "autoscaling:DescribeTags",
+        "ec2:DescribeInstances",
+        "sqs:DeleteMessage",
+        "sqs:ReceiveMessage"
+      ],
+      "Effect": "Allow",
+      "Resource": "*"
+    },
+    {
+      "Action": "autoscaling:CompleteLifecycleAction",
+      "Condition": {
+        "StringEquals": {
+          "aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com"
+        }
+      },
+      "Effect": "Allow",
+      "Resource": "*"
+    }
+  ],
+  "Version": "2012-10-17"
+}
diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer_policy b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer_policy
new file mode 100644
index 0000000000..48c0a0297b
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer_policy
@@ -0,0 +1,35 @@
+{
+  "Statement": [
+    {
+      "Action": [
+        "route53:ChangeResourceRecordSets",
+        "route53:ListResourceRecordSets",
+        "route53:GetHostedZone"
+      ],
+      "Effect": "Allow",
+      "Resource": [
+        "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO"
+      ]
+    },
+    {
+      "Action": [
+        "route53:GetChange"
+      ],
+      "Effect": "Allow",
+      "Resource": [
+        "arn:aws-test:route53:::change/*"
+      ]
+    },
+    {
+      "Action": [
+        "route53:ListHostedZones",
+        "route53:ListTagsForResource"
+      ],
+      "Effect": "Allow",
+      "Resource": [
+        "*"
+      ]
+    }
+  ],
+  "Version": "2012-10-17"
+}
diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_masters.nthimdsprocessor.longclustername.example.com_policy b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_masters.nthimdsprocessor.longclustername.example.com_policy
new file mode 100644
index 0000000000..ed8e0bc4d2
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_masters.nthimdsprocessor.longclustername.example.com_policy
@@ -0,0 +1,231 @@
+{
+  "Statement": [
+    {
+      "Action": "ec2:AttachVolume",
+      "Condition": {
+        "StringEquals": {
+          "aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com",
+          "aws:ResourceTag/k8s.io/role/master": "1"
+        }
+      },
+      "Effect": "Allow",
+      "Resource": [
+        "*"
+      ]
+    },
+    {
+      "Action": [
+        "s3:Get*"
+      ],
+      "Effect": "Allow",
+      "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/*"
+    },
+    {
+      "Action": [
+        "s3:GetObject",
+        "s3:DeleteObject",
+        "s3:DeleteObjectVersion",
+        "s3:PutObject"
+      ],
+      "Effect": "Allow",
+      "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main/*"
+    },
+    {
+      "Action": [
+        "s3:GetObject",
+        "s3:DeleteObject",
+        "s3:DeleteObjectVersion",
+        "s3:PutObject"
+      ],
+      "Effect": "Allow",
+      "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events/*"
+    },
+    {
+      "Action": [
+        "s3:GetBucketLocation",
+        "s3:GetEncryptionConfiguration",
+        "s3:ListBucket",
+        "s3:ListBucketVersions"
+      ],
+      "Effect": "Allow",
+      "Resource": [
+        "arn:aws-test:s3:::placeholder-read-bucket"
+      ]
+    },
+ { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-write-bucket" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeTags", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcs", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:ModifyInstanceAttribute", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + 
"elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws-test:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_policy_nodes.nthsqsresources.longclustername.example.com_policy b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_nodes.nthimdsprocessor.longclustername.example.com_policy similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_policy_nodes.nthsqsresources.longclustername.example.com_policy rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_iam_role_policy_nodes.nthimdsprocessor.longclustername.example.com_policy diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_key_pair_kubernetes.nthsqsresources.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_key_pair_kubernetes.nthimdsprocessor.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_key_pair_kubernetes.nthsqsresources.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_key_pair_kubernetes.nthimdsprocessor.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_launch_template_master-us-test-1a.masters.nthsqsresources.longclustername.example.com_user_data b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_launch_template_master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com_user_data similarity index 94% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_launch_template_master-us-test-1a.masters.nthsqsresources.longclustername.example.com_user_data rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_launch_template_master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com_user_data index c71592ba1c..7f703cdd03 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_launch_template_master-us-test-1a.masters.nthsqsresources.longclustername.example.com_user_data +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_launch_template_master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com_user_data @@ -177,8 +177,8 @@ kubeAPIServer: requestheaderUsernameHeaders: - X-Remote-User securePort: 443 - serviceAccountIssuer: 
https://api.internal.nthsqsresources.longclustername.example.com - serviceAccountJWKSURI: https://api.internal.nthsqsresources.longclustername.example.com/openid/v1/jwks + serviceAccountIssuer: https://discovery.example.com/minimal.example.com + serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks serviceClusterIPRange: 100.64.0.0/13 storageBackend: etcd3 kubeControllerManager: @@ -186,7 +186,7 @@ kubeControllerManager: attachDetachReconcileSyncPeriod: 1m0s cloudProvider: aws clusterCIDR: 100.96.0.0/11 - clusterName: nthsqsresources.longclustername.example.com + clusterName: nthimdsprocessor.longclustername.example.com configureCloudRoutes: false image: registry.k8s.io/kube-controller-manager:v1.21.0 leaderElection: @@ -242,10 +242,10 @@ __EOF_CLUSTER_SPEC cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' CloudProvider: aws -ConfigBase: memfs://clusters.example.com/nthsqsresources.longclustername.example.com +ConfigBase: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com InstanceGroupName: master-us-test-1a InstanceGroupRole: ControlPlane -NodeupConfigHash: KfWSX4emtavW2QDKkc+Wok3rLiV+c1jzaUH2UIu6BBI= +NodeupConfigHash: VY+yD9mAGWb9ytiA3sZITojjBw0TRyUzJ0XVvFrkxAo= __EOF_KUBE_ENV diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_launch_template_nodes.nthsqsresources.longclustername.example.com_user_data b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_launch_template_nodes.nthimdsprocessor.longclustername.example.com_user_data similarity index 97% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_launch_template_nodes.nthsqsresources.longclustername.example.com_user_data rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_launch_template_nodes.nthimdsprocessor.longclustername.example.com_user_data index a4a907535d..b644b450fb 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_launch_template_nodes.nthsqsresources.longclustername.example.com_user_data +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_launch_template_nodes.nthimdsprocessor.longclustername.example.com_user_data @@ -182,10 +182,10 @@ ConfigServer: MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== -----END CERTIFICATE----- - server: https://kops-controller.internal.nthsqsresources.longclustername.example.com:3988/ + server: https://kops-controller.internal.nthimdsprocessor.longclustername.example.com:3988/ InstanceGroupName: nodes InstanceGroupRole: Node -NodeupConfigHash: G8g6T2b7gcigf604l9EuzyDS4NkqKM4RMwILiPmi/2g= +NodeupConfigHash: vosxmdDYcHeTNZAcHYhro8U29h3arjlmlqVfVl73yGw= __EOF_KUBE_ENV diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000000..f16e46b25b --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,198 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: nthimdsprocessor.longclustername.example.com +spec: + api: + dns: {} + authorization: + alwaysAllow: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + 
configBase: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com + configStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com + containerRuntime: containerd + containerd: + logLevel: info + version: 1.4.13 + dnsZone: Z1AFAKE1ZON3YO + docker: + skipInstall: true + etcdClusters: + - backups: + backupStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + version: 3.4.13 + - backups: + backupStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + version: 3.4.13 + externalDns: + provider: dns-controller + iam: + legacy: false + useServiceAccountExternalPermissions: true + keyStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://discovery.example.com/minimal.example.com + serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: nthimdsprocessor.longclustername.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.8 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 + kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + 
shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + kubernetesVersion: 1.21.0 + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.nthimdsprocessor.longclustername.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nodeTerminationHandler: + cpuRequest: 50m + enableRebalanceDraining: false + enableRebalanceMonitoring: false + enableSQSTerminationDraining: false + enableScheduledEventDraining: false + enableSpotInterruptionDraining: true + enabled: true + excludeFromLoadBalancers: true + managedASGTag: aws-node-termination-handler/managed + memoryRequest: 64Mi + prometheusEnable: false + version: v1.18.1 + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/secrets + serviceAccountIssuerDiscovery: + discoveryStore: memfs://discovery.example.com/minimal.example.com + enableAWSOIDCProvider: true + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + topology: + dns: + type: Public + masters: public + nodes: public diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_discovery.json_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_discovery.json_content new file mode 100644 index 0000000000..aba05dfd1a --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_discovery.json_content @@ -0,0 +1,18 @@ +{ +"issuer": "https://discovery.example.com/minimal.example.com", +"jwks_uri": "https://discovery.example.com/minimal.example.com/openid/v1/jwks", +"authorization_endpoint": "urn:kubernetes:programmatic_authorization", +"response_types_supported": [ +"id_token" +], +"subject_types_supported": [ +"public" +], +"id_token_signing_alg_values_supported": [ +"RS256" +], +"claims_supported": [ +"sub", +"iss" +] +} diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_etcd-cluster-spec-events_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_etcd-cluster-spec-events_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_etcd-cluster-spec-events_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_etcd-cluster-spec-events_content diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_etcd-cluster-spec-main_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_etcd-cluster-spec-main_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_etcd-cluster-spec-main_content diff --git 
a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_keys.json_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_keys.json_content new file mode 100644 index 0000000000..ddcbc6ed75 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_keys.json_content @@ -0,0 +1,20 @@ +{ +"keys": [ +{ +"use": "sig", +"kty": "RSA", +"kid": "3mNcULfgtWECYyZWY5ow1rOHjiRwEZHx28HQcRec3Ew", +"alg": "RS256", +"n": "2JbeF8dNwqfEKKD65aGlVs58fWkA0qZdVLKw8qATzRBJTi1nqbj2kAR4gyy_C8Mxouxva_om9d7Sq8Ka55T7-w", +"e": "AQAB" +}, +{ +"use": "sig", +"kty": "RSA", +"kid": "G-cZ10iKJqrXhR15ivI7Lg2q_cuL0zN9ouL0vF67FLc", +"alg": "RS256", +"n": "o4Tridlsf4Yz3UAiup_scSTiG_OqxkUW3Fz7zGKvVcLeYj9GEIKuzoB1VFk1nboDq4cCuGLfdzaQdCQKPIsDuw", +"e": "AQAB" +} +] +} diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_kops-version.txt_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_kops-version.txt_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_kops-version.txt_content diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content similarity index 85% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content index 6cced2a2a9..44d93cd147 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content @@ -12,12 +12,12 @@ spec: - /bin/sh - -c - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager - --backup-store=memfs://clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/events + --backup-store=memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true - --dns-suffix=.internal.nthsqsresources.longclustername.example.com --grpc-port=3997 + --dns-suffix=.internal.nthimdsprocessor.longclustername.example.com --grpc-port=3997 --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events - --volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/nthsqsresources.longclustername.example.com=owned + --volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com=owned > /tmp/pipe 2>&1 image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 name: etcd-manager diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content 
similarity index 85% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content index 1582a5d6ae..123ec38676 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content @@ -12,12 +12,12 @@ spec: - /bin/sh - -c - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager - --backup-store=memfs://clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/main + --backup-store=memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true - --dns-suffix=.internal.nthsqsresources.longclustername.example.com --grpc-port=3996 + --dns-suffix=.internal.nthimdsprocessor.longclustername.example.com --grpc-port=3996 --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main - --volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/nthsqsresources.longclustername.example.com=owned + --volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com=owned > /tmp/pipe 2>&1 image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 name: etcd-manager diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nodeupconfig-master-us-test-1a_content similarity index 95% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nodeupconfig-master-us-test-1a_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nodeupconfig-master-us-test-1a_content index fb8d442d12..027fdbb58a 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nodeupconfig-master-us-test-1a_content +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nodeupconfig-master-us-test-1a_content @@ -37,8 +37,8 @@ APIServerConfig: requestheaderUsernameHeaders: - X-Remote-User securePort: 443 - serviceAccountIssuer: https://api.internal.nthsqsresources.longclustername.example.com - serviceAccountJWKSURI: https://api.internal.nthsqsresources.longclustername.example.com/openid/v1/jwks + serviceAccountIssuer: https://discovery.example.com/minimal.example.com + serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks serviceClusterIPRange: 100.64.0.0/13 
storageBackend: etcd3 ServiceAccountPublicKeys: | @@ -217,7 +217,7 @@ CAs: MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== -----END CERTIFICATE----- -ClusterName: nthsqsresources.longclustername.example.com +ClusterName: nthimdsprocessor.longclustername.example.com FileAssets: - content: | apiVersion: kubescheduler.config.k8s.io/v1beta1 @@ -264,13 +264,13 @@ KubeletConfig: - node-role.kubernetes.io/master=:NoSchedule UpdatePolicy: automatic channels: -- memfs://clusters.example.com/nthsqsresources.longclustername.example.com/addons/bootstrap-channel.yaml +- memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/bootstrap-channel.yaml containerdConfig: logLevel: info version: 1.4.13 etcdManifests: -- memfs://clusters.example.com/nthsqsresources.longclustername.example.com/manifests/etcd/main-master-us-test-1a.yaml -- memfs://clusters.example.com/nthsqsresources.longclustername.example.com/manifests/etcd/events-master-us-test-1a.yaml +- memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/etcd/main-master-us-test-1a.yaml +- memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/etcd/events-master-us-test-1a.yaml staticManifests: - key: kube-apiserver-healthcheck path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nodeupconfig-nodes_content similarity index 92% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nodeupconfig-nodes_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nodeupconfig-nodes_content index 4d64745490..ba386f5681 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nodeupconfig-nodes_content +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nodeupconfig-nodes_content @@ -10,7 +10,7 @@ Assets: - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz CAs: {} -ClusterName: nthsqsresources.longclustername.example.com +ClusterName: nthimdsprocessor.longclustername.example.com Hooks: - null - null @@ -38,7 +38,7 @@ KubeletConfig: shutdownGracePeriodCriticalPods: 10s UpdatePolicy: automatic channels: -- memfs://clusters.example.com/nthsqsresources.longclustername.example.com/addons/bootstrap-channel.yaml +- memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/bootstrap-channel.yaml containerdConfig: logLevel: info version: 1.4.13 diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-bootstrap_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-bootstrap_content new file mode 100644 index 0000000000..86a8db2e5d --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-bootstrap_content @@ -0,0 +1,91 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: 
+ addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: acd599090ff336e8892f45cd940caf00c2279f0a91bbd3cd40e70fae525e5119 + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: cd1e8f47fe52b13fee5536b0d4b4429ef256829d87a51cbc189fa0f21ff3503b + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 6c15ffbd08abce8d8595b6a807b41abec100fb33381adaf10a10891539a78ca2 + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.11 + manifest: node-termination-handler.aws/k8s-1.11.yaml + manifestHash: 441c711457eff20874830aa63ae35a0c94e0dc99cdd0c295e2907f85751f2e34 + name: node-termination-handler.aws + prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + selector: + k8s-addon: node-termination-handler.aws + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 diff --git 
a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000..4c50967734 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,155 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.26.0-alpha.2 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.2 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z1AFAKE1ZON3YO + - --internal-ipv4 + - --zone=*/* + - -v=2 + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + - name: AWS_ROLE_ARN + value: arn:aws-test:iam::123456789012:role/dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer + - name: AWS_WEB_IDENTITY_TOKEN_FILE + value: /var/run/secrets/amazonaws.com/token + image: registry.k8s.io/kops/dns-controller:1.26.0-alpha.2 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + volumeMounts: + - mountPath: /var/run/secrets/amazonaws.com/ + name: token-amazonaws-com + readOnly: true + dnsPolicy: Default + hostNetwork: true + priorityClassName: system-cluster-critical + securityContext: + fsGroup: 10001 + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + volumes: + - name: token-amazonaws-com + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + audience: amazonaws.com + expirationSeconds: 86400 + path: token + +--- + +apiVersion: v1 +kind: 
ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content similarity index 89% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content index 6745a08b29..a33763ea49 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -1,7 +1,7 @@ apiVersion: v1 data: config.yaml: | - {"cloud":"aws","configBase":"memfs://clusters.example.com/nthsqsresources.longclustername.example.com","secretStore":"memfs://clusters.example.com/nthsqsresources.longclustername.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.nthsqsresources.longclustername.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} + 
{"cloud":"aws","configBase":"memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com","secretStore":"memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.nthimdsprocessor.longclustername.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} kind: ConfigMap metadata: creationTimestamp: null @@ -33,7 +33,7 @@ spec: template: metadata: annotations: - dns.alpha.kubernetes.io/internal: kops-controller.internal.nthsqsresources.longclustername.example.com + dns.alpha.kubernetes.io/internal: kops-controller.internal.nthimdsprocessor.longclustername.example.com creationTimestamp: null labels: k8s-addon: kops-controller.addons.k8s.io diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-limit-range.addons.k8s.io_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-limit-range.addons.k8s.io_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-limit-range.addons.k8s.io_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-limit-range.addons.k8s.io_content diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content similarity index 66% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content index bc207ebf0d..3d9043d12c 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content +++ 
b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -99,12 +99,12 @@ subjects: --- apiVersion: apps/v1 -kind: Deployment +kind: DaemonSet metadata: creationTimestamp: null labels: addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/component: deployment + app.kubernetes.io/component: daemonset app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/managed-by: kops app.kubernetes.io/name: aws-node-termination-handler @@ -115,7 +115,6 @@ metadata: name: aws-node-termination-handler namespace: kube-system spec: - replicas: 1 selector: matchLabels: app.kubernetes.io/instance: aws-node-termination-handler @@ -125,12 +124,11 @@ spec: metadata: creationTimestamp: null labels: - app.kubernetes.io/component: deployment + app.kubernetes.io/component: daemonset app.kubernetes.io/instance: aws-node-termination-handler app.kubernetes.io/name: aws-node-termination-handler k8s-app: aws-node-termination-handler kops.k8s.io/managed-by: kops - kops.k8s.io/nth-mode: sqs kubernetes.io/os: linux spec: affinity: @@ -138,11 +136,10 @@ spec: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - - matchExpressions: - - key: node-role.kubernetes.io/master - operator: Exists + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate containers: - env: - name: NODE_NAME @@ -158,7 +155,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: ENABLE_PROBES_SERVER - value: "true" + value: "false" - name: PROBES_SERVER_PORT value: "8080" - name: PROBES_SERVER_ENDPOINT @@ -173,12 +170,8 @@ spec: value: "false" - name: PROMETHEUS_SERVER_PORT value: "9092" - - name: CHECK_TAG_BEFORE_DRAINING - value: "true" - - name: MANAGED_TAG - value: aws-node-termination-handler/managed - - name: USE_PROVIDER_ID - value: "true" + - name: METADATA_TRIES + value: "3" - name: DRY_RUN value: "false" - name: CORDON_ONLY @@ -197,8 +190,6 @@ spec: value: "120" - name: EMIT_KUBERNETES_EVENTS value: "true" - - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS - value: "-1" - name: ENABLE_SPOT_INTERRUPTION_DRAINING value: "true" - name: ENABLE_SCHEDULED_EVENT_DRAINING @@ -208,27 +199,16 @@ spec: - name: ENABLE_REBALANCE_DRAINING value: "false" - name: ENABLE_SQS_TERMINATION_DRAINING - value: "true" - - name: QUEUE_URL - value: https://sqs.us-test-1.amazonaws.com/123456789012/nthsqsresources-longclustername-example-com-nth - - name: WORKERS - value: "10" + value: "false" + - name: UPTIME_FROM_FILE + value: /proc/uptime + - name: AWS_ROLE_ARN + value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566 + - name: AWS_WEB_IDENTITY_TOKEN_FILE + value: /var/run/secrets/amazonaws.com/token image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 name: aws-node-termination-handler - ports: - - containerPort: 8080 - name: liveness-probe - protocol: TCP - - containerPort: 9092 - name: metrics - protocol: TCP resources: requests: cpu: 50m @@ -238,54 +218,36 @@ spec: readOnlyRootFilesystem: true runAsGroup: 1000 runAsNonRoot: true - runAsUser: 1000 + volumeMounts: + - mountPath: /proc/uptime + name: uptime + readOnly: true + - mountPath: /var/run/secrets/amazonaws.com/ + name: 
token-amazonaws-com + readOnly: true + dnsPolicy: ClusterFirstWithHostNet hostNetwork: true - nodeSelector: null - priorityClassName: system-cluster-critical + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical securityContext: fsGroup: 1000 serviceAccountName: aws-node-termination-handler tolerations: - - key: node-role.kubernetes.io/control-plane - operator: Exists - - key: node-role.kubernetes.io/master - operator: Exists - topologySpreadConstraints: - - labelSelector: - matchLabels: - app.kubernetes.io/instance: aws-node-termination-handler - app.kubernetes.io/name: aws-node-termination-handler - kops.k8s.io/nth-mode: sqs - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: ScheduleAnyway - - labelSelector: - matchLabels: - app.kubernetes.io/instance: aws-node-termination-handler - app.kubernetes.io/name: aws-node-termination-handler - kops.k8s.io/nth-mode: sqs - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - ---- - -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - creationTimestamp: null - labels: - addon.kops.k8s.io/name: node-termination-handler.aws - app.kubernetes.io/instance: aws-node-termination-handler - app.kubernetes.io/managed-by: kops - app.kubernetes.io/name: aws-node-termination-handler - k8s-addon: node-termination-handler.aws - name: aws-node-termination-handler - namespace: kube-system -spec: - maxUnavailable: 1 - selector: - matchLabels: - app.kubernetes.io/instance: aws-node-termination-handler - app.kubernetes.io/name: aws-node-termination-handler - kops.k8s.io/nth-mode: sqs + - operator: Exists + volumes: + - hostPath: + path: /proc/uptime + name: uptime + - name: token-amazonaws-com + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + audience: amazonaws.com + expirationSeconds: 86400 + path: token + updateStrategy: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content rename to tests/integration/update_cluster/nth-imds-processor-irsa/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content diff --git a/tests/integration/update_cluster/nth_sqs_resources/id_rsa.pub b/tests/integration/update_cluster/nth-imds-processor-irsa/id_rsa.pub similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/id_rsa.pub rename to tests/integration/update_cluster/nth-imds-processor-irsa/id_rsa.pub diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/in-v1alpha2.yaml b/tests/integration/update_cluster/nth-imds-processor-irsa/in-v1alpha2.yaml new file mode 100644 index 0000000000..dc3c6acb7f --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/in-v1alpha2.yaml @@ -0,0 +1,84 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: nthimdsprocessor.longclustername.example.com +spec: + kubernetesApiAccess: + - 0.0.0.0/0 + channel: stable + 
cloudProvider: aws + configBase: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com + etcdClusters: + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + iam: + useServiceAccountExternalPermissions: true + kubelet: + anonymousAuth: false + kubernetesVersion: v1.21.0 + masterPublicName: api.nthimdsprocessor.longclustername.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + serviceAccountIssuerDiscovery: + discoveryStore: memfs://discovery.example.com/minimal.example.com + enableAWSOIDCProvider: true + nodeTerminationHandler: + enabled: true + enableSQSTerminationDraining: false + sshAccess: + - 0.0.0.0/0 + topology: + masters: public + nodes: public + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: nodes + labels: + kops.k8s.io/cluster: nthimdsprocessor.longclustername.example.com +spec: + associatePublicIp: true + image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 + machineType: t2.medium + maxSize: 2 + minSize: 2 + role: Node + subnets: + - us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: master-us-test-1a + labels: + kops.k8s.io/cluster: nthimdsprocessor.longclustername.example.com +spec: + associatePublicIp: true + image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1a diff --git a/tests/integration/update_cluster/nth-imds-processor-irsa/kubernetes.tf b/tests/integration/update_cluster/nth-imds-processor-irsa/kubernetes.tf new file mode 100644 index 0000000000..2c0e526435 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor-irsa/kubernetes.tf @@ -0,0 +1,943 @@ +locals { + cluster_name = "nthimdsprocessor.longclustername.example.com" + iam_openid_connect_provider_arn = aws_iam_openid_connect_provider.nthimdsprocessor-longclustername-example-com.arn + iam_openid_connect_provider_issuer = "discovery.example.com/minimal.example.com" + kube-system-aws-node-termination-handler_role_arn = aws_iam_role.aws-node-termination-handler-kube-system-sa-nthimdsproces-vt9566.arn + kube-system-aws-node-termination-handler_role_name = aws_iam_role.aws-node-termination-handler-kube-system-sa-nthimdsproces-vt9566.name + kube-system-dns-controller_role_arn = aws_iam_role.dns-controller-kube-system-sa-nthimdsprocessor-longcluste-e6uuer.arn + kube-system-dns-controller_role_name = aws_iam_role.dns-controller-kube-system-sa-nthimdsprocessor-longcluste-e6uuer.name + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com.id] + master_security_group_ids = [aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id] + masters_role_arn = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.arn + masters_role_name = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-nthimdsprocessor-longclustername-example-com.id] + node_security_group_ids = [aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id] + node_subnet_ids = 
[aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id] + nodes_role_arn = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.arn + nodes_role_name = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.name + region = "us-test-1" + route_table_public_id = aws_route_table.nthimdsprocessor-longclustername-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id + vpc_cidr_block = aws_vpc.nthimdsprocessor-longclustername-example-com.cidr_block + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id + vpc_ipv6_cidr_block = aws_vpc.nthimdsprocessor-longclustername-example-com.ipv6_cidr_block + vpc_ipv6_cidr_length = local.vpc_ipv6_cidr_block == null ? null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0]) +} + +output "cluster_name" { + value = "nthimdsprocessor.longclustername.example.com" +} + +output "iam_openid_connect_provider_arn" { + value = aws_iam_openid_connect_provider.nthimdsprocessor-longclustername-example-com.arn +} + +output "iam_openid_connect_provider_issuer" { + value = "discovery.example.com/minimal.example.com" +} + +output "kube-system-aws-node-termination-handler_role_arn" { + value = aws_iam_role.aws-node-termination-handler-kube-system-sa-nthimdsproces-vt9566.arn +} + +output "kube-system-aws-node-termination-handler_role_name" { + value = aws_iam_role.aws-node-termination-handler-kube-system-sa-nthimdsproces-vt9566.name +} + +output "kube-system-dns-controller_role_arn" { + value = aws_iam_role.dns-controller-kube-system-sa-nthimdsprocessor-longcluste-e6uuer.arn +} + +output "kube-system-dns-controller_role_name" { + value = aws_iam_role.dns-controller-kube-system-sa-nthimdsprocessor-longcluste-e6uuer.name +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-nthimdsprocessor-longclustername-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_public_id" { + value = aws_route_table.nthimdsprocessor-longclustername-example-com.id +} + +output "subnet_us-test-1a_id" { + value = aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id +} + +output "vpc_cidr_block" { + value = aws_vpc.nthimdsprocessor-longclustername-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +output "vpc_ipv6_cidr_block" { + value = aws_vpc.nthimdsprocessor-longclustername-example-com.ipv6_cidr_block +} + +output "vpc_ipv6_cidr_length" { + value = local.vpc_ipv6_cidr_block == null ? 
null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0]) +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com.id + version = aws_launch_template.master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "nthimdsprocessor.longclustername.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/control-plane" + propagate_at_launch = true + value = "1" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-nthimdsprocessor-longclustername-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-nthimdsprocessor-longclustername-example-com.id + version = aws_launch_template.nodes-nthimdsprocessor-longclustername-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.nthimdsprocessor.longclustername.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "nthimdsprocessor.longclustername.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.nthimdsprocessor.longclustername.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id] +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-nthimdsprocessor-longclustername-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "us-test-1a.etcd-events.nthimdsprocessor.longclustername.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-nthimdsprocessor-longclustername-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "us-test-1a.etcd-main.nthimdsprocessor.longclustername.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_iam_instance_profile" "masters-nthimdsprocessor-longclustername-example-com" { + name = "masters.nthimdsprocessor.longclustername.example.com" + role = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.name + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "masters.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-nthimdsprocessor-longclustername-example-com" { + name = "nodes.nthimdsprocessor.longclustername.example.com" + role = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.name + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_iam_openid_connect_provider" "nthimdsprocessor-longclustername-example-com" { + client_id_list = ["amazonaws.com"] + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + thumbprint_list = ["9e99a48a9960b14926bb7f3b02e22da2b0ab7280", "a9d53002e97e00e043244f3d170d6f4c414104fd"] + url = "https://discovery.example.com/minimal.example.com" +} + +resource "aws_iam_role" "aws-node-termination-handler-kube-system-sa-nthimdsproces-vt9566" { + assume_role_policy = file("${path.module}/data/aws_iam_role_aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566_policy") + name = "aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" 
+ "Name" = "aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + "service-account.kops.k8s.io/name" = "aws-node-termination-handler" + "service-account.kops.k8s.io/namespace" = "kube-system" + } +} + +resource "aws_iam_role" "dns-controller-kube-system-sa-nthimdsprocessor-longcluste-e6uuer" { + assume_role_policy = file("${path.module}/data/aws_iam_role_dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer_policy") + name = "dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + "service-account.kops.k8s.io/name" = "dns-controller" + "service-account.kops.k8s.io/namespace" = "kube-system" + } +} + +resource "aws_iam_role" "masters-nthimdsprocessor-longclustername-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.nthimdsprocessor.longclustername.example.com_policy") + name = "masters.nthimdsprocessor.longclustername.example.com" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "masters.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-nthimdsprocessor-longclustername-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.nthimdsprocessor.longclustername.example.com_policy") + name = "nodes.nthimdsprocessor.longclustername.example.com" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "aws-node-termination-handler-kube-system-sa-nthimdsproces-vt9566" { + name = "aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566" + policy = file("${path.module}/data/aws_iam_role_policy_aws-node-termination-handler.kube-system.sa.nthimdsproces-vt9566_policy") + role = aws_iam_role.aws-node-termination-handler-kube-system-sa-nthimdsproces-vt9566.name +} + +resource "aws_iam_role_policy" "dns-controller-kube-system-sa-nthimdsprocessor-longcluste-e6uuer" { + name = "dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer" + policy = file("${path.module}/data/aws_iam_role_policy_dns-controller.kube-system.sa.nthimdsprocessor.longcluste-e6uuer_policy") + role = aws_iam_role.dns-controller-kube-system-sa-nthimdsprocessor-longcluste-e6uuer.name +} + +resource "aws_iam_role_policy" "masters-nthimdsprocessor-longclustername-example-com" { + name = "masters.nthimdsprocessor.longclustername.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.nthimdsprocessor.longclustername.example.com_policy") + role = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.name +} + +resource "aws_iam_role_policy" "nodes-nthimdsprocessor-longclustername-example-com" { + name = "nodes.nthimdsprocessor.longclustername.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.nthimdsprocessor.longclustername.example.com_policy") + role = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.name +} + +resource "aws_internet_gateway" 
"nthimdsprocessor-longclustername-example-com" { + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_key_pair" "kubernetes-nthimdsprocessor-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = "kubernetes.nthimdsprocessor.longclustername.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.nthimdsprocessor.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-nthimdsprocessor-longclustername-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = aws_key_pair.kubernetes-nthimdsprocessor-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com_user_data") +} + +resource "aws_launch_template" "nodes-nthimdsprocessor-longclustername-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-nthimdsprocessor-longclustername-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = aws_key_pair.kubernetes-nthimdsprocessor-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "nodes.nthimdsprocessor.longclustername.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = 
"nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_nodes.nthimdsprocessor.longclustername.example.com_user_data") +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.nthimdsprocessor-longclustername-example-com.id + route_table_id = aws_route_table.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.nthimdsprocessor-longclustername-example-com.id + route_table_id = aws_route_table.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_route_table" "nthimdsprocessor-longclustername-example-com" { + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_route_table_association" "us-test-1a-nthimdsprocessor-longclustername-example-com" { + route_table_id = aws_route_table.nthimdsprocessor-longclustername-example-com.id + subnet_id = aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "discovery-json" { + acl = "public-read" + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_discovery.json_content") + key = "discovery.example.com/minimal.example.com/.well-known/openid-configuration" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "keys-json" { + acl = "public-read" + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_keys.json_content") + key = "discovery.example.com/minimal.example.com/openid/v1/jwks" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = 
"clusters.example.com/nthimdsprocessor.longclustername.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/etcd/events-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/etcd/main-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/igconfig/control-plane/master-us-test-1a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-bootstrap_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = 
"testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-limit-range.addons.k8s.io_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-node-termination-handler-aws-k8s-1-11" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/node-termination-handler.aws/k8s-1.11.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "masters-nthimdsprocessor-longclustername-example-com" { + description = "Security group for masters" + name = "masters.nthimdsprocessor.longclustername.example.com" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "masters.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_security_group" "nodes-nthimdsprocessor-longclustername-example-com" { + description = "Security group for nodes" + name = "nodes.nthimdsprocessor.longclustername.example.com" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-nthimdsprocessor-longclustername-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = 
aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-nthimdsprocessor-longclustername-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-nthimdsprocessor-longclustername-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-nthimdsprocessor-longclustername-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-nthimdsprocessor-longclustername-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-nthimdsprocessor-longclustername-example-com-ingress-all-0to0-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-nthimdsprocessor-longclustername-example-com-ingress-all-0to0-nodes-nthimdsprocessor-longclustername-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-all-0to0-nodes-nthimdsprocessor-longclustername-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-tcp-1to2379-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 1 + protocol 
= "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-tcp-2382to4000-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-tcp-4003to65535-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-udp-1to65535-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_subnet" "us-test-1a-nthimdsprocessor-longclustername-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "us-test-1a.nthimdsprocessor.longclustername.example.com" + "SubnetType" = "Public" + "kops.k8s.io/instance-group/master-us-test-1a" = "true" + "kops.k8s.io/instance-group/nodes" = "true" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_vpc" "nthimdsprocessor-longclustername-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "nthimdsprocessor-longclustername-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "nthimdsprocessor-longclustername-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.nthimdsprocessor-longclustername-example-com.id + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +} diff --git 
a/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_masters.nthimdsprocessor.longclustername.example.com_policy b/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_masters.nthimdsprocessor.longclustername.example.com_policy new file mode 100644 index 0000000000..66d5de1d5a --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_masters.nthimdsprocessor.longclustername.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_nodes.nthimdsprocessor.longclustername.example.com_policy b/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_nodes.nthimdsprocessor.longclustername.example.com_policy new file mode 100644 index 0000000000..66d5de1d5a --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_nodes.nthimdsprocessor.longclustername.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_policy_masters.nthsqsresources.longclustername.example.com_policy b/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_policy_masters.nthimdsprocessor.longclustername.example.com_policy similarity index 88% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_policy_masters.nthsqsresources.longclustername.example.com_policy rename to tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_policy_masters.nthimdsprocessor.longclustername.example.com_policy index c8ecdeb68a..57e397d9b9 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_iam_role_policy_masters.nthsqsresources.longclustername.example.com_policy +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_policy_masters.nthimdsprocessor.longclustername.example.com_policy @@ -4,7 +4,7 @@ "Action": "ec2:AttachVolume", "Condition": { "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "nthsqsresources.longclustername.example.com", + "aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com", "aws:ResourceTag/k8s.io/role/master": "1" } }, @@ -18,7 +18,7 @@ "s3:Get*" ], "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/nthsqsresources.longclustername.example.com/*" + "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/*" }, { "Action": [ @@ -28,7 +28,7 @@ "s3:PutObject" ], "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/main/*" + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main/*" }, { "Action": [ @@ -38,7 +38,7 @@ "s3:PutObject" ], "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/events/*" + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events/*" }, { "Action": 
[ @@ -98,7 +98,7 @@ "Action": "ec2:CreateTags", "Condition": { "StringEquals": { - "aws:RequestTag/KubernetesCluster": "nthsqsresources.longclustername.example.com", + "aws:RequestTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com", "ec2:CreateAction": [ "CreateSecurityGroup" ] @@ -119,7 +119,7 @@ "aws:RequestTag/KubernetesCluster": "true" }, "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "nthsqsresources.longclustername.example.com" + "aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com" } }, "Effect": "Allow", @@ -131,7 +131,7 @@ "Action": "ec2:CreateTags", "Condition": { "StringEquals": { - "aws:RequestTag/KubernetesCluster": "nthsqsresources.longclustername.example.com", + "aws:RequestTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com", "ec2:CreateAction": [ "CreateVolume", "CreateSnapshot" @@ -154,7 +154,7 @@ "aws:RequestTag/KubernetesCluster": "true" }, "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "nthsqsresources.longclustername.example.com" + "aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com" } }, "Effect": "Allow", @@ -208,16 +208,13 @@ "iam:GetServerCertificate", "iam:ListServerCertificates", "kms:DescribeKey", - "kms:GenerateRandom", - "sqs:DeleteMessage", - "sqs:ReceiveMessage" + "kms:GenerateRandom" ], "Effect": "Allow", "Resource": "*" }, { "Action": [ - "autoscaling:CompleteLifecycleAction", "autoscaling:SetDesiredCapacity", "autoscaling:TerminateInstanceInAutoScalingGroup", "ec2:AttachVolume", @@ -251,7 +248,7 @@ ], "Condition": { "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "nthsqsresources.longclustername.example.com" + "aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com" } }, "Effect": "Allow", @@ -268,7 +265,7 @@ ], "Condition": { "StringEquals": { - "aws:RequestTag/KubernetesCluster": "nthsqsresources.longclustername.example.com" + "aws:RequestTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com" } }, "Effect": "Allow", diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_policy_nodes.nthimdsprocessor.longclustername.example.com_policy b/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_policy_nodes.nthimdsprocessor.longclustername.example.com_policy new file mode 100644 index 0000000000..153ab3c7f6 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_iam_role_policy_nodes.nthimdsprocessor.longclustername.example.com_policy @@ -0,0 +1,30 @@ +{ + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_key_pair_kubernetes.nthimdsprocessor.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key b/tests/integration/update_cluster/nth-imds-processor/data/aws_key_pair_kubernetes.nthimdsprocessor.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key new file mode 100644 index 0000000000..81cb012783 --- /dev/null +++ 
b/tests/integration/update_cluster/nth-imds-processor/data/aws_key_pair_kubernetes.nthimdsprocessor.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_launch_template_master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com_user_data b/tests/integration/update_cluster/nth-imds-processor/data/aws_launch_template_master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com_user_data new file mode 100644 index 0000000000..88423f383b --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_launch_template_master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com_user_data @@ -0,0 +1,253 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.4.13 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + version: 3.4.13 + main: + version: 3.4.13 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.nthimdsprocessor.longclustername.example.com + serviceAccountJWKSURI: https://api.internal.nthimdsprocessor.longclustername.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: nthimdsprocessor.longclustername.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + 
anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com +InstanceGroupName: master-us-test-1a +InstanceGroupRole: ControlPlane +NodeupConfigHash: iwo2pWEOoLfMnp9mOXkRehKi3870p1VXQk3Ag3uGatA= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_launch_template_nodes.nthimdsprocessor.longclustername.example.com_user_data b/tests/integration/update_cluster/nth-imds-processor/data/aws_launch_template_nodes.nthimdsprocessor.longclustername.example.com_user_data new file mode 100644 index 0000000000..b644b450fb --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_launch_template_nodes.nthimdsprocessor.longclustername.example.com_user_data @@ -0,0 +1,193 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! 
validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.4.13 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigServer: + CACertificates: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + 
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- + server: https://kops-controller.internal.nthimdsprocessor.longclustername.example.com:3988/ +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: vosxmdDYcHeTNZAcHYhro8U29h3arjlmlqVfVl73yGw= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_cluster-completed.spec_content similarity index 83% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_cluster-completed.spec_content rename to tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_cluster-completed.spec_content index f881795909..ca3ecd32c6 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_cluster-completed.spec_content @@ -2,7 +2,7 @@ apiVersion: kops.k8s.io/v1alpha2 kind: Cluster metadata: creationTimestamp: "2016-12-10T22:42:27Z" - name: nthsqsresources.longclustername.example.com + name: nthimdsprocessor.longclustername.example.com spec: api: dns: {} @@ -15,8 +15,8 @@ spec: manageStorageClasses: true cloudProvider: aws clusterDNSDomain: cluster.local - configBase: memfs://clusters.example.com/nthsqsresources.longclustername.example.com - configStore: memfs://clusters.example.com/nthsqsresources.longclustername.example.com + configBase: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com + configStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com containerRuntime: containerd containerd: logLevel: info @@ -26,14 +26,14 @@ spec: skipInstall: true etcdClusters: - backups: - backupStore: memfs://clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/main + backupStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main etcdMembers: - instanceGroup: master-us-test-1a name: us-test-1a name: main version: 3.4.13 - backups: - backupStore: memfs://clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/events + backupStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events etcdMembers: - instanceGroup: master-us-test-1a name: us-test-1a @@ -43,7 +43,7 @@ spec: provider: dns-controller iam: legacy: false - keyStore: memfs://clusters.example.com/nthsqsresources.longclustername.example.com/pki + keyStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/pki kubeAPIServer: allowPrivileged: 
true
     anonymousAuth: false
@@ -82,8 +82,8 @@ spec:
     requestheaderUsernameHeaders:
     - X-Remote-User
     securePort: 443
-    serviceAccountIssuer: https://api.internal.nthsqsresources.longclustername.example.com
-    serviceAccountJWKSURI: https://api.internal.nthsqsresources.longclustername.example.com/openid/v1/jwks
+    serviceAccountIssuer: https://api.internal.nthimdsprocessor.longclustername.example.com
+    serviceAccountJWKSURI: https://api.internal.nthimdsprocessor.longclustername.example.com/openid/v1/jwks
     serviceClusterIPRange: 100.64.0.0/13
     storageBackend: etcd3
   kubeControllerManager:
@@ -91,7 +91,7 @@ spec:
     attachDetachReconcileSyncPeriod: 1m0s
     cloudProvider: aws
     clusterCIDR: 100.96.0.0/11
-    clusterName: nthsqsresources.longclustername.example.com
+    clusterName: nthimdsprocessor.longclustername.example.com
     configureCloudRoutes: false
     image: registry.k8s.io/kube-controller-manager:v1.21.0
     leaderElection:
@@ -159,7 +159,7 @@ spec:
     registerSchedulable: true
     shutdownGracePeriod: 30s
     shutdownGracePeriodCriticalPods: 10s
-  masterPublicName: api.nthsqsresources.longclustername.example.com
+  masterPublicName: api.nthimdsprocessor.longclustername.example.com
   networkCIDR: 172.20.0.0/16
   networking:
     cni: {}
@@ -167,7 +167,7 @@ spec:
     cpuRequest: 50m
     enableRebalanceDraining: false
     enableRebalanceMonitoring: false
-    enableSQSTerminationDraining: true
+    enableSQSTerminationDraining: false
     enableScheduledEventDraining: false
     enableSpotInterruptionDraining: true
     enabled: true
@@ -178,7 +178,7 @@ spec:
     version: v1.18.1
   nonMasqueradeCIDR: 100.64.0.0/10
   podCIDR: 100.96.0.0/11
-  secretStore: memfs://clusters.example.com/nthsqsresources.longclustername.example.com/secrets
+  secretStore: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/secrets
   serviceClusterIPRange: 100.64.0.0/13
   sshAccess:
   - 0.0.0.0/0
diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_etcd-cluster-spec-events_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_etcd-cluster-spec-events_content
new file mode 100644
index 0000000000..bb8ddb0e2e
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_etcd-cluster-spec-events_content
@@ -0,0 +1,4 @@
+{
+  "memberCount": 1,
+  "etcdVersion": "3.4.13"
+}
diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_etcd-cluster-spec-main_content
new file mode 100644
index 0000000000..bb8ddb0e2e
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_etcd-cluster-spec-main_content
@@ -0,0 +1,4 @@
+{
+  "memberCount": 1,
+  "etcdVersion": "3.4.13"
+}
diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_kops-version.txt_content
new file mode 100644
index 0000000000..b7340298dc
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_kops-version.txt_content
@@ -0,0 +1 @@
+1.21.0-alpha.1
diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content
new file mode 100644
index 0000000000..44d93cd147
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content
@@ -0,0 +1,62 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  creationTimestamp: null
+  labels:
+    k8s-app: etcd-manager-events
+  name: etcd-manager-events
+  namespace: kube-system
+spec:
+  containers:
+  - command:
+    - /bin/sh
+    - -c
+    - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
+      --backup-store=memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events
+      --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
+      --dns-suffix=.internal.nthimdsprocessor.longclustername.example.com --grpc-port=3997
+      --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
+      --v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
+      --volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com=owned
+      > /tmp/pipe 2>&1
+    image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831
+    name: etcd-manager
+    resources:
+      requests:
+        cpu: 200m
+        memory: 100Mi
+    securityContext:
+      privileged: true
+    volumeMounts:
+    - mountPath: /rootfs
+      name: rootfs
+    - mountPath: /run
+      name: run
+    - mountPath: /etc/kubernetes/pki/etcd-manager
+      name: pki
+    - mountPath: /var/log/etcd.log
+      name: varlogetcd
+  hostNetwork: true
+  hostPID: true
+  priorityClassName: system-cluster-critical
+  tolerations:
+  - key: CriticalAddonsOnly
+    operator: Exists
+  volumes:
+  - hostPath:
+      path: /
+      type: Directory
+    name: rootfs
+  - hostPath:
+      path: /run
+      type: DirectoryOrCreate
+    name: run
+  - hostPath:
+      path: /etc/kubernetes/pki/etcd-manager-events
+      type: DirectoryOrCreate
+    name: pki
+  - hostPath:
+      path: /var/log/etcd-events.log
+      type: FileOrCreate
+    name: varlogetcd
+status: {}
diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content
new file mode 100644
index 0000000000..123ec38676
--- /dev/null
+++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content
@@ -0,0 +1,62 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  creationTimestamp: null
+  labels:
+    k8s-app: etcd-manager-main
+  name: etcd-manager-main
+  namespace: kube-system
+spec:
+  containers:
+  - command:
+    - /bin/sh
+    - -c
+    - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
+      --backup-store=memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main
+      --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
+      --dns-suffix=.internal.nthimdsprocessor.longclustername.example.com --grpc-port=3996
+      --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
+      --v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
+      --volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com=owned
+      > /tmp/pipe 2>&1
+    image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831
+    name: etcd-manager
+    resources:
+      requests:
+        cpu: 200m
+        memory: 100Mi
+    securityContext:
+      privileged: true
+    volumeMounts:
+    - mountPath: /rootfs
+      name: rootfs
+    - mountPath: /run
+      name: run
+    - mountPath:
/etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000..d34269a809 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.26.0-alpha.2 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nodeupconfig-master-us-test-1a_content new file mode 100644 index 0000000000..f30bebbb02 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nodeupconfig-master-us-test-1a_content @@ -0,0 +1,276 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.nthimdsprocessor.longclustername.example.com + serviceAccountJWKSURI: https://api.internal.nthimdsprocessor.longclustername.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + 
XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 29ef1e8635795c2a49a20a56e778f45ff163c5400a5428ca33999ed53d44e3d8@https://github.com/containerd/containerd/releases/download/v1.4.13/cri-containerd-cni-1.4.13-linux-amd64.tar.gz + - f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64 + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64 + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz + - 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64 + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + 
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + 
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + 
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: nthimdsprocessor.longclustername.example.com +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta1 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - node-role.kubernetes.io/master=:NoSchedule +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.4.13 +etcdManifests: +- memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/etcd/main-master-us-test-1a.yaml +- memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/etcd/events-master-us-test-1a.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000..ba386f5681 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nodeupconfig-nodes_content @@ -0,0 +1,44 @@ +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 
29ef1e8635795c2a49a20a56e778f45ff163c5400a5428ca33999ed53d44e3d8@https://github.com/containerd/containerd/releases/download/v1.4.13/cri-containerd-cni-1.4.13-linux-amd64.tar.gz + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz +CAs: {} +ClusterName: nthimdsprocessor.longclustername.example.com +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.4.13 diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-bootstrap_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-bootstrap_content similarity index 94% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-bootstrap_content rename to tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-bootstrap_content index e442630138..7d5ddfd03f 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-bootstrap_content @@ -6,7 +6,7 @@ spec: addons: - id: k8s-1.16 manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml - manifestHash: 058871d16ecf21248a33cea1f26582bc1a6f0c4e16013e75cca4ec5dff1ce172 + manifestHash: acd599090ff336e8892f45cd940caf00c2279f0a91bbd3cd40e70fae525e5119 name: kops-controller.addons.k8s.io needsRollingUpdate: control-plane selector: @@ -41,7 +41,7 @@ spec: version: 9.99.0 - id: k8s-1.11 manifest: node-termination-handler.aws/k8s-1.11.yaml - manifestHash: ed2bdba1e75bedd13ea4368b86511f4e4d83dd16e456fd66e62b55696eda8253 + manifestHash: a6ccfd21bb3ab6ffbc5d48580197c2ecbbcf3ad68043b4c068eb4cc40405fd2c name: node-termination-handler.aws prune: kinds: @@ -56,19 +56,17 @@ spec: - group: apps kind: DaemonSet labelSelector: 
addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system - group: apps kind: Deployment labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: apps kind: StatefulSet labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - group: policy kind: PodDisruptionBudget labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops - namespaces: - - kube-system - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000..fd5b8a7c05 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,383 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.9.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + 
app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.5 + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content similarity index 100% rename from tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content rename to tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000..a33763ea49 --- /dev/null +++ 
b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com","secretStore":"memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.nthimdsprocessor.longclustername.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.26.0-alpha.2 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.nthimdsprocessor.longclustername.example.com + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.2 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.26.0-alpha.2 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + 
app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000..36761e1c56 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-limit-range.addons.k8s.io_content 
b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000..4dcdce48b9 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content new file mode 100644 index 0000000000..476744fd6c --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -0,0 +1,238 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/part-of: aws-node-termination-handler + app.kubernetes.io/version: v1.18.1 + k8s-addon: node-termination-handler.aws + k8s-app: aws-node-termination-handler + name: aws-node-termination-handler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/part-of: aws-node-termination-handler + app.kubernetes.io/version: v1.18.1 + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - get +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - extensions + resources: + - daemonsets + verbs: + - get +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/part-of: aws-node-termination-handler + app.kubernetes.io/version: v1.18.1 + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: aws-node-termination-handler +subjects: +- kind: ServiceAccount + name: aws-node-termination-handler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: 
DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/component: daemonset + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/part-of: aws-node-termination-handler + app.kubernetes.io/version: v1.18.1 + k8s-addon: node-termination-handler.aws + k8s-app: aws-node-termination-handler + name: aws-node-termination-handler + namespace: kube-system +spec: + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kubernetes.io/os: linux + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: daemonset + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + k8s-app: aws-node-termination-handler + kops.k8s.io/managed-by: kops + kubernetes.io/os: linux + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + containers: + - env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ENABLE_PROBES_SERVER + value: "false" + - name: PROBES_SERVER_PORT + value: "8080" + - name: PROBES_SERVER_ENDPOINT + value: /healthz + - name: LOG_LEVEL + value: info + - name: JSON_LOGGING + value: "true" + - name: LOG_FORMAT_VERSION + value: "2" + - name: ENABLE_PROMETHEUS_SERVER + value: "false" + - name: PROMETHEUS_SERVER_PORT + value: "9092" + - name: METADATA_TRIES + value: "3" + - name: DRY_RUN + value: "false" + - name: CORDON_ONLY + value: "false" + - name: TAINT_NODE + value: "false" + - name: EXCLUDE_FROM_LOAD_BALANCERS + value: "true" + - name: DELETE_LOCAL_DATA + value: "true" + - name: IGNORE_DAEMON_SETS + value: "true" + - name: POD_TERMINATION_GRACE_PERIOD + value: "-1" + - name: NODE_TERMINATION_GRACE_PERIOD + value: "120" + - name: EMIT_KUBERNETES_EVENTS + value: "true" + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: "true" + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: "false" + - name: ENABLE_REBALANCE_MONITORING + value: "false" + - name: ENABLE_REBALANCE_DRAINING + value: "false" + - name: ENABLE_SQS_TERMINATION_DRAINING + value: "false" + - name: UPTIME_FROM_FILE + value: /proc/uptime + image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1 + imagePullPolicy: IfNotPresent + name: aws-node-termination-handler + resources: + requests: + cpu: 50m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsGroup: 1000 + runAsNonRoot: true + volumeMounts: + - mountPath: /proc/uptime + name: uptime + readOnly: true + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + securityContext: + fsGroup: 1000 + serviceAccountName: aws-node-termination-handler + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /proc/uptime + name: uptime + updateStrategy: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate diff --git 
a/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 0000000000..21efd54326 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,98 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/nth-imds-processor/id_rsa.pub b/tests/integration/update_cluster/nth-imds-processor/id_rsa.pub new file mode 100755 index 0000000000..81cb012783 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/nth_sqs_resources/in-v1alpha2.yaml b/tests/integration/update_cluster/nth-imds-processor/in-v1alpha2.yaml similarity index 79% rename from tests/integration/update_cluster/nth_sqs_resources/in-v1alpha2.yaml rename to tests/integration/update_cluster/nth-imds-processor/in-v1alpha2.yaml index 6ca36e437e..099511c8db 100644 --- 
a/tests/integration/update_cluster/nth_sqs_resources/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/nth-imds-processor/in-v1alpha2.yaml @@ -2,13 +2,13 @@ apiVersion: kops.k8s.io/v1alpha2 kind: Cluster metadata: creationTimestamp: "2016-12-10T22:42:27Z" - name: nthsqsresources.longclustername.example.com + name: nthimdsprocessor.longclustername.example.com spec: kubernetesApiAccess: - 0.0.0.0/0 channel: stable cloudProvider: aws - configBase: memfs://clusters.example.com/nthsqsresources.longclustername.example.com + configBase: memfs://clusters.example.com/nthimdsprocessor.longclustername.example.com etcdClusters: - etcdMembers: - instanceGroup: master-us-test-1a @@ -22,14 +22,14 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterPublicName: api.nthsqsresources.longclustername.example.com + masterPublicName: api.nthimdsprocessor.longclustername.example.com networkCIDR: 172.20.0.0/16 networking: cni: {} nonMasqueradeCIDR: 100.64.0.0/10 nodeTerminationHandler: enabled: true - enableSQSTerminationDraining: true + enableSQSTerminationDraining: false sshAccess: - 0.0.0.0/0 topology: @@ -49,7 +49,7 @@ metadata: creationTimestamp: "2016-12-10T22:42:28Z" name: nodes labels: - kops.k8s.io/cluster: nthsqsresources.longclustername.example.com + kops.k8s.io/cluster: nthimdsprocessor.longclustername.example.com spec: associatePublicIp: true image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 @@ -68,7 +68,7 @@ metadata: creationTimestamp: "2016-12-10T22:42:28Z" name: master-us-test-1a labels: - kops.k8s.io/cluster: nthsqsresources.longclustername.example.com + kops.k8s.io/cluster: nthimdsprocessor.longclustername.example.com spec: associatePublicIp: true image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 diff --git a/tests/integration/update_cluster/nth-imds-processor/kubernetes.tf b/tests/integration/update_cluster/nth-imds-processor/kubernetes.tf new file mode 100644 index 0000000000..fa72244a19 --- /dev/null +++ b/tests/integration/update_cluster/nth-imds-processor/kubernetes.tf @@ -0,0 +1,848 @@ +locals { + cluster_name = "nthimdsprocessor.longclustername.example.com" + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com.id] + master_security_group_ids = [aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id] + masters_role_arn = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.arn + masters_role_name = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-nthimdsprocessor-longclustername-example-com.id] + node_security_group_ids = [aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id] + node_subnet_ids = [aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id] + nodes_role_arn = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.arn + nodes_role_name = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.name + region = "us-test-1" + route_table_public_id = aws_route_table.nthimdsprocessor-longclustername-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id + vpc_cidr_block = aws_vpc.nthimdsprocessor-longclustername-example-com.cidr_block + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id + vpc_ipv6_cidr_block = aws_vpc.nthimdsprocessor-longclustername-example-com.ipv6_cidr_block + vpc_ipv6_cidr_length = 
local.vpc_ipv6_cidr_block == null ? null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0]) +} + +output "cluster_name" { + value = "nthimdsprocessor.longclustername.example.com" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-nthimdsprocessor-longclustername-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_public_id" { + value = aws_route_table.nthimdsprocessor-longclustername-example-com.id +} + +output "subnet_us-test-1a_id" { + value = aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id +} + +output "vpc_cidr_block" { + value = aws_vpc.nthimdsprocessor-longclustername-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +output "vpc_ipv6_cidr_block" { + value = aws_vpc.nthimdsprocessor-longclustername-example-com.ipv6_cidr_block +} + +output "vpc_ipv6_cidr_length" { + value = local.vpc_ipv6_cidr_block == null ? 
null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0]) +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com.id + version = aws_launch_template.master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "nthimdsprocessor.longclustername.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/control-plane" + propagate_at_launch = true + value = "1" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-nthimdsprocessor-longclustername-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-nthimdsprocessor-longclustername-example-com.id + version = aws_launch_template.nodes-nthimdsprocessor-longclustername-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.nthimdsprocessor.longclustername.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "nthimdsprocessor.longclustername.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.nthimdsprocessor.longclustername.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id] +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-nthimdsprocessor-longclustername-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "us-test-1a.etcd-events.nthimdsprocessor.longclustername.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-nthimdsprocessor-longclustername-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "us-test-1a.etcd-main.nthimdsprocessor.longclustername.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_iam_instance_profile" "masters-nthimdsprocessor-longclustername-example-com" { + name = "masters.nthimdsprocessor.longclustername.example.com" + role = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.name + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "masters.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-nthimdsprocessor-longclustername-example-com" { + name = "nodes.nthimdsprocessor.longclustername.example.com" + role = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.name + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_iam_role" "masters-nthimdsprocessor-longclustername-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.nthimdsprocessor.longclustername.example.com_policy") + name = "masters.nthimdsprocessor.longclustername.example.com" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "masters.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-nthimdsprocessor-longclustername-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.nthimdsprocessor.longclustername.example.com_policy") + name = "nodes.nthimdsprocessor.longclustername.example.com" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" 
+ "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-nthimdsprocessor-longclustername-example-com" { + name = "masters.nthimdsprocessor.longclustername.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.nthimdsprocessor.longclustername.example.com_policy") + role = aws_iam_role.masters-nthimdsprocessor-longclustername-example-com.name +} + +resource "aws_iam_role_policy" "nodes-nthimdsprocessor-longclustername-example-com" { + name = "nodes.nthimdsprocessor.longclustername.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.nthimdsprocessor.longclustername.example.com_policy") + role = aws_iam_role.nodes-nthimdsprocessor-longclustername-example-com.name +} + +resource "aws_internet_gateway" "nthimdsprocessor-longclustername-example-com" { + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_key_pair" "kubernetes-nthimdsprocessor-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = "kubernetes.nthimdsprocessor.longclustername.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.nthimdsprocessor.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-nthimdsprocessor-longclustername-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-nthimdsprocessor-longclustername-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = aws_key_pair.kubernetes-nthimdsprocessor-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.nthimdsprocessor.longclustername.example.com_user_data") +} + +resource "aws_launch_template" "nodes-nthimdsprocessor-longclustername-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-nthimdsprocessor-longclustername-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = aws_key_pair.kubernetes-nthimdsprocessor-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "nodes.nthimdsprocessor.longclustername.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nodes.nthimdsprocessor.longclustername.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_nodes.nthimdsprocessor.longclustername.example.com_user_data") +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.nthimdsprocessor-longclustername-example-com.id + route_table_id = aws_route_table.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.nthimdsprocessor-longclustername-example-com.id + route_table_id = aws_route_table.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_route_table" "nthimdsprocessor-longclustername-example-com" { + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_route_table_association" "us-test-1a-nthimdsprocessor-longclustername-example-com" { + route_table_id = aws_route_table.nthimdsprocessor-longclustername-example-com.id + subnet_id = aws_subnet.us-test-1a-nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = 
"clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/etcd/events-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/etcd/main-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/igconfig/control-plane/master-us-test-1a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-bootstrap_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = 
file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-limit-range.addons.k8s.io_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-node-termination-handler-aws-k8s-1-11" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/node-termination-handler.aws/k8s-1.11.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nthimdsprocessor-longclustername-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nthimdsprocessor.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "clusters.example.com/nthimdsprocessor.longclustername.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "masters-nthimdsprocessor-longclustername-example-com" { + description = "Security group for masters" + name = "masters.nthimdsprocessor.longclustername.example.com" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "masters.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_security_group" "nodes-nthimdsprocessor-longclustername-example-com" { + description = "Security group for nodes" + name = "nodes.nthimdsprocessor.longclustername.example.com" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = 
"nodes.nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-nthimdsprocessor-longclustername-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-nthimdsprocessor-longclustername-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-nthimdsprocessor-longclustername-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-nthimdsprocessor-longclustername-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-nthimdsprocessor-longclustername-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-nthimdsprocessor-longclustername-example-com-ingress-all-0to0-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-nthimdsprocessor-longclustername-example-com-ingress-all-0to0-nodes-nthimdsprocessor-longclustername-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-all-0to0-nodes-nthimdsprocessor-longclustername-example-com" { + from_port = 0 + protocol = "-1" + 
security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-tcp-1to2379-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-tcp-2382to4000-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-tcp-4003to65535-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-nthimdsprocessor-longclustername-example-com-ingress-udp-1to65535-masters-nthimdsprocessor-longclustername-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-nthimdsprocessor-longclustername-example-com.id + source_security_group_id = aws_security_group.nodes-nthimdsprocessor-longclustername-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_subnet" "us-test-1a-nthimdsprocessor-longclustername-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "us-test-1a.nthimdsprocessor.longclustername.example.com" + "SubnetType" = "Public" + "kops.k8s.io/instance-group/master-us-test-1a" = "true" + "kops.k8s.io/instance-group/nodes" = "true" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +resource "aws_vpc" "nthimdsprocessor-longclustername-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "nthimdsprocessor-longclustername-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "nthimdsprocessor.longclustername.example.com" + "Name" = "nthimdsprocessor.longclustername.example.com" + "kubernetes.io/cluster/nthimdsprocessor.longclustername.example.com" = "owned" + } +} + +resource 
"aws_vpc_dhcp_options_association" "nthimdsprocessor-longclustername-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.nthimdsprocessor-longclustername-example-com.id + vpc_id = aws_vpc.nthimdsprocessor-longclustername-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +} diff --git a/tests/integration/update_cluster/nth_sqs_resources/kubernetes.tf b/tests/integration/update_cluster/nth_sqs_resources/kubernetes.tf deleted file mode 100644 index 38d15e3c31..0000000000 --- a/tests/integration/update_cluster/nth_sqs_resources/kubernetes.tf +++ /dev/null @@ -1,966 +0,0 @@ -locals { - cluster_name = "nthsqsresources.longclustername.example.com" - master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-nthsqsresources-longclustername-example-com.id] - master_security_group_ids = [aws_security_group.masters-nthsqsresources-longclustername-example-com.id] - masters_role_arn = aws_iam_role.masters-nthsqsresources-longclustername-example-com.arn - masters_role_name = aws_iam_role.masters-nthsqsresources-longclustername-example-com.name - node_autoscaling_group_ids = [aws_autoscaling_group.nodes-nthsqsresources-longclustername-example-com.id] - node_security_group_ids = [aws_security_group.nodes-nthsqsresources-longclustername-example-com.id] - node_subnet_ids = [aws_subnet.us-test-1a-nthsqsresources-longclustername-example-com.id] - nodes_role_arn = aws_iam_role.nodes-nthsqsresources-longclustername-example-com.arn - nodes_role_name = aws_iam_role.nodes-nthsqsresources-longclustername-example-com.name - region = "us-test-1" - route_table_public_id = aws_route_table.nthsqsresources-longclustername-example-com.id - subnet_us-test-1a_id = aws_subnet.us-test-1a-nthsqsresources-longclustername-example-com.id - vpc_cidr_block = aws_vpc.nthsqsresources-longclustername-example-com.cidr_block - vpc_id = aws_vpc.nthsqsresources-longclustername-example-com.id - vpc_ipv6_cidr_block = aws_vpc.nthsqsresources-longclustername-example-com.ipv6_cidr_block - vpc_ipv6_cidr_length = local.vpc_ipv6_cidr_block == null ? 
null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0]) -} - -output "cluster_name" { - value = "nthsqsresources.longclustername.example.com" -} - -output "master_autoscaling_group_ids" { - value = [aws_autoscaling_group.master-us-test-1a-masters-nthsqsresources-longclustername-example-com.id] -} - -output "master_security_group_ids" { - value = [aws_security_group.masters-nthsqsresources-longclustername-example-com.id] -} - -output "masters_role_arn" { - value = aws_iam_role.masters-nthsqsresources-longclustername-example-com.arn -} - -output "masters_role_name" { - value = aws_iam_role.masters-nthsqsresources-longclustername-example-com.name -} - -output "node_autoscaling_group_ids" { - value = [aws_autoscaling_group.nodes-nthsqsresources-longclustername-example-com.id] -} - -output "node_security_group_ids" { - value = [aws_security_group.nodes-nthsqsresources-longclustername-example-com.id] -} - -output "node_subnet_ids" { - value = [aws_subnet.us-test-1a-nthsqsresources-longclustername-example-com.id] -} - -output "nodes_role_arn" { - value = aws_iam_role.nodes-nthsqsresources-longclustername-example-com.arn -} - -output "nodes_role_name" { - value = aws_iam_role.nodes-nthsqsresources-longclustername-example-com.name -} - -output "region" { - value = "us-test-1" -} - -output "route_table_public_id" { - value = aws_route_table.nthsqsresources-longclustername-example-com.id -} - -output "subnet_us-test-1a_id" { - value = aws_subnet.us-test-1a-nthsqsresources-longclustername-example-com.id -} - -output "vpc_cidr_block" { - value = aws_vpc.nthsqsresources-longclustername-example-com.cidr_block -} - -output "vpc_id" { - value = aws_vpc.nthsqsresources-longclustername-example-com.id -} - -output "vpc_ipv6_cidr_block" { - value = aws_vpc.nthsqsresources-longclustername-example-com.ipv6_cidr_block -} - -output "vpc_ipv6_cidr_length" { - value = local.vpc_ipv6_cidr_block == null ? 
null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0]) -} - -provider "aws" { - region = "us-test-1" -} - -provider "aws" { - alias = "files" - region = "us-test-1" -} - -resource "aws_autoscaling_group" "master-us-test-1a-masters-nthsqsresources-longclustername-example-com" { - enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] - launch_template { - id = aws_launch_template.master-us-test-1a-masters-nthsqsresources-longclustername-example-com.id - version = aws_launch_template.master-us-test-1a-masters-nthsqsresources-longclustername-example-com.latest_version - } - max_instance_lifetime = 0 - max_size = 1 - metrics_granularity = "1Minute" - min_size = 1 - name = "master-us-test-1a.masters.nthsqsresources.longclustername.example.com" - protect_from_scale_in = false - tag { - key = "KubernetesCluster" - propagate_at_launch = true - value = "nthsqsresources.longclustername.example.com" - } - tag { - key = "Name" - propagate_at_launch = true - value = "master-us-test-1a.masters.nthsqsresources.longclustername.example.com" - } - tag { - key = "aws-node-termination-handler/managed" - propagate_at_launch = true - value = "" - } - tag { - key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" - propagate_at_launch = true - value = "" - } - tag { - key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" - propagate_at_launch = true - value = "master" - } - tag { - key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" - propagate_at_launch = true - value = "" - } - tag { - key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" - propagate_at_launch = true - value = "" - } - tag { - key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" - propagate_at_launch = true - value = "" - } - tag { - key = "k8s.io/role/control-plane" - propagate_at_launch = true - value = "1" - } - tag { - key = "k8s.io/role/master" - propagate_at_launch = true - value = "1" - } - tag { - key = "kops.k8s.io/instancegroup" - propagate_at_launch = true - value = "master-us-test-1a" - } - tag { - key = "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" - propagate_at_launch = true - value = "owned" - } - vpc_zone_identifier = [aws_subnet.us-test-1a-nthsqsresources-longclustername-example-com.id] -} - -resource "aws_autoscaling_group" "nodes-nthsqsresources-longclustername-example-com" { - enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] - launch_template { - id = aws_launch_template.nodes-nthsqsresources-longclustername-example-com.id - version = aws_launch_template.nodes-nthsqsresources-longclustername-example-com.latest_version - } - max_instance_lifetime = 0 - max_size = 2 - metrics_granularity = "1Minute" - min_size = 2 - name = "nodes.nthsqsresources.longclustername.example.com" - protect_from_scale_in = false - tag { - key = "KubernetesCluster" - propagate_at_launch = true - value = "nthsqsresources.longclustername.example.com" - } - tag { - key = "Name" - propagate_at_launch = true - value = "nodes.nthsqsresources.longclustername.example.com" - } - tag { - key = "aws-node-termination-handler/managed" - propagate_at_launch 
= true - value = "" - } - tag { - key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" - propagate_at_launch = true - value = "node" - } - tag { - key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" - propagate_at_launch = true - value = "" - } - tag { - key = "k8s.io/role/node" - propagate_at_launch = true - value = "1" - } - tag { - key = "kops.k8s.io/instancegroup" - propagate_at_launch = true - value = "nodes" - } - tag { - key = "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" - propagate_at_launch = true - value = "owned" - } - vpc_zone_identifier = [aws_subnet.us-test-1a-nthsqsresources-longclustername-example-com.id] -} - -resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" { - autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-nthsqsresources-longclustername-example-com.id - default_result = "CONTINUE" - heartbeat_timeout = 300 - lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" - name = "master-us-test-1a-NTHLifecycleHook" -} - -resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" { - autoscaling_group_name = aws_autoscaling_group.nodes-nthsqsresources-longclustername-example-com.id - default_result = "CONTINUE" - heartbeat_timeout = 300 - lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" - name = "nodes-NTHLifecycleHook" -} - -resource "aws_cloudwatch_event_rule" "nthsqsresources-longclustername-e-fkbaoh-ASGLifecycle" { - event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-ASGLifecycle_event_pattern") - name = "nthsqsresources.longclustername.e-fkbaoh-ASGLifecycle" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.e-fkbaoh-ASGLifecycle" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_cloudwatch_event_rule" "nthsqsresources-longclustername-e-fkbaoh-InstanceScheduledChange" { - event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-InstanceScheduledChange_event_pattern") - name = "nthsqsresources.longclustername.e-fkbaoh-InstanceScheduledChange" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.e-fkbaoh-InstanceScheduledChange" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_cloudwatch_event_rule" "nthsqsresources-longclustername-e-fkbaoh-InstanceStateChange" { - event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-InstanceStateChange_event_pattern") - name = "nthsqsresources.longclustername.e-fkbaoh-InstanceStateChange" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.e-fkbaoh-InstanceStateChange" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_cloudwatch_event_rule" "nthsqsresources-longclustername-e-fkbaoh-RebalanceRecommendation" { - event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-RebalanceRecommendation_event_pattern") - name = "nthsqsresources.longclustername.e-fkbaoh-RebalanceRecommendation" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = 
"nthsqsresources.longclustername.e-fkbaoh-RebalanceRecommendation" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_cloudwatch_event_rule" "nthsqsresources-longclustername-e-fkbaoh-SpotInterruption" { - event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_nthsqsresources.longclustername.e-fkbaoh-SpotInterruption_event_pattern") - name = "nthsqsresources.longclustername.e-fkbaoh-SpotInterruption" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.e-fkbaoh-SpotInterruption" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_cloudwatch_event_target" "nthsqsresources-longclustername-e-fkbaoh-ASGLifecycle-Target" { - arn = aws_sqs_queue.nthsqsresources-longclustername-example-com-nth.arn - rule = aws_cloudwatch_event_rule.nthsqsresources-longclustername-e-fkbaoh-ASGLifecycle.id -} - -resource "aws_cloudwatch_event_target" "nthsqsresources-longclustername-e-fkbaoh-InstanceScheduledChange-Target" { - arn = aws_sqs_queue.nthsqsresources-longclustername-example-com-nth.arn - rule = aws_cloudwatch_event_rule.nthsqsresources-longclustername-e-fkbaoh-InstanceScheduledChange.id -} - -resource "aws_cloudwatch_event_target" "nthsqsresources-longclustername-e-fkbaoh-InstanceStateChange-Target" { - arn = aws_sqs_queue.nthsqsresources-longclustername-example-com-nth.arn - rule = aws_cloudwatch_event_rule.nthsqsresources-longclustername-e-fkbaoh-InstanceStateChange.id -} - -resource "aws_cloudwatch_event_target" "nthsqsresources-longclustername-e-fkbaoh-RebalanceRecommendation-Target" { - arn = aws_sqs_queue.nthsqsresources-longclustername-example-com-nth.arn - rule = aws_cloudwatch_event_rule.nthsqsresources-longclustername-e-fkbaoh-RebalanceRecommendation.id -} - -resource "aws_cloudwatch_event_target" "nthsqsresources-longclustername-e-fkbaoh-SpotInterruption-Target" { - arn = aws_sqs_queue.nthsqsresources-longclustername-example-com-nth.arn - rule = aws_cloudwatch_event_rule.nthsqsresources-longclustername-e-fkbaoh-SpotInterruption.id -} - -resource "aws_ebs_volume" "us-test-1a-etcd-events-nthsqsresources-longclustername-example-com" { - availability_zone = "us-test-1a" - encrypted = false - iops = 3000 - size = 20 - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "us-test-1a.etcd-events.nthsqsresources.longclustername.example.com" - "k8s.io/etcd/events" = "us-test-1a/us-test-1a" - "k8s.io/role/control-plane" = "1" - "k8s.io/role/master" = "1" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - throughput = 125 - type = "gp3" -} - -resource "aws_ebs_volume" "us-test-1a-etcd-main-nthsqsresources-longclustername-example-com" { - availability_zone = "us-test-1a" - encrypted = false - iops = 3000 - size = 20 - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "us-test-1a.etcd-main.nthsqsresources.longclustername.example.com" - "k8s.io/etcd/main" = "us-test-1a/us-test-1a" - "k8s.io/role/control-plane" = "1" - "k8s.io/role/master" = "1" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - throughput = 125 - type = "gp3" -} - -resource "aws_iam_instance_profile" "masters-nthsqsresources-longclustername-example-com" { - name = "masters.nthsqsresources.longclustername.example.com" - role = aws_iam_role.masters-nthsqsresources-longclustername-example-com.name - tags = { - 
"KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "masters.nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_iam_instance_profile" "nodes-nthsqsresources-longclustername-example-com" { - name = "nodes.nthsqsresources.longclustername.example.com" - role = aws_iam_role.nodes-nthsqsresources-longclustername-example-com.name - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nodes.nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_iam_role" "masters-nthsqsresources-longclustername-example-com" { - assume_role_policy = file("${path.module}/data/aws_iam_role_masters.nthsqsresources.longclustername.example.com_policy") - name = "masters.nthsqsresources.longclustername.example.com" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "masters.nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_iam_role" "nodes-nthsqsresources-longclustername-example-com" { - assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.nthsqsresources.longclustername.example.com_policy") - name = "nodes.nthsqsresources.longclustername.example.com" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nodes.nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_iam_role_policy" "masters-nthsqsresources-longclustername-example-com" { - name = "masters.nthsqsresources.longclustername.example.com" - policy = file("${path.module}/data/aws_iam_role_policy_masters.nthsqsresources.longclustername.example.com_policy") - role = aws_iam_role.masters-nthsqsresources-longclustername-example-com.name -} - -resource "aws_iam_role_policy" "nodes-nthsqsresources-longclustername-example-com" { - name = "nodes.nthsqsresources.longclustername.example.com" - policy = file("${path.module}/data/aws_iam_role_policy_nodes.nthsqsresources.longclustername.example.com_policy") - role = aws_iam_role.nodes-nthsqsresources-longclustername-example-com.name -} - -resource "aws_internet_gateway" "nthsqsresources-longclustername-example-com" { - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - vpc_id = aws_vpc.nthsqsresources-longclustername-example-com.id -} - -resource "aws_key_pair" "kubernetes-nthsqsresources-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { - key_name = "kubernetes.nthsqsresources.longclustername.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" - public_key = file("${path.module}/data/aws_key_pair_kubernetes.nthsqsresources.longclustername.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_launch_template" "master-us-test-1a-masters-nthsqsresources-longclustername-example-com" { - block_device_mappings { - device_name = "/dev/xvda" - ebs { - 
delete_on_termination = true - encrypted = true - iops = 3000 - throughput = 125 - volume_size = 64 - volume_type = "gp3" - } - } - block_device_mappings { - device_name = "/dev/sdc" - virtual_name = "ephemeral0" - } - iam_instance_profile { - name = aws_iam_instance_profile.masters-nthsqsresources-longclustername-example-com.id - } - image_id = "ami-12345678" - instance_type = "m3.medium" - key_name = aws_key_pair.kubernetes-nthsqsresources-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id - lifecycle { - create_before_destroy = true - } - metadata_options { - http_endpoint = "enabled" - http_protocol_ipv6 = "disabled" - http_put_response_hop_limit = 1 - http_tokens = "optional" - } - monitoring { - enabled = false - } - name = "master-us-test-1a.masters.nthsqsresources.longclustername.example.com" - network_interfaces { - associate_public_ip_address = true - delete_on_termination = true - ipv6_address_count = 0 - security_groups = [aws_security_group.masters-nthsqsresources-longclustername-example-com.id] - } - tag_specifications { - resource_type = "instance" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "master-us-test-1a.masters.nthsqsresources.longclustername.example.com" - "aws-node-termination-handler/managed" = "" - "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" - "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" - "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" - "k8s.io/role/control-plane" = "1" - "k8s.io/role/master" = "1" - "kops.k8s.io/instancegroup" = "master-us-test-1a" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - } - tag_specifications { - resource_type = "volume" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "master-us-test-1a.masters.nthsqsresources.longclustername.example.com" - "aws-node-termination-handler/managed" = "" - "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" - "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" - "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" - "k8s.io/role/control-plane" = "1" - "k8s.io/role/master" = "1" - "kops.k8s.io/instancegroup" = "master-us-test-1a" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - } - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "master-us-test-1a.masters.nthsqsresources.longclustername.example.com" - "aws-node-termination-handler/managed" = "" - "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" - "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" - "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" 
- "k8s.io/role/control-plane" = "1" - "k8s.io/role/master" = "1" - "kops.k8s.io/instancegroup" = "master-us-test-1a" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.nthsqsresources.longclustername.example.com_user_data") -} - -resource "aws_launch_template" "nodes-nthsqsresources-longclustername-example-com" { - block_device_mappings { - device_name = "/dev/xvda" - ebs { - delete_on_termination = true - encrypted = true - iops = 3000 - throughput = 125 - volume_size = 128 - volume_type = "gp3" - } - } - iam_instance_profile { - name = aws_iam_instance_profile.nodes-nthsqsresources-longclustername-example-com.id - } - image_id = "ami-12345678" - instance_type = "t2.medium" - key_name = aws_key_pair.kubernetes-nthsqsresources-longclustername-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id - lifecycle { - create_before_destroy = true - } - metadata_options { - http_endpoint = "enabled" - http_protocol_ipv6 = "disabled" - http_put_response_hop_limit = 1 - http_tokens = "optional" - } - monitoring { - enabled = false - } - name = "nodes.nthsqsresources.longclustername.example.com" - network_interfaces { - associate_public_ip_address = true - delete_on_termination = true - ipv6_address_count = 0 - security_groups = [aws_security_group.nodes-nthsqsresources-longclustername-example-com.id] - } - tag_specifications { - resource_type = "instance" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nodes.nthsqsresources.longclustername.example.com" - "aws-node-termination-handler/managed" = "" - "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" - "k8s.io/role/node" = "1" - "kops.k8s.io/instancegroup" = "nodes" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - } - tag_specifications { - resource_type = "volume" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nodes.nthsqsresources.longclustername.example.com" - "aws-node-termination-handler/managed" = "" - "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" - "k8s.io/role/node" = "1" - "kops.k8s.io/instancegroup" = "nodes" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - } - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nodes.nthsqsresources.longclustername.example.com" - "aws-node-termination-handler/managed" = "" - "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" - "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" - "k8s.io/role/node" = "1" - "kops.k8s.io/instancegroup" = "nodes" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - user_data = filebase64("${path.module}/data/aws_launch_template_nodes.nthsqsresources.longclustername.example.com_user_data") -} - -resource "aws_route" "route-0-0-0-0--0" { - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.nthsqsresources-longclustername-example-com.id - route_table_id = aws_route_table.nthsqsresources-longclustername-example-com.id -} - -resource "aws_route" "route-__--0" { - destination_ipv6_cidr_block = "::/0" - gateway_id = 
aws_internet_gateway.nthsqsresources-longclustername-example-com.id - route_table_id = aws_route_table.nthsqsresources-longclustername-example-com.id -} - -resource "aws_route_table" "nthsqsresources-longclustername-example-com" { - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - "kubernetes.io/kops/role" = "public" - } - vpc_id = aws_vpc.nthsqsresources-longclustername-example-com.id -} - -resource "aws_route_table_association" "us-test-1a-nthsqsresources-longclustername-example-com" { - route_table_id = aws_route_table.nthsqsresources-longclustername-example-com.id - subnet_id = aws_subnet.us-test-1a-nthsqsresources-longclustername-example-com.id -} - -resource "aws_s3_object" "cluster-completed-spec" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/cluster-completed.spec" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "etcd-cluster-spec-events" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/events/control/etcd-cluster-spec" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "etcd-cluster-spec-main" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/main/control/etcd-cluster-spec" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "kops-version-txt" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/kops-version.txt" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/manifests/etcd/events-master-us-test-1a.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/manifests/etcd/main-master-us-test-1a.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/manifests/static/kube-apiserver-healthcheck.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nodeupconfig-master-us-test-1a" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") - key = 
"clusters.example.com/nthsqsresources.longclustername.example.com/igconfig/control-plane/master-us-test-1a/nodeupconfig.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nodeupconfig-nodes" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/igconfig/node/nodes/nodeupconfig.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nthsqsresources-longclustername-example-com-addons-bootstrap" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-bootstrap_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/addons/bootstrap-channel.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nthsqsresources-longclustername-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nthsqsresources-longclustername-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nthsqsresources-longclustername-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nthsqsresources-longclustername-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nthsqsresources-longclustername-example-com-addons-limit-range-addons-k8s-io" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-limit-range.addons.k8s.io_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nthsqsresources-longclustername-example-com-addons-node-termination-handler-aws-k8s-1-11" { - bucket = "testingBucket" - content = 
file("${path.module}/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-node-termination-handler.aws-k8s-1.11_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/addons/node-termination-handler.aws/k8s-1.11.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_s3_object" "nthsqsresources-longclustername-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { - bucket = "testingBucket" - content = file("${path.module}/data/aws_s3_object_nthsqsresources.longclustername.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") - key = "clusters.example.com/nthsqsresources.longclustername.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" - provider = aws.files - server_side_encryption = "AES256" -} - -resource "aws_security_group" "masters-nthsqsresources-longclustername-example-com" { - description = "Security group for masters" - name = "masters.nthsqsresources.longclustername.example.com" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "masters.nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - vpc_id = aws_vpc.nthsqsresources-longclustername-example-com.id -} - -resource "aws_security_group" "nodes-nthsqsresources-longclustername-example-com" { - description = "Security group for nodes" - name = "nodes.nthsqsresources.longclustername.example.com" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nodes.nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } - vpc_id = aws_vpc.nthsqsresources-longclustername-example-com.id -} - -resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-nthsqsresources-longclustername-example-com" { - cidr_blocks = ["0.0.0.0/0"] - from_port = 22 - protocol = "tcp" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - to_port = 22 - type = "ingress" -} - -resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-nthsqsresources-longclustername-example-com" { - cidr_blocks = ["0.0.0.0/0"] - from_port = 22 - protocol = "tcp" - security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - to_port = 22 - type = "ingress" -} - -resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-nthsqsresources-longclustername-example-com" { - cidr_blocks = ["0.0.0.0/0"] - from_port = 443 - protocol = "tcp" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - to_port = 443 - type = "ingress" -} - -resource "aws_security_group_rule" "from-masters-nthsqsresources-longclustername-example-com-egress-all-0to0-0-0-0-0--0" { - cidr_blocks = ["0.0.0.0/0"] - from_port = 0 - protocol = "-1" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - to_port = 0 - type = "egress" -} - -resource "aws_security_group_rule" "from-masters-nthsqsresources-longclustername-example-com-egress-all-0to0-__--0" { - from_port = 0 - ipv6_cidr_blocks = ["::/0"] - protocol = "-1" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - to_port = 0 - type = "egress" -} - -resource "aws_security_group_rule" 
"from-masters-nthsqsresources-longclustername-example-com-ingress-all-0to0-masters-nthsqsresources-longclustername-example-com" { - from_port = 0 - protocol = "-1" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - source_security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - to_port = 0 - type = "ingress" -} - -resource "aws_security_group_rule" "from-masters-nthsqsresources-longclustername-example-com-ingress-all-0to0-nodes-nthsqsresources-longclustername-example-com" { - from_port = 0 - protocol = "-1" - security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - source_security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - to_port = 0 - type = "ingress" -} - -resource "aws_security_group_rule" "from-nodes-nthsqsresources-longclustername-example-com-egress-all-0to0-0-0-0-0--0" { - cidr_blocks = ["0.0.0.0/0"] - from_port = 0 - protocol = "-1" - security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - to_port = 0 - type = "egress" -} - -resource "aws_security_group_rule" "from-nodes-nthsqsresources-longclustername-example-com-egress-all-0to0-__--0" { - from_port = 0 - ipv6_cidr_blocks = ["::/0"] - protocol = "-1" - security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - to_port = 0 - type = "egress" -} - -resource "aws_security_group_rule" "from-nodes-nthsqsresources-longclustername-example-com-ingress-all-0to0-nodes-nthsqsresources-longclustername-example-com" { - from_port = 0 - protocol = "-1" - security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - source_security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - to_port = 0 - type = "ingress" -} - -resource "aws_security_group_rule" "from-nodes-nthsqsresources-longclustername-example-com-ingress-tcp-1to2379-masters-nthsqsresources-longclustername-example-com" { - from_port = 1 - protocol = "tcp" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - source_security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - to_port = 2379 - type = "ingress" -} - -resource "aws_security_group_rule" "from-nodes-nthsqsresources-longclustername-example-com-ingress-tcp-2382to4000-masters-nthsqsresources-longclustername-example-com" { - from_port = 2382 - protocol = "tcp" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - source_security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - to_port = 4000 - type = "ingress" -} - -resource "aws_security_group_rule" "from-nodes-nthsqsresources-longclustername-example-com-ingress-tcp-4003to65535-masters-nthsqsresources-longclustername-example-com" { - from_port = 4003 - protocol = "tcp" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - source_security_group_id = aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - to_port = 65535 - type = "ingress" -} - -resource "aws_security_group_rule" "from-nodes-nthsqsresources-longclustername-example-com-ingress-udp-1to65535-masters-nthsqsresources-longclustername-example-com" { - from_port = 1 - protocol = "udp" - security_group_id = aws_security_group.masters-nthsqsresources-longclustername-example-com.id - source_security_group_id = 
aws_security_group.nodes-nthsqsresources-longclustername-example-com.id - to_port = 65535 - type = "ingress" -} - -resource "aws_sqs_queue" "nthsqsresources-longclustername-example-com-nth" { - message_retention_seconds = 300 - name = "nthsqsresources-longclustername-example-com-nth" - policy = file("${path.module}/data/aws_sqs_queue_nthsqsresources-longclustername-example-com-nth_policy") - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources-longclustername-example-com-nth" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_subnet" "us-test-1a-nthsqsresources-longclustername-example-com" { - availability_zone = "us-test-1a" - cidr_block = "172.20.32.0/19" - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "us-test-1a.nthsqsresources.longclustername.example.com" - "SubnetType" = "Public" - "kops.k8s.io/instance-group/master-us-test-1a" = "true" - "kops.k8s.io/instance-group/nodes" = "true" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - "kubernetes.io/role/elb" = "1" - "kubernetes.io/role/internal-elb" = "1" - } - vpc_id = aws_vpc.nthsqsresources-longclustername-example-com.id -} - -resource "aws_vpc" "nthsqsresources-longclustername-example-com" { - assign_generated_ipv6_cidr_block = true - cidr_block = "172.20.0.0/16" - enable_dns_hostnames = true - enable_dns_support = true - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_vpc_dhcp_options" "nthsqsresources-longclustername-example-com" { - domain_name = "us-test-1.compute.internal" - domain_name_servers = ["AmazonProvidedDNS"] - tags = { - "KubernetesCluster" = "nthsqsresources.longclustername.example.com" - "Name" = "nthsqsresources.longclustername.example.com" - "kubernetes.io/cluster/nthsqsresources.longclustername.example.com" = "owned" - } -} - -resource "aws_vpc_dhcp_options_association" "nthsqsresources-longclustername-example-com" { - dhcp_options_id = aws_vpc_dhcp_options.nthsqsresources-longclustername-example-com.id - vpc_id = aws_vpc.nthsqsresources-longclustername-example-com.id -} - -terraform { - required_version = ">= 0.15.0" - required_providers { - aws = { - "configuration_aliases" = [aws.files] - "source" = "hashicorp/aws" - "version" = ">= 4.0.0" - } - } -} diff --git a/upup/pkg/fi/cloudup/apply_cluster.go b/upup/pkg/fi/cloudup/apply_cluster.go index 2287ac1c06..b007883d0a 100644 --- a/upup/pkg/fi/cloudup/apply_cluster.go +++ b/upup/pkg/fi/cloudup/apply_cluster.go @@ -615,7 +615,7 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error { } nth := c.Cluster.Spec.NodeTerminationHandler - if nth != nil && fi.ValueOf(nth.Enabled) && fi.ValueOf(nth.EnableSQSTerminationDraining) { + if nth.IsQueueMode() { l.Builders = append(l.Builders, &awsmodel.NodeTerminationHandlerBuilder{ AWSModelContext: awsModelContext, Lifecycle: clusterLifecycle, diff --git a/upup/pkg/fi/cloudup/template_functions.go b/upup/pkg/fi/cloudup/template_functions.go index 5e256ef156..eff67a71a6 100644 --- a/upup/pkg/fi/cloudup/template_functions.go +++ b/upup/pkg/fi/cloudup/template_functions.go @@ -322,7 +322,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS return url } - dest["EnableSQSTerminationDraining"] = func() bool { return 
*cluster.Spec.NodeTerminationHandler.EnableSQSTerminationDraining } + dest["EnableSQSTerminationDraining"] = func() bool { return cluster.Spec.NodeTerminationHandler.IsQueueMode() } } dest["ArchitectureOfAMI"] = tf.architectureOfAMI
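
Note: the IsQueueMode() helper that both Go hunks above now call is introduced elsewhere in this change and is not shown in this diff. Judging from the condition it replaces in apply_cluster.go, a nil-safe receiver method along the following lines would be the expected shape; this is a sketch, with the receiver type name NodeTerminationHandlerConfig assumed rather than confirmed by the diff, and plain pointer checks standing in for whatever helper the real definition uses:

// IsQueueMode reports whether Node Termination Handler is both enabled and
// configured for Queue Processor mode. Making the receiver nil-safe lets
// callers drop their own nil checks, as the apply_cluster.go hunk above does.
func (n *NodeTerminationHandlerConfig) IsQueueMode() bool {
	return n != nil &&
		n.Enabled != nil && *n.Enabled &&
		n.EnableSQSTerminationDraining != nil && *n.EnableSQSTerminationDraining
}

Beyond deduplicating the condition, this also hardens the template_functions.go path: the old template function dereferenced *cluster.Spec.NodeTerminationHandler.EnableSQSTerminationDraining directly, which would panic if that pointer were unset, whereas a helper like the one sketched above treats nil as false.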