Merge pull request #14721 from johngmyers/nth-default-queue

Change default for NTH Queue Processor mode to enabled
Kubernetes Prow Robot 2022-12-06 03:18:36 -08:00 committed by GitHub
commit f827ec7f54
154 changed files with 6291 additions and 1414 deletions


@ -116,9 +116,10 @@ func (i *integrationTest) withPrivate() *integrationTest {
// withServiceAccountRole indicates we expect to assign an IAM role for a ServiceAccount (instead of just using the node roles)
func (i *integrationTest) withServiceAccountRole(sa string, inlinePolicy bool) *integrationTest {
i.expectServiceAccountRolePolicies = append(i.expectServiceAccountRolePolicies, fmt.Sprintf("aws_iam_role_%s.sa.%s_policy", sa, i.clusterName))
role := truncate.TruncateString(sa+".sa."+i.clusterName, truncate.TruncateStringOptions{MaxLength: iam.MaxLengthIAMRoleName, AlwaysAddHash: false})
i.expectServiceAccountRolePolicies = append(i.expectServiceAccountRolePolicies, fmt.Sprintf("aws_iam_role_%s_policy", role))
if inlinePolicy {
i.expectServiceAccountRolePolicies = append(i.expectServiceAccountRolePolicies, fmt.Sprintf("aws_iam_role_policy_%s.sa.%s_policy", sa, i.clusterName))
i.expectServiceAccountRolePolicies = append(i.expectServiceAccountRolePolicies, fmt.Sprintf("aws_iam_role_policy_%s_policy", role))
}
return i
}
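
A note on the truncation above: the role name built for the new TestNTHIMDSProcessorIRSA test below ("aws-node-termination-handler.kube-system" on cluster "nthimdsprocessor.longclustername.example.com") exceeds AWS's 64-character limit on IAM role names, which is presumably why the expected Terraform resource name now goes through truncate.TruncateString. A standalone sketch of the arithmetic (not kops code, just the length check):

```go
package main

import "fmt"

func main() {
	// Names taken from the TestNTHIMDSProcessorIRSA integration test added in this commit.
	sa := "aws-node-termination-handler.kube-system"
	clusterName := "nthimdsprocessor.longclustername.example.com"
	name := sa + ".sa." + clusterName
	fmt.Println(len(name)) // 88 characters, well over the 64-character IAM role name limit
}
```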
@ -620,10 +621,10 @@ func TestManyAddons(t *testing.T) {
"certmanager.io-k8s-1.16",
"cluster-autoscaler.addons.k8s.io-k8s-1.15",
"networking.amazon-vpc-routed-eni-k8s-1.16",
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
metricsServerAddon,
dnsControllerAddon).
withNTH().
runTestTerraformAWS(t)
}
@ -642,12 +643,12 @@ func TestManyAddonsCCMIRSA(t *testing.T) {
"certmanager.io-k8s-1.16",
"cluster-autoscaler.addons.k8s.io-k8s-1.15",
"networking.amazon-vpc-routed-eni-k8s-1.16",
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
metricsServerAddon,
dnsControllerAddon,
).
withNTH().
runTestTerraformAWS(t)
}
@ -666,13 +667,13 @@ func TestManyAddonsCCMIRSA23(t *testing.T) {
"certmanager.io-k8s-1.16",
"cluster-autoscaler.addons.k8s.io-k8s-1.15",
"networking.amazon-vpc-routed-eni-k8s-1.16",
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
leaderElectionAddon,
metricsServerAddon,
dnsControllerAddon,
).
withNTH().
runTestTerraformAWS(t)
}
@ -691,13 +692,13 @@ func TestManyAddonsCCMIRSA24(t *testing.T) {
"certmanager.io-k8s-1.16",
"cluster-autoscaler.addons.k8s.io-k8s-1.15",
"networking.amazon-vpc-routed-eni-k8s-1.16",
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
leaderElectionAddon,
metricsServerAddon,
dnsControllerAddon,
).
withNTH().
runTestTerraformAWS(t)
}
@ -716,13 +717,13 @@ func TestManyAddonsCCMIRSA25(t *testing.T) {
"certmanager.io-k8s-1.16",
"cluster-autoscaler.addons.k8s.io-k8s-1.15",
"networking.amazon-vpc-routed-eni-k8s-1.16",
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
leaderElectionAddon,
metricsServerAddon,
dnsControllerAddon,
).
withNTH().
runTestTerraformAWS(t)
}
@ -742,12 +743,12 @@ func TestManyAddonsCCMIRSA26(t *testing.T) {
"certmanager.io-k8s-1.16",
"cluster-autoscaler.addons.k8s.io-k8s-1.15",
"networking.amazon-vpc-routed-eni-k8s-1.16",
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
metricsServerAddon,
dnsControllerAddon,
).
withNTH().
runTestTerraformAWS(t)
}
@ -773,12 +774,12 @@ func TestCCM(t *testing.T) {
"certmanager.io-k8s-1.16",
"cluster-autoscaler.addons.k8s.io-k8s-1.15",
"networking.amazon-vpc-routed-eni-k8s-1.16",
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
dnsControllerAddon,
metricsServerAddon,
).
withNTH().
runTestTerraformAWS(t)
}
@ -934,11 +935,26 @@ func TestAPIServerNodes(t *testing.T) {
runTestTerraformAWS(t)
}
// TestNTHQueueProcessor tests the output for resources required by NTH Queue Processor mode
func TestNTHQueueProcessor(t *testing.T) {
newIntegrationTest("nthsqsresources.longclustername.example.com", "nth_sqs_resources").
withNTH().
withAddons(dnsControllerAddon).
// TestNTHIMDSProcessor tests the output for resources required by NTH IMDS Processor mode
func TestNTHIMDSProcessor(t *testing.T) {
newIntegrationTest("nthimdsprocessor.longclustername.example.com", "nth-imds-processor").
withAddons(
dnsControllerAddon,
"node-termination-handler.aws-k8s-1.11",
).
runTestTerraformAWS(t)
}
// TestNTHIMDSProcessorIRSA tests the output for resources required by NTH IMDS Processor mode with IRSA
func TestNTHIMDSProcessorIRSA(t *testing.T) {
newIntegrationTest("nthimdsprocessor.longclustername.example.com", "nth-imds-processor-irsa").
withOIDCDiscovery().
withServiceAccountRole("dns-controller.kube-system", true).
withServiceAccountRole("aws-node-termination-handler.kube-system", true).
withAddons(
dnsControllerAddon,
"node-termination-handler.aws-k8s-1.11",
).
runTestTerraformAWS(t)
}


@ -171,12 +171,12 @@ func TestLifecyclePrivateSharedIP(t *testing.T) {
})
}
// TestLifecycleNodeTerminationHandlerQueueProcessor runs the test on a cluster with requisite resources for NTH Queue Processor
func TestLifecycleNodeTerminationHandlerQueueProcessor(t *testing.T) {
// TestLifecycleManyAddons runs the test on a cluster with requisite resources for NTH Queue Processor and other addons.
func TestLifecycleManyAddons(t *testing.T) {
runLifecycleTestAWS(&LifecycleTestOptions{
t: t,
SrcDir: "nth_sqs_resources",
ClusterName: "nthsqsresources.longclustername.example.com",
SrcDir: "many-addons",
ClusterName: "minimal.example.com",
})
}


@ -250,7 +250,7 @@ spec:
{{ kops_feature_table(kops_added_default='1.21') }}
If `enableSQSTerminationDraining` is true Node Termination Handler will operate in Queue Processor mode. In addition to the events mentioned above, Queue Processor mode allows Node Termination Handler to take care of ASG Scale-In, AZ-Rebalance, Unhealthy Instances, EC2 Instance Termination via the API or Console, and more. kOps will provision the necessary infrastructure: an SQS queue, EventBridge rules, and ASG Lifecycle hooks. `managedASGTag` can be configured with Queue Processor mode to distinguish resource ownership between multiple clusters.
If `enableSQSTerminationDraining` is not false, Node Termination Handler will operate in Queue Processor mode. In addition to the events mentioned above, Queue Processor mode allows Node Termination Handler to take care of ASG Scale-In, AZ-Rebalance, Unhealthy Instances, EC2 Instance Termination via the API or Console, and more. kOps will provision the necessary infrastructure: an SQS queue, EventBridge rules, and ASG Lifecycle hooks. `managedASGTag` can be configured with Queue Processor mode to distinguish resource ownership between multiple clusters.
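
In other words, leaving `enableSQSTerminationDraining` unset now selects Queue Processor mode, and only an explicit `false` keeps the IMDS-only behaviour. A minimal standalone sketch of that decision, using a trimmed struct in place of the real kops API type but the same logic as the `IsQueueMode` helper added later in this commit:

```go
package main

import "fmt"

// Trimmed stand-in for kops' NodeTerminationHandlerConfig, for illustration only.
type NodeTerminationHandlerConfig struct {
	Enabled                      *bool
	EnableSQSTerminationDraining *bool
}

// IsQueueMode mirrors the helper added in this commit: NTH must be enabled, and
// enableSQSTerminationDraining must be unset (the new default) or explicitly true.
func (n *NodeTerminationHandlerConfig) IsQueueMode() bool {
	return n != nil && n.Enabled != nil && *n.Enabled &&
		(n.EnableSQSTerminationDraining == nil || *n.EnableSQSTerminationDraining)
}

func ptr(b bool) *bool { return &b }

func main() {
	unset := &NodeTerminationHandlerConfig{Enabled: ptr(true)}
	optOut := &NodeTerminationHandlerConfig{Enabled: ptr(true), EnableSQSTerminationDraining: ptr(false)}
	fmt.Println(unset.IsQueueMode())  // true: unset field now means Queue Processor mode
	fmt.Println(optOut.IsQueueMode()) // false: explicit opt-out keeps IMDS-only mode
}
```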
The kOps CLI requires additional IAM permissions to manage the requisite EventBridge rules and SQS queue:


@ -29,6 +29,8 @@ with "control-plane-". The names of groups for existing clusters are unchanged.
* New clusters can more easily be configured to use Cilium in ENI mode by setting `--networking=cilium-eni`.
* Node Termination Handler now defaults to Queue-Processor mode.
## GCP
* The default instance type is now `e2-medium` for control-plane and worker nodes, and `e2-micro` for bastions.


@ -5346,7 +5346,7 @@ spec:
enableSQSTerminationDraining:
description: 'EnableSQSTerminationDraining enables queue-processor
mode which drains nodes when an SQS termination event is received.
Default: false'
Default: true'
type: boolean
enableScheduledEventDraining:
description: 'EnableScheduledEventDraining makes node termination


@ -941,7 +941,7 @@ type NodeTerminationHandlerConfig struct {
EnablePrometheusMetrics *bool `json:"prometheusEnable,omitempty"`
// EnableSQSTerminationDraining enables queue-processor mode which drains nodes when an SQS termination event is received.
// Default: false
// Default: true
EnableSQSTerminationDraining *bool `json:"enableSQSTerminationDraining,omitempty"`
// ExcludeFromLoadBalancers makes the node termination handler mark nodes for exclusion from load balancers before they are cordoned.
@ -963,6 +963,10 @@ type NodeTerminationHandlerConfig struct {
Version *string `json:"version,omitempty"`
}
func (n *NodeTerminationHandlerConfig) IsQueueMode() bool {
return n != nil && n.Enabled != nil && *n.Enabled && (n.EnableSQSTerminationDraining == nil || *n.EnableSQSTerminationDraining)
}
// NodeProblemDetector determines the node problem detector configuration.
type NodeProblemDetectorConfig struct {
// Enabled enables the NodeProblemDetector.


@ -967,7 +967,7 @@ type NodeTerminationHandlerConfig struct {
EnablePrometheusMetrics *bool `json:"prometheusEnable,omitempty"`
// EnableSQSTerminationDraining enables queue-processor mode which drains nodes when an SQS termination event is received.
// Default: false
// Default: true
EnableSQSTerminationDraining *bool `json:"enableSQSTerminationDraining,omitempty"`
// ExcludeFromLoadBalancers makes the node termination handler mark nodes for exclusion from load balancers before they are cordoned.


@ -938,7 +938,7 @@ type NodeTerminationHandlerConfig struct {
EnablePrometheusMetrics *bool `json:"prometheusEnable,omitempty"`
// EnableSQSTerminationDraining enables queue-processor mode which drains nodes when an SQS termination event is received.
// Default: false
// Default: true
EnableSQSTerminationDraining *bool `json:"enableSQSTerminationDraining,omitempty"`
// ManagedASGTag is the tag used to determine which nodes NTH can take action on


@ -56,10 +56,6 @@ func (b *NodeTerminationHandlerOptionsBuilder) BuildOptions(o interface{}) error
nth.EnablePrometheusMetrics = fi.PtrTo(false)
}
if nth.EnableSQSTerminationDraining == nil {
nth.EnableSQSTerminationDraining = fi.PtrTo(false)
}
if nth.ExcludeFromLoadBalancers == nil {
nth.ExcludeFromLoadBalancers = fi.PtrTo(true)
}


@ -149,7 +149,7 @@ func (b *KopsModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) (ma
// Apply NTH Labels
nth := b.Cluster.Spec.NodeTerminationHandler
if nth != nil && fi.ValueOf(nth.Enabled) && fi.ValueOf(nth.EnableSQSTerminationDraining) {
if nth.IsQueueMode() {
labels[fi.ValueOf(nth.ManagedASGTag)] = ""
}
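
Since the gate is now `IsQueueMode()`, a cluster that merely enables NTH (with `enableSQSTerminationDraining` unset) gets the managed-ASG tag on every instance group, which is what the regenerated Terraform expectations below reflect. A rough standalone sketch of the effect, assuming the default `ManagedASGTag` value shown in the manifests (`aws-node-termination-handler/managed`):

```go
package main

import "fmt"

func main() {
	labels := map[string]string{
		"KubernetesCluster": "minimal.example.com",
	}
	managedASGTag := "aws-node-termination-handler/managed" // assumed default ManagedASGTag, matching the MANAGED_TAG env var in the manifest
	queueMode := true                                       // nodeTerminationHandler enabled, enableSQSTerminationDraining left unset
	if queueMode {
		labels[managedASGTag] = "" // empty value, as in the regenerated Terraform tag blocks
	}
	fmt.Println(labels)
}
```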


@ -435,7 +435,7 @@ func (r *NodeRoleMaster) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) {
AddClusterAutoscalerPermissions(p, useStaticInstanceList)
nth := b.Cluster.Spec.NodeTerminationHandler
if nth != nil && fi.ValueOf(nth.Enabled) && fi.ValueOf(nth.EnableSQSTerminationDraining) {
if nth.IsQueueMode() {
AddNodeTerminationHandlerSQSPermissions(p)
}
}


@ -31,6 +31,7 @@ KOPS=$(kops-download-from-base)
# Start with a cluster running nodeTerminationHandler
ARGS="--override=cluster.spec.nodeTerminationHandler.enabled=true"
ARGS="${ARGS} --override=cluster.spec.nodeTerminationHandler.enableSQSTerminationDraining=false"
${KUBETEST2} \
--up \


@ -213,7 +213,6 @@ spec:
cpuRequest: 50m
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableSQSTerminationDraining: false
enableScheduledEventDraining: false
enableSpotInterruptionDraining: true
enabled: true


@ -62,7 +62,7 @@ spec:
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720
manifestHash: 24d22c723a0179f85aacb0b9f9390234829da92e7da424efc72de0b03b1fb707
name: node-termination-handler.aws
prune:
kinds:
@ -77,17 +77,19 @@ spec:
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops


@ -99,12 +99,12 @@ subjects:
---
apiVersion: apps/v1
kind: DaemonSet
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
@ -115,6 +115,7 @@ metadata:
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
@ -124,22 +125,14 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
containers:
- env:
- name: NODE_NAME
@ -155,7 +148,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "false"
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
@ -170,8 +163,12 @@ spec:
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: METADATA_TRIES
value: "3"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
@ -190,6 +187,8 @@ spec:
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "true"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
@ -199,16 +198,31 @@ spec:
- name: ENABLE_REBALANCE_DRAINING
value: "false"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
- name: UPTIME_FROM_FILE
value: /proc/uptime
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: WORKERS
value: "10"
- name: AWS_ROLE_ARN
value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/amazonaws.com/token
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
@ -218,27 +232,33 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /proc/uptime
name: uptime
readOnly: true
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- hostPath:
path: /proc/uptime
name: uptime
- name: token-amazonaws-com
projected:
defaultMode: 420
@ -247,7 +267,25 @@ spec:
audience: amazonaws.com
expirationSeconds: 86400
path: token
updateStrategy:
rollingUpdate:
maxUnavailable: 25%
type: RollingUpdate
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs


@ -6,6 +6,6 @@
"Service": ["events.amazonaws.com", "sqs.amazonaws.com"]
},
"Action": "sqs:SendMessage",
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:nthsqsresources-longclustername-example-com-nth"
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}]
}


@ -42,7 +42,6 @@ spec:
amazonvpc: {}
nodeTerminationHandler:
enabled: true
enableSQSTerminationDraining: false
nonMasqueradeCIDR: 172.20.0.0/16
serviceAccountIssuerDiscovery:
discoveryStore: memfs://discovery.example.com/minimal.example.com


@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
@ -257,6 +262,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
@ -285,6 +295,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern")
name = "minimal.example.com-RebalanceRecommendation"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-RebalanceRecommendation"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
@ -555,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -571,6 +673,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -585,6 +688,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -640,6 +744,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -652,6 +757,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -662,6 +768,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -1073,6 +1180,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"


@ -0,0 +1 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}


@ -0,0 +1 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}


@ -217,7 +217,6 @@ spec:
cpuRequest: 50m
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableSQSTerminationDraining: false
enableScheduledEventDraining: false
enableSpotInterruptionDraining: true
enabled: true


@ -69,7 +69,7 @@ spec:
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720
manifestHash: a1de83f808713413c322ac63634fb663cf0a4a396d0c0d66cd478086262a5035
name: node-termination-handler.aws
prune:
kinds:
@ -84,17 +84,19 @@ spec:
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops


@ -99,12 +99,12 @@ subjects:
---
apiVersion: apps/v1
kind: DaemonSet
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
@ -115,6 +115,7 @@ metadata:
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
@ -124,22 +125,14 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
containers:
- env:
- name: NODE_NAME
@ -155,7 +148,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "false"
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
@ -170,8 +163,12 @@ spec:
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: METADATA_TRIES
value: "3"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
@ -190,6 +187,8 @@ spec:
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "true"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
@ -199,16 +198,31 @@ spec:
- name: ENABLE_REBALANCE_DRAINING
value: "false"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
- name: UPTIME_FROM_FILE
value: /proc/uptime
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: WORKERS
value: "10"
- name: AWS_ROLE_ARN
value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/amazonaws.com/token
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
@ -218,27 +232,33 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /proc/uptime
name: uptime
readOnly: true
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- hostPath:
path: /proc/uptime
name: uptime
- name: token-amazonaws-com
projected:
defaultMode: 420
@ -247,7 +267,25 @@ spec:
audience: amazonaws.com
expirationSeconds: 86400
path: token
updateStrategy:
rollingUpdate:
maxUnavailable: 25%
type: RollingUpdate
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs


@ -0,0 +1,11 @@
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": ["events.amazonaws.com", "sqs.amazonaws.com"]
},
"Action": "sqs:SendMessage",
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}]
}


@ -42,7 +42,6 @@ spec:
amazonvpc: {}
nodeTerminationHandler:
enabled: true
enableSQSTerminationDraining: false
nonMasqueradeCIDR: 172.20.0.0/16
serviceAccountIssuerDiscovery:
discoveryStore: memfs://discovery.example.com/minimal.example.com


@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
@ -257,6 +262,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
@ -285,6 +295,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern")
name = "minimal.example.com-RebalanceRecommendation"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-RebalanceRecommendation"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
@ -555,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -571,6 +673,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -585,6 +688,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -640,6 +744,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -652,6 +757,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -662,6 +768,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -1081,6 +1188,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"


@ -0,0 +1 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}


@ -0,0 +1 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}


@ -216,7 +216,6 @@ spec:
cpuRequest: 50m
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableSQSTerminationDraining: false
enableScheduledEventDraining: false
enableSpotInterruptionDraining: true
enabled: true


@ -69,7 +69,7 @@ spec:
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720
manifestHash: a1de83f808713413c322ac63634fb663cf0a4a396d0c0d66cd478086262a5035
name: node-termination-handler.aws
prune:
kinds:
@ -84,17 +84,19 @@ spec:
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops


@ -99,12 +99,12 @@ subjects:
---
apiVersion: apps/v1
kind: DaemonSet
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
@ -115,6 +115,7 @@ metadata:
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
@ -124,22 +125,14 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
containers:
- env:
- name: NODE_NAME
@ -155,7 +148,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "false"
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
@ -170,8 +163,12 @@ spec:
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: METADATA_TRIES
value: "3"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
@ -190,6 +187,8 @@ spec:
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "true"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
@ -199,16 +198,31 @@ spec:
- name: ENABLE_REBALANCE_DRAINING
value: "false"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
- name: UPTIME_FROM_FILE
value: /proc/uptime
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: WORKERS
value: "10"
- name: AWS_ROLE_ARN
value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/amazonaws.com/token
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
@ -218,27 +232,33 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /proc/uptime
name: uptime
readOnly: true
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- hostPath:
path: /proc/uptime
name: uptime
- name: token-amazonaws-com
projected:
defaultMode: 420
@ -247,7 +267,25 @@ spec:
audience: amazonaws.com
expirationSeconds: 86400
path: token
updateStrategy:
rollingUpdate:
maxUnavailable: 25%
type: RollingUpdate
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs


@ -0,0 +1,11 @@
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": ["events.amazonaws.com", "sqs.amazonaws.com"]
},
"Action": "sqs:SendMessage",
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}]
}


@ -42,7 +42,6 @@ spec:
amazonvpc: {}
nodeTerminationHandler:
enabled: true
enableSQSTerminationDraining: false
nonMasqueradeCIDR: 172.20.0.0/16
serviceAccountIssuerDiscovery:
discoveryStore: memfs://discovery.example.com/minimal.example.com


@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
@ -247,6 +252,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
@ -270,6 +280,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern")
name = "minimal.example.com-RebalanceRecommendation"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-RebalanceRecommendation"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
@ -540,6 +641,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -554,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -566,6 +669,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -619,6 +723,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -630,6 +735,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -639,6 +745,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -1057,6 +1164,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"


@ -0,0 +1 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}


@ -0,0 +1 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}


@ -215,7 +215,6 @@ spec:
cpuRequest: 50m
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableSQSTerminationDraining: false
enableScheduledEventDraining: false
enableSpotInterruptionDraining: true
enabled: true


@ -69,7 +69,7 @@ spec:
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720
manifestHash: a1de83f808713413c322ac63634fb663cf0a4a396d0c0d66cd478086262a5035
name: node-termination-handler.aws
prune:
kinds:
@ -84,17 +84,19 @@ spec:
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops


@ -99,12 +99,12 @@ subjects:
---
apiVersion: apps/v1
kind: DaemonSet
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
@ -115,6 +115,7 @@ metadata:
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
@ -124,22 +125,14 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
containers:
- env:
- name: NODE_NAME
@ -155,7 +148,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "false"
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
@ -170,8 +163,12 @@ spec:
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: METADATA_TRIES
value: "3"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
@ -190,6 +187,8 @@ spec:
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "true"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
@ -199,16 +198,31 @@ spec:
- name: ENABLE_REBALANCE_DRAINING
value: "false"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
- name: UPTIME_FROM_FILE
value: /proc/uptime
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: WORKERS
value: "10"
- name: AWS_ROLE_ARN
value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/amazonaws.com/token
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
@ -218,27 +232,33 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /proc/uptime
name: uptime
readOnly: true
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- hostPath:
path: /proc/uptime
name: uptime
- name: token-amazonaws-com
projected:
defaultMode: 420
@ -247,7 +267,25 @@ spec:
audience: amazonaws.com
expirationSeconds: 86400
path: token
updateStrategy:
rollingUpdate:
maxUnavailable: 25%
type: RollingUpdate
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
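This manifest replaces the per-node DaemonSet with a two-replica queue-processor Deployment (probes server enabled, IRSA web-identity token mounted, replicas spread across zones and hostnames) and adds a PodDisruptionBudget so at most one replica is disrupted at a time. Purely as a hedged illustration and not part of the fixture set, the rollout could be inspected from a running cluster with client-go; only the namespace and Deployment name are taken from the manifest above, and the kubeconfig handling is an assumption.

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a local kubeconfig at the default path; in-cluster config would work as well.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	dep, err := cs.AppsV1().Deployments("kube-system").
		Get(context.TODO(), "aws-node-termination-handler", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("aws-node-termination-handler: %d/%d replicas ready\n",
		dep.Status.ReadyReplicas, dep.Status.Replicas)
}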


@ -0,0 +1,11 @@
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": ["events.amazonaws.com", "sqs.amazonaws.com"]
},
"Action": "sqs:SendMessage",
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}]
}
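The queue policy above is what lets EventBridge (events.amazonaws.com) deliver the rule targets into the NTH queue via sqs:SendMessage. As a small sketch, again not part of the generated fixtures, the document can be parsed and spot-checked with nothing but the Go standard library:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type statement struct {
	Effect    string
	Principal struct{ Service []string }
	Action    string
	Resource  string
}

type policy struct {
	Version   string
	Statement []statement
}

func main() {
	// Policy body copied from the fixture above.
	doc := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"Service": ["events.amazonaws.com", "sqs.amazonaws.com"]},
	    "Action": "sqs:SendMessage",
	    "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
	  }]
	}`

	var p policy
	if err := json.Unmarshal([]byte(doc), &p); err != nil {
		log.Fatal(err)
	}
	fmt.Println("principals allowed to SendMessage:", p.Statement[0].Principal.Service)
}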


@ -42,7 +42,6 @@ spec:
amazonvpc: {}
nodeTerminationHandler:
enabled: true
enableSQSTerminationDraining: false
nonMasqueradeCIDR: 172.20.0.0/16
serviceAccountIssuerDiscovery:
discoveryStore: memfs://discovery.example.com/minimal.example.com


@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
@ -247,6 +252,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
@ -270,6 +280,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern")
name = "minimal.example.com-RebalanceRecommendation"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-RebalanceRecommendation"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
@ -540,6 +641,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -554,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -566,6 +669,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -619,6 +723,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -630,6 +735,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -639,6 +745,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -1057,6 +1164,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"


@ -0,0 +1 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}


@ -0,0 +1 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}


@ -215,7 +215,6 @@ spec:
cpuRequest: 50m
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableSQSTerminationDraining: false
enableScheduledEventDraining: false
enableSpotInterruptionDraining: true
enabled: true


@ -62,7 +62,7 @@ spec:
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: cf22350355099c28c5542edbfb5d461c9db78f254f5e4bcff3292f5a6b385720
manifestHash: a1de83f808713413c322ac63634fb663cf0a4a396d0c0d66cd478086262a5035
name: node-termination-handler.aws
prune:
kinds:
@ -77,17 +77,19 @@ spec:
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops


@ -99,12 +99,12 @@ subjects:
---
apiVersion: apps/v1
kind: DaemonSet
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
@ -115,6 +115,7 @@ metadata:
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
@ -124,22 +125,14 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
containers:
- env:
- name: NODE_NAME
@ -155,7 +148,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "false"
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
@ -170,8 +163,12 @@ spec:
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: METADATA_TRIES
value: "3"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
@ -190,6 +187,8 @@ spec:
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "true"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
@ -199,16 +198,31 @@ spec:
- name: ENABLE_REBALANCE_DRAINING
value: "false"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
- name: UPTIME_FROM_FILE
value: /proc/uptime
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: WORKERS
value: "10"
- name: AWS_ROLE_ARN
value: arn:aws-test:iam::123456789012:role/aws-node-termination-handler.kube-system.sa.minimal.example.com
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/amazonaws.com/token
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
@ -218,27 +232,33 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /proc/uptime
name: uptime
readOnly: true
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- hostPath:
path: /proc/uptime
name: uptime
- name: token-amazonaws-com
projected:
defaultMode: 420
@ -247,7 +267,25 @@ spec:
audience: amazonaws.com
expirationSeconds: 86400
path: token
updateStrategy:
rollingUpdate:
maxUnavailable: 25%
type: RollingUpdate
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs


@ -0,0 +1,11 @@
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": ["events.amazonaws.com", "sqs.amazonaws.com"]
},
"Action": "sqs:SendMessage",
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}]
}


@ -42,7 +42,6 @@ spec:
amazonvpc: {}
nodeTerminationHandler:
enabled: true
enableSQSTerminationDraining: false
nonMasqueradeCIDR: 172.20.0.0/16
serviceAccountIssuerDiscovery:
discoveryStore: memfs://discovery.example.com/minimal.example.com


@ -187,6 +187,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
@ -247,6 +252,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
@ -270,6 +280,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern")
name = "minimal.example.com-RebalanceRecommendation"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-RebalanceRecommendation"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
@ -540,6 +641,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -554,6 +656,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -566,6 +669,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
@ -619,6 +723,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -630,6 +735,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -639,6 +745,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
@ -1049,6 +1156,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"


@ -0,0 +1 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}


@ -0,0 +1 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}


@ -268,13 +268,16 @@
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
"kms:GenerateRandom",
"sqs:DeleteMessage",
"sqs:ReceiveMessage"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",


@ -212,7 +212,6 @@ spec:
cpuRequest: 50m
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableSQSTerminationDraining: false
enableScheduledEventDraining: false
enableSpotInterruptionDraining: true
enabled: true


@ -62,7 +62,7 @@ spec:
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: a6ccfd21bb3ab6ffbc5d48580197c2ecbbcf3ad68043b4c068eb4cc40405fd2c
manifestHash: 0c08eb3cb6900ebc1bdd84104d498ae007983f483b71b4628a460ba48181dd81
name: node-termination-handler.aws
prune:
kinds:
@ -77,17 +77,19 @@ spec:
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops


@ -99,12 +99,12 @@ subjects:
---
apiVersion: apps/v1
kind: DaemonSet
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
@ -115,6 +115,7 @@ metadata:
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
@ -124,11 +125,12 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
@ -136,10 +138,11 @@ spec:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- env:
- name: NODE_NAME
@ -155,7 +158,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "false"
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
@ -170,8 +173,12 @@ spec:
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: METADATA_TRIES
value: "3"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
@ -190,6 +197,8 @@ spec:
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "true"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
@ -199,12 +208,27 @@ spec:
- name: ENABLE_REBALANCE_DRAINING
value: "false"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
- name: UPTIME_FROM_FILE
value: /proc/uptime
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: WORKERS
value: "10"
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
@ -214,25 +238,54 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
volumeMounts:
- mountPath: /proc/uptime
name: uptime
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
runAsUser: 1000
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
nodeSelector: null
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /proc/uptime
name: uptime
updateStrategy:
rollingUpdate:
maxUnavailable: 25%
type: RollingUpdate
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs


@ -0,0 +1,11 @@
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": ["events.amazonaws.com", "sqs.amazonaws.com"]
},
"Action": "sqs:SendMessage",
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}]
}


@ -41,7 +41,6 @@ spec:
amazonvpc: {}
nodeTerminationHandler:
enabled: true
enableSQSTerminationDraining: false
nonMasqueradeCIDR: 172.20.0.0/16
snapshotController:
enabled: true


@ -117,6 +117,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
@ -187,6 +192,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
@ -215,6 +225,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern")
name = "minimal.example.com-RebalanceRecommendation"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-RebalanceRecommendation"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
@ -366,6 +467,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -382,6 +484,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -396,6 +499,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -451,6 +555,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -463,6 +568,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -473,6 +579,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -866,6 +973,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"


@ -0,0 +1 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}


@ -0,0 +1 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance Rebalance Recommendation"]}


@ -0,0 +1 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}


@ -268,13 +268,16 @@
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
"kms:GenerateRandom",
"sqs:DeleteMessage",
"sqs:ReceiveMessage"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",


@ -205,7 +205,6 @@ spec:
cpuRequest: 50m
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableSQSTerminationDraining: false
enableScheduledEventDraining: false
enableSpotInterruptionDraining: true
enabled: true


@ -62,7 +62,7 @@ spec:
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: a6ccfd21bb3ab6ffbc5d48580197c2ecbbcf3ad68043b4c068eb4cc40405fd2c
manifestHash: 0c08eb3cb6900ebc1bdd84104d498ae007983f483b71b4628a460ba48181dd81
name: node-termination-handler.aws
prune:
kinds:
@ -77,17 +77,19 @@ spec:
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops


@ -99,12 +99,12 @@ subjects:
---
apiVersion: apps/v1
kind: DaemonSet
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
@ -115,6 +115,7 @@ metadata:
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
@ -124,11 +125,12 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: daemonset
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
@ -136,10 +138,11 @@ spec:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- env:
- name: NODE_NAME
@ -155,7 +158,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "false"
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
@ -170,8 +173,12 @@ spec:
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: METADATA_TRIES
value: "3"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
@ -190,6 +197,8 @@ spec:
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "true"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
@ -199,12 +208,27 @@ spec:
- name: ENABLE_REBALANCE_DRAINING
value: "false"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
- name: UPTIME_FROM_FILE
value: /proc/uptime
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: WORKERS
value: "10"
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.18.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
@ -214,25 +238,54 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
volumeMounts:
- mountPath: /proc/uptime
name: uptime
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
runAsUser: 1000
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
nodeSelector: null
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /proc/uptime
name: uptime
updateStrategy:
rollingUpdate:
maxUnavailable: 25%
type: RollingUpdate
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs


@ -0,0 +1,11 @@
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": ["events.amazonaws.com", "sqs.amazonaws.com"]
},
"Action": "sqs:SendMessage",
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}]
}


@ -41,7 +41,6 @@ spec:
amazonvpc: {}
nodeTerminationHandler:
enabled: true
enableSQSTerminationDraining: false
nonMasqueradeCIDR: 172.20.0.0/16
snapshotController:
enabled: true


@ -117,6 +117,11 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
@ -187,6 +192,11 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
@ -215,6 +225,97 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-RebalanceRecommendation" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-RebalanceRecommendation_event_pattern")
name = "minimal.example.com-RebalanceRecommendation"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-RebalanceRecommendation"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-RebalanceRecommendation-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-RebalanceRecommendation.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
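The event patterns for these rules are emitted as separate data files (referenced through file()) and are not part of this diff. For orientation only, a spot-interruption rule with its pattern written inline might look roughly like the sketch below; the resource names and the inline jsonencode form are illustrative, not the generated fixture content.

# Sketch only: an EventBridge rule matching EC2 Spot interruption warnings
# and forwarding them to the NTH queue. kops keeps the real pattern in a
# data file; the inline jsonencode form here is just for illustration.
resource "aws_cloudwatch_event_rule" "nth-spot-interruption-sketch" {
  name = "minimal.example.com-SpotInterruption-sketch"
  event_pattern = jsonencode({
    "source"      = ["aws.ec2"]
    "detail-type" = ["EC2 Spot Instance Interruption Warning"]
  })
}
resource "aws_cloudwatch_event_target" "nth-spot-interruption-sketch-Target" {
  arn  = aws_sqs_queue.minimal-example-com-nth.arn
  rule = aws_cloudwatch_event_rule.nth-spot-interruption-sketch.id
}
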
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
@ -366,6 +467,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -382,6 +484,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -396,6 +499,7 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
@ -451,6 +555,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -463,6 +568,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -473,6 +579,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
@ -858,6 +965,17 @@ resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
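The queue policy itself lives in data/aws_sqs_queue_minimal-example-com-nth_policy and is not shown in this diff. A queue that receives these events generally has to let the EventBridge and SQS service principals send messages to it; the following is only a sketch of that shape under that assumption, with a placeholder ARN, not the contents of the generated file.

# Sketch only (not the generated policy file): allow the EventBridge and SQS
# service principals to deliver messages to the NTH queue. The queue ARN
# is a placeholder.
resource "aws_sqs_queue" "nth-queue-sketch" {
  message_retention_seconds = 300
  name                      = "minimal-example-com-nth-sketch"
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Principal = { Service = ["events.amazonaws.com", "sqs.amazonaws.com"] }
      Action    = "sqs:SendMessage"
      Resource  = "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth-sketch"
    }]
  })
}
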
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"


@ -0,0 +1,17 @@
{
"Statement": [
{
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"discovery.example.com/minimal.example.com:sub": "system:serviceaccount:kube-system:aws-node-termination-handler"
}
},
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com"
}
}
],
"Version": "2012-10-17"
}
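
This trust policy ties the role to the aws-node-termination-handler ServiceAccount in kube-system through the cluster's OIDC provider, which is what lets the NTH deployment obtain the SQS and Auto Scaling permissions without granting them to the nodes. Written directly in Terraform instead of a data file, an equivalent role would look roughly like this; the role name is a placeholder, while the policy body mirrors the JSON above.

# Illustrative HCL equivalent of the trust policy above: only the
# aws-node-termination-handler ServiceAccount in kube-system may assume
# the role via the cluster's OIDC provider. The role name is a placeholder.
resource "aws_iam_role" "nth-irsa-sketch" {
  name = "aws-node-termination-handler-irsa-sketch"
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect = "Allow"
      Action = "sts:AssumeRoleWithWebIdentity"
      Principal = {
        Federated = "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com"
      }
      Condition = {
        StringEquals = {
          "discovery.example.com/minimal.example.com:sub" = "system:serviceaccount:kube-system:aws-node-termination-handler"
        }
      }
    }]
  })
}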


@ -0,0 +1,17 @@
{
"Statement": [
{
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"discovery.example.com/minimal.example.com:sub": "system:serviceaccount:kube-system:dns-controller"
}
},
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com"
}
}
],
"Version": "2012-10-17"
}


@ -0,0 +1,26 @@
{
"Statement": [
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeTags",
"ec2:DescribeInstances",
"sqs:DeleteMessage",
"sqs:ReceiveMessage"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "autoscaling:CompleteLifecycleAction",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}
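
The second statement restricts autoscaling:CompleteLifecycleAction with a StringEquals condition on aws:ResourceTag/KubernetesCluster, so the handler can only complete lifecycle hooks on Auto Scaling groups tagged for this cluster. The same statement could equally be authored with an aws_iam_policy_document data source rather than a raw JSON file; a sketch, with an illustrative data source name:

# Sketch: the tag-scoped CompleteLifecycleAction statement expressed as an
# aws_iam_policy_document data source instead of raw JSON.
data "aws_iam_policy_document" "nth-complete-lifecycle-sketch" {
  statement {
    effect    = "Allow"
    actions   = ["autoscaling:CompleteLifecycleAction"]
    resources = ["*"]
    condition {
      test     = "StringEquals"
      variable = "aws:ResourceTag/KubernetesCluster"
      values   = ["nthimdsprocessor.longclustername.example.com"]
    }
  }
}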


@ -0,0 +1,35 @@
{
"Statement": [
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}


@ -0,0 +1,231 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/main/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthimdsprocessor.longclustername.example.com/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-write-bucket"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com",
"ec2:CreateAction": [
"CreateSecurityGroup"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:security-group/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:security-group/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeTags",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVolumes",
"ec2:DescribeVpcs",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:RegisterTargets",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteSecurityGroup",
"ec2:ModifyInstanceAttribute",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "nthimdsprocessor.longclustername.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "ec2:CreateSecurityGroup",
"Effect": "Allow",
"Resource": "arn:aws-test:ec2:*:*:vpc/*"
}
],
"Version": "2012-10-17"
}

Some files were not shown because too many files have changed in this diff.