# kops addon template: node-termination-handler.aws / k8s-1.11.yaml.template
{{ with .NodeTerminationHandler }}
# Sourced from https://github.com/aws/aws-node-termination-handler/releases/download/v1.16.1/all-resources.yaml
---
# Source: aws-node-termination-handler/templates/serviceaccount.yaml
# ServiceAccount used by both the queue-processor Deployment and the IMDS DaemonSet.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: aws-node-termination-handler
  namespace: kube-system
  labels:
    app.kubernetes.io/name: aws-node-termination-handler
    app.kubernetes.io/instance: aws-node-termination-handler
    k8s-app: aws-node-termination-handler
    app.kubernetes.io/version: "{{ .Version }}"
    app.kubernetes.io/component: aws-node-termination-handler
    app.kubernetes.io/part-of: aws-node-termination-handler
---
# Source: aws-node-termination-handler/templates/clusterrole.yaml
# RBAC: NTH cordons/drains nodes (patch/update nodes, evict pods) and emits events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: aws-node-termination-handler
  labels:
    app.kubernetes.io/name: aws-node-termination-handler
    app.kubernetes.io/instance: aws-node-termination-handler
    app.kubernetes.io/version: "{{ .Version }}"
    app.kubernetes.io/component: aws-node-termination-handler
    app.kubernetes.io/part-of: aws-node-termination-handler
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - list
      - patch
      - update
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - list
      - get
  # Eviction subresource drives the drain.
  - apiGroups:
      - ""
    resources:
      - pods/eviction
    verbs:
      - create
  # DaemonSet lookups in both the legacy and current API groups.
  - apiGroups:
      - extensions
    resources:
      - daemonsets
    verbs:
      - get
  - apiGroups:
      - apps
    resources:
      - daemonsets
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
---
# Source: aws-node-termination-handler/templates/clusterrolebinding.yaml
# Binds the ClusterRole above to the kube-system ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: aws-node-termination-handler
  labels:
    app.kubernetes.io/name: aws-node-termination-handler
    app.kubernetes.io/instance: aws-node-termination-handler
    app.kubernetes.io/version: "{{ .Version }}"
    app.kubernetes.io/component: aws-node-termination-handler
    app.kubernetes.io/part-of: aws-node-termination-handler
subjects:
  - kind: ServiceAccount
    name: aws-node-termination-handler
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: aws-node-termination-handler
  apiGroup: rbac.authorization.k8s.io
{{ if EnableSQSTerminationDraining }}
---
# Source: aws-node-termination-handler/templates/deployment.yaml
# Queue-processor mode: a small Deployment consumes lifecycle/interruption
# events from SQS instead of polling IMDS on every node.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: aws-node-termination-handler
  namespace: kube-system
  labels:
    app.kubernetes.io/name: aws-node-termination-handler
    app.kubernetes.io/instance: aws-node-termination-handler
    k8s-app: aws-node-termination-handler
    app.kubernetes.io/version: "{{ .Version }}"
spec:
  replicas: {{ ControlPlaneControllerReplicas true }}
  selector:
    matchLabels:
      app.kubernetes.io/name: aws-node-termination-handler
      app.kubernetes.io/instance: aws-node-termination-handler
      kubernetes.io/os: linux
  template:
    metadata:
      labels:
        app.kubernetes.io/name: aws-node-termination-handler
        app.kubernetes.io/instance: aws-node-termination-handler
        k8s-app: aws-node-termination-handler
        kubernetes.io/os: linux
        kops.k8s.io/nth-mode: sqs
    spec:
      nodeSelector: null
      {{ if not UseServiceAccountExternalPermissions }}
      # Without IRSA, the pod must run on control-plane nodes to inherit
      # their instance-profile permissions (SQS/ASG/EC2 access).
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/control-plane
                    operator: Exists
              - matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: Exists
      {{ end }}
      priorityClassName: system-cluster-critical
      serviceAccountName: aws-node-termination-handler
      securityContext:
        fsGroup: 1000
      containers:
        - name: aws-node-termination-handler
          image: public.ecr.aws/aws-ec2/aws-node-termination-handler:{{ .Version }}
          imagePullPolicy: IfNotPresent
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
            runAsGroup: 1000
            allowPrivilegeEscalation: false
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: ENABLE_PROBES_SERVER
              value: "true"
            - name: PROBES_SERVER_PORT
              value: "8080"
            - name: PROBES_SERVER_ENDPOINT
              value: "/healthz"
            - name: LOG_LEVEL
              value: "info"
            - name: JSON_LOGGING
              value: "true"
            - name: ENABLE_PROMETHEUS_SERVER
              value: "false"
            - name: PROMETHEUS_SERVER_PORT
              value: "9092"
            - name: CHECK_ASG_TAG_BEFORE_DRAINING
              value: "true"
            - name: MANAGED_ASG_TAG
              value: "{{ .ManagedASGTag }}"
            - name: ASSUME_ASG_TAG_PROPAGATION
              value: "false"
            - name: USE_PROVIDER_ID
              value: "true"
            - name: DRY_RUN
              value: "false"
            - name: CORDON_ONLY
              value: "false"
            - name: TAINT_NODE
              value: "false"
            # WithDefaultBool guards against a nil *bool rendering as a raw
            # pointer/"<nil>"; matches the DaemonSet branch below.
            - name: EXCLUDE_FROM_LOAD_BALANCERS
              value: "{{ WithDefaultBool .ExcludeFromLoadBalancers true }}"
            - name: DELETE_LOCAL_DATA
              value: "true"
            - name: IGNORE_DAEMON_SETS
              value: "true"
            - name: POD_TERMINATION_GRACE_PERIOD
              value: "-1"
            - name: NODE_TERMINATION_GRACE_PERIOD
              value: "120"
            - name: EMIT_KUBERNETES_EVENTS
              value: "true"
            # IMDS-based monitors are disabled in SQS mode.
            - name: ENABLE_SPOT_INTERRUPTION_DRAINING
              value: "false"
            - name: ENABLE_SCHEDULED_EVENT_DRAINING
              value: "false"
            - name: ENABLE_REBALANCE_MONITORING
              value: "false"
            - name: ENABLE_REBALANCE_DRAINING
              value: "false"
            - name: ENABLE_SQS_TERMINATION_DRAINING
              value: "true"
            - name: QUEUE_URL
              value: "{{ DefaultQueueName }}"
            - name: WORKERS
              value: "10"
          ports:
            - name: liveness-probe
              protocol: TCP
              containerPort: 8080
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 5
          resources:
            requests:
              cpu: {{ .CPURequest }}
              memory: {{ .MemoryRequest }}
      {{ if not UseServiceAccountExternalPermissions }}
      # Control-plane nodes are tainted; tolerate everything so the pod can land there.
      tolerations:
        - operator: Exists
      {{ end }}
      # Spread replicas across zones (best effort) and hosts (hard requirement).
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: "topology.kubernetes.io/zone"
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: aws-node-termination-handler
              app.kubernetes.io/instance: aws-node-termination-handler
              kops.k8s.io/nth-mode: sqs
        - maxSkew: 1
          topologyKey: "kubernetes.io/hostname"
          whenUnsatisfiable: DoNotSchedule
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: aws-node-termination-handler
              app.kubernetes.io/instance: aws-node-termination-handler
              kops.k8s.io/nth-mode: sqs
---
# Keep at least one queue-processor replica available during voluntary disruptions.
# NOTE(review): policy/v1beta1 was removed in Kubernetes 1.25 (policy/v1 since
# 1.21); left unchanged here because this template also targets older clusters —
# confirm the minimum supported version before bumping.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: aws-node-termination-handler
  namespace: kube-system
  labels:
    app.kubernetes.io/name: aws-node-termination-handler
    app.kubernetes.io/instance: aws-node-termination-handler
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: aws-node-termination-handler
      app.kubernetes.io/instance: aws-node-termination-handler
      kops.k8s.io/nth-mode: sqs
  maxUnavailable: 1
{{ else }}
---
# Source: aws-node-termination-handler/templates/daemonset.linux.yaml
# IMDS mode: one pod per Linux node polls the instance metadata service for
# spot interruptions, scheduled events, and rebalance recommendations.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: aws-node-termination-handler
  namespace: kube-system
  labels:
    app.kubernetes.io/name: aws-node-termination-handler
    app.kubernetes.io/instance: aws-node-termination-handler
    k8s-app: aws-node-termination-handler
    app.kubernetes.io/version: "{{ .Version }}"
spec:
  updateStrategy:
    rollingUpdate:
      maxUnavailable: "25%"
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: aws-node-termination-handler
      app.kubernetes.io/instance: aws-node-termination-handler
      kubernetes.io/os: linux
  template:
    metadata:
      labels:
        app.kubernetes.io/name: aws-node-termination-handler
        app.kubernetes.io/instance: aws-node-termination-handler
        k8s-app: aws-node-termination-handler
        kubernetes.io/os: linux
    spec:
      volumes:
        # Host uptime lets NTH compute time-since-boot for scheduled events.
        - name: "uptime"
          hostPath:
            path: "/proc/uptime"
      priorityClassName: system-node-critical
      serviceAccountName: aws-node-termination-handler
      # hostNetwork so the pod can reach IMDS (169.254.169.254) with the
      # node's identity regardless of CNI configuration.
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: aws-node-termination-handler
          image: public.ecr.aws/aws-ec2/aws-node-termination-handler:{{ .Version }}
          imagePullPolicy: IfNotPresent
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
            runAsGroup: 1000
            allowPrivilegeEscalation: false
          volumeMounts:
            - name: "uptime"
              mountPath: "/proc/uptime"
              readOnly: true
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: ENABLE_PROBES_SERVER
              value: "false"
            - name: PROBES_SERVER_PORT
              value: "8080"
            - name: PROBES_SERVER_ENDPOINT
              value: "/healthz"
            - name: LOG_LEVEL
              value: "info"
            - name: JSON_LOGGING
              value: "true"
            - name: ENABLE_PROMETHEUS_SERVER
              value: "{{ WithDefaultBool .EnablePrometheusMetrics false }}"
            - name: PROMETHEUS_SERVER_PORT
              value: "9092"
            - name: METADATA_TRIES
              value: "3"
            - name: DRY_RUN
              value: "false"
            - name: CORDON_ONLY
              value: "false"
            - name: TAINT_NODE
              value: "false"
            - name: EXCLUDE_FROM_LOAD_BALANCERS
              value: "{{ WithDefaultBool .ExcludeFromLoadBalancers true }}"
            - name: DELETE_LOCAL_DATA
              value: "true"
            - name: IGNORE_DAEMON_SETS
              value: "true"
            - name: POD_TERMINATION_GRACE_PERIOD
              value: "-1"
            - name: NODE_TERMINATION_GRACE_PERIOD
              value: "120"
            - name: EMIT_KUBERNETES_EVENTS
              value: "true"
            - name: ENABLE_SPOT_INTERRUPTION_DRAINING
              value: "{{ WithDefaultBool .EnableSpotInterruptionDraining true }}"
            - name: ENABLE_SCHEDULED_EVENT_DRAINING
              value: "{{ WithDefaultBool .EnableScheduledEventDraining false }}"
            - name: ENABLE_REBALANCE_MONITORING
              value: "{{ WithDefaultBool .EnableRebalanceMonitoring false }}"
            - name: ENABLE_REBALANCE_DRAINING
              value: "{{ WithDefaultBool .EnableRebalanceDraining false }}"
            # SQS mode is handled by the Deployment branch above.
            - name: ENABLE_SQS_TERMINATION_DRAINING
              value: "false"
            - name: UPTIME_FROM_FILE
              value: "/proc/uptime"
          resources:
            requests:
              cpu: {{ .CPURequest }}
              memory: {{ .MemoryRequest }}
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        - operator: Exists
{{ end }}
{{ end }}