# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
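
# The ConfigMap below is emitted only when the cluster spec configures custom
# upstream nameservers and/or stub domains for kube-dns. An illustrative
# rendered result (example values, not defaults):
#
#   data:
#     upstreamNameservers: |
#       ["1.1.1.1", "8.8.8.8"]
#     stubDomains: |
#       {"example.internal": ["10.0.0.10"]}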
{{- if or (.KubeDNS.UpstreamNameservers) (.KubeDNS.StubDomains) }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
data:
{{- if .KubeDNS.UpstreamNameservers }}
  upstreamNameservers: |
    {{ ToJSON .KubeDNS.UpstreamNameservers }}
{{- end }}
{{- if .KubeDNS.StubDomains }}
  stubDomains: |
    {{ ToJSON .KubeDNS.StubDomains }}
{{- end }}

---
{{- end }}

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns-autoscaler
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: kube-dns-autoscaler
  template:
    metadata:
      labels:
        k8s-app: kube-dns-autoscaler
    spec:
      containers:
      - name: autoscaler
        image: {{ if KubeDNS.CPAImage }}{{ KubeDNS.CPAImage }}{{ else }}registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.8.8{{ end }}
        resources:
          requests:
            cpu: "20m"
            memory: "10Mi"
        command:
        - /cluster-proportional-autoscaler
        - --namespace=kube-system
        - --configmap=kube-dns-autoscaler
        # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
        - --target=Deployment/kube-dns
        # When the cluster uses large nodes (with more cores), "coresPerReplica" should dominate.
        # When it uses small nodes, "nodesPerReplica" should dominate.
        - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
        - --logtostderr=true
        - --v=2
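        # With the "linear" mode configured above, the autoscaler computes
        # (per the cluster-proportional-autoscaler documentation):
        #   replicas = max(ceil(cores / coresPerReplica), ceil(nodes / nodesPerReplica))
        # and preventSinglePointFailure keeps at least 2 replicas once the
        # cluster has more than one node.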
      priorityClassName: system-cluster-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      serviceAccountName: kube-dns-autoscaler

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
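  # maxUnavailable: 0 with maxSurge: 10% means replacement kube-dns pods are
  # started before old ones are removed, so DNS capacity never drops during a rollout.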
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '10055'
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values:
                  - kube-dns
              topologyKey: kubernetes.io/hostname
      dnsPolicy: Default # Don't use cluster DNS.
      priorityClassName: system-cluster-critical
      serviceAccountName: kube-dns
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true

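      # Three containers cooperate in this pod: kubedns answers cluster DNS
      # queries on 127.0.0.1:10053, dnsmasq listens on port 53 and
      # caches/forwards cluster-domain and reverse-lookup queries to kubedns,
      # and the sidecar health-checks both and serves metrics on port 10054.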
      containers:
      - name: kubedns
        image: registry.k8s.io/k8s-dns-kube-dns:1.15.13
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't back off from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only set up the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --config-dir=/kube-dns-config
        - --dns-port=10053
        - --domain={{ KubeDNS.Domain }}.
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config

      - name: dnsmasq
        image: registry.k8s.io/k8s-dns-dnsmasq-nanny:1.15.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size={{ KubeDNS.CacheMaxSize }}
        - --dns-forward-max={{ KubeDNS.CacheMaxConcurrent }}
        - --no-negcache
        - --log-facility=-
        - --server=/{{ KubeDNS.Domain }}/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        - --min-port=1024
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny

      - name: sidecar
        image: registry.k8s.io/k8s-dns-sidecar:1.15.13
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ KubeDNS.Domain }},5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ KubeDNS.Domain }},5,A
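        # Each --probe entry is label,server,dns-name,interval-seconds,record-type;
        # the sidecar resolves kubernetes.default.svc.<domain> against kubedns and
        # dnsmasq every 5 seconds and backs the /healthcheck/<label> endpoints
        # used by the liveness probes above.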
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"

---

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
  # Without this resourceVersion value, an update of the Service between versions will yield:
  #   Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
  resourceVersion: "0"
spec:
  selector:
    k8s-app: kube-dns
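  # The clusterIP below is the cluster DNS address from the cluster spec
  # (KubeDNS.ServerIP); it must match the clusterDNS address configured on the
  # kubelets (for example 100.64.0.10 with the default kops service IP range).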
  clusterIP: {{ KubeDNS.ServerIP }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-addon: kube-dns.addons.k8s.io
  name: kube-dns-autoscaler
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list", "watch"]
- apiGroups: [""]
  resources: ["replicationcontrollers/scale"]
  verbs: ["get", "update"]
- apiGroups: ["extensions", "apps"]
  resources: ["deployments/scale", "replicasets/scale"]
  verbs: ["get", "update"]
# Remove the configmaps rule once the issue below is fixed:
# kubernetes-incubator/cluster-proportional-autoscaler#16
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-addon: kube-dns.addons.k8s.io
  name: kube-dns-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-dns-autoscaler
subjects:
- kind: ServiceAccount
  name: kube-dns-autoscaler
  namespace: kube-system

---
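
# The PodDisruptionBudget below limits voluntary disruptions (such as node
# drains) so that no more than half of the kube-dns pods are evicted at once.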
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kube-dns
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-dns
  maxUnavailable: 50%