mirror of https://github.com/kubernetes/kops.git
Remove unreferenced template
This commit is contained in:
parent 63ccaa14d6
commit 09df6ac844

@@ -1,230 +0,0 @@
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns-autoscaler
    kubernetes.io/cluster-service: "true"
spec:
  template:
    metadata:
      labels:
        k8s-app: kube-dns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: autoscaler
        image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.0.0
        resources:
          requests:
            cpu: "20m"
            memory: "10Mi"
        command:
        - /cluster-proportional-autoscaler
        - --namespace=kube-system
        - --configmap=kube-dns-autoscaler
        - --mode=linear
        # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
        - --target=Deployment/kube-dns
        # When the cluster uses large nodes (with more cores), "coresPerReplica" should dominate.
        # When it uses small nodes, "nodesPerReplica" should dominate.
        - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":2}}
        - --logtostderr=true
        - --v=2
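
# How linear mode scales (illustrative note; this formula is from the upstream
# cluster-proportional-autoscaler docs, not from this template):
#
#   replicas = max(ceil(cores / coresPerReplica),
#                  ceil(nodes / nodesPerReplica))   # then clamped to >= min
#
# With the defaults above, a 100-node cluster totalling 400 cores gets
# max(ceil(400/256), ceil(100/16)) = max(2, 7) = 7 kube-dns replicas, so
# nodesPerReplica dominates on small nodes and coresPerReplica on large ones.
# The --configmap flag names the ConfigMap the autoscaler watches for live
# overrides; if it is missing, the autoscaler recreates it from
# --default-params, roughly:
#
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: kube-dns-autoscaler
#     namespace: kube-system
#   data:
#     linear: '{"coresPerReplica":256,"nodesPerReplica":16,"min":2}'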
|
---

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubedns
        image: k8s.gcr.io/kubedns-{{Arch}}:1.9
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't back off from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz-kubedns
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only set up the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain={{ KubeDNS.Domain }}.
        - --dns-port=10053
        - --config-map=kube-dns
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
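
        # The --config-map=kube-dns flag above lets kubedns pick up resolver
        # overrides at runtime from a ConfigMap of that name. A sketch of the
        # upstream format (an assumption for illustration, not part of this
        # template):
        #
        #   apiVersion: v1
        #   kind: ConfigMap
        #   metadata:
        #     name: kube-dns
        #     namespace: kube-system
        #   data:
        #     stubDomains: '{"corp.example.com": ["10.1.2.3"]}'
        #     upstreamNameservers: '["8.8.8.8", "8.8.4.4"]'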
      - name: dnsmasq
        image: k8s.gcr.io/k8s-dns-dnsmasq-{{Arch}}:1.14.10
        livenessProbe:
          httpGet:
            path: /healthz-dnsmasq
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --cache-size={{ KubeDNS.CacheMaxSize }}
        - --dns-forward-max={{ KubeDNS.CacheMaxConcurrent }}
        - --no-resolv
        - --server=127.0.0.1#10053
        - --log-facility=-
        - --min-port=1024
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 10Mi
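
        # Request path through this pod (illustrative note): clients reach the
        # kube-dns Service on port 53, which lands on dnsmasq above. dnsmasq
        # answers from its cache; --no-resolv keeps it from consulting the
        # node's /etc/resolv.conf, and --server=127.0.0.1#10053 (dnsmasq's
        # address#port syntax) forwards every cache miss to the kubedns
        # container, which serves the cluster zone on port 10053. That is why
        # kubedns exposes dns-local/dns-tcp-local on 10053 instead of 53.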
      - name: dnsmasq-metrics
        image: k8s.gcr.io/dnsmasq-metrics-{{Arch}}:1.0
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 10Mi
      - name: healthz
        image: k8s.gcr.io/exechealthz-{{Arch}}:1.2
        resources:
          limits:
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - --cmd=nslookup kubernetes.default.svc.{{ KubeDNS.Domain }} 127.0.0.1 >/dev/null
        - --url=/healthz-dnsmasq
        - --cmd=nslookup kubernetes.default.svc.{{ KubeDNS.Domain }} 127.0.0.1:10053 >/dev/null
        - --url=/healthz-kubedns
        - --port=8080
        - --quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default # Don't use cluster DNS.
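
      # Health-check wiring (illustrative note): each --cmd/--url pair makes
      # exechealthz run that nslookup on demand and report the result at the
      # given URL on port 8080. The dnsmasq livenessProbe polls
      # /healthz-dnsmasq (a lookup via port 53) and the kubedns livenessProbe
      # polls /healthz-kubedns (a lookup via port 10053), so the kubelet
      # restarts whichever DNS layer stops resolving. dnsPolicy: Default keeps
      # this pod on the node's own resolver, avoiding a bootstrap loop through
      # cluster DNS.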
|
---

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ KubeDNS.ServerIP }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
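
The {{ ... }} placeholders are Go-template variables that kops substitutes when it renders this addon for a cluster. For illustration only (these values are assumptions, not taken from this commit), a render with Arch=amd64, KubeDNS.Domain=cluster.local, and KubeDNS.ServerIP=100.64.0.10 would turn the Service above into (labels omitted):

    apiVersion: v1
    kind: Service
    metadata:
      name: kube-dns
      namespace: kube-system
    spec:
      selector:
        k8s-app: kube-dns
      clusterIP: 100.64.0.10
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP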