Merge pull request #9418 from olemarkus/arm64-kubedns

Make dns pods work on arm64 clusters
Kubernetes Prow Robot, 2020-06-21 12:28:39 -07:00, committed by GitHub
commit da3ba758e4
9 changed files with 42 additions and 27 deletions
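
In short: the kube-dns, dnsmasq-nanny, and sidecar images switch from per-architecture names (the -{{Arch}} and -amd64 suffixes) to plain, multi-arch image names, so each node pulls the build matching its own architecture. The cluster-proportional-autoscaler is only published for amd64, so it keeps its -amd64 image and is instead pinned to amd64 nodes with a nodeSelector. The now-unused Arch template function is removed, and the addon manifest hashes are bumped so existing clusters pick up the new manifests.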

View File

@@ -1265,7 +1265,7 @@ spec:
     spec:
       containers:
       - name: autoscaler
-        image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.4.0
+        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0
         resources:
           requests:
             cpu: "20m"
@@ -1280,10 +1280,14 @@ spec:
         - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
         - --logtostderr=true
         - --v=2
+      nodeSelector:
+        kubernetes.io/arch: amd64
       priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
+      - key: node-role.kubernetes.io/master
+        operator: Exists
       serviceAccountName: coredns-autoscaler
 ---
 apiVersion: apps/v1
@@ -2685,7 +2689,7 @@ spec:
     spec:
       containers:
       - name: autoscaler
-        image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.4.0
+        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0
         resources:
           requests:
             cpu: "20m"
@@ -2701,10 +2705,14 @@ spec:
         - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
         - --logtostderr=true
         - --v=2
+      nodeSelector:
+        kubernetes.io/arch: amd64
       priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
+      - key: node-role.kubernetes.io/master
+        operator: Exists
       serviceAccountName: kube-dns-autoscaler
 ---
@@ -2762,7 +2770,7 @@ spec:
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-{{Arch}}:1.14.13
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -2814,7 +2822,7 @@ spec:
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-{{Arch}}:1.14.13
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.15.13
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -2856,7 +2864,7 @@ spec:
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.13
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
         livenessProbe:
           httpGet:
             path: /metrics
@@ -3038,7 +3046,7 @@ spec:
     spec:
       containers:
       - name: autoscaler
-        image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.1.2-r2
+        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
         resources:
           requests:
             cpu: "20m"
@@ -3103,7 +3111,7 @@ spec:
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-{{Arch}}:1.14.10
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.10
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -3155,7 +3163,7 @@ spec:
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-{{Arch}}:1.14.10
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.10
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -3197,7 +3205,7 @@ spec:
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.10
         livenessProbe:
           httpGet:
             path: /metrics
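
Taken together, the autoscaler hunks above leave each cluster-proportional-autoscaler Deployment with the scheduling stanza sketched below. This is a reconstruction from the hunks, with unrelated fields elided; the coredns variant is shown, and the kube-dns one differs only in its serviceAccountName:

    spec:
      containers:
      - name: autoscaler
        # only an amd64 build of this image is published, so the -amd64 tag stays
        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0
        # ...args and resources elided...
      # pin the pod to amd64 nodes so it never lands on an arm64 node
      nodeSelector:
        kubernetes.io/arch: amd64
      priorityClassName: system-cluster-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      # newly added: also tolerate the master taint
      - key: node-role.kubernetes.io/master
        operator: Exists
      serviceAccountName: coredns-autoscaler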

View File

@@ -103,7 +103,7 @@ spec:
     spec:
       containers:
       - name: autoscaler
-        image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.4.0
+        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0
         resources:
           requests:
             cpu: "20m"
@@ -118,10 +118,14 @@ spec:
         - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
         - --logtostderr=true
         - --v=2
+      nodeSelector:
+        kubernetes.io/arch: amd64
       priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
+      - key: node-role.kubernetes.io/master
+        operator: Exists
       serviceAccountName: coredns-autoscaler
 ---
 apiVersion: apps/v1

View File

@@ -53,7 +53,7 @@ spec:
     spec:
       containers:
       - name: autoscaler
-        image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.4.0
+        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0
         resources:
           requests:
             cpu: "20m"
@@ -69,10 +69,14 @@ spec:
         - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
         - --logtostderr=true
         - --v=2
+      nodeSelector:
+        kubernetes.io/arch: amd64
       priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
+      - key: node-role.kubernetes.io/master
+        operator: Exists
       serviceAccountName: kube-dns-autoscaler
 ---
@@ -130,7 +134,7 @@ spec:
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-{{Arch}}:1.14.13
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -182,7 +186,7 @@ spec:
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-{{Arch}}:1.14.13
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.15.13
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -224,7 +228,7 @@ spec:
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.13
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
         livenessProbe:
           httpGet:
             path: /metrics

View File

@@ -52,7 +52,7 @@ spec:
     spec:
       containers:
       - name: autoscaler
-        image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.1.2-r2
+        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
         resources:
           requests:
             cpu: "20m"
@@ -117,7 +117,7 @@ spec:
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-{{Arch}}:1.14.10
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.10
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -169,7 +169,7 @@ spec:
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-{{Arch}}:1.14.10
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.10
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -211,7 +211,7 @@ spec:
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.10
         livenessProbe:
           httpGet:
             path: /metrics

View File

@@ -70,7 +70,6 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS
 	dest["UseBootstrapTokens"] = tf.UseBootstrapTokens
 	dest["UseEtcdTLS"] = tf.UseEtcdTLS
 	// Remember that we may be on a different arch from the target. Hard-code for now.
-	dest["Arch"] = func() string { return "amd64" }
 	dest["replace"] = func(s, find, replace string) string {
 		return strings.Replace(s, find, replace, -1)
 	}
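
With Arch gone from the FuncMap, addon templates can no longer expand {{Arch}} at render time. That is why every -{{Arch}} image reference in the manifests above had to become either a plain multi-arch image name or an explicit -amd64 tag paired with the kubernetes.io/arch nodeSelector.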

View File

@@ -21,7 +21,7 @@ spec:
   - id: k8s-1.6
     kubernetesVersion: <1.12.0
     manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
-    manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
+    manifestHash: 7778a47fa6bda0bae9e0eeea1593a117a9b8bba4
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io
@@ -29,7 +29,7 @@ spec:
   - id: k8s-1.12
     kubernetesVersion: '>=1.12.0'
     manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml
-    manifestHash: 15ade04df128488a534141bd5b8593d078f4953f
+    manifestHash: c1d396ba1997d0eb54586cc14da24fb07548e215
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io
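
This channel file and the three identical ones that follow bump manifestHash for both kube-dns manifests. The hash is a checksum of the manifest file's contents (the 40 hex digits are consistent with SHA-1), and a changed hash is what signals the kops channels tool to re-apply the addon on existing clusters.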

View File

@@ -21,7 +21,7 @@ spec:
   - id: k8s-1.6
     kubernetesVersion: <1.12.0
     manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
-    manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
+    manifestHash: 7778a47fa6bda0bae9e0eeea1593a117a9b8bba4
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io
@@ -29,7 +29,7 @@ spec:
   - id: k8s-1.12
     kubernetesVersion: '>=1.12.0'
     manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml
-    manifestHash: 15ade04df128488a534141bd5b8593d078f4953f
+    manifestHash: c1d396ba1997d0eb54586cc14da24fb07548e215
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io

View File

@@ -21,7 +21,7 @@ spec:
   - id: k8s-1.6
     kubernetesVersion: <1.12.0
     manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
-    manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
+    manifestHash: 7778a47fa6bda0bae9e0eeea1593a117a9b8bba4
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io
@@ -29,7 +29,7 @@ spec:
   - id: k8s-1.12
     kubernetesVersion: '>=1.12.0'
     manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml
-    manifestHash: 15ade04df128488a534141bd5b8593d078f4953f
+    manifestHash: c1d396ba1997d0eb54586cc14da24fb07548e215
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io

View File

@@ -21,7 +21,7 @@ spec:
   - id: k8s-1.6
     kubernetesVersion: <1.12.0
     manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
-    manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
+    manifestHash: 7778a47fa6bda0bae9e0eeea1593a117a9b8bba4
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io
@@ -29,7 +29,7 @@ spec:
   - id: k8s-1.12
     kubernetesVersion: '>=1.12.0'
     manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml
-    manifestHash: 15ade04df128488a534141bd5b8593d078f4953f
+    manifestHash: c1d396ba1997d0eb54586cc14da24fb07548e215
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io