mirror of https://github.com/kubernetes/kops.git
Merge pull request #3844 from justinsb/fix_cpu_cni
Automatic merge from submit-queue.

Fix CNI CPU allocations

* Limit each CNI provider to 100m
* Remove CPU limits - they cause serious problems (https://github.com/kubernetes/kubernetes/issues/51135), but this also makes the CPU allocation less problematic.
* Bump versions and start introducing the `-kops.1` suffix preemptively.
* Upgrade flannel to 0.9.0 as it fixes a lot.

Builds on #3843
This commit is contained in:
commit 98b2437bc0
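The pattern across the manifests below is consistent: CPU and memory requests stay (trimmed to 100m or less for the CNI containers), memory limits stay where they already existed, and CPU limits are removed. As a rough sketch of the resulting shape (illustrative, not the literal kops manifests), the equivalent ResourceRequirements value looks like this, assuming the k8s.io/api and k8s.io/apimachinery modules:

```go
package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // Illustrative only: the shape of the resources block this PR converges on
    // for CNI containers: a 100m CPU request, a memory request/limit, and
    // deliberately no CPU limit (see kubernetes/kubernetes#51135).
    rr := corev1.ResourceRequirements{
        Requests: corev1.ResourceList{
            corev1.ResourceCPU:    resource.MustParse("100m"),
            corev1.ResourceMemory: resource.MustParse("100Mi"),
        },
        Limits: corev1.ResourceList{
            // Memory limit retained; CPU limit intentionally omitted.
            corev1.ResourceMemory: resource.MustParse("100Mi"),
        },
    }

    fmt.Println("cpu request:", rr.Requests.Cpu().String())
    fmt.Println("cpu limit set:", !rr.Limits.Cpu().IsZero())
}
```

With no CPU limit these pods land in the Burstable QoS class rather than Guaranteed; the request still reserves scheduler capacity, which is the trade-off the PR description is after.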
@@ -20,3 +20,7 @@ Note these are only _requests_, not limits.
less if we start reserving capacity on the master.

* kube-dns is relatively CPU hungry, and runs on the nodes.

* We restrict CNI controllers to 100m. If a controller needs more, it can support a user-settable option.

* Setting a resource limit is a bad idea: https://github.com/kubernetes/kubernetes/issues/51135
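The limits concern in the last bullet is CFS quota throttling: with a CPU limit in place the kernel enforces a hard quota per scheduling period, so a briefly busy container can be throttled even on an otherwise idle node (kubernetes/kubernetes#51135). A quick way to check whether a running container is hitting that throttling is to read its cgroup cpu.stat; the sketch below assumes a cgroup v1 hierarchy mounted at the conventional /sys/fs/cgroup/cpu,cpuacct path, which will differ under cgroup v2 or custom mounts.

```go
package main

import (
    "bufio"
    "fmt"
    "log"
    "os"
    "strings"
)

func main() {
    // cgroup v1 CFS statistics for the current container/cgroup.
    // Path is an assumption; adjust for cgroup v2 or non-default mounts.
    const statPath = "/sys/fs/cgroup/cpu,cpuacct/cpu.stat"

    f, err := os.Open(statPath)
    if err != nil {
        log.Fatalf("open %s: %v", statPath, err)
    }
    defer f.Close()

    // cpu.stat contains nr_periods, nr_throttled and throttled_time (ns).
    // A steadily growing nr_throttled is the symptom that removing the CPU
    // limit avoids.
    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        fields := strings.Fields(scanner.Text())
        if len(fields) == 2 {
            fmt.Printf("%-16s %s\n", fields[0], fields[1])
        }
    }
    if err := scanner.Err(); err != nil {
        log.Fatal(err)
    }
}
```

If nr_throttled keeps climbing for a CNI pod, a CPU limit is the usual culprit; dropping the limit, as this PR does, avoids that failure mode while the request keeps scheduling honest.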
@@ -97,9 +97,23 @@ spec:
      serviceAccountName: flannel
      tolerations:
      - operator: Exists
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.9.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conf
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.7.1
        image: quay.io/coreos/flannel:v0.9.0-amd64
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
@@ -114,31 +128,14 @@ spec:
              fieldPath: metadata.namespace
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
            cpu: 100Mi
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      - name: install-cni
        image: quay.io/coreos/flannel:v0.7.1
        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
        resources:
          limits:
            cpu: 10m
            memory: 25Mi
          requests:
            cpu: 10m
            memory: 25Mi
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
@@ -72,7 +72,6 @@ spec:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: run
@@ -22,7 +22,7 @@ spec:
      containers:
      - resources:
          requests:
            cpu: 20m
            cpu: 50m
            memory: 100Mi
          limits:
            memory: 100Mi
@@ -22,7 +22,7 @@ spec:
      containers:
      - resources:
          requests:
            cpu: 20m
            cpu: 50m
            memory: 100Mi
          limits:
            memory: 100Mi
@@ -51,7 +51,7 @@ spec:
              fieldPath: spec.nodeName
        resources:
          requests:
            cpu: 250m
            cpu: 100m
            memory: 250Mi
        securityContext:
          privileged: true
@@ -139,7 +139,7 @@ spec:
          privileged: true
        resources:
          requests:
            cpu: 250m
            cpu: 100m
        volumeMounts:
        - mountPath: /lib/modules
          name: lib-modules
@@ -149,10 +149,8 @@ spec:
        securityContext:
          privileged: true
        resources:
          limits:
            cpu: 250m
          requests:
            cpu: 250m
            cpu: 50m
        livenessProbe:
          httpGet:
            path: /liveness
@@ -221,10 +219,9 @@ spec:
              key: masquerade
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            cpu: 50m
            memory: 100Mi
        volumeMounts:
        - name: run
@@ -131,7 +131,7 @@ spec:
          privileged: true
        resources:
          requests:
            cpu: 250m
            cpu: 100m
        volumeMounts:
        - mountPath: /lib/modules
          name: lib-modules
@@ -144,7 +144,6 @@ spec:
            cpu: 10m
            memory: 64Mi
          limits:
            cpu: 10m
            memory: 64Mi
        args:
        - --cloud=aws
@@ -178,7 +177,6 @@ spec:
            cpu: 10m
            memory: 64Mi
          limits:
            cpu: 10m
            memory: 64Mi
---
apiVersion: extensions/v1beta1
@@ -209,7 +207,6 @@ spec:
            cpu: 25m
            memory: 128Mi
          limits:
            cpu: 25m
            memory: 128Mi
        env:
        - name: NODENAME
@@ -309,7 +306,6 @@ spec:
            cpu: 10m
            memory: 64Mi
          limits:
            cpu: 10m
            memory: 64Mi
---
apiVersion: extensions/v1beta1
@@ -336,10 +332,9 @@ spec:
        imagePullPolicy: Always
        resources:
          requests:
            cpu: 50m
            cpu: 45m
            memory: 128Mi
          limits:
            cpu: 50m
            memory: 128Mi
        args:
        - --etcd_use_v2
@@ -92,10 +92,9 @@ spec:
            initialDelaySeconds: 30
        resources:
          requests:
            cpu: 100m
            cpu: 50m
            memory: 200Mi
          limits:
            cpu: 100m
            memory: 200Mi
        securityContext:
          privileged: true
@@ -122,10 +121,9 @@ spec:
        image: 'weaveworks/weave-npc:2.0.5'
        resources:
          requests:
            cpu: 100m
            cpu: 50m
            memory: 200Mi
          limits:
            cpu: 100m
            memory: 200Mi
        securityContext:
          privileged: true
@@ -92,10 +92,9 @@ spec:
            initialDelaySeconds: 30
        resources:
          requests:
            cpu: 100m
            cpu: 50m
            memory: 200Mi
          limits:
            cpu: 100m
            memory: 200Mi
        securityContext:
          privileged: true
@@ -124,10 +123,9 @@ spec:
        image: 'weaveworks/weave-npc:2.0.5'
        resources:
          requests:
            cpu: 100m
            cpu: 50m
            memory: 200Mi
          limits:
            cpu: 100m
            memory: 200Mi
        securityContext:
          privileged: true
@@ -51,10 +51,9 @@ spec:
            initialDelaySeconds: 30
        resources:
          requests:
            cpu: 100m
            cpu: 50m
            memory: 200Mi
          limits:
            cpu: 100m
            memory: 200Mi
        securityContext:
          privileged: true
@@ -81,10 +80,9 @@ spec:
        image: 'weaveworks/weave-npc:2.0.5'
        resources:
          requests:
            cpu: 100m
            cpu: 50m
            memory: 200Mi
          limits:
            cpu: 100m
            memory: 200Mi
        securityContext:
          privileged: true
@@ -341,6 +341,8 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    // TODO: Create configuration object for cni providers (maybe create it but orphan it)?

    // NOTE: we try to suffix with -kops.1, so that we can increment versions even if the upstream version
    // hasn't changed. The problem with semver is that there is nothing > 1.0.0 other than 1.0.1-pre.1
    networkingSelector := map[string]string{"role.kubernetes.io/networking": "1"}

    if b.cluster.Spec.Networking.Kopeio != nil {
@@ -380,8 +382,8 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    if b.cluster.Spec.Networking.Weave != nil {
        key := "networking.weave"
        version := "2.0.5"
        // 2.0.6-kops.1 = 2.0.5 with kops manifest tweaks. This should go away with the next version bump.
        version := "2.0.6-kops.1"

        {
            location := key + "/pre-k8s-1.6.yaml"
@@ -431,9 +433,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    if b.cluster.Spec.Networking.Flannel != nil {
        key := "networking.flannel"
        // 0.7.2-kops.1 = 0.7.1 + hairpinMode fix
        version := "0.7.2-kops.1"
        version := "0.9.0-kops.1"

        {
            location := key + "/pre-k8s-1.6.yaml"
@@ -503,10 +503,11 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    if b.cluster.Spec.Networking.Canal != nil {
        key := "networking.projectcalico.org.canal"
        // 2.6.3-kops.1 = 2.6.2 with kops manifest tweaks. This should go away with the next version bump.
        versions := map[string]string{
            "pre-k8s-1.6": "2.4.1",
            "k8s-1.6":     "2.4.1",
            "k8s-1.8":     "2.6.2",
            "pre-k8s-1.6": "2.4.2-kops.1",
            "k8s-1.6":     "2.4.2-kops.1",
            "k8s-1.8":     "2.6.3-kops.1",
        }

        {
@@ -557,8 +558,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    if b.cluster.Spec.Networking.Kuberouter != nil {
        key := "networking.kuberouter"
        version := "0.1.0"
        version := "0.1.1-kops.1"

        {
            location := key + "/k8s-1.6.yaml"
@@ -578,8 +578,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    if b.cluster.Spec.Networking.Romana != nil {
        key := "networking.romana"
        version := "v2.0-preview.2"
        version := "v2.0-preview.3"

        {
            location := key + "/k8s-1.6.yaml"
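The version bumps in the Go hunks above all follow the -kops.1 convention from the NOTE comment: to ship a manifest tweak without waiting on an upstream release, kops publishes the next upstream version with a -kops.N pre-release suffix, which sorts above the version it patches and below the real next upstream release. A minimal sketch of that ordering, assuming a semver library such as github.com/blang/semver:

```go
package main

import (
    "fmt"

    "github.com/blang/semver"
)

func main() {
    // The -kops.N suffix is a semver pre-release on the *next* upstream version,
    // so a kops-tweaked manifest sorts above the upstream release it patches
    // but below the real next upstream release.
    upstream := semver.MustParse("0.7.1")       // upstream version of the shipped manifest
    tweaked := semver.MustParse("0.7.2-kops.1") // kops tweak of 0.7.1
    next := semver.MustParse("0.7.2")           // a future upstream bump

    fmt.Println(tweaked.GT(upstream)) // true: the tweaked addon supersedes the deployed one
    fmt.Println(tweaked.LT(next))     // true: a real 0.7.2 still supersedes the tweak
}
```

Under this scheme 2.0.6-kops.1 supersedes the deployed 2.0.5 weave addon, while a later upstream 2.0.6 still supersedes the kops tweak, which is why the comments say the suffix "should go away with the next version bump."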
@@ -69,18 +69,18 @@ spec:
    name: networking.weave
    selector:
      role.kubernetes.io/networking: "1"
    version: 2.0.5
    version: 2.0.6-kops.1
  - id: k8s-1.6
    kubernetesVersion: '>=1.6.0 <1.7.0'
    manifest: networking.weave/k8s-1.6.yaml
    name: networking.weave
    selector:
      role.kubernetes.io/networking: "1"
    version: 2.0.5
    version: 2.0.6-kops.1
  - id: k8s-1.7
    kubernetesVersion: '>=1.7.0'
    manifest: networking.weave/k8s-1.7.yaml
    name: networking.weave
    selector:
      role.kubernetes.io/networking: "1"
    version: 2.0.5
    version: 2.0.6-kops.1