Bump Cilium to 1.11.5

Since this release introduces backwards-incompatible RBAC changes, the manifest has been forked: Cilium versions before v1.11.5 keep the existing k8s-1.16-v1.10.yaml manifest, while v1.11.5 and later get a new k8s-1.16-v1.11.yaml.
Ole Markus With 2022-05-18 21:44:19 +02:00
parent 19aafca270
commit 2d50b9ff2c
24 changed files with 1290 additions and 147 deletions


@ -40,7 +40,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error {
}
if c.Version == "" {
c.Version = "v1.11.4"
c.Version = "v1.11.5"
}
if c.EnableEndpointHealthChecking == nil {


@ -223,7 +223,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: disabled
-version: v1.11.4
+version: v1.11.5
nonMasqueradeCIDR: ::/0
secretStore: memfs://clusters.example.com/minimal-ipv6.example.com/secrets
serviceClusterIPRange: fd00:5e4f:ce::/108


@ -53,8 +53,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
-manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 6969c542728bd5bd6dce4120a6f911d9a70462646014a1e21e13985ef9e56610
+manifest: networking.cilium.io/k8s-1.16-v1.11.yaml
+manifestHash: 72833833b7b7bc871347d491216e2f8496df762e8bfb5775c71424c878dc48c5
name: networking.cilium.io
needsRollingUpdate: all
selector:
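
Both channel entries change together: the manifest path moves to the v1.11 fork, and the manifestHash tracks the new file's bytes, which is what lets kops detect that the addon needs reapplying (and, given needsRollingUpdate: all, that nodes need rolling). A minimal sketch for reproducing such digests, assuming — as the 64-hex-digit format suggests — that manifestHash is a SHA-256 over the manifest content; the filename is a placeholder:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	// Placeholder path; point it at a generated manifest to compare digests.
	data, err := os.ReadFile("k8s-1.16-v1.11.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	sum := sha256.Sum256(data)

	// Hex digest: the format of the manifestHash fields above.
	fmt.Println("manifestHash:", hex.EncodeToString(sum[:]))

	// Base64 digest: the format of the NodeupConfigHash value that also
	// changes in the warm pool cluster's kube_env file further down.
	fmt.Println("configHash:", base64.StdEncoding.EncodeToString(sum[:]))
}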


@ -57,6 +57,7 @@ data:
kube-proxy-replacement: partial
masquerade: "false"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
@ -105,36 +106,16 @@ rules:
resources:
- namespaces
- services
- nodes
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- endpoints
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
@ -153,21 +134,15 @@ rules:
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
- ciliumegressnatpolicies
verbs:
- '*'
@ -193,6 +168,20 @@ rules:
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -379,7 +368,7 @@ spec:
value: api.internal.minimal-ipv6.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
-image: quay.io/cilium/cilium:v1.11.4
+image: quay.io/cilium/cilium:v1.11.5
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@ -475,7 +464,7 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
-image: quay.io/cilium/cilium:v1.11.4
+image: quay.io/cilium/cilium:v1.11.5
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
@ -611,7 +600,7 @@ spec:
value: api.internal.minimal-ipv6.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
-image: quay.io/cilium/operator:v1.11.4
+image: quay.io/cilium/operator:v1.11.5
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:


@ -735,7 +735,7 @@ resource "aws_s3_object" "minimal-ipv6-example-com-addons-limit-range-addons-k8s
resource "aws_s3_object" "minimal-ipv6-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.10.yaml"
key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}


@ -166,7 +166,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal-warmpool.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
-NodeupConfigHash: VB+wVXktis1pq5AJsf340nKkPeJX5/1mlbANYgA3jQ0=
+NodeupConfigHash: dG1Qo5mvZr8F1z5gSortWbpvKxDlufN7K+OddwzKcUg=
__EOF_KUBE_ENV


@ -206,7 +206,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: vxlan
-version: v1.11.4
+version: v1.11.5
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://clusters.example.com/minimal-warmpool.example.com/secrets


@ -53,8 +53,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
-manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 5c6574fadfcf3b3870b94a648c42724d0d1444f0af039acba69a83ae2f44e56b
+manifest: networking.cilium.io/k8s-1.16-v1.11.yaml
+manifestHash: 375707f2c54fb09676a0ce6e971a2de8213860aeee1ed7f3abd179313dc0490d
name: networking.cilium.io
needsRollingUpdate: all
selector:


@ -57,6 +57,7 @@ data:
kube-proxy-replacement: partial
masquerade: "true"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
@ -105,36 +106,16 @@ rules:
resources:
- namespaces
- services
- nodes
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- endpoints
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
@ -153,21 +134,15 @@ rules:
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
- ciliumegressnatpolicies
verbs:
- '*'
@ -193,6 +168,20 @@ rules:
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -379,7 +368,7 @@ spec:
value: api.internal.minimal-warmpool.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
-image: quay.io/cilium/cilium:v1.11.4
+image: quay.io/cilium/cilium:v1.11.5
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@ -475,7 +464,7 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
-image: quay.io/cilium/cilium:v1.11.4
+image: quay.io/cilium/cilium:v1.11.5
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
@ -611,7 +600,7 @@ spec:
value: api.internal.minimal-warmpool.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
-image: quay.io/cilium/operator:v1.11.4
+image: quay.io/cilium/operator:v1.11.5
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:


@ -67,8 +67,8 @@ containerdConfig:
logLevel: info
version: 1.4.12
warmPoolImages:
-- quay.io/cilium/cilium:v1.11.4
-- quay.io/cilium/operator:v1.11.4
+- quay.io/cilium/cilium:v1.11.5
+- quay.io/cilium/operator:v1.11.5
- registry.k8s.io/kube-proxy:v1.21.0
- registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0
- registry.k8s.io/sig-storage/livenessprobe:v2.2.0


@ -618,7 +618,7 @@ resource "aws_s3_object" "minimal-warmpool-example-com-addons-limit-range-addons
resource "aws_s3_object" "minimal-warmpool-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.10.yaml"
key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}


@ -192,7 +192,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: vxlan
-version: v1.11.4
+version: v1.11.5
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://clusters.example.com/privatecilium.example.com/secrets


@ -53,8 +53,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
-manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 8e556db94dd0040ecf256f2af58dba3ba330d443a289f9950a9a695faad258ea
+manifest: networking.cilium.io/k8s-1.16-v1.11.yaml
+manifestHash: 377cd477c63b62dbf7a458500824a0d9c2e227cebb334b6f5c70a9ceaa3a6b98
name: networking.cilium.io
needsRollingUpdate: all
selector:


@ -57,6 +57,7 @@ data:
kube-proxy-replacement: partial
masquerade: "true"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
@ -105,36 +106,16 @@ rules:
resources:
- namespaces
- services
- nodes
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- endpoints
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
@ -153,21 +134,15 @@ rules:
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
- ciliumegressnatpolicies
verbs:
- '*'
@ -193,6 +168,20 @@ rules:
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -379,7 +368,7 @@ spec:
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
-image: quay.io/cilium/cilium:v1.11.4
+image: quay.io/cilium/cilium:v1.11.5
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@ -475,7 +464,7 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
-image: quay.io/cilium/cilium:v1.11.4
+image: quay.io/cilium/cilium:v1.11.5
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
@ -611,7 +600,7 @@ spec:
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
-image: quay.io/cilium/operator:v1.11.4
+image: quay.io/cilium/operator:v1.11.5
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:


@ -901,7 +901,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8
resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.10.yaml"
key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}


@ -202,7 +202,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: disabled
-version: v1.11.4
+version: v1.11.5
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://clusters.example.com/privateciliumadvanced.example.com/secrets


@ -53,8 +53,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
-manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 58f1a38f57a73a5d32f7e6c97b3c7ea2435f1afeea87056bef2e880cfa091c96
+manifest: networking.cilium.io/k8s-1.16-v1.11.yaml
+manifestHash: 791bef8c3da2a69f8954d83393b97b0964db6e1d3056b49140850cb46563416b
name: networking.cilium.io
needsRollingUpdate: all
selector:


@ -71,6 +71,7 @@ data:
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
masquerade: "false"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
@ -119,36 +120,16 @@ rules:
resources:
- namespaces
- services
- nodes
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- endpoints
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
@ -167,21 +148,15 @@ rules:
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
- ciliumegressnatpolicies
verbs:
- '*'
@ -207,6 +182,20 @@ rules:
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -393,7 +382,7 @@ spec:
value: api.internal.privateciliumadvanced.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
-image: quay.io/cilium/cilium:v1.11.4
+image: quay.io/cilium/cilium:v1.11.5
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@ -495,7 +484,7 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
-image: quay.io/cilium/cilium:v1.11.4
+image: quay.io/cilium/cilium:v1.11.5
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
@ -642,7 +631,7 @@ spec:
value: api.internal.privateciliumadvanced.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
-image: quay.io/cilium/operator:v1.11.4
+image: quay.io/cilium/operator:v1.11.5
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:


@ -933,7 +933,7 @@ resource "aws_s3_object" "privateciliumadvanced-example-com-addons-limit-range-a
resource "aws_s3_object" "privateciliumadvanced-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.10.yaml"
key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}


@ -64,11 +64,28 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error {
}
addons.Add(addon)
}
-} else if ver.Minor == 10 || ver.Minor == 11 {
+} else if ver.Minor == 10 || (ver.Minor == 11 && ver.Patch < 5) {
{
id := "k8s-1.16"
location := key + "/" + id + "-v1.10.yaml"
addon := &api.AddonSpec{
Name: fi.String(key),
Selector: networkingSelector(),
Manifest: fi.String(location),
Id: id,
NeedsRollingUpdate: "all",
}
if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) {
addon.NeedsPKI = true
}
addons.Add(addon)
}
} else if ver.Minor == 11 {
{
id := "k8s-1.16"
location := key + "/" + id + "-v1.11.yaml"
addon := &api.AddonSpec{
Name: fi.String(key),
Selector: networkingSelector(),
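
The change above narrows the existing branch to Cilium 1.10.x and 1.11 patch releases below 5, and adds a new 1.11 branch that loads the forked k8s-1.16-v1.11.yaml. A stripped-down sketch of the same selection logic (a hypothetical helper for illustration, not the kOps API):

package main

import "fmt"

// manifestFor mirrors the else-if chain in addCiliumAddon: the pre-fork
// manifest covers 1.10.x and 1.11 patches below 5; 1.11.5+ gets the fork.
// Other minors are handled by other branches of the real function.
func manifestFor(minor, patch int) (string, bool) {
	switch {
	case minor == 10 || (minor == 11 && patch < 5):
		return "k8s-1.16-v1.10.yaml", true
	case minor == 11:
		return "k8s-1.16-v1.11.yaml", true
	}
	return "", false
}

func main() {
	for _, v := range [][2]int{{11, 4}, {11, 5}, {10, 12}} {
		m, _ := manifestFor(v[0], v[1])
		fmt.Printf("1.%d.%d -> %s\n", v[0], v[1], m)
	}
}

Forking rather than rewriting the existing manifest means clusters pinned to older Cilium versions keep the RBAC rules that match their running agents; only clusters moving to 1.11.5 pick up the new ClusterRole layout.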


@ -53,8 +53,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
-manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 86abb46767e969d69039cc0b1205f37259d3c02969c655d3c0acb300d9deb5ea
+manifest: networking.cilium.io/k8s-1.16-v1.11.yaml
+manifestHash: 1a6377642426fac2aee248a206b8bde8fd58dda23d1d2cc138ed05a0ea2469ea
name: networking.cilium.io
needsRollingUpdate: all
selector:


@ -60,8 +60,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
-manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 86abb46767e969d69039cc0b1205f37259d3c02969c655d3c0acb300d9deb5ea
+manifest: networking.cilium.io/k8s-1.16-v1.11.yaml
+manifestHash: 1a6377642426fac2aee248a206b8bde8fd58dda23d1d2cc138ed05a0ea2469ea
name: networking.cilium.io
needsRollingUpdate: all
selector:


@ -67,8 +67,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
-manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 86abb46767e969d69039cc0b1205f37259d3c02969c655d3c0acb300d9deb5ea
+manifest: networking.cilium.io/k8s-1.16-v1.11.yaml
+manifestHash: 1a6377642426fac2aee248a206b8bde8fd58dda23d1d2cc138ed05a0ea2469ea
name: networking.cilium.io
needsRollingUpdate: all
selector: