Merge pull request #15705 from zadjadr/feature/cilium-14

Bump to Cilium 1.14: the default version becomes v1.14.3, and the bundled addon manifests and test fixtures are regenerated to match.
Authored by Kubernetes Prow Robot on 2023-10-25 19:54:22 +02:00, committed by GitHub
commit 165cbd5333
38 changed files with 2215 additions and 540 deletions

View File

@ -1262,8 +1262,8 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe
allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Could not parse as semantic version"))
}
if version.Minor != 13 {
allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only version 1.13 is supported"))
if version.Minor != 14 {
allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only version 1.14 is supported"))
}
if v.Hubble != nil && fi.ValueOf(v.Hubble.Enabled) {
@ -1311,15 +1311,6 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe
}
allErrs = append(allErrs, IsValidValue(fldPath.Child("encryptionType"), &v.EncryptionType, []kops.CiliumEncryptionType{kops.CiliumEncryptionTypeIPSec, kops.CiliumEncryptionTypeWireguard})...)
if v.EncryptionType == "wireguard" {
// Cilium with Wireguard integration follow-up --> https://github.com/cilium/cilium/issues/15462.
// The following rule of validation should be deleted as this combination
// will be supported on future releases of Cilium (>= v1.11.0).
if fi.ValueOf(v.EnableL7Proxy) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("enableL7Proxy"), "L7 proxy cannot be enabled if wireguard is enabled."))
}
}
}
if fi.ValueOf(v.EnableL7Proxy) && v.InstallIptablesRules != nil && !*v.InstallIptablesRules {
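
kops pins exactly one supported Cilium minor per release, so this hunk moves the gate from 1.13 to 1.14 and any other minor now fails validation. A minimal sketch of the check using only the standard library (illustrative: the real code parses with a semver library and reports errors through field.Invalid, as above):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // minorOf extracts the minor component of a "vMAJOR.MINOR.PATCH" string.
    // Hypothetical helper for illustration only.
    func minorOf(version string) (int, error) {
        parts := strings.Split(strings.TrimPrefix(version, "v"), ".")
        if len(parts) < 2 {
            return 0, fmt.Errorf("could not parse %q as semantic version", version)
        }
        return strconv.Atoi(parts[1])
    }

    func main() {
        for _, v := range []string{"v1.13.5", "v1.14.3"} {
            if minor, err := minorOf(v); err != nil || minor != 14 {
                fmt.Printf("%s: rejected (only version 1.14 is supported)\n", v)
                continue
            }
            fmt.Printf("%s: accepted\n", v)
        }
    }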

View File

@ -1023,7 +1023,7 @@ func Test_Validate_Cilium(t *testing.T) {
},
{
Cilium: kops.CiliumNetworkingSpec{
Version: "v1.13.5",
Version: "v1.14.3",
Ingress: &kops.CiliumIngressSpec{
Enabled: fi.PtrTo(true),
DefaultLoadBalancerMode: "bad-value",
@ -1033,7 +1033,7 @@ func Test_Validate_Cilium(t *testing.T) {
},
{
Cilium: kops.CiliumNetworkingSpec{
Version: "v1.13.5",
Version: "v1.14.3",
Ingress: &kops.CiliumIngressSpec{
Enabled: fi.PtrTo(true),
DefaultLoadBalancerMode: "dedicated",
@ -1042,7 +1042,7 @@ func Test_Validate_Cilium(t *testing.T) {
},
{
Cilium: kops.CiliumNetworkingSpec{
Version: "v1.13.5",
Version: "v1.14.3",
Hubble: &kops.HubbleSpec{
Enabled: fi.PtrTo(true),
},

View File

@ -40,7 +40,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error {
}
if c.Version == "" {
c.Version = "v1.13.5"
c.Version = "v1.14.3"
}
if c.EnableEndpointHealthChecking == nil {
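
The new default only applies when spec.networking.cilium.version is left empty; an explicit pin to 1.13.x survives this defaulting and is instead rejected by the validation change above. A compressed, compilable sketch of the pattern (CiliumSpec stands in for kops.CiliumNetworkingSpec):

    package main

    import "fmt"

    // CiliumSpec is a stand-in for kops.CiliumNetworkingSpec.
    type CiliumSpec struct {
        Version string
    }

    // defaultCiliumVersion mirrors the BuildOptions hunk above.
    func defaultCiliumVersion(c *CiliumSpec) {
        if c.Version == "" {
            c.Version = "v1.14.3"
        }
    }

    func main() {
        pinned := CiliumSpec{Version: "v1.13.5"}
        empty := CiliumSpec{}
        defaultCiliumVersion(&pinned)
        defaultCiliumVersion(&empty)
        fmt.Println(pinned.Version, empty.Version) // v1.13.5 v1.14.3
    }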

View File

@ -226,7 +226,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: disabled
version: v1.13.5
version: v1.14.3
nodeTerminationHandler:
cpuRequest: 50m
enableRebalanceDraining: false

View File

@ -105,8 +105,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: 33440a8acbacd86a9b5cd6c44eabf93e591e6cdfd0245feae791b75ddc579a3c
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: 5e9537b8396c3b141b62b590b619883148d224463969f32eddd9af9601e7b79e
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -39,6 +39,8 @@ data:
bpf-policy-map-max: "16384"
cgroup-root: /run/cilium/cgroupv2
cluster-name: default
cni-exclusive: "true"
cni-log-file: /var/run/cilium/cilium-cni.log
debug: "false"
disable-cnp-status-updates: "true"
disable-endpoint-crd: "false"
@ -57,14 +59,18 @@ data:
identity-change-grace-period: 5s
install-iptables-rules: "true"
ipam: kubernetes
kube-proxy-replacement: partial
kube-proxy-replacement: "false"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
remove-cilium-node-taints: "true"
routing-mode: native
set-cilium-is-up-condition: "true"
set-cilium-node-taints: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
tofqdns-enable-poller: "false"
tunnel: disabled
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
kind: ConfigMap
metadata:
creationTimestamp: null
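
Two Cilium 1.14 renames surface in this ConfigMap: kube-proxy-replacement drops the deprecated partial/strict modes in favor of plain "true"/"false", and the old tunnel key gives way to routing-mode, with tunnel-protocol carrying the encapsulation where tunneling is kept. A hedged sketch of the value mapping the regenerated manifests apply (helper name is hypothetical):

    package main

    import "fmt"

    // legacyKPRToBool maps pre-1.14 kube-proxy-replacement modes onto the
    // boolean strings Cilium 1.14 expects. Illustrative only.
    func legacyKPRToBool(mode string) string {
        switch mode {
        case "strict", "true":
            return "true" // full kube-proxy replacement
        default: // "partial", "probe", "disabled", "false"
            return "false"
        }
    }

    func main() {
        fmt.Println(legacyKPRToBool("partial")) // "false", as in this file
        fmt.Println(legacyKPRToBool("strict"))  // "true", as in the scw-minimal file below
    }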
@ -84,6 +90,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
rules:
@ -131,7 +138,6 @@ rules:
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
@ -139,6 +145,10 @@ rules:
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
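
(The RBAC hunks across this file track the Cilium 1.14 CRD set: ciliumegressnatpolicies is dropped, while ciliumnodeconfigs, ciliumcidrgroups, ciliuml2announcementpolicies, and ciliumpodippools are newly listed, with ciliuml2announcementpolicies/status also becoming patchable.)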
@ -178,6 +188,7 @@ rules:
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
verbs:
- patch
@ -190,6 +201,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
rules:
@ -201,6 +213,21 @@ rules:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -212,8 +239,16 @@ rules:
- apiGroups:
- ""
resources:
- nodes
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
@ -221,7 +256,6 @@ rules:
resources:
- services
- endpoints
- namespaces
verbs:
- get
- list
@ -309,7 +343,6 @@ rules:
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
@ -318,6 +351,10 @@ rules:
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
resources:
- customresourcedefinitions
verbs:
@ -326,10 +363,17 @@ rules:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
@ -354,6 +398,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
roleRef:
@ -374,6 +419,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
roleRef:
@ -387,6 +433,51 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -394,6 +485,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
@ -408,6 +501,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kops.k8s.io/managed-by: kops
kubernetes.io/cluster-service: "true"
@ -455,14 +550,9 @@ spec:
value: api.internal.minimal-ipv6.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.13.5
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
- --cni-exclusive=true
preStop:
exec:
command:
@ -481,6 +571,7 @@ spec:
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
ports: null
readinessProbe:
failureThreshold: 3
httpGet:
@ -491,7 +582,6 @@ spec:
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
@ -512,12 +602,14 @@ spec:
port: 9879
scheme: HTTP
periodSeconds: 2
successThreshold: null
successThreshold: 1
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/etc/cni/net.d
@ -533,25 +625,78 @@ spec:
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /tmp
name: tmp
hostNetwork: true
initContainers:
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.13.5
- cilium
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: api.internal.minimal-ipv6.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
name: config
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: mount-cgroup
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- /init-container.sh
@ -568,26 +713,43 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: quay.io/cilium/cilium:v1.13.5
- name: KUBERNETES_SERVICE_HOST
value: api.internal.minimal-ipv6.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
@ -596,6 +758,8 @@ spec:
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
@ -604,14 +768,18 @@ spec:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /run/cilium/cgroupv2
type: Directory
name: cilium-cgroup
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
@ -624,10 +792,22 @@ spec:
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- configMap:
name: cilium-config
name: cilium-config-path
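
The clustermesh-secrets volume also changes shape above: a plain secret mount becomes a projected volume that can additionally surface the clustermesh-apiserver-remote-cert client certificate, and defaultMode tightens from 420 to 256. Kubernetes serializes these modes in decimal, which is easy to misread; a quick octal check:

    package main

    import "fmt"

    func main() {
        // Kubernetes volume modes are decimal in the API.
        fmt.Printf("420 = 0%o (rw for owner, read for group/other)\n", 420) // 0644
        fmt.Printf("256 = 0%o (read for owner only)\n", 256)                // 0400
    }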
@ -643,6 +823,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
@ -663,6 +845,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
kops.k8s.io/managed-by: kops
name: cilium-operator
@ -705,11 +889,11 @@ spec:
value: api.internal.minimal-ipv6.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/operator:v1.13.5
image: quay.io/cilium/operator:v1.14.3
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
host: 127.0.0.1
host: ::1
path: /healthz
port: 9234
scheme: HTTP
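
(This fixture is the IPv6-only cluster, hence the operator liveness probe host moving from 127.0.0.1 to the IPv6 loopback ::1.)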

View File

@ -944,7 +944,7 @@ resource "aws_s3_object" "minimal-ipv6-example-com-addons-limit-range-addons-k8s
resource "aws_s3_object" "minimal-ipv6-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml"
key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml"
provider = aws.files
server_side_encryption = "AES256"
}

View File

@ -151,7 +151,7 @@ ConfigServer:
- https://kops-controller.internal.minimal-warmpool.example.com:3988/
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: ixr/jHtjzunYpmsBwkCwqeEL1lBBh7cpOWEMPi1HAvA=
NodeupConfigHash: Ic8Yx6WnZ6jJljDBQI2bf2kvkOboul3gKc16oTpHlDI=
__EOF_KUBE_ENV
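
The NodeupConfigHash churn is expected rather than incidental: by its shape (44 base64 characters, i.e. 32 bytes) the value appears to be a base64-encoded SHA-256 digest of the rendered nodeup config, so the Cilium image bump in this fixture's warm-pool list changes it. An illustrative sketch, with the payload as a placeholder since the exact serialization is kops-internal:

    package main

    import (
        "crypto/sha256"
        "encoding/base64"
        "fmt"
    )

    func main() {
        config := []byte("...rendered nodeup config...") // placeholder payload
        sum := sha256.Sum256(config)
        // Prints a 44-character digest shaped like the hash above.
        fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
    }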

View File

@ -218,7 +218,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: vxlan
version: v1.13.5
version: v1.14.3
nodeTerminationHandler:
cpuRequest: 50m
enableRebalanceDraining: false

View File

@ -98,8 +98,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: 3ce725cc07a4344fb82f4666145c6dd4070d10217a9bf43939bada12094cce95
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: d939c9dc17f34da1cf748f890373d6c0d474f5a08e022306c2feaa9f116f2781
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -39,6 +39,8 @@ data:
bpf-policy-map-max: "16384"
cgroup-root: /run/cilium/cgroupv2
cluster-name: default
cni-exclusive: "true"
cni-log-file: /var/run/cilium/cilium-cni.log
debug: "false"
disable-cnp-status-updates: "true"
disable-endpoint-crd: "false"
@ -57,14 +59,19 @@ data:
identity-change-grace-period: 5s
install-iptables-rules: "true"
ipam: kubernetes
kube-proxy-replacement: partial
kube-proxy-replacement: "false"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
remove-cilium-node-taints: "true"
routing-mode: tunnel
set-cilium-is-up-condition: "true"
set-cilium-node-taints: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
tofqdns-enable-poller: "false"
tunnel: vxlan
tunnel-protocol: vxlan
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
kind: ConfigMap
metadata:
creationTimestamp: null
@ -84,6 +91,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
rules:
@ -131,7 +139,6 @@ rules:
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
@ -139,6 +146,10 @@ rules:
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
@ -178,6 +189,7 @@ rules:
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
verbs:
- patch
@ -190,6 +202,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
rules:
@ -201,6 +214,21 @@ rules:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -212,8 +240,16 @@ rules:
- apiGroups:
- ""
resources:
- nodes
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
@ -221,7 +257,6 @@ rules:
resources:
- services
- endpoints
- namespaces
verbs:
- get
- list
@ -309,7 +344,6 @@ rules:
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
@ -318,6 +352,10 @@ rules:
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
resources:
- customresourcedefinitions
verbs:
@ -326,10 +364,17 @@ rules:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
@ -354,6 +399,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
roleRef:
@ -374,6 +420,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
roleRef:
@ -387,6 +434,51 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -394,6 +486,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
@ -408,6 +502,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kops.k8s.io/managed-by: kops
kubernetes.io/cluster-service: "true"
@ -455,14 +551,9 @@ spec:
value: api.internal.minimal-warmpool.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.13.5
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
- --cni-exclusive=true
preStop:
exec:
command:
@ -481,6 +572,7 @@ spec:
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
ports: null
readinessProbe:
failureThreshold: 3
httpGet:
@ -491,7 +583,6 @@ spec:
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
@ -512,12 +603,14 @@ spec:
port: 9879
scheme: HTTP
periodSeconds: 2
successThreshold: null
successThreshold: 1
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/etc/cni/net.d
@ -533,25 +626,78 @@ spec:
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /tmp
name: tmp
hostNetwork: true
initContainers:
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.13.5
- cilium
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: api.internal.minimal-warmpool.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
name: config
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: mount-cgroup
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- /init-container.sh
@ -568,26 +714,43 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: quay.io/cilium/cilium:v1.13.5
- name: KUBERNETES_SERVICE_HOST
value: api.internal.minimal-warmpool.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
@ -596,6 +759,8 @@ spec:
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
@ -604,14 +769,18 @@ spec:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /run/cilium/cgroupv2
type: Directory
name: cilium-cgroup
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
@ -624,10 +793,22 @@ spec:
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- configMap:
name: cilium-config
name: cilium-config-path
@ -643,6 +824,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
@ -663,6 +846,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
kops.k8s.io/managed-by: kops
name: cilium-operator
@ -705,7 +890,7 @@ spec:
value: api.internal.minimal-warmpool.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/operator:v1.13.5
image: quay.io/cilium/operator:v1.14.3
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@ -60,8 +60,8 @@ containerdConfig:
usesLegacyGossip: false
usesNoneDNS: false
warmPoolImages:
- quay.io/cilium/cilium:v1.13.5
- quay.io/cilium/operator:v1.13.5
- quay.io/cilium/cilium:v1.14.3
- quay.io/cilium/operator:v1.14.3
- registry.k8s.io/kube-proxy:v1.26.0
- registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1
- registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.6
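
(These images are pre-pulled onto warm-pool instances at provision time, which is why the Cilium bump also surfaces in this nodeup config and, transitively, in the NodeupConfigHash change earlier in this PR.)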

View File

@ -712,7 +712,7 @@ resource "aws_s3_object" "minimal-warmpool-example-com-addons-limit-range-addons
resource "aws_s3_object" "minimal-warmpool-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml"
key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml"
provider = aws.files
server_side_encryption = "AES256"
}

View File

@ -199,7 +199,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: vxlan
version: v1.13.5
version: v1.14.3
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://tests/scw-minimal.k8s.local/secrets

View File

@ -54,8 +54,8 @@ spec:
k8s-addon: scaleway-csi-driver.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: 0965eae063f29669172d217374bc812d27eab79b5e2daeeda759de9ba7fdfeb6
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: 5ecc7aca559459d06d6474991ec3b6e034f75834c971213fec620aeafe31eb71
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -39,6 +39,8 @@ data:
bpf-policy-map-max: "16384"
cgroup-root: /run/cilium/cgroupv2
cluster-name: default
cni-exclusive: "true"
cni-log-file: /var/run/cilium/cilium-cni.log
debug: "false"
disable-cnp-status-updates: "true"
disable-endpoint-crd: "false"
@ -57,14 +59,19 @@ data:
identity-change-grace-period: 5s
install-iptables-rules: "true"
ipam: kubernetes
kube-proxy-replacement: strict
kube-proxy-replacement: "true"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
remove-cilium-node-taints: "true"
routing-mode: tunnel
set-cilium-is-up-condition: "true"
set-cilium-node-taints: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
tofqdns-enable-poller: "false"
tunnel: vxlan
tunnel-protocol: vxlan
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
kind: ConfigMap
metadata:
creationTimestamp: null
@ -84,6 +91,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
rules:
@ -131,7 +139,6 @@ rules:
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
@ -139,6 +146,10 @@ rules:
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
@ -178,6 +189,7 @@ rules:
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
verbs:
- patch
@ -190,6 +202,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
rules:
@ -201,6 +214,21 @@ rules:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -212,8 +240,16 @@ rules:
- apiGroups:
- ""
resources:
- nodes
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
@ -221,7 +257,6 @@ rules:
resources:
- services
- endpoints
- namespaces
verbs:
- get
- list
@ -309,7 +344,6 @@ rules:
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
@ -318,6 +352,10 @@ rules:
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
resources:
- customresourcedefinitions
verbs:
@ -326,10 +364,17 @@ rules:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
@ -354,6 +399,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
roleRef:
@ -374,6 +420,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
roleRef:
@ -387,6 +434,51 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -394,6 +486,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
@ -408,6 +502,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kops.k8s.io/managed-by: kops
kubernetes.io/cluster-service: "true"
@ -455,14 +551,9 @@ spec:
value: api.internal.scw-minimal.k8s.local
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.13.5
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
- --cni-exclusive=true
preStop:
exec:
command:
@ -481,6 +572,7 @@ spec:
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
ports: null
readinessProbe:
failureThreshold: 3
httpGet:
@ -491,7 +583,6 @@ spec:
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
@ -512,12 +603,14 @@ spec:
port: 9879
scheme: HTTP
periodSeconds: 2
successThreshold: null
successThreshold: 1
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/etc/cni/net.d
@ -533,25 +626,78 @@ spec:
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /tmp
name: tmp
hostNetwork: true
initContainers:
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.13.5
- cilium
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: api.internal.scw-minimal.k8s.local
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
name: config
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: mount-cgroup
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- /init-container.sh
@ -568,26 +714,43 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: quay.io/cilium/cilium:v1.13.5
- name: KUBERNETES_SERVICE_HOST
value: api.internal.scw-minimal.k8s.local
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
@ -596,6 +759,8 @@ spec:
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
@ -604,14 +769,18 @@ spec:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /run/cilium/cgroupv2
type: Directory
name: cilium-cgroup
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
@ -624,10 +793,22 @@ spec:
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- configMap:
name: cilium-config
name: cilium-config-path
@ -643,6 +824,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
@ -663,6 +846,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
kops.k8s.io/managed-by: kops
name: cilium-operator
@ -705,7 +890,7 @@ spec:
value: api.internal.scw-minimal.k8s.local
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/operator:v1.13.5
image: quay.io/cilium/operator:v1.14.3
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@ -149,7 +149,7 @@ resource "aws_s3_object" "scw-minimal-k8s-local-addons-limit-range-addons-k8s-io
resource "aws_s3_object" "scw-minimal-k8s-local-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content")
key = "tests/scw-minimal.k8s.local/addons/networking.cilium.io/k8s-1.16-v1.13.yaml"
key = "tests/scw-minimal.k8s.local/addons/networking.cilium.io/k8s-1.16-v1.14.yaml"
provider = aws.files
server_side_encryption = "AES256"
}

View File

@ -220,7 +220,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: disabled
version: v1.13.5
version: v1.14.3
nodeTerminationHandler:
cpuRequest: 50m
enableRebalanceDraining: false

View File

@ -98,8 +98,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: f477be8a0899c266e8a71d80fc70ddd61b6564455ce75560b877d92c6f12a762
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: dc0ffacc5b54ff7ce6d48ad648b291624ae47bbd80cbdd5268f48bc866a6cf3e
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -29,7 +29,6 @@ data:
agent-health-port: "9879"
auto-create-cilium-node-resource: "true"
auto-direct-node-routes: "false"
blacklist-conflicting-routes: "false"
bpf-ct-global-any-max: "262144"
bpf-ct-global-tcp-max: "524288"
bpf-lb-algorithm: random
@ -41,6 +40,8 @@ data:
bpf-policy-map-max: "16384"
cgroup-root: /run/cilium/cgroupv2
cluster-name: default
cni-exclusive: "true"
cni-log-file: /var/run/cilium/cilium-cni.log
debug: "false"
disable-cnp-status-updates: "true"
disable-endpoint-crd: "false"
@ -56,18 +57,23 @@ data:
enable-remote-node-identity: "true"
enable-service-topology: "false"
enable-unreachable-routes: "false"
eni-tags: KubernetesCluster=privatecilium.example.com
identity-allocation-mode: crd
identity-change-grace-period: 5s
install-iptables-rules: "true"
ipam: eni
kube-proxy-replacement: partial
kube-proxy-replacement: "false"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
remove-cilium-node-taints: "true"
routing-mode: native
set-cilium-is-up-condition: "true"
set-cilium-node-taints: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
tofqdns-enable-poller: "false"
tunnel: disabled
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
kind: ConfigMap
metadata:
creationTimestamp: null
@ -87,6 +93,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
rules:
@ -134,7 +141,6 @@ rules:
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
@ -142,6 +148,10 @@ rules:
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
@ -181,6 +191,7 @@ rules:
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
verbs:
- patch
@ -193,6 +204,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
rules:
@ -204,6 +216,21 @@ rules:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -215,8 +242,16 @@ rules:
- apiGroups:
- ""
resources:
- nodes
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
@ -224,7 +259,6 @@ rules:
resources:
- services
- endpoints
- namespaces
verbs:
- get
- list
@ -312,7 +346,6 @@ rules:
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
@ -321,6 +354,10 @@ rules:
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
resources:
- customresourcedefinitions
verbs:
@ -329,10 +366,17 @@ rules:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
@ -357,6 +401,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
roleRef:
@ -377,6 +422,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
roleRef:
@ -390,6 +436,51 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -397,6 +488,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
@ -411,6 +504,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kops.k8s.io/managed-by: kops
kubernetes.io/cluster-service: "true"
@ -458,14 +553,34 @@ spec:
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.13.5
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
- --cni-exclusive=true
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
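
(This ENI-mode fixture is the one place a postStart hook survives: the generic /cni-install.sh hook is gone in 1.14, since CNI binaries are now installed by the install-cni-binaries init container and the config is written via write-cni-conf-when-ready, so all that remains here is flushing any AWS-SNAT-CHAIN rules left behind by aws-node.)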
@ -484,6 +599,7 @@ spec:
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
ports: null
readinessProbe:
failureThreshold: 3
httpGet:
@ -494,7 +610,6 @@ spec:
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
@ -515,12 +630,14 @@ spec:
port: 9879
scheme: HTTP
periodSeconds: 2
successThreshold: null
successThreshold: 1
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/etc/cni/net.d
@ -536,25 +653,78 @@ spec:
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /tmp
name: tmp
hostNetwork: true
initContainers:
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.13.5
- cilium
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
name: config
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: mount-cgroup
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- /init-container.sh
@ -571,26 +741,43 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: quay.io/cilium/cilium:v1.13.5
- name: KUBERNETES_SERVICE_HOST
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
@ -599,6 +786,8 @@ spec:
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
@ -607,14 +796,18 @@ spec:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /run/cilium/cgroupv2
type: Directory
name: cilium-cgroup
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
@ -627,10 +820,22 @@ spec:
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- configMap:
name: cilium-config
name: cilium-config-path
@ -646,6 +851,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
@ -666,6 +873,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
kops.k8s.io/managed-by: kops
name: cilium-operator
@ -708,7 +917,7 @@ spec:
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/operator:v1.13.5
image: quay.io/cilium/operator:v1.14.3
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@ -1032,7 +1032,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8
resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml"
key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml"
provider = aws.files
server_side_encryption = "AES256"
}

View File

@ -228,7 +228,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: vxlan
version: v1.13.5
version: v1.14.3
nodeTerminationHandler:
cpuRequest: 50m
enableRebalanceDraining: false

View File

@ -98,8 +98,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: 1bcf01aca5730c31ac7b86d72831968485235c479566cf6a26da17ede4f0c351
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: 0d92f3aaa5fcfde3239fba0d07f4d264580c460d4a5e9c2463f8e2b20434c479
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -39,6 +39,8 @@ data:
bpf-policy-map-max: "16384"
cgroup-root: /run/cilium/cgroupv2
cluster-name: default
cni-exclusive: "true"
cni-log-file: /var/run/cilium/cilium-cni.log
debug: "false"
disable-cnp-status-updates: "true"
disable-endpoint-crd: "false"
@ -57,14 +59,19 @@ data:
identity-change-grace-period: 5s
install-iptables-rules: "true"
ipam: kubernetes
kube-proxy-replacement: partial
kube-proxy-replacement: "false"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
remove-cilium-node-taints: "true"
routing-mode: tunnel
set-cilium-is-up-condition: "true"
set-cilium-node-taints: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
tofqdns-enable-poller: "false"
tunnel: vxlan
tunnel-protocol: vxlan
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
kind: ConfigMap
metadata:
creationTimestamp: null
@ -84,6 +91,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
rules:
@ -131,7 +139,6 @@ rules:
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
@ -139,6 +146,10 @@ rules:
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
@ -178,6 +189,7 @@ rules:
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
verbs:
- patch
@ -190,6 +202,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
rules:
@ -201,6 +214,21 @@ rules:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -212,8 +240,16 @@ rules:
- apiGroups:
- ""
resources:
- nodes
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
@ -221,7 +257,6 @@ rules:
resources:
- services
- endpoints
- namespaces
verbs:
- get
- list
@ -309,7 +344,6 @@ rules:
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
@ -318,6 +352,10 @@ rules:
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
resources:
- customresourcedefinitions
verbs:
@ -326,10 +364,17 @@ rules:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
@ -354,6 +399,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
roleRef:
@ -374,6 +420,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
roleRef:
@ -387,6 +434,51 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -394,6 +486,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
@ -412,6 +506,8 @@ spec:
test3: awesome
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kops.k8s.io/managed-by: kops
kubernetes.io/cluster-service: "true"
@ -459,14 +555,9 @@ spec:
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.13.5
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
- --cni-exclusive=true
preStop:
exec:
command:
@ -485,6 +576,7 @@ spec:
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
ports: null
readinessProbe:
failureThreshold: 3
httpGet:
@ -495,7 +587,6 @@ spec:
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
@ -516,12 +607,14 @@ spec:
port: 9879
scheme: HTTP
periodSeconds: 2
successThreshold: null
successThreshold: 1
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/etc/cni/net.d
@ -537,25 +630,78 @@ spec:
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /tmp
name: tmp
hostNetwork: true
initContainers:
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.13.5
- cilium
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
name: config
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: mount-cgroup
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- /init-container.sh
@ -572,26 +718,43 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: quay.io/cilium/cilium:v1.13.5
- name: KUBERNETES_SERVICE_HOST
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
@ -600,6 +763,8 @@ spec:
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
@ -608,14 +773,18 @@ spec:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /run/cilium/cgroupv2
type: Directory
name: cilium-cgroup
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
@ -628,10 +797,22 @@ spec:
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- configMap:
name: cilium-config
name: cilium-config-path
@ -647,6 +828,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
@ -671,6 +854,8 @@ spec:
test3: cilium-operator
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
kops.k8s.io/managed-by: kops
name: cilium-operator
@ -713,7 +898,7 @@ spec:
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/operator:v1.13.5
image: quay.io/cilium/operator:v1.14.3
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
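
The agent DaemonSet above gains the 1.14 init-container chain: config runs cilium build-config to resolve the node's effective configuration from the cilium-config ConfigMap, which is what the new cilium-config-agent Role and RoleBinding grant read access for. mount-cgroup uses nsenter against the host namespaces to mount cgroup v2 at /run/cilium/cgroupv2, and apply-sysctl-overwrites applies host sysctls the same way. The /cni-install.sh postStart hook is gone: CNI config handling moved into the agent via the cni-exclusive and write-cni-conf-when-ready ConfigMap keys, while the plugin binary copy stays in the init container now named install-cni-binaries. A quick host-side check that the cgroup mount succeeded (a sketch, assuming shell access to a node):

$ stat -fc %T /run/cilium/cgroupv2
cgroup2fs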

View File

@ -1032,7 +1032,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8
resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml"
key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml"
provider = aws.files
server_side_encryption = "AES256"
}

View File

@ -229,7 +229,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: vxlan
version: v1.13.5
version: v1.14.3
nodeTerminationHandler:
cpuRequest: 50m
enableRebalanceDraining: false

View File

@ -161,8 +161,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: ba5c764f4ddeb058c0dc7fd9287d445a6a3e8f186dbac9d63daf56770d81c24c
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: 3d77641c2e3c89adfb55bf74f41d865e0af29ff859eb75b2795cbe915d73b827
name: networking.cilium.io
needsPKI: true
needsRollingUpdate: all

View File

@ -53,6 +53,8 @@ data:
cgroup-root: /run/cilium/cgroupv2
cluster-id: "253"
cluster-name: privatecilium.example.com
cni-exclusive: "true"
cni-log-file: /var/run/cilium/cilium-cni.log
debug: "false"
disable-cnp-status-updates: "true"
disable-endpoint-crd: "false"
@ -87,14 +89,19 @@ data:
ingress-shared-lb-service-name: private-ingress
install-iptables-rules: "true"
ipam: kubernetes
kube-proxy-replacement: partial
kube-proxy-replacement: "false"
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
remove-cilium-node-taints: "true"
routing-mode: tunnel
set-cilium-is-up-condition: "true"
set-cilium-node-taints: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
tofqdns-enable-poller: "false"
tunnel: vxlan
tunnel-protocol: vxlan
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
kind: ConfigMap
metadata:
creationTimestamp: null
@ -109,8 +116,9 @@ metadata:
apiVersion: v1
data:
config.yaml: |
peer-service: unix:///var/run/cilium/hubble.sock
config.yaml: |-
cluster-name: "privatecilium.example.com"
peer-service: "hubble-peer.kube-system.svc.cluster.local:443"
listen-address: :4245
disable-server-tls: true
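
hubble-relay no longer reads peers over the host unix socket: it now dials the hubble-peer Service added just below, which fronts port 443 onto hostPort 4244 on every agent (the peer-service container port added to the cilium DaemonSet later in this file). That is also why the relay's hostPath socket volume and the podAffinity rule pinning it to agent nodes are dropped further down. To confirm the wiring on a live cluster (a sketch, assuming kubectl access), every node running an agent should show up as an endpoint:

$ kubectl -n kube-system get service,endpoints hubble-peer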
@ -130,6 +138,31 @@ metadata:
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: hubble-peer
app.kubernetes.io/part-of: cilium
k8s-app: cilium
role.kubernetes.io/networking: "1"
name: hubble-peer
namespace: kube-system
spec:
internalTrafficPolicy: Local
ports:
- name: peer-service
port: 443
protocol: TCP
targetPort: 4244
selector:
k8s-app: cilium
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@ -137,6 +170,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
rules:
@ -184,7 +218,6 @@ rules:
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
@ -192,6 +225,10 @@ rules:
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
@ -231,6 +268,7 @@ rules:
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
verbs:
- patch
@ -243,6 +281,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
rules:
@ -254,6 +293,21 @@ rules:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -265,8 +319,16 @@ rules:
- apiGroups:
- ""
resources:
- nodes
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
@ -274,7 +336,6 @@ rules:
resources:
- services
- endpoints
- namespaces
verbs:
- get
- list
@ -362,7 +423,6 @@ rules:
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
@ -371,6 +431,10 @@ rules:
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
resources:
- customresourcedefinitions
verbs:
@ -379,10 +443,17 @@ rules:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
@ -422,6 +493,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
roleRef:
@ -442,6 +514,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
roleRef:
@ -455,6 +528,51 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@ -567,6 +685,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
k8s-app: hubble-relay
role.kubernetes.io/networking: "1"
name: hubble-relay
@ -589,6 +709,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
@ -603,6 +725,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kops.k8s.io/managed-by: kops
kubernetes.io/cluster-service: "true"
@ -650,14 +774,9 @@ spec:
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.13.5
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
- --cni-exclusive=true
preStop:
exec:
command:
@ -676,6 +795,11 @@ spec:
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
ports:
- containerPort: 4244
hostPort: 4244
name: peer-service
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
@ -686,7 +810,6 @@ spec:
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
@ -707,12 +830,14 @@ spec:
port: 9879
scheme: HTTP
periodSeconds: 2
successThreshold: null
successThreshold: 1
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/etc/cni/net.d
@ -728,28 +853,81 @@ spec:
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /tmp
name: tmp
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
hostNetwork: true
initContainers:
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.13.5
- cilium
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
name: config
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: mount-cgroup
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- /init-container.sh
@ -766,26 +944,43 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: quay.io/cilium/cilium:v1.13.5
- name: KUBERNETES_SERVICE_HOST
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
@ -794,6 +989,8 @@ spec:
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
@ -802,14 +999,18 @@ spec:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /run/cilium/cgroupv2
type: Directory
name: cilium-cgroup
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
@ -822,17 +1023,32 @@ spec:
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- configMap:
name: cilium-config
name: cilium-config-path
- name: hubble-tls
secret:
optional: true
secretName: hubble-server-certs
projected:
defaultMode: 256
sources:
- secret:
name: hubble-server-certs
optional: true
updateStrategy:
type: OnDelete
@ -845,6 +1061,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
@ -865,6 +1083,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
kops.k8s.io/managed-by: kops
name: cilium-operator
@ -907,7 +1127,7 @@ spec:
value: api.internal.privatecilium.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/operator:v1.13.5
image: quay.io/cilium/operator:v1.14.3
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@ -965,6 +1185,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
k8s-app: hubble-relay
role.kubernetes.io/networking: "1"
name: hubble-relay
@ -982,20 +1204,24 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
k8s-app: hubble-relay
kops.k8s.io/managed-by: kops
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
containers:
- args:
- serve
- --peer-service=unix:///var/run/cilium/hubble.sock
- --listen-address=:4245
command:
- hubble-relay
env:
- name: GODEBUG
value: x509ignoreCN=0
image: quay.io/cilium/hubble-relay:v1.13.5
image: quay.io/cilium/hubble-relay:v1.14.3
imagePullPolicy: IfNotPresent
livenessProbe:
tcpSocket:
@ -1007,11 +1233,15 @@ spec:
readinessProbe:
tcpSocket:
port: grpc
securityContext:
capabilities:
drop:
- ALL
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/run/cilium
name: hubble-sock-dir
readOnly: true
- mountPath: /etc/hubble-relay
name: config
readOnly: true
@ -1019,6 +1249,8 @@ spec:
name: tls
readOnly: true
restartPolicy: Always
securityContext:
fsGroup: 65532
serviceAccount: hubble-relay
serviceAccountName: hubble-relay
terminationGracePeriodSeconds: 0
@ -1036,10 +1268,6 @@ spec:
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- hostPath:
path: /var/run/cilium
type: Directory
name: hubble-sock-dir
- configMap:
items:
- key: config.yaml
@ -1048,6 +1276,7 @@ spec:
name: config
- name: tls
projected:
defaultMode: 256
sources:
- secret:
items:
@ -1089,6 +1318,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
k8s-app: cilium
role.kubernetes.io/networking: "1"
name: hubble-relay-client-certs
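
Secret-backed volumes in this manifest also tighten up: the clustermesh and TLS secrets move into projected volumes with defaultMode 256 instead of 420. Kubernetes takes these modes as decimal, so 420 is the familiar 0644 while 256 is owner-read-only 0400, which pairs with hubble-relay now running as non-root (runAsUser and fsGroup 65532) with all capabilities dropped. A one-liner to double-check the octal conversion:

$ printf '%o %o\n' 420 256
644 400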

View File

@ -1048,7 +1048,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8
resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml"
key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml"
provider = aws.files
server_side_encryption = "AES256"
}

View File

@ -232,7 +232,7 @@ spec:
sidecarIstioProxyImage: cilium/istio_proxy
toFqdnsDnsRejectResponseCode: refused
tunnel: disabled
version: v1.13.5
version: v1.14.3
nodeTerminationHandler:
cpuRequest: 50m
enableRebalanceDraining: false

View File

@ -98,8 +98,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: c6b553b26348b9bda91297615c885dfeb20ec41a56cdeedcf433255bd62d4d58
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: 1dc85a0c4d6148f60695875f169977272f69564eb1ee8a5cf6c4c7687376449d
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -29,7 +29,6 @@ data:
agent-health-port: "9879"
auto-create-cilium-node-resource: "true"
auto-direct-node-routes: "false"
blacklist-conflicting-routes: "false"
bpf-ct-global-any-max: "262144"
bpf-ct-global-tcp-max: "524288"
bpf-lb-algorithm: random
@ -41,6 +40,8 @@ data:
bpf-policy-map-max: "16384"
cgroup-root: /run/cilium/cgroupv2
cluster-name: default
cni-exclusive: "true"
cni-log-file: /var/run/cilium/cilium-cni.log
debug: "false"
disable-cnp-status-updates: "true"
disable-endpoint-crd: "false"
@ -57,6 +58,7 @@ data:
enable-remote-node-identity: "true"
enable-service-topology: "false"
enable-unreachable-routes: "false"
eni-tags: KubernetesCluster=privateciliumadvanced.example.com
etcd-config: |-
---
endpoints:
@ -69,16 +71,20 @@ data:
identity-change-grace-period: 5s
install-iptables-rules: "true"
ipam: eni
kube-proxy-replacement: strict
kube-proxy-replacement: "true"
kvstore: etcd
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
monitor-aggregation: medium
nodes-gc-interval: 5m0s
preallocate-bpf-maps: "false"
remove-cilium-node-taints: "true"
routing-mode: native
set-cilium-is-up-condition: "true"
set-cilium-node-taints: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
tofqdns-dns-reject-response-code: refused
tofqdns-enable-poller: "false"
tunnel: disabled
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
kind: ConfigMap
metadata:
creationTimestamp: null
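
In this ENI/etcd fixture the stricter mode maps the other way: kube-proxy-replacement: strict becomes "true", tunnel: disabled becomes routing-mode: native, and the new eni-tags key has the operator tag the ENIs it creates with the cluster name. To see which replacement mode an agent actually runs with, the in-agent CLI can be queried (a sketch, assuming exec access; the exact status wording varies by version):

$ kubectl -n kube-system exec ds/cilium -c cilium-agent -- \
    cilium status | grep -i KubeProxyReplacement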
@ -98,6 +104,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
rules:
@ -145,7 +152,6 @@ rules:
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
@ -153,6 +159,10 @@ rules:
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
@ -192,6 +202,7 @@ rules:
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
verbs:
- patch
@ -204,6 +215,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
rules:
@ -215,6 +227,21 @@ rules:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@ -226,8 +253,16 @@ rules:
- apiGroups:
- ""
resources:
- nodes
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
@ -235,7 +270,6 @@ rules:
resources:
- services
- endpoints
- namespaces
verbs:
- get
- list
@ -323,7 +357,6 @@ rules:
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
@ -332,6 +365,10 @@ rules:
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
resources:
- customresourcedefinitions
verbs:
@ -340,10 +377,17 @@ rules:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
@ -368,6 +412,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium
roleRef:
@ -388,6 +433,7 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-operator
roleRef:
@ -401,6 +447,51 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: cilium
role.kubernetes.io/networking: "1"
name: cilium-config-agent
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -408,6 +499,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
@ -422,6 +515,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
kops.k8s.io/managed-by: kops
kubernetes.io/cluster-service: "true"
@ -469,14 +564,34 @@ spec:
value: api.internal.privateciliumadvanced.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.13.5
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
- --cni-exclusive=true
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
@ -495,6 +610,7 @@ spec:
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
ports: null
readinessProbe:
failureThreshold: 3
httpGet:
@ -505,7 +621,6 @@ spec:
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
@ -526,12 +641,14 @@ spec:
port: 9879
scheme: HTTP
periodSeconds: 2
successThreshold: null
successThreshold: 1
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/etc/cni/net.d
@ -553,25 +670,78 @@ spec:
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /tmp
name: tmp
hostNetwork: true
initContainers:
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.13.5
- cilium
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: api.internal.privateciliumadvanced.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
name: config
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: mount-cgroup
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- /init-container.sh
@ -588,26 +758,43 @@ spec:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: quay.io/cilium/cilium:v1.13.5
- name: KUBERNETES_SERVICE_HOST
value: api.internal.privateciliumadvanced.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.14.3
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
@ -616,6 +803,8 @@ spec:
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
@ -624,14 +813,18 @@ spec:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /run/cilium/cgroupv2
type: Directory
name: cilium-cgroup
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
@ -644,7 +837,7 @@ spec:
type: FileOrCreate
name: xtables-lock
- configMap:
defaultMode: 420
defaultMode: 256
items:
- key: etcd-config
path: etcd.config
@ -655,10 +848,22 @@ spec:
type: Directory
name: etcd-secrets
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- configMap:
name: cilium-config
name: cilium-config-path
@ -674,6 +879,8 @@ metadata:
labels:
addon.kops.k8s.io/name: networking.cilium.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
@ -694,6 +901,8 @@ spec:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
kops.k8s.io/managed-by: kops
name: cilium-operator
@ -736,7 +945,7 @@ spec:
value: api.internal.privateciliumadvanced.example.com
- name: KUBERNETES_SERVICE_PORT
value: "443"
image: quay.io/cilium/operator:v1.13.5
image: quay.io/cilium/operator:v1.14.3
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
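
The postStart hook in this ENI fixture replaces the old /cni-install.sh call with a cleanup script: if aws-node already installed AWS-SNAT-CHAIN iptables rules, they can drop Cilium-routed traffic, so the hook filters them out of iptables-save and feeds the result back through iptables-restore. The same check works manually on a node when debugging (0 means no leftover rules; note that grep -c exits non-zero on a zero count):

$ iptables-save | grep -c AWS-SNAT-CHAIN
0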

View File

@ -1065,7 +1065,7 @@ resource "aws_s3_object" "privateciliumadvanced-example-com-addons-limit-range-a
resource "aws_s3_object" "privateciliumadvanced-example-com-addons-networking-cilium-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content")
key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml"
key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml"
provider = aws.files
server_side_encryption = "AES256"
}

View File

@ -35,7 +35,7 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error {
klog.Infof("found cilium (%q) in addons; won't use builtin", key)
} else {
id := "k8s-1.16"
location := key + "/" + id + "-v1.13.yaml"
location := key + "/" + id + "-v1.14.yaml"
addon := &api.AddonSpec{
Name: fi.PtrTo(key),

View File

@ -98,8 +98,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -112,8 +112,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -169,8 +169,8 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.13.yaml
manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab
manifest: networking.cilium.io/k8s-1.16-v1.14.yaml
manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183
name: networking.cilium.io
needsRollingUpdate: all
selector: