Merge pull request #8318 from johngmyers/trim-addons

Remove addons only applicable to unsupported versions of Kubernetes
Kubernetes Prow Robot authored on 2020-01-27 00:19:02 -08:00; committed by GitHub
commit 0c2c2e2e1f
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
21 changed files with 68 additions and 3552 deletions


@@ -1,39 +0,0 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: dns-controller
namespace: kube-system
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.0-alpha.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
template:
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.0-alpha.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]'
spec:
nodeSelector:
kubernetes.io/role: master
dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns)
hostNetwork: true
containers:
- name: dns-controller
image: kope/dns-controller:1.17.0-alpha.1
command:
{{ range $arg := DnsControllerArgv }}
- "{{ $arg }}"
{{ end }}
resources:
requests:
cpu: 50m
memory: 50Mi
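
The command block above is not literal YAML: kops renders these manifests through Go's text/template, and DnsControllerArgv (like ExternalDnsArgv in the following manifest) is a template function that kops supplies with the controller's argument list. Below is a minimal sketch of how that range expands into quoted YAML list items; the registered function and its flag values are placeholders, not kops' actual arguments.

```go
package main

import (
	"os"
	"text/template"
)

// Placeholder for the kops-provided DnsControllerArgv template function;
// the flag values below are invented purely for illustration.
func dnsControllerArgv() []string {
	return []string{
		"/usr/bin/dns-controller",
		"--watch-ingress=false",
		"--dns=aws-route53",
		"-v=2",
	}
}

// Mirrors the manifest's command block: range over the argv and emit one
// quoted list item per argument.
const commandBlock = `command:
{{ range $arg := DnsControllerArgv }}- "{{ $arg }}"
{{ end }}`

func main() {
	t := template.Must(template.New("dns-controller").
		Funcs(template.FuncMap{"DnsControllerArgv": dnsControllerArgv}).
		Parse(commandBlock))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```

Running the sketch prints `command:` followed by one `- "..."` line per argument, which is the shape the deleted manifest expected after rendering.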


@@ -1,39 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: external-dns
namespace: kube-system
labels:
k8s-addon: external-dns.addons.k8s.io
k8s-app: external-dns
version: v0.4.4
spec:
replicas: 1
selector:
matchLabels:
k8s-app: external-dns
template:
metadata:
labels:
k8s-addon: external-dns.addons.k8s.io
k8s-app: external-dns
version: v0.4.4
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]'
spec:
nodeSelector:
kubernetes.io/role: master
dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns)
hostNetwork: true
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:v0.4.4
args:
{{ range $arg := ExternalDnsArgv }}
- "{{ $arg }}"
{{ end }}
resources:
requests:
cpu: 50m
memory: 50Mi


@@ -1,226 +0,0 @@
# Vendored from https://github.com/aws/amazon-vpc-cni-k8s/blob/v1.3.3/config/v1.3/aws-k8s-cni.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: aws-node
rules:
- apiGroups:
- crd.k8s.amazonaws.com
resources:
- "*"
- namespaces
verbs:
- "*"
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs: ["list", "watch", "get"]
- apiGroups: ["extensions"]
resources:
- daemonsets
verbs: ["list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: aws-node
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: aws-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aws-node
subjects:
- kind: ServiceAccount
name: aws-node
namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: aws-node
namespace: kube-system
labels:
k8s-app: aws-node
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: aws-node
template:
metadata:
labels:
k8s-app: aws-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: aws-node
hostNetwork: true
tolerations:
- operator: Exists
containers:
- image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.3.3" }}"
ports:
- containerPort: 61678
name: metrics
name: aws-node
env:
- name: CLUSTER_NAME
value: {{ ClusterName }}
- name: AWS_VPC_K8S_CNI_LOGLEVEL
value: DEBUG
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /host/var/log
name: log-dir
- mountPath: /var/run/docker.sock
name: dockersock
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: log-dir
hostPath:
path: /var/log
- name: dockersock
hostPath:
path: /var/run/docker.sock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: eniconfigs.crd.k8s.amazonaws.com
spec:
scope: Cluster
group: crd.k8s.amazonaws.com
version: v1alpha1
names:
plural: eniconfigs
singular: eniconfig
kind: ENIConfig
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
name: k8s-ec2-srcdst
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs/ca-certificates.crt"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-certificates.crt"
nodeSelector:
node-role.kubernetes.io/master: ""
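
The aws-node image line above leans on the text/template `or` built-in, which returns its first non-empty argument: when the cluster spec leaves `.Networking.AmazonVPC.ImageName` empty, the pinned amazon-k8s-cni image is used instead. A minimal sketch of that defaulting idiom, with the struct types reduced to stand-ins for the relevant slice of the kops cluster spec:

```go
package main

import (
	"os"
	"text/template"
)

// Stand-ins for the relevant slice of the kops cluster spec.
type AmazonVPC struct{ ImageName string }
type Networking struct{ AmazonVPC AmazonVPC }
type Spec struct{ Networking Networking }

// Mirrors the DaemonSet's image line: `or` returns its first non-empty
// argument, so an empty ImageName falls back to the pinned default image.
const imageLine = `image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.3.3" }}"` + "\n"

func main() {
	t := template.Must(template.New("aws-node").Parse(imageLine))
	specs := []Spec{
		{}, // ImageName unset: the default image is rendered
		{Networking: Networking{AmazonVPC: AmazonVPC{ImageName: "example.com/cni:custom"}}}, // override wins
	}
	for _, spec := range specs {
		if err := t.Execute(os.Stdout, spec); err != nil {
			panic(err)
		}
	}
}
```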


@@ -1,109 +0,0 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: flannel
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
k8s-app: flannel
role.kubernetes.io/networking: "1"
data:
cni-conf.json: |
{
"name": "cbr0",
"type": "flannel",
"delegate": {
"forceAddress": true,
"isDefaultGateway": true
}
}
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "{{ FlannelBackendType }}"
}
}
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
k8s-app: flannel
role.kubernetes.io/networking: "1"
spec:
template:
metadata:
labels:
tier: node
app: flannel
role.kubernetes.io/networking: "1"
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
serviceAccountName: flannel
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- "/opt/bin/flanneld"
- "--ip-masq"
- "--kube-subnet-mgr"
- "--iptables-resync={{- or .Networking.Flannel.IptablesResyncSeconds "5" }}"
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
memory: 100Mi
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-amd64
command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
resources:
limits:
cpu: 10m
memory: 25Mi
requests:
cpu: 10m
memory: 25Mi
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg


@@ -1,40 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kopeio-networking-agent
namespace: kube-system
labels:
k8s-addon: networking.kope.io
role.kubernetes.io/networking: "1"
spec:
template:
metadata:
labels:
name: kopeio-networking-agent
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
hostPID: true
hostIPC: true
hostNetwork: true
containers:
- resources:
requests:
cpu: 50m
memory: 100Mi
limits:
memory: 100Mi
securityContext:
privileged: true
image: kopeio/networking-agent:1.0.20181028
name: networking-agent
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
volumes:
- name: lib-modules
hostPath:
path: /lib/modules


@@ -1,373 +0,0 @@
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: canal
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
# Allow the pod to run on all nodes. This is required
# for cluster communication
- effect: NoSchedule
operator: Exists
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.4.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,canal"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.10.0
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.9.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
# Calico Roles
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- thirdpartyresources
verbs:
- create
- get
- list
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["projectcalico.org"]
resources:
- globalbgppeers
verbs:
- get
- list
- apiGroups: ["projectcalico.org"]
resources:
- globalconfigs
- globalbgpconfigs
verbs:
- create
- get
- list
- update
- watch
- apiGroups: ["projectcalico.org"]
resources:
- ippools
verbs:
- create
- get
- list
- update
- watch
- apiGroups: ["alpha.projectcalico.org"]
resources:
- systemnetworkpolicies
verbs:
- get
- list
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system


@@ -1,456 +0,0 @@
# Canal w/ Calico Version v2.6.2
# https://docs.projectcalico.org/v2.6/releases#v2.6.2
# This manifest includes the following component versions:
# calico/node:v2.6.2
# calico/cni:v1.11.0
# coreos/flannel:v0.9.0 (bug with v0.9.1: https://github.com/kubernetes/kops/issues/4037)
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: canal
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
# Allow the pod to run on all nodes. This is required
# for cluster communication
- effect: NoSchedule
operator: Exists
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.7
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,canal"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 50m
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.2
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.9.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
resources:
limits:
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config
# Create all the CustomResourceDefinitions needed for
# Calico policy-only mode.
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalfelixconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalFelixConfig
plural: globalfelixconfigs
singular: globalfelixconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalbgpconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalBGPConfig
plural: globalbgpconfigs
singular: globalbgpconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
# Calico Roles
# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- bgppeers
- globalbgpconfigs
- ippools
- globalnetworkpolicies
verbs:
- create
- get
- list
- update
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# Bind the calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system


@@ -1,215 +0,0 @@
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key": "CriticalAddonsOnly", "operator": "Exists"}]
spec:
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.4.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,canal"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.10.0
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.9.1
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config


@@ -1,523 +0,0 @@
{{- $etcd_scheme := EtcdScheme }}
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# etcd servers
etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
{{- range $j, $member := $cluster.Members -}}
{{- if $j }},{{ end -}}
{{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
{{- end }}"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
{{- if eq $etcd_scheme "https" }}
"etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem",
"etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem",
"etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem",
"etcd_scheme": "https",
{{- end }}
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: calico
namespace: kube-system
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: calico
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.9
resources:
requests:
cpu: 10m
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: ETCD_CA_CERT_FILE
value: /certs/ca.pem
{{- end }}
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "{{ .KubeControllerManager.ClusterCIDR }}"
- name: CALICO_IPV4POOL_IPIP
value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,bgp"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Auto-detect the BGP IP address.
- name: IP
value: ""
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to the desired level
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
{{- if eq $etcd_scheme "https" }}
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.5
resources:
requests:
cpu: 10m
imagePullPolicy: Always
command: ["/install-cni.sh"]
env:
# The name of calico config file
- name: CNI_CONF_NAME
value: 10-calico.conf
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: etc-hosts
hostPath:
path: /etc/hosts
{{- if eq $etcd_scheme "https" }}
- name: calico
hostPath:
path: /srv/kubernetes/calico
{{- end }}
---
# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
# be removed entirely once the new kube-controllers deployment has been deployed above.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# Turn this deployment off in favor of the kube-controllers deployment above.
replicas: 0
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
hostNetwork: true
serviceAccountName: calico
containers:
- name: calico-policy-controller
# This shouldn't get updated, since this is the last version we shipped that should be used.
image: quay.io/calico/kube-policy-controller:v0.7.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: ETCD_CA_CERT_FILE
value: /certs/ca.pem
{{- end }}
volumeMounts:
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
{{- if eq $etcd_scheme "https" }}
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
volumes:
- name: etc-hosts
hostPath:
path: /etc/hosts
{{- if eq $etcd_scheme "https" }}
- name: calico
hostPath:
path: /srv/kubernetes/calico
{{- end }}
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
spec:
# The controllers can only have a single active instance.
replicas: 1
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
serviceAccountName: calico
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
containers:
- name: calico-kube-controllers
image: quay.io/calico/kube-controllers:v1.0.4
resources:
requests:
cpu: 10m
env:
# By default only policy, profile, workloadendpoint are turned
# on, node controller will decommission nodes that do not exist anymore
# this and CALICO_K8S_NODE_REF in calico-node fixes #3224, but invalid nodes that are
# already registered in calico needs to be deleted manually, see
# https://docs.projectcalico.org/v2.6/usage/decommissioning-a-node
- name: ENABLED_CONTROLLERS
value: policy,profile,workloadendpoint,node
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: ETCD_CA_CERT_FILE
value: /certs/ca.pem
volumeMounts:
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
volumes:
- name: etc-hosts
hostPath:
path: /etc/hosts
{{- if eq $etcd_scheme "https" }}
- name: calico
hostPath:
path: /srv/kubernetes/calico
{{- end }}
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the k8s-ec2-srcdst container, which disables
# src/dst ip checks to allow BGP to function for calico for hosts within subnets
# This only applies for AWS environments.
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
name: k8s-ec2-srcdst
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs/ca-certificates.crt"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-certificates.crt"
nodeSelector:
node-role.kubernetes.io/master: ""
{{- end -}}
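
The calico-config etcd_endpoints value above is a small comma-join written in text/template: `index .EtcdClusters 0` picks the main etcd cluster, and `{{ if $j }},{{ end }}` emits a separator before every member except the first, since index 0 is falsy. A minimal sketch under stand-in types, with EtcdScheme and ClusterName modeled as placeholder functions (kops registers the real ones, and the manifest stores EtcdScheme in the $etcd_scheme variable at the top of the file; the sketch calls the stand-in directly):

```go
package main

import (
	"os"
	"text/template"
)

// Stand-ins for the kops etcd cluster model referenced by the manifest.
type Member struct{ Name string }
type EtcdCluster struct{ Members []Member }
type Spec struct{ EtcdClusters []EtcdCluster }

// Mirrors the etcd_endpoints value: a comma is printed before every member
// except the first, producing a comma-separated endpoint list.
const endpoints = `etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
{{- range $j, $member := $cluster.Members -}}
{{- if $j }},{{ end -}}
{{ EtcdScheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
{{- end }}"
`

func main() {
	funcs := template.FuncMap{
		// Placeholder implementations; kops provides these template functions.
		"EtcdScheme":  func() string { return "https" },
		"ClusterName": func() string { return "example.cluster.k8s.local" },
	}
	t := template.Must(template.New("etcd-endpoints").Funcs(funcs).Parse(endpoints))
	spec := Spec{EtcdClusters: []EtcdCluster{{Members: []Member{{Name: "a"}, {Name: "b"}, {Name: "c"}}}}}
	if err := t.Execute(os.Stdout, spec); err != nil {
		panic(err)
	}
}
```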


@@ -1,272 +0,0 @@
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The calico-etcd PetSet service IP:port
etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
{{- range $j, $member := $cluster.Members -}}
{{- if $j }},{{ end -}}
http://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
{{- end }}"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.4.0
resources:
requests:
cpu: 10m
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,bgp"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "{{ .KubeControllerManager.ClusterCIDR }}"
- name: CALICO_IPV4POOL_IPIP
value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}"
# Auto-detect the BGP IP address.
- name: IP
value: ""
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.10.0
resources:
requests:
cpu: 10m
imagePullPolicy: Always
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
role.kubernetes.io/networking: "1"
spec:
# The policy controller can only have a single active instance.
replicas: 1
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy-controller
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.7.0
resources:
requests:
cpu: 10m
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
---
# This manifest installs the k8s-ec2-srcdst container, which disables
# src/dst ip checks to allow BGP to function for calico for hosts within subnets
# This only applies for AWS environments.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs/ca-certificates.crt"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-certificates.crt"
nodeSelector:
kubernetes.io/role: master
{{- end -}}
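
The k8s-ec2-srcdst Deployment at the end of both etcd-backed Calico manifests is gated on `{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) }}`, so it is rendered only for AWS clusters running Calico in cross-subnet mode. A minimal sketch of that boolean gating, with the cluster-spec fields reduced to stand-ins and the guarded Deployment replaced by a one-line comment:

```go
package main

import (
	"fmt"
	"strings"
	"text/template"
)

// Stand-ins for the cluster-spec fields the manifest consults.
type Calico struct{ CrossSubnet bool }
type Networking struct{ Calico Calico }
type Spec struct {
	CloudProvider string
	Networking    Networking
}

// Mirrors the guard around the k8s-ec2-srcdst Deployment: it is emitted only
// when the cloud provider is "aws" and Calico cross-subnet mode is enabled.
const guarded = `{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# k8s-ec2-srcdst Deployment would be rendered here
{{- end }}`

func main() {
	t := template.Must(template.New("srcdst").Parse(guarded))
	for _, spec := range []Spec{
		{CloudProvider: "aws", Networking: Networking{Calico: Calico{CrossSubnet: true}}},
		{CloudProvider: "gce", Networking: Networking{Calico: Calico{CrossSubnet: true}}},
		{CloudProvider: "aws", Networking: Networking{Calico: Calico{CrossSubnet: false}}},
	} {
		var out strings.Builder
		if err := t.Execute(&out, spec); err != nil {
			panic(err)
		}
		fmt.Printf("%s crossSubnet=%v -> %q\n", spec.CloudProvider, spec.Networking.Calico.CrossSubnet, out.String())
	}
}
```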


@@ -1,241 +0,0 @@
{{- if WeaveSecret }}
apiVersion: v1
kind: Secret
metadata:
name: weave-net
namespace: kube-system
stringData:
network-password: {{ WeaveSecret }}
---
{{- end }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- weave-net
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
spec:
template:
metadata:
labels:
name: weave-net
role.kubernetes.io/networking: "1"
annotations:
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: {{ .KubeControllerManager.ClusterCIDR }}
{{- if .Networking.Weave.MTU }}
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
{{- if .Networking.Weave.ConnLimit }}
- name: CONN_LIMIT
value: "{{ .Networking.Weave.ConnLimit }}"
{{- end }}
{{- if .Networking.Weave.NetExtraArgs }}
- name: EXTRA_ARGS
value: "{{ .Networking.Weave.NetExtraArgs }}"
{{- end }}
{{- if WeaveSecret }}
- name: WEAVE_PASSWORD
valueFrom:
secretKeyRef:
name: weave-net
key: network-password
{{- end }}
image: 'weaveworks/weave-kube:2.3.0'
ports:
- name: metrics
containerPort: 6782
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
resources:
requests:
cpu: {{ or .Networking.Weave.CPURequest "50m" }}
memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.CPULimit }}
cpu: {{ .Networking.Weave.CPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
args:
- '--use-legacy-netpol'
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.3.0'
ports:
- name: metrics
containerPort: 6781
resources:
requests:
cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }}
memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.NPCCPULimit }}
cpu: {{ .Networking.Weave.NPCCPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }}
securityContext:
privileged: true
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
updateStrategy:
type: RollingUpdate


@@ -1,258 +0,0 @@
{{- if WeaveSecret }}
apiVersion: v1
kind: Secret
metadata:
name: weave-net
namespace: kube-system
stringData:
network-password: {{ WeaveSecret }}
---
{{- end }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- 'networking.k8s.io'
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- nodes/status
verbs:
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- weave-net
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
spec:
# Wait 5 seconds to let pod connect before rolling next pod
minReadySeconds: 5
template:
metadata:
labels:
name: weave-net
role.kubernetes.io/networking: "1"
annotations:
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: {{ .KubeControllerManager.ClusterCIDR }}
{{- if .Networking.Weave.MTU }}
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
{{- if .Networking.Weave.ConnLimit }}
- name: CONN_LIMIT
value: "{{ .Networking.Weave.ConnLimit }}"
{{- end }}
{{- if .Networking.Weave.NetExtraArgs }}
- name: EXTRA_ARGS
value: "{{ .Networking.Weave.NetExtraArgs }}"
{{- end }}
{{- if WeaveSecret }}
- name: WEAVE_PASSWORD
valueFrom:
secretKeyRef:
name: weave-net
key: network-password
{{- end }}
image: 'weaveworks/weave-kube:2.6.0'
ports:
- name: metrics
containerPort: 6782
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
resources:
requests:
cpu: {{ or .Networking.Weave.CPURequest "50m" }}
memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.CPULimit }}
cpu: {{ .Networking.Weave.CPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-npc
args: []
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.6.0'
ports:
- name: metrics
containerPort: 6781
resources:
requests:
cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }}
memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.NPCCPULimit }}
cpu: {{ .Networking.Weave.NPCCPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: xtables-lock
mountPath: /run/xtables.lock
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
updateStrategy:
type: RollingUpdate


@ -1,129 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
labels:
name: weave-net
role.kubernetes.io/networking: "1"
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
labels:
name: weave-net
role.kubernetes.io/networking: "1"
namespace: kube-system
spec:
template:
metadata:
annotations:
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: >-
[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"},{"key":"CriticalAddonsOnly", "operator":"Exists"}]
labels:
name: weave-net
role.kubernetes.io/networking: "1"
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: {{ .KubeControllerManager.ClusterCIDR }}
{{- if .Networking.Weave.MTU }}
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
{{- if .Networking.Weave.ConnLimit }}
- name: CONN_LIMIT
value: "{{ .Networking.Weave.ConnLimit }}"
{{- end }}
image: 'weaveworks/weave-kube:2.3.0'
ports:
- name: metrics
containerPort: 6782
livenessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
initialDelaySeconds: 30
resources:
requests:
cpu: {{ or .Networking.Weave.CPURequest "50m" }}
memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.CPULimit }}
cpu: {{ .Networking.Weave.CPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
args:
- '--use-legacy-netpol'
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.3.0'
ports:
- name: metrics
containerPort: 6781
resources:
requests:
cpu: 50m
memory: 200Mi
limits:
memory: 200Mi
securityContext:
privileged: true
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules


@ -1,138 +0,0 @@
# ------------------------------------------
# Config Map
# ------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
name: spotinst-kubernetes-cluster-controller-config
namespace: kube-system
data:
spotinst.token: {{ SpotinstToken }}
spotinst.account: {{ SpotinstAccount }}
spotinst.cluster-identifier: {{ ClusterName }}
---
# ------------------------------------------
# Secret
# ------------------------------------------
apiVersion: v1
kind: Secret
metadata:
name: spotinst-kubernetes-cluster-controller-certs
namespace: kube-system
type: Opaque
---
# ------------------------------------------
# Service Account
# ------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
---
# ------------------------------------------
# Cluster Role
# ------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["pods", "nodes", "replicationcontrollers", "events", "limitranges", "services", "persistentvolumes", "persistentvolumeclaims", "namespaces"]
verbs: ["get", "delete", "list", "patch", "update"]
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get","list","patch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get","list"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles"]
verbs: ["patch", "update", "escalate"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["list"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["list"]
- nonResourceURLs: ["/version/", "/version"]
verbs: ["get"]
---
# ------------------------------------------
# Cluster Role Binding
# ------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: spotinst-kubernetes-cluster-controller
subjects:
- kind: ServiceAccount
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
---
# ------------------------------------------
# Deployment
# ------------------------------------------
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
template:
metadata:
labels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
spec:
containers:
- name: spotinst-kubernetes-cluster-controller
imagePullPolicy: Always
image: spotinst/kubernetes-cluster-controller:1.0.39
volumeMounts:
- name: spotinst-kubernetes-cluster-controller-certs
mountPath: /certs
livenessProbe:
httpGet:
path: /healthcheck
port: 4401
initialDelaySeconds: 300
periodSeconds: 30
env:
- name: SPOTINST_TOKEN
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.token
- name: SPOTINST_ACCOUNT
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.account
- name: CLUSTER_IDENTIFIER
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.cluster-identifier
volumes:
- name: spotinst-kubernetes-cluster-controller-certs
secret:
secretName: spotinst-kubernetes-cluster-controller-certs
serviceAccountName: spotinst-kubernetes-cluster-controller
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---


@ -1,24 +0,0 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: default
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: gp2
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2


@ -1,13 +0,0 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
k8s-addon: storage-gce.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard


@ -238,20 +238,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "kube-dns.addons.k8s.io"
version := "1.14.13-kops.1"
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
@ -261,7 +247,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -296,7 +282,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -345,12 +331,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
id := "k8s-1.8"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
}
@ -368,12 +353,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
id := "k8s-1.9"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.9.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
}
@ -398,20 +382,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "dns-controller.addons.k8s.io"
version := "1.17.0-alpha.1"
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
@ -421,7 +391,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -447,20 +417,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "external-dns.addons.k8s.io"
version := "0.4.4"
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
@ -470,7 +426,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -518,21 +474,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.15.0",
Id: id,
})
}
{
id := "v1.6.0"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.7.0",
KubernetesVersion: "<1.15.0",
Id: id,
})
}
@ -547,12 +489,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
}
@ -561,31 +502,16 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "storage-gce.addons.k8s.io"
version := "1.7.0"
{
id := "v1.6.0"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.7.0",
Id: id,
})
}
{
id := "v1.7.0"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
}
@ -593,33 +519,17 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if featureflag.Spotinst.Enabled() {
key := "spotinst-kubernetes-cluster-controller.addons.k8s.io"
{
id := "v1.8.0"
location := key + "/" + id + ".yaml"
version := "1.0.39"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.9.0",
Id: id,
})
}
{
id := "v1.9.0"
location := key + "/" + id + ".yaml"
version := "1.0.39"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.9.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
@ -662,20 +572,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "networking.kope.io"
version := "1.0.20181028-kops.1"
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
@ -685,7 +581,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -708,53 +604,8 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if b.cluster.Spec.Networking.Weave != nil {
key := "networking.weave"
versions := map[string]string{
"pre-k8s-1.6": "2.3.0-kops.3",
"k8s-1.6": "2.3.0-kops.3",
"k8s-1.7": "2.6.0-kops.2",
"k8s-1.8": "2.6.0-kops.2",
"k8s-1.12": "2.6.0-kops.3",
}
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id,
})
}
{
location := key + "/k8s-1.7.yaml"
id := "k8s-1.7"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.8.0",
Id: id,
})
"k8s-1.8": "2.6.0-kops.2",
"k8s-1.12": "2.6.0-kops.3",
}
{
@ -766,7 +617,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -789,23 +640,8 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if b.cluster.Spec.Networking.Flannel != nil {
key := "networking.flannel"
versions := map[string]string{
"pre-k8s-1.6": "0.11.0-kops.1",
"k8s-1.6": "0.11.0-kops.2",
"k8s-1.12": "0.11.0-kops.3",
}
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
"k8s-1.6": "0.11.0-kops.2",
"k8s-1.12": "0.11.0-kops.3",
}
{
@ -817,7 +653,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -840,12 +676,10 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if b.cluster.Spec.Networking.Calico != nil {
key := "networking.projectcalico.org"
versions := map[string]string{
"pre-k8s-1.6": "2.4.2-kops.1",
"k8s-1.6": "2.6.9-kops.1",
"k8s-1.7": "2.6.12-kops.1",
"k8s-1.7-v3": "3.8.0-kops.2",
"k8s-1.12": "3.9.3-kops.2",
"k8s-1.16": "3.10.2-kops.1",
"k8s-1.7": "2.6.12-kops.1",
"k8s-1.7-v3": "3.8.0-kops.2",
"k8s-1.12": "3.9.3-kops.2",
"k8s-1.16": "3.10.2-kops.1",
}
{
@ -886,39 +720,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
} else {
{
id := "pre-k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
id := "k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id,
})
}
{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"
@ -928,7 +734,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -938,53 +744,9 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if b.cluster.Spec.Networking.Canal != nil {
key := "networking.projectcalico.org.canal"
versions := map[string]string{
"pre-k8s-1.6": "2.4.2-kops.2",
"k8s-1.6": "2.4.2-kops.2",
"k8s-1.8": "2.6.7-kops.3",
"k8s-1.9": "3.2.3-kops.1",
"k8s-1.12": "3.7.4-kops.1",
"k8s-1.15": "3.10.2-kops.1",
}
{
id := "pre-k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
id := "k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.8.0",
Id: id,
})
}
{
id := "k8s-1.8"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.9.0",
Id: id,
})
"k8s-1.9": "3.2.3-kops.1",
"k8s-1.12": "3.7.4-kops.1",
"k8s-1.15": "3.10.2-kops.1",
}
{
id := "k8s-1.9"
@ -995,7 +757,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.9.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1040,7 +802,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1073,7 +835,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1097,26 +859,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "networking.amazon-vpc-routed-eni"
versions := map[string]string{
"k8s-1.7": "1.5.0-kops.1",
"k8s-1.8": "1.5.0-kops.1",
"k8s-1.10": "1.5.0-kops.1",
"k8s-1.12": "1.5.5-kops.1",
}
{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.8.0",
Id: id,
})
}
{
id := "k8s-1.8"
location := key + "/" + id + ".yaml"
@ -1126,7 +873,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.10.0",
KubernetesVersion: "<1.10.0",
Id: id,
})
}
@ -1173,7 +920,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1209,7 +956,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: authenticationSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1310,7 +1057,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
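
The KubernetesVersion strings being rewritten above ("<1.12.0", ">=1.9.0 <1.12.0", and so on) are semver range expressions that gate which manifest id is applied for a given cluster version. A minimal sketch of how such a range can be evaluated, assuming the blang/semver library; the matches helper is hypothetical and not the kops code path:

package main

import (
	"fmt"

	"github.com/blang/semver"
)

// matches reports whether clusterVersion satisfies a kubernetesVersion range
// expression such as "<1.12.0" or ">=1.9.0 <1.12.0" (a space means AND).
func matches(rangeExpr, clusterVersion string) (bool, error) {
	r, err := semver.ParseRange(rangeExpr)
	if err != nil {
		return false, err
	}
	v, err := semver.Parse(clusterVersion)
	if err != nil {
		return false, err
	}
	return r(v), nil
}

func main() {
	for _, v := range []string{"1.11.9", "1.12.0", "1.16.4"} {
		ok, _ := matches("<1.12.0", v)
		fmt.Printf("%-7s in <1.12.0: %v\n", v, ok)
	}
}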


@ -18,16 +18,8 @@ spec:
selector:
k8s-addon: core.addons.k8s.io
version: 1.4.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: 895c961cb9365cbedb22edd20a7648182ae7ed3f
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
name: kube-dns.addons.k8s.io
@ -43,7 +35,6 @@ spec:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0'
manifest: rbac.addons.k8s.io/k8s-1.8.yaml
manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914
name: rbac.addons.k8s.io
@ -51,7 +42,6 @@ spec:
k8s-addon: rbac.addons.k8s.io
version: 1.8.0
- id: k8s-1.9
kubernetesVersion: '>=1.9.0'
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745
name: kubelet-api.rbac.addons.k8s.io
@ -64,16 +54,8 @@ spec:
selector:
k8s-addon: limit-range.addons.k8s.io
version: 1.5.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 1.17.0-alpha.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01
name: dns-controller.addons.k8s.io
@ -97,31 +79,15 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.7.0
kubernetesVersion: '>=1.7.0 <1.15.0'
kubernetesVersion: <1.15.0
manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
manifestHash: 62705a596142e6cc283280e8aa973e51536994c5
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.6.0
kubernetesVersion: <1.7.0
manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
manifestHash: 7de4b2eb0521d669172038759c521418711d8266
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: k8s-1.7
kubernetesVersion: '>=1.7.0 <1.8.0'
manifest: networking.amazon-vpc-routed-eni/k8s-1.7.yaml
manifestHash: 394edf46a78e6d1f6dda920b0214afcd4ce34bc3
name: networking.amazon-vpc-routed-eni
selector:
role.kubernetes.io/networking: "1"
version: 1.5.0-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0 <1.10.0'
kubernetesVersion: <1.10.0
manifest: networking.amazon-vpc-routed-eni/k8s-1.8.yaml
manifestHash: 544fd24d754b32e8896dba6113f1053a4ba86694
name: networking.amazon-vpc-routed-eni
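
Each addon entry in these channel files pairs its manifest path with a manifestHash; the 40-character hex values are consistent with a SHA-1 digest of the manifest contents. A sketch under that assumption (the hashing scheme is inferred from the digest length, not confirmed by this diff):

package main

import (
	"crypto/sha1"
	"fmt"
	"os"
)

func main() {
	// Path taken from the channel entry above; substitute whichever
	// manifest is being hashed.
	data, err := os.ReadFile("networking.amazon-vpc-routed-eni/k8s-1.8.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Produces a 40-hex-character digest, matching the manifestHash format.
	fmt.Printf("%x\n", sha1.Sum(data))
}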


@ -18,16 +18,8 @@ spec:
selector:
k8s-addon: core.addons.k8s.io
version: 1.4.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: 90f1e4bedea6da183eb4c6788879f7297119ff3e
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
manifestHash: c74ca65f461c764fc9682c6d9ec171b241bec335
name: kube-dns.addons.k8s.io
@ -43,7 +35,6 @@ spec:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0'
manifest: rbac.addons.k8s.io/k8s-1.8.yaml
manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914
name: rbac.addons.k8s.io
@ -51,7 +42,6 @@ spec:
k8s-addon: rbac.addons.k8s.io
version: 1.8.0
- id: k8s-1.9
kubernetesVersion: '>=1.9.0'
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745
name: kubelet-api.rbac.addons.k8s.io
@ -64,16 +54,8 @@ spec:
selector:
k8s-addon: limit-range.addons.k8s.io
version: 1.5.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 1.17.0-alpha.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01
name: dns-controller.addons.k8s.io
@ -97,23 +79,15 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.7.0
kubernetesVersion: '>=1.7.0 <1.15.0'
kubernetesVersion: <1.15.0
manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
manifestHash: 62705a596142e6cc283280e8aa973e51536994c5
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.6.0
kubernetesVersion: <1.7.0
manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
manifestHash: 7de4b2eb0521d669172038759c521418711d8266
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: k8s-1.7
kubernetesVersion: '>=1.7.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: networking.cilium.io/k8s-1.7.yaml
manifestHash: 2d40b9ab7453b4a0a413196fae4c8bdcd62c69ce
name: networking.cilium.io


@ -18,16 +18,8 @@ spec:
selector:
k8s-addon: core.addons.k8s.io
version: 1.4.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: 895c961cb9365cbedb22edd20a7648182ae7ed3f
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
name: kube-dns.addons.k8s.io
@ -43,7 +35,6 @@ spec:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0'
manifest: rbac.addons.k8s.io/k8s-1.8.yaml
manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914
name: rbac.addons.k8s.io
@ -51,7 +42,6 @@ spec:
k8s-addon: rbac.addons.k8s.io
version: 1.8.0
- id: k8s-1.9
kubernetesVersion: '>=1.9.0'
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745
name: kubelet-api.rbac.addons.k8s.io
@ -64,16 +54,8 @@ spec:
selector:
k8s-addon: limit-range.addons.k8s.io
version: 1.5.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 1.17.0-alpha.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01
name: dns-controller.addons.k8s.io
@ -97,18 +79,10 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.7.0
kubernetesVersion: '>=1.7.0 <1.15.0'
kubernetesVersion: <1.15.0
manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
manifestHash: 62705a596142e6cc283280e8aa973e51536994c5
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.6.0
kubernetesVersion: <1.7.0
manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
manifestHash: 7de4b2eb0521d669172038759c521418711d8266
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0


@ -18,16 +18,8 @@ spec:
selector:
k8s-addon: core.addons.k8s.io
version: 1.4.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: 895c961cb9365cbedb22edd20a7648182ae7ed3f
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
name: kube-dns.addons.k8s.io
@ -43,7 +35,6 @@ spec:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0'
manifest: rbac.addons.k8s.io/k8s-1.8.yaml
manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914
name: rbac.addons.k8s.io
@ -51,7 +42,6 @@ spec:
k8s-addon: rbac.addons.k8s.io
version: 1.8.0
- id: k8s-1.9
kubernetesVersion: '>=1.9.0'
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745
name: kubelet-api.rbac.addons.k8s.io
@ -64,16 +54,8 @@ spec:
selector:
k8s-addon: limit-range.addons.k8s.io
version: 1.5.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 1.17.0-alpha.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01
name: dns-controller.addons.k8s.io
@ -97,47 +79,15 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.7.0
kubernetesVersion: '>=1.7.0 <1.15.0'
kubernetesVersion: <1.15.0
manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
manifestHash: 62705a596142e6cc283280e8aa973e51536994c5
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.6.0
kubernetesVersion: <1.7.0
manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
manifestHash: 7de4b2eb0521d669172038759c521418711d8266
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: networking.weave/pre-k8s-1.6.yaml
manifestHash: 8e7a361fff381e0ed84e0011506ff3bfdc7bc202
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
version: 2.3.0-kops.3
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.7.0'
manifest: networking.weave/k8s-1.6.yaml
manifestHash: 3f021695840729640da3910d8da357e905d3450c
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
version: 2.3.0-kops.3
- id: k8s-1.7
kubernetesVersion: '>=1.7.0 <1.8.0'
manifest: networking.weave/k8s-1.7.yaml
manifestHash: 990772f9809ffb0cff4ea9341a9ab7e9094d7587
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
version: 2.6.0-kops.2
- id: k8s-1.8
kubernetesVersion: '>=1.8.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: networking.weave/k8s-1.8.yaml
manifestHash: 50a20409003956b7c31a479408ca42ec97774854
name: networking.weave