kops/upup/models/bindata.go


// Code generated for package models by go-bindata DO NOT EDIT. (@generated)
// sources:
// upup/models/cloudup/resources/addons/OWNERS
// upup/models/cloudup/resources/addons/anonymous-issuer-discovery.addons.k8s.io/k8s-1.16.yaml.template
// upup/models/cloudup/resources/addons/authentication.aws/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.12.yaml
// upup/models/cloudup/resources/addons/cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml.template
// upup/models/cloudup/resources/addons/core.addons.k8s.io/addon.yaml
// upup/models/cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml
// upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml.template
// upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/README.md
// upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml.template
// upup/models/cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
// upup/models/cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml
// upup/models/cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml
// upup/models/cloudup/resources/addons/metadata-proxy.addons.k8s.io/addon.yaml
// upup/models/cloudup/resources/addons/metadata-proxy.addons.k8s.io/v0.1.12.yaml
// upup/models/cloudup/resources/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml.template
// upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template
// upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template
// upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml
// upup/models/cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template
// upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template
// upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template
// upup/models/cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template
// upup/models/cloudup/resources/addons/nodelocaldns.addons.k8s.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.13.yaml.template
// upup/models/cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml.template
// upup/models/cloudup/resources/addons/rbac.addons.k8s.io/k8s-1.8.yaml
// upup/models/cloudup/resources/addons/scheduler.addons.k8s.io/v1.7.0.yaml
// upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.14.0.yaml.template
// upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template
// upup/models/cloudup/resources/addons/storage-aws.addons.k8s.io/v1.15.0.yaml
// upup/models/cloudup/resources/addons/storage-aws.addons.k8s.io/v1.7.0.yaml
// upup/models/cloudup/resources/addons/storage-gce.addons.k8s.io/v1.7.0.yaml
// upup/models/cloudup/resources/addons/storage-openstack.addons.k8s.io/k8s-1.16.yaml.template
package models

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"
)
// asset holds the raw bytes of an embedded file together with its file info.
type asset struct {
	bytes []byte
	info  os.FileInfo
}

// bindataFileInfo describes an embedded file.
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// Name returns the file name.
func (fi bindataFileInfo) Name() string {
	return fi.name
}

// Size returns the file size.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}

// Mode returns the file mode.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the file modification time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}

// IsDir reports whether the file is a directory.
func (fi bindataFileInfo) IsDir() bool {
	return fi.mode&os.ModeDir != 0
}

// Sys returns the underlying data source (always nil for embedded assets).
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
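
// bindataFileInfo is intended to satisfy the os.FileInfo interface, so the
// synthetic file info attached to each embedded asset can stand in for a
// real file. A compile-time assertion of that contract (not present in the
// generated output; shown here only as a sketch) would be:
//
//	var _ os.FileInfo = bindataFileInfo{}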
var _cloudupResourcesAddonsOwners = []byte(`# See the OWNERS docs at https://go.k8s.io/owners

labels:
- area/addons
`)

func cloudupResourcesAddonsOwnersBytes() ([]byte, error) {
	return _cloudupResourcesAddonsOwners, nil
}

func cloudupResourcesAddonsOwners() (*asset, error) {
	bytes, err := cloudupResourcesAddonsOwnersBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/OWNERS", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
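
// Every asset below repeats the generated pattern seen above: a package-level
// byte slice holding the file contents, a ...Bytes function returning it, and
// a wrapper that pairs the bytes with synthetic file info (size 0, zero mode,
// Unix-epoch modTime). As a rough sketch of how a caller might consume one of
// these assets (readOwners is an illustrative name, not part of this file):
//
//	func readOwners() ([]byte, error) {
//		a, err := cloudupResourcesAddonsOwners()
//		if err != nil {
//			return nil, err
//		}
//		fmt.Printf("asset %s: %d bytes\n", a.info.Name(), len(a.bytes))
//		return a.bytes, nil
//	}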
var _cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplate = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-addon: anonymous-access.addons.k8s.io
  name: anonymous:service-account-issuer-discovery
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:service-account-issuer-discovery
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:anonymous`)

func cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplate, nil
}

func cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/anonymous-issuer-discovery.addons.k8s.io/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplate = []byte(`---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: iamidentitymappings.iamauthenticator.k8s.aws
spec:
  group: iamauthenticator.k8s.aws
  version: v1alpha1
  scope: Cluster
  names:
    plural: iamidentitymappings
    singular: iamidentitymapping
    kind: IAMIdentityMapping
    categories:
    - all
  subresources:
    status: {}
  validation:
    openAPIV3Schema:
      properties:
        spec:
          required:
          - arn
          - username
          properties:
            arn:
              type: string
            username:
              type: string
            groups:
              type: array
              items:
                type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: aws-iam-authenticator
rules:
- apiGroups:
  - iamauthenticator.k8s.aws
  resources:
  - iamidentitymappings
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - iamauthenticator.k8s.aws
  resources:
  - iamidentitymappings/status
  verbs:
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - update
  - patch
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames:
  - aws-auth
  verbs:
  - get
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: aws-iam-authenticator
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: aws-iam-authenticator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: aws-iam-authenticator
subjects:
- kind: ServiceAccount
  name: aws-iam-authenticator
  namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  namespace: kube-system
  name: aws-iam-authenticator
  labels:
    k8s-app: aws-iam-authenticator
  annotations:
    seccomp.security.alpha.kubernetes.io/pod: runtime/default
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      k8s-app: aws-iam-authenticator
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        k8s-app: aws-iam-authenticator
    spec:
      # use service account with access to
      serviceAccountName: aws-iam-authenticator
      # run on the host network (don't depend on CNI)
      hostNetwork: true
      # run on each master node
      nodeSelector:
        node-role.kubernetes.io/master: ""
      priorityClassName: system-node-critical
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: CriticalAddonsOnly
        operator: Exists
      # run ` + "`" + `aws-iam-authenticator server` + "`" + ` with three volumes
      # - config (mounted from the ConfigMap at /etc/aws-iam-authenticator/config.yaml)
      # - state (persisted TLS certificate and keys, mounted from the host)
      # - output (output kubeconfig to plug into your apiserver configuration, mounted from the host)
      containers:
      - name: aws-iam-authenticator
        image: {{ or .Authentication.Aws.Image "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-iam-authenticator:v0.5.2-debian-stretch" }}
        args:
        - server
        {{- if or (not .Authentication.Aws.BackendMode) (contains "MountedFile" .Authentication.Aws.BackendMode) }}
        - --config=/etc/aws-iam-authenticator/config.yaml
        {{- end }}
        {{- if or .Authentication.Aws.ClusterID (and .Authentication.Aws.BackendMode (not (contains "MountedFile" .Authentication.Aws.BackendMode)))}}
        - --cluster-id={{ or .Authentication.Aws.ClusterID ClusterName }}
        {{- end }}
        - --state-dir=/var/aws-iam-authenticator
        - --kubeconfig-pregenerated=true
        {{- if .Authentication.Aws.BackendMode }}
        - --backend-mode={{ .Authentication.Aws.BackendMode }}
        {{- end }}
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
        resources:
          requests:
            memory: {{ or .Authentication.Aws.MemoryRequest "20Mi" }}
            cpu: {{ or .Authentication.Aws.CPURequest "10m" }}
          limits:
            memory: {{ or .Authentication.Aws.MemoryLimit "20Mi" }}
            cpu: {{ or .Authentication.Aws.CPULimit "100m" }}
        livenessProbe:
          httpGet:
            host: 127.0.0.1
            path: /healthz
            port: 21362
            scheme: HTTPS
        volumeMounts:
        {{- if or (not .Authentication.Aws.BackendMode) (contains "MountedFile" .Authentication.Aws.BackendMode) }}
        - name: config
          mountPath: /etc/aws-iam-authenticator/
        {{- end }}
        - name: state
          mountPath: /var/aws-iam-authenticator/
        - name: output
          mountPath: /etc/kubernetes/aws-iam-authenticator/
      volumes:
      {{- if or (not .Authentication.Aws.BackendMode) (contains "MountedFile" .Authentication.Aws.BackendMode) }}
      - name: config
        configMap:
          name: aws-iam-authenticator
      {{- end }}
      - name: output
        hostPath:
          path: /srv/kubernetes/aws-iam-authenticator/
      - name: state
        hostPath:
          path: /srv/kubernetes/aws-iam-authenticator/
`)

func cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplate, nil
}

func cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/authentication.aws/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsAuthenticationKopeIoK8s112Yaml = []byte(`apiVersion: v1
kind: Namespace
metadata:
  name: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  selector:
    app: auth-api
  ports:
  - port: 443
    targetPort: 9002
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  selector:
    matchLabels:
      app: auth-api
  template:
    metadata:
      labels:
        app: auth-api
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: auth-api
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      priorityClassName: system-node-critical
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: auth-api
        image: kopeio/auth-api:1.0.20171125
        imagePullPolicy: Always
        ports:
        - containerPort: 9001
        command:
        - /auth-api
        - --listen=127.0.0.1:9001
        - --secure-port=9002
        - --etcd-servers=http://127.0.0.1:4001
        - --v=8
        - --storage-backend=etcd2
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1alpha1.auth.kope.io
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  insecureSkipTLSVerify: true
  group: auth.kope.io
  groupPriorityMinimum: 1000
  versionPriority: 15
  service:
    name: auth-api
    namespace: kopeio-auth
  version: v1alpha1
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1alpha1.config.auth.kope.io
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  insecureSkipTLSVerify: true
  group: config.auth.kope.io
  groupPriorityMinimum: 1000
  versionPriority: 15
  service:
    name: auth-api
    namespace: kopeio-auth
  version: v1alpha1
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kopeio-auth:auth-api:auth-reader
  namespace: kube-system
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: auth-api
  namespace: kopeio-auth
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kopeio-auth:system:auth-delegator
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: auth-api
  namespace: kopeio-auth
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
rules:
- apiGroups: ["auth.kope.io"]
  resources: ["users"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: auth-api
subjects:
- kind: ServiceAccount
  name: auth-api
  namespace: kopeio-auth
`)

func cloudupResourcesAddonsAuthenticationKopeIoK8s112YamlBytes() ([]byte, error) {
	return _cloudupResourcesAddonsAuthenticationKopeIoK8s112Yaml, nil
}

func cloudupResourcesAddonsAuthenticationKopeIoK8s112Yaml() (*asset, error) {
	bytes, err := cloudupResourcesAddonsAuthenticationKopeIoK8s112YamlBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/authentication.kope.io/k8s-1.12.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate = []byte(`{{ with .ClusterAutoscaler }}
# Sourced from https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws/examples
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
  name: cluster-autoscaler
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cluster-autoscaler
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
rules:
- apiGroups: [""]
  resources: ["events", "endpoints"]
  verbs: ["create", "patch"]
- apiGroups: [""]
  resources: ["pods/eviction"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["pods/status"]
  verbs: ["update"]
- apiGroups: [""]
  resources: ["endpoints"]
  resourceNames: ["cluster-autoscaler"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["watch", "list", "get", "update"]
- apiGroups: [""]
  resources:
  - "pods"
  - "services"
  - "replicationcontrollers"
  - "persistentvolumeclaims"
  - "persistentvolumes"
  verbs: ["watch", "list", "get"]
- apiGroups: ["extensions"]
  resources: ["replicasets", "daemonsets"]
  verbs: ["watch", "list", "get"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["watch", "list"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "replicasets", "daemonsets"]
  verbs: ["watch", "list", "get"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses", "csinodes"]
  verbs: ["watch", "list", "get"]
- apiGroups: ["batch", "extensions"]
  resources: ["jobs"]
  verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
  resourceNames: ["cluster-autoscaler"]
  resources: ["leases"]
  verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create","list","watch"]
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"]
  verbs: ["delete", "get", "update", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-autoscaler
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-autoscaler
subjects:
- kind: ServiceAccount
  name: cluster-autoscaler
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cluster-autoscaler
subjects:
- kind: ServiceAccount
  name: cluster-autoscaler
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    app: cluster-autoscaler
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cluster-autoscaler
  template:
    metadata:
      labels:
        app: cluster-autoscaler
    spec:
      serviceAccountName: cluster-autoscaler
      tolerations:
      - operator: "Exists"
        key: node-role.kubernetes.io/master
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - image: {{ .Image }}
        name: cluster-autoscaler
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 100m
            memory: 300Mi
        command:
        - ./cluster-autoscaler
        - --balance-similar-node-groups={{ .BalanceSimilarNodeGroups }}
        - --cloud-provider={{ $.CloudProvider }}
        - --expander={{ .Expander }}
        {{ range $name, $spec := GetNodeInstanceGroups }}
        - --nodes={{ $spec.MinSize }}:{{ $spec.MaxSize }}:{{ $name }}.{{ ClusterName }}
        {{ end }}
        - --scale-down-utilization-threshold={{ .ScaleDownUtilizationThreshold }}
        - --skip-nodes-with-local-storage={{ .SkipNodesWithLocalStorage }}
        - --skip-nodes-with-system-pods={{ .SkipNodesWithSystemPods }}
        - --stderrthreshold=info
        - --v=2
        ports:
        - containerPort: 8085
          protocol: TCP
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /health-check
            port: 8085
            scheme: HTTP
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
{{ end }}`)

func cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate, nil
}

func cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml = []byte(`kind: Addons
metadata:
  name: core
spec:
  addons:
  - version: 1.4.0
    selector:
      k8s-addon: core.addons.k8s.io
    manifest: v1.4.0.yaml
`)

func cloudupResourcesAddonsCoreAddonsK8sIoAddonYamlBytes() ([]byte, error) {
	return _cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml, nil
}

func cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml() (*asset, error) {
	bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoAddonYamlBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/addon.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:cloud-controller-manager
rules:
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - create
  - get
  - list
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - get
  - list
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:cloud-controller-manager
subjects:
- kind: ServiceAccount
  name: cloud-controller-manager
  namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: cloud-controller-manager
  name: cloud-controller-manager
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: cloud-controller-manager
  template:
    metadata:
      labels:
        k8s-app: cloud-controller-manager
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      priorityClassName: system-node-critical
      serviceAccountName: cloud-controller-manager
      containers:
      - name: cloud-controller-manager
        # for in-tree providers we use k8s.gcr.io/cloud-controller-manager
        # this can be replaced with any other image for out-of-tree providers
        image: k8s.gcr.io/cloud-controller-manager:v{{ .KubernetesVersion }} # Reviewers: Will this work?
        command:
        - /usr/local/bin/cloud-controller-manager
        - --cloud-provider={{ .CloudProvider }}
        - --leader-elect=true
        - --use-service-account-credentials
        # these flags will vary for every cloud provider
        - --allocate-node-cidrs=true
        - --configure-cloud-routes=true
        - --cluster-cidr={{ .KubeControllerManager.ClusterCIDR }}
        volumeMounts:
        - name: ca-certificates
          mountPath: /etc/ssl/certs
      hostNetwork: true
      dnsPolicy: Default
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      tolerations:
      # this is required so CCM can bootstrap itself
      - key: node.cloudprovider.kubernetes.io/uninitialized
        value: "true"
        effect: NoSchedule
      # this is to have the daemonset runnable on master nodes
      # the taint may vary depending on your cluster setup
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      # this is to restrict CCM to only run on master nodes
      # the node selector may vary depending on your cluster setup
      - key: "CriticalAddonsOnly"
        operator: "Exists"
`)

func cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate, nil
}

func cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml = []byte(`---
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
`)

func cloudupResourcesAddonsCoreAddonsK8sIoV140YamlBytes() ([]byte, error) {
	return _cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml, nil
}

func cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml() (*asset, error) {
	bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoV140YamlBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    k8s-addon: coredns.addons.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    k8s-addon: coredns.addons.k8s.io
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    k8s-addon: coredns.addons.k8s.io
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
{{- if KubeDNS.ExternalCoreFile }}
{{ KubeDNS.ExternalCoreFile | indent 4 }}
{{- else }}
    .:53 {
        errors
        health {
            lameduck 5s
        }
        kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        loop
        cache 30
        loadbalance
        reload
    }
{{- end }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: coredns.addons.k8s.io
    k8s-app: coredns-autoscaler
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: coredns-autoscaler
  template:
    metadata:
      labels:
        k8s-app: coredns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
      - name: autoscaler
        image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.3
        resources:
          requests:
            cpu: "20m"
            memory: "10Mi"
        command:
        - /cluster-proportional-autoscaler
        - --namespace=kube-system
        - --configmap=coredns-autoscaler
        - --target=Deployment/coredns
        # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
        # If using small nodes, "nodesPerReplica" should dominate.
        - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
        - --logtostderr=true
        - --v=2
      priorityClassName: system-cluster-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      serviceAccountName: coredns-autoscaler
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    k8s-addon: coredns.addons.k8s.io
    kubernetes.io/cluster-service: "true"
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 10%
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values:
                  - kube-dns
              topologyKey: kubernetes.io/hostname
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.7.0{{ end }}
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: {{ KubeDNS.MemoryLimit }}
          requests:
            cpu: {{ KubeDNS.CPURequest }}
            memory: {{ KubeDNS.MemoryRequest }}
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-addon: coredns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
  # Without this resourceVersion value, an update of the Service between versions will yield:
  #   Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
  resourceVersion: "0"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ KubeDNS.ServerIP }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: coredns.addons.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-addon: coredns.addons.k8s.io
  name: coredns-autoscaler
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list","watch"]
- apiGroups: [""]
  resources: ["replicationcontrollers/scale"]
  verbs: ["get", "update"]
- apiGroups: ["extensions", "apps"]
  resources: ["deployments/scale", "replicasets/scale"]
  verbs: ["get", "update"]
# Remove the configmaps rule once below issue is fixed:
# kubernetes-incubator/cluster-proportional-autoscaler#16
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-addon: coredns.addons.k8s.io
  name: coredns-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: coredns-autoscaler
subjects:
- kind: ServiceAccount
  name: coredns-autoscaler
  namespace: kube-system
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kube-dns
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-dns
  minAvailable: 1
`)

func cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate, nil
}

func cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplate = []byte(`---
apiVersion: v1
kind: Secret
metadata:
  name: digitalocean
  namespace: kube-system
stringData:
  # insert your DO access token here
  access-token: {{ DO_TOKEN }}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: digitalocean-cloud-controller-manager
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: digitalocean-cloud-controller-manager
  template:
    metadata:
      labels:
        k8s-app: digitalocean-cloud-controller-manager
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      serviceAccountName: cloud-controller-manager
      dnsPolicy: Default
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - key: "node.cloudprovider.kubernetes.io/uninitialized"
        value: "true"
        effect: "NoSchedule"
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      - key: "node-role.kubernetes.io/master"
        effect: NoSchedule
      - effect: NoExecute
        key: node.kubernetes.io/not-ready
        operator: Exists
        tolerationSeconds: 300
      - effect: NoExecute
        key: node.kubernetes.io/unreachable
        operator: Exists
        tolerationSeconds: 300
      containers:
      - image: digitalocean/digitalocean-cloud-controller-manager:v0.1.24
        name: digitalocean-cloud-controller-manager
        command:
        - "/bin/digitalocean-cloud-controller-manager"
        - "--leader-elect=true"
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
        env:
        - name: KUBERNETES_SERVICE_HOST
          value: "127.0.0.1"
        - name: KUBERNETES_SERVICE_PORT
          value: "443"
        - name: DO_ACCESS_TOKEN
          valueFrom:
            secretKeyRef:
              name: digitalocean
              key: access-token
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  name: system:cloud-controller-manager
rules:
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - services/status
  verbs:
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - get
  - list
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - create
  - get
  - list
  - watch
  - update
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - watch
  - list
  - create
  - update
  - delete
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:cloud-controller-manager
subjects:
- kind: ServiceAccount
  name: cloud-controller-manager
  namespace: kube-system
`)

func cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplate, nil
}

func cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplate = []byte(`kind: Deployment
apiVersion: apps/v1
metadata:
  name: dns-controller
  namespace: kube-system
  labels:
    k8s-addon: dns-controller.addons.k8s.io
    k8s-app: dns-controller
    version: v1.19.0-beta.2
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      k8s-app: dns-controller
  template:
    metadata:
      labels:
        k8s-addon: dns-controller.addons.k8s.io
        k8s-app: dns-controller
        version: v1.19.0-beta.2
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
      - operator: Exists
      nodeSelector:
        node-role.kubernetes.io/master: ""
      dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns)
      hostNetwork: true
      serviceAccount: dns-controller
      containers:
      - name: dns-controller
        image: k8s.gcr.io/kops/dns-controller:1.19.0-beta.2
        command:
{{ range $arg := DnsControllerArgv }}
        - "{{ $arg }}"
{{ end }}
        env:
        - name: KUBERNETES_SERVICE_HOST
          value: "127.0.0.1"
{{- if .EgressProxy }}
{{ range $name, $value := ProxyEnv }}
        - name: {{ $name }}
          value: {{ $value }}
{{ end }}
{{- end }}
{{- if eq .CloudProvider "digitalocean" }}
        - name: DIGITALOCEAN_ACCESS_TOKEN
          valueFrom:
            secretKeyRef:
              name: digitalocean
              key: access-token
{{- end }}
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
        securityContext:
          runAsNonRoot: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dns-controller
  namespace: kube-system
  labels:
    k8s-addon: dns-controller.addons.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-addon: dns-controller.addons.k8s.io
  name: kops:dns-controller
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - ingress
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "extensions"
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-addon: dns-controller.addons.k8s.io
  name: kops:dns-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:dns-controller
`)

func cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplate, nil
}

func cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd = []byte(`# ExternalDNS

ExternalDNS synchronizes exposed Kubernetes Services and Ingresses with DNS providers.

## What it does

Inspired by [Kubernetes DNS](https://github.com/kubernetes/dns), Kubernetes' cluster-internal DNS server, ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the [Kubernetes API](https://kubernetes.io/docs/api/) to determine a desired list of DNS records. *Unlike* KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly—e.g. [AWS Route 53](https://aws.amazon.com/route53/) or [Google CloudDNS](https://cloud.google.com/dns/docs/).

In a broader sense, ExternalDNS allows you to control DNS records dynamically via Kubernetes resources in a DNS provider-agnostic way.

## Deploying to a Cluster

The following tutorials are provided:

* [AWS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md)
* [Azure](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/azure.md)
* [Cloudflare](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/cloudflare.md)
* [DigitalOcean](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/digitalocean.md)
* Google Container Engine
  * [Using Google's Default Ingress Controller](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/gke.md)
  * [Using the Nginx Ingress Controller](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/nginx-ingress.md)
* [FAQ](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/faq.md)

## Github repository

Source code is managed under kubernetes-incubator at [external-dns](https://github.com/kubernetes-incubator/external-dns).`)

func cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMdBytes() ([]byte, error) {
	return _cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd, nil
}

func cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd() (*asset, error) {
	bytes, err := cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMdBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/external-dns.addons.k8s.io/README.md", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
  name: external-dns
  namespace: kube-system
  labels:
    k8s-addon: external-dns.addons.k8s.io
    k8s-app: external-dns
    version: v0.4.4
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: external-dns
  template:
    metadata:
      labels:
        k8s-addon: external-dns.addons.k8s.io
        k8s-app: external-dns
        version: v0.4.4
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      serviceAccount: external-dns
      tolerations:
      - key: "node-role.kubernetes.io/master"
        effect: NoSchedule
      nodeSelector:
        node-role.kubernetes.io/master: ""
      dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns)
      hostNetwork: true
      containers:
      - name: external-dns
        image: registry.opensource.zalan.do/teapot/external-dns:v0.4.4
        args:
{{ range $arg := ExternalDnsArgv }}
        - "{{ $arg }}"
{{ end }}
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: external-dns
  namespace: kube-system
  labels:
    k8s-addon: external-dns.addons.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-addon: external-dns.addons.k8s.io
  name: kops:external-dns
rules:
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - list
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-addon: external-dns.addons.k8s.io
  name: kops:external-dns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kops:external-dns
subjects:
- kind: ServiceAccount
  name: external-dns
  namespace: kube-system
`)

func cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate, nil
}

func cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate = []byte(`apiVersion: v1
kind: ConfigMap
metadata:
  name: kops-controller
  namespace: kube-system
  labels:
    k8s-addon: kops-controller.addons.k8s.io
data:
  config.yaml: |
    {{ KopsControllerConfig }}
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: kops-controller
  namespace: kube-system
  labels:
    k8s-addon: kops-controller.addons.k8s.io
    k8s-app: kops-controller
    version: v1.19.0-beta.2
spec:
  selector:
    matchLabels:
      k8s-app: kops-controller
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-addon: kops-controller.addons.k8s.io
        k8s-app: kops-controller
        version: v1.19.0-beta.2
{{ if UseKopsControllerForNodeBootstrap }}
      annotations:
        dns.alpha.kubernetes.io/internal: kops-controller.internal.{{ ClusterName }}
{{ end }}
    spec:
      priorityClassName: system-node-critical
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: Exists
      nodeSelector:
        node-role.kubernetes.io/master: ""
        kops.k8s.io/kops-controller-pki: ""
      dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns)
      hostNetwork: true
      serviceAccount: kops-controller
      containers:
      - name: kops-controller
        image: k8s.gcr.io/kops/kops-controller:1.19.0-beta.2
        volumeMounts:
{{ if .UseHostCertificates }}
        - mountPath: /etc/ssl/certs
          name: etc-ssl-certs
          readOnly: true
{{ end }}
        - mountPath: /etc/kubernetes/kops-controller/config/
          name: kops-controller-config
        - mountPath: /etc/kubernetes/kops-controller/pki/
          name: kops-controller-pki
        command:
{{ range $arg := KopsControllerArgv }}
        - "{{ $arg }}"
{{ end }}
{{- if KopsSystemEnv }}
        env:
{{ range $var := KopsSystemEnv }}
        - name: {{ $var.Name }}
          value: {{ $var.Value }}
{{ end }}
{{- end }}
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
        securityContext:
          runAsNonRoot: true
      volumes:
{{ if .UseHostCertificates }}
      - hostPath:
          path: /etc/ssl/certs
          type: DirectoryOrCreate
        name: etc-ssl-certs
{{ end }}
      - name: kops-controller-config
        configMap:
          name: kops-controller
      - name: kops-controller-pki
        hostPath:
          path: /etc/kubernetes/kops-controller/
          type: Directory
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kops-controller
  namespace: kube-system
  labels:
    k8s-addon: kops-controller.addons.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-addon: kops-controller.addons.k8s.io
  name: kops-controller
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-addon: kops-controller.addons.k8s.io
  name: kops-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    k8s-addon: kops-controller.addons.k8s.io
  name: kops-controller
  namespace: kube-system
rules:
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - get
  - list
  - watch
  - create
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames:
  - kops-controller-leader
  verbs:
  - get
  - list
  - watch
  - patch
  - update
  - delete
# Workaround for https://github.com/kubernetes/kubernetes/issues/80295
# We can't restrict creation of objects by name
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-addon: kops-controller.addons.k8s.io
  name: kops-controller
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:kops-controller
`)

func cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate, nil
}

func cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplate = []byte(`# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

{{- if or (.KubeDNS.UpstreamNameservers) (.KubeDNS.StubDomains) }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
data:
{{- if .KubeDNS.UpstreamNameservers }}
  upstreamNameservers: |
    {{ ToJSON .KubeDNS.UpstreamNameservers }}
{{- end }}
{{- if .KubeDNS.StubDomains }}
  stubDomains: |
    {{ ToJSON .KubeDNS.StubDomains }}
{{- end }}
---
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns-autoscaler
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: kube-dns-autoscaler
  template:
    metadata:
      labels:
        k8s-app: kube-dns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
      - name: autoscaler
        image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.3
        resources:
          requests:
            cpu: "20m"
            memory: "10Mi"
        command:
        - /cluster-proportional-autoscaler
        - --namespace=kube-system
        - --configmap=kube-dns-autoscaler
        # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
        - --target=Deployment/kube-dns
        # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
        # If using small nodes, "nodesPerReplica" should dominate.
        - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
        - --logtostderr=true
        - --v=2
      priorityClassName: system-cluster-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      serviceAccountName: kube-dns-autoscaler
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        prometheus.io/scrape: 'true'
        prometheus.io/port: '10055'
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values:
                  - kube-dns
              topologyKey: kubernetes.io/hostname
      dnsPolicy: Default # Don't use cluster DNS.
      priorityClassName: system-cluster-critical
      serviceAccountName: kube-dns
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: k8s.gcr.io/k8s-dns-kube-dns:1.15.13
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --config-dir=/kube-dns-config
        - --dns-port=10053
        - --domain={{ KubeDNS.Domain }}.
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.15.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size={{ KubeDNS.CacheMaxSize }}
        - --dns-forward-max={{ KubeDNS.CacheMaxConcurrent }}
        - --no-negcache
        - --log-facility=-
        - --server=/{{ KubeDNS.Domain }}/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/in6.arpa/127.0.0.1#10053
        - --min-port=1024
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: k8s.gcr.io/k8s-dns-sidecar:1.15.13
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ KubeDNS.Domain }},5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ KubeDNS.Domain }},5,A
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
  # Without this resourceVersion value, an update of the Service between versions will yield:
  #   Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
  resourceVersion: "0"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ KubeDNS.ServerIP }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: kube-dns.addons.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-addon: kube-dns.addons.k8s.io
  name: kube-dns-autoscaler
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list","watch"]
- apiGroups: [""]
  resources: ["replicationcontrollers/scale"]
  verbs: ["get", "update"]
- apiGroups: ["extensions", "apps"]
  resources: ["deployments/scale", "replicasets/scale"]
  verbs: ["get", "update"]
# Remove the configmaps rule once below issue is fixed:
# kubernetes-incubator/cluster-proportional-autoscaler#16
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-addon: kube-dns.addons.k8s.io
  name: kube-dns-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-dns-autoscaler
subjects:
- kind: ServiceAccount
  name: kube-dns-autoscaler
  namespace: kube-system
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kube-dns
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-dns
  minAvailable: 1
`)

func cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplate, nil
}

func cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplateBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml = []byte(`---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kops:system:kubelet-api-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubelet-api-admin
subjects:
# TODO: perhaps change the client certificate, place it into a group and use a group selector instead?
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubelet-api
`)

func cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19YamlBytes() ([]byte, error) {
	return _cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml, nil
}

func cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml() (*asset, error) {
	bytes, err := cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19YamlBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml = []byte(`kind: Addons
metadata:
name: limit-range
spec:
addons:
- version: 1.5.0
selector:
k8s-addon: limit-range.addons.k8s.io
manifest: v1.5.0.yaml
`)
func cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml, nil
}
func cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml = []byte(`apiVersion: "v1"
kind: "LimitRange"
metadata:
name: "limits"
namespace: default
spec:
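  # Containers in the "default" namespace that omit an explicit CPU request
  # are given a request of 100m by this LimitRange.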
limits:
- type: "Container"
defaultRequest:
cpu: "100m"
`)
func cloudupResourcesAddonsLimitRangeAddonsK8sIoV150YamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml, nil
}
func cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsLimitRangeAddonsK8sIoV150YamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYaml = []byte(`kind: Addons
metadata:
name: metadata-proxy
spec:
addons:
- version: 0.1.12
selector:
k8s-addon: metadata-proxy.addons.k8s.io
    manifest: v0.1.12.yaml
`)
func cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYaml, nil
}
func cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/metadata-proxy.addons.k8s.io/addon.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsMetadataProxyAddonsK8sIoV0112Yaml = []byte(`# Borrowed from https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/metadata-proxy
apiVersion: v1
kind: ServiceAccount
metadata:
name: metadata-proxy
namespace: kube-system
labels:
k8s-app: metadata-proxy
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: metadata-proxy-v0.12
namespace: kube-system
labels:
k8s-app: metadata-proxy
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.12
spec:
selector:
matchLabels:
k8s-app: metadata-proxy
version: v0.12
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: metadata-proxy
kubernetes.io/cluster-service: "true"
version: v0.12
spec:
priorityClassName: system-node-critical
serviceAccountName: metadata-proxy
hostNetwork: true
dnsPolicy: Default
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
initContainers:
      - name: update-iptables
securityContext:
privileged: true
image: gcr.io/google_containers/k8s-custom-iptables:1.0
imagePullPolicy: Always
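        # DNAT node-local metadata traffic (169.254.169.254:80) to the
        # metadata-proxy listening on 127.0.0.1:988.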
command: [ "/bin/sh", "-c", "/sbin/iptables -t nat -I PREROUTING -p tcp --dport 80 -d 169.254.169.254 -j DNAT --to-destination 127.0.0.1:988" ]
volumeMounts:
- name: host
mountPath: /host
volumes:
- name: host
hostPath:
path: /
type: Directory
containers:
- name: metadata-proxy
image: k8s.gcr.io/metadata-proxy:v0.1.12
securityContext:
privileged: true
# Request and limit resources to get guaranteed QoS.
resources:
requests:
memory: "25Mi"
cpu: "30m"
limits:
memory: "25Mi"
cpu: "30m"
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.5.0
# Request and limit resources to get guaranteed QoS.
resources:
requests:
memory: "20Mi"
cpu: "2m"
limits:
memory: "20Mi"
cpu: "2m"
command:
- /monitor
- --stackdriver-prefix=custom.googleapis.com/addons
- --source=metadata_proxy:http://127.0.0.1:989?whitelisted=request_count
- --pod-id=$(POD_NAME)
- --namespace-id=$(POD_NAMESPACE)
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# END_PROMETHEUS_TO_SD
nodeSelector:
cloud.google.com/metadata-proxy-ready: "true"
beta.kubernetes.io/os: linux
terminationGracePeriodSeconds: 30
`)
func cloudupResourcesAddonsMetadataProxyAddonsK8sIoV0112YamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsMetadataProxyAddonsK8sIoV0112Yaml, nil
}
func cloudupResourcesAddonsMetadataProxyAddonsK8sIoV0112Yaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsMetadataProxyAddonsK8sIoV0112YamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/metadata-proxy.addons.k8s.io/v0.1.12.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplate = []byte(`# sourced from https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.7/components.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:aggregated-metrics-reader
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-server
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
spec:
serviceAccountName: metrics-server
volumes:
# mount in tmp so we can safely use from-scratch images and/or read-only containers
- name: tmp-dir
emptyDir: {}
containers:
- name: metrics-server
image: {{ or .MetricsServer.Image "k8s.gcr.io/metrics-server/metrics-server:v0.3.7" }}
imagePullPolicy: IfNotPresent
args:
- --cert-dir=/tmp
- --secure-port=4443
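        # When nodes do not receive kops-controller-issued kubelet serving
        # certificates, metrics-server cannot verify kubelet TLS, so
        # verification is skipped below.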
{{ if not UseKopsControllerForNodeBootstrap }}
- --kubelet-insecure-tls
{{ end }}
ports:
- name: main-port
containerPort: 4443
protocol: TCP
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- name: tmp-dir
mountPath: /tmp
---
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
kubernetes.io/name: "Metrics-server"
kubernetes.io/cluster-service: "true"
spec:
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: main-port
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server`)
func cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplate, nil
}
func cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate = []byte(`# Vendored from https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.5.5/config/v1.5/aws-k8s-cni.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: aws-node
rules:
- apiGroups:
- crd.k8s.amazonaws.com
resources:
- "*"
- namespaces
verbs:
- "*"
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs: ["list", "watch", "get"]
- apiGroups: ["extensions"]
resources:
- daemonsets
verbs: ["list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: aws-node
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: aws-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aws-node
subjects:
- kind: ServiceAccount
name: aws-node
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: aws-node
namespace: kube-system
labels:
k8s-app: aws-node
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: aws-node
template:
metadata:
labels:
k8s-app: aws-node
spec:
priorityClassName: system-node-critical
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "beta.kubernetes.io/os"
operator: In
values:
- linux
- key: "beta.kubernetes.io/arch"
operator: In
values:
- amd64
serviceAccountName: aws-node
hostNetwork: true
tolerations:
- operator: Exists
containers:
- image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.5" }}"
imagePullPolicy: Always
ports:
- containerPort: 61678
name: metrics
name: aws-node
env:
- name: CLUSTER_NAME
value: {{ ClusterName }}
- name: AWS_VPC_K8S_CNI_LOGLEVEL
value: DEBUG
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- range .Networking.AmazonVPC.Env }}
- name: {{ .Name }}
value: "{{ .Value }}"
{{- end }}
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /host/var/log
name: log-dir
- mountPath: /var/run/docker.sock
name: dockersock
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: log-dir
hostPath:
path: /var/log
- name: dockersock
hostPath:
path: /var/run/docker.sock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: eniconfigs.crd.k8s.amazonaws.com
spec:
scope: Cluster
group: crd.k8s.amazonaws.com
versions:
- name: v1alpha1
served: true
storage: true
names:
plural: eniconfigs
singular: eniconfig
kind: ENIConfig
`)
func cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplate = []byte(`# Vendored from https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/release-1.7/config/v1.7/aws-k8s-cni.yaml
---
"apiVersion": "rbac.authorization.k8s.io/v1"
"kind": "ClusterRoleBinding"
"metadata":
"name": "aws-node"
"roleRef":
"apiGroup": "rbac.authorization.k8s.io"
"kind": "ClusterRole"
"name": "aws-node"
"subjects":
- "kind": "ServiceAccount"
"name": "aws-node"
"namespace": "kube-system"
---
"apiVersion": "rbac.authorization.k8s.io/v1"
"kind": "ClusterRole"
"metadata":
"name": "aws-node"
"rules":
- "apiGroups":
- "crd.k8s.amazonaws.com"
"resources":
- "eniconfigs"
"verbs":
- "get"
- "list"
- "watch"
- "apiGroups":
- ""
"resources":
- "pods"
- "namespaces"
"verbs":
- "list"
- "watch"
- "get"
- "apiGroups":
- ""
"resources":
- "nodes"
"verbs":
- "list"
- "watch"
- "get"
- "update"
- "apiGroups":
- "extensions"
- "apps"
"resources":
- "*"
"verbs":
- "list"
- "watch"
---
"apiVersion": "apiextensions.k8s.io/v1beta1"
"kind": "CustomResourceDefinition"
"metadata":
"name": "eniconfigs.crd.k8s.amazonaws.com"
"spec":
"group": "crd.k8s.amazonaws.com"
"names":
"kind": "ENIConfig"
"plural": "eniconfigs"
"singular": "eniconfig"
"scope": "Cluster"
"versions":
- "name": "v1alpha1"
"served": true
"storage": true
---
"apiVersion": "apps/v1"
"kind": "DaemonSet"
"metadata":
"labels":
"k8s-app": "aws-node"
"name": "aws-node"
"namespace": "kube-system"
"spec":
"selector":
"matchLabels":
"k8s-app": "aws-node"
"template":
"metadata":
"labels":
"k8s-app": "aws-node"
"spec":
"affinity":
"nodeAffinity":
"requiredDuringSchedulingIgnoredDuringExecution":
"nodeSelectorTerms":
- "matchExpressions":
- "key": "kubernetes.io/os"
"operator": "In"
"values":
- "linux"
- "key": "kubernetes.io/arch"
"operator": "In"
"values":
- "amd64"
- "arm64"
- "key": "eks.amazonaws.com/compute-type"
"operator": "NotIn"
"values":
- "fargate"
"containers":
- "env":
- "name": "ADDITIONAL_ENI_TAGS"
"value": "{}"
- "name": "AWS_VPC_CNI_NODE_PORT_SUPPORT"
"value": "true"
- "name": "AWS_VPC_ENI_MTU"
"value": "9001"
- "name": "AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER"
"value": "false"
- "name": "AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG"
"value": "false"
- "name": "AWS_VPC_K8S_CNI_EXTERNALSNAT"
"value": "false"
- "name": "AWS_VPC_K8S_CNI_LOGLEVEL"
"value": "DEBUG"
- "name": "AWS_VPC_K8S_CNI_LOG_FILE"
"value": "/host/var/log/aws-routed-eni/ipamd.log"
- "name": "AWS_VPC_K8S_CNI_RANDOMIZESNAT"
"value": "prng"
- "name": "AWS_VPC_K8S_CNI_VETHPREFIX"
"value": "eni"
- "name": "AWS_VPC_K8S_PLUGIN_LOG_FILE"
"value": "/var/log/aws-routed-eni/plugin.log"
- "name": "AWS_VPC_K8S_PLUGIN_LOG_LEVEL"
"value": "DEBUG"
- "name": "DISABLE_INTROSPECTION"
"value": "false"
- "name": "DISABLE_METRICS"
"value": "false"
- "name": "ENABLE_POD_ENI"
"value": "false"
- "name": "MY_NODE_NAME"
"valueFrom":
"fieldRef":
"fieldPath": "spec.nodeName"
- "name": "WARM_ENI_TARGET"
"value": "1"
- "name": "CLUSTER_NAME"
"value": "{{ ClusterName }}"
{{- range .Networking.AmazonVPC.Env }}
- "name": "{{ .Name }}"
"value": "{{ .Value }}"
{{- end }}
"image": "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.7.5" }}"
"imagePullPolicy": "Always"
"livenessProbe":
"exec":
"command":
- "/app/grpc-health-probe"
- "-addr=:50051"
"initialDelaySeconds": 60
"name": "aws-node"
"ports":
- "containerPort": 61678
"name": "metrics"
"readinessProbe":
"exec":
"command":
- "/app/grpc-health-probe"
- "-addr=:50051"
"initialDelaySeconds": 1
"resources":
"requests":
"cpu": "10m"
"securityContext":
"capabilities":
"add":
- "NET_ADMIN"
"volumeMounts":
- "mountPath": "/host/opt/cni/bin"
"name": "cni-bin-dir"
- "mountPath": "/host/etc/cni/net.d"
"name": "cni-net-dir"
- "mountPath": "/host/var/log/aws-routed-eni"
"name": "log-dir"
- "mountPath": "/var/run/aws-node"
"name": "run-dir"
- "mountPath": "/var/run/dockershim.sock"
"name": "dockershim"
- "mountPath": "/run/xtables.lock"
"name": "xtables-lock"
"hostNetwork": true
"initContainers":
- "env":
- "name": "DISABLE_TCP_EARLY_DEMUX"
"value": "false"
"image": "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni-init:v1.7.5"
"imagePullPolicy": "Always"
"name": "aws-vpc-cni-init"
"securityContext":
"privileged": true
"volumeMounts":
- "mountPath": "/host/opt/cni/bin"
"name": "cni-bin-dir"
"priorityClassName": "system-node-critical"
"serviceAccountName": "aws-node"
"terminationGracePeriodSeconds": 10
"tolerations":
- "operator": "Exists"
"volumes":
- "hostPath":
"path": "/opt/cni/bin"
"name": "cni-bin-dir"
- "hostPath":
"path": "/etc/cni/net.d"
"name": "cni-net-dir"
- "hostPath":
"path": "/var/run/dockershim.sock"
"name": "dockershim"
- "hostPath":
"path": "/run/xtables.lock"
"name": "xtables-lock"
- "hostPath":
"path": "/var/log/aws-routed-eni"
"type": "DirectoryOrCreate"
"name": "log-dir"
- "hostPath":
"path": "/var/run/aws-node"
"type": "DirectoryOrCreate"
"name": "run-dir"
"updateStrategy":
"rollingUpdate":
"maxUnavailable": "10%"
"type": "RollingUpdate"
---
"apiVersion": "v1"
"kind": "ServiceAccount"
"metadata":
"name": "aws-node"
"namespace": "kube-system"
...
`)
func cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
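// Rendering sketch (illustrative; kops' real template pipeline lives outside
// this file): the *.yaml.template assets are text/template inputs that use
// extra functions such as ClusterName, supplied via a template.FuncMap. The
// clusterSpec value below is a stand-in for the real template context:
//
//	tmpl, err := template.New("addon").Funcs(template.FuncMap{
//		"ClusterName": func() string { return "example.cluster.local" },
//	}).Parse(string(MustAsset(
//		"cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template")))
//	if err != nil {
//		// handle parse error
//	}
//	var out bytes.Buffer
//	err = tmpl.Execute(&out, clusterSpec) // clusterSpec must expose .Networking.AmazonVPC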
var _cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate = []byte(`{{- if CiliumSecret }}
apiVersion: v1
kind: Secret
metadata:
name: cilium-ipsec-keys
namespace: kube-system
stringData:
{{ CiliumSecret }}
---
{{- end }}
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
data:
{{ with .Networking.Cilium }}
{{- if .EtcdManaged }}
kvstore: etcd
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
etcd-config: |-
---
endpoints:
- https://{{ $.MasterInternalName }}:4003
trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt'
key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key'
cert-file: '/var/lib/etcd-secrets/etcd-client-cilium.crt'
{{ end }}
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# kubectl get ciliumid
# - "kvstore" stores identities in a kvstore, etcd or consul, that is
# configured below. Cilium versions before 1.6 supported only the kvstore
# backend. Upgrades from these older cilium versions should continue using
# the kvstore by commenting out the identity-allocation-mode below, or
# setting it to "kvstore".
identity-allocation-mode: crd
  # If you want to run cilium in debug mode, change this value to true
debug: "{{ .Debug }}"
{{ if .EnablePrometheusMetrics }}
  # If you want metrics enabled in all of your Cilium agents, set the port on
  # which the Cilium agents will expose their metrics.
# This option deprecates the "prometheus-serve-addr" in the
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
prometheus-serve-addr: ":{{ .AgentPrometheusPort }}"
operator-prometheus-serve-addr: ":6942"
enable-metrics: "true"
{{ end }}
{{ if .EnableEncryption }}
enable-ipsec: "true"
ipsec-key-file: /etc/ipsec/keys
{{ end }}
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
enable-ipv4: "true"
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
enable-ipv6: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the less packets
# that will be seen in monitor output.
monitor-aggregation: "{{ .MonitorAggregation }}"
# ct-global-max-entries-* specifies the maximum number of connections
# supported across all endpoints, split by protocol: tcp or other. One pair
# of maps uses these values for IPv4 connections, and another pair of maps
# use these values for IPv6 connections.
#
# If these values are modified, then during the next Cilium startup the
# tracking of ongoing connections may be disrupted. This may lead to brief
# policy drops or a change in loadbalancing decisions for a connection.
#
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
# during the upgrade process, comment out these options.
bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}"
bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# This may lead to policy drops or a change in loadbalancing decisions for a
# connection for some time. Endpoints may need to be recreated to restore
# connectivity.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: "{{ .Tunnel }}"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: "{{ .ClusterName }}"
  # DNS response code for rejecting DNS requests;
  # available options are "nameError" and "refused"
tofqdns-dns-reject-response-code: "{{ .ToFqdnsDNSRejectResponseCode }}"
# This option is disabled by default starting from version 1.4.x in favor
  # of a more powerful DNS proxy-based implementation; see [0] for details.
# Enable this option if you want to use FQDN policies but do not want to use
# the DNS proxy.
#
# To ease upgrade, users may opt to set this option to "true".
# Otherwise please refer to the Upgrade Guide [1] which explains how to
# prepare policy rules for upgrade.
#
# [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
# [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
tofqdns-enable-poller: "{{- if .ToFqdnsEnablePoller -}}true{{- else -}}false{{- end -}}"
  # wait-bpf-mount makes the init container wait until the bpf filesystem is mounted
wait-bpf-mount: "false"
# Enable fetching of container-runtime specific metadata
#
# By default, the Kubernetes pod and namespace labels are retrieved and
# associated with endpoints for identification purposes. By integrating
# with the container runtime, container runtime specific labels can be
  # retrieved; such labels will be prefixed with container:
  #
  # CAUTION: The container runtime labels can include information such as pod
  # annotations, which may result in each pod being associated with a unique set
  # of labels, which can result in excessive security identities being allocated.
# Please review the labels filter when enabling container runtime labels.
#
# Supported values:
# - containerd
# - crio
# - docker
# - none
# - auto (automatically detect the container runtime)
#
container-runtime: "{{ .ContainerRuntimeLabels }}"
masquerade: "{{- if .DisableMasquerade -}}false{{- else -}}true{{- end -}}"
install-iptables-rules: "{{- if .IPTablesRulesNoinstall -}}false{{- else -}}true{{- end -}}"
auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}"
enable-node-port: "{{ .EnableNodePort }}"
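  # "strict" makes Cilium fully replace kube-proxy and refuse to start if the
  # kernel lacks the required BPF features; "partial" enables only the
  # individual features configured above.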
kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}"
enable-remote-node-identity: "{{ .EnableRemoteNodeIdentity -}}"
{{ with .Ipam }}
ipam: {{ . }}
{{ if eq . "eni" }}
enable-endpoint-routes: "true"
auto-create-cilium-node-resource: "true"
blacklist-conflicting-routes: "false"
{{ end }}
{{ end }}
{{ if WithDefaultBool .Hubble.Enabled false }}
# Enable Hubble gRPC service.
enable-hubble: "true"
# UNIX domain socket for Hubble server to listen to.
hubble-socket-path: "/var/run/cilium/hubble.sock"
{{ if .Hubble.Metrics }}
hubble-metrics-server: ":9091"
hubble-metrics:
{{- range .Hubble.Metrics }}
{{ . }}
{{- end }}
{{ end }}
{{ end }}
{{ end }} # With .Networking.Cilium end
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- nodes
- endpoints
- componentstatuses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- create
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumidentities
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
  # to automatically delete [core|kube]dns pods so that they start being
  # managed by Cilium
- pods
verbs:
- get
- list
- watch
- delete
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
  # to automatically read from k8s and import the node's pod CIDR into cilium's
  # etcd so all nodes know how to reach pods running on other nodes.
- nodes
# to perform the translation of a CNP that contains ` + "`" + `ToGroup` + "`" + ` to its endpoints
- services
- endpoints
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumidentities
- ciliumidentities/status
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: cilium-operator
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
name: cilium
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
template:
metadata:
annotations:
        # This annotation plus the CriticalAddonsOnly toleration marks
        # cilium as a critical pod in the cluster, which ensures cilium
        # gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- cilium
topologyKey: kubernetes.io/hostname
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_FLANNEL_MASTER_DEVICE
valueFrom:
configMapKeyRef:
key: flannel-master-device
name: cilium-config
optional: true
- name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
valueFrom:
configMapKeyRef:
key: flannel-uninstall-on-exit
name: cilium-config
optional: true
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: CILIUM_CNI_CHAINING_MODE
valueFrom:
configMapKeyRef:
key: cni-chaining-mode
name: cilium-config
optional: true
- name: CILIUM_CUSTOM_CNI_CONF
valueFrom:
configMapKeyRef:
key: custom-cni-conf
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "{{.MasterInternalName}}"
- name: KUBERNETES_SERVICE_PORT
value: "443"
{{ with .Networking.Cilium.EnablePolicy }}
- name: CILIUM_ENABLE_POLICY
value: {{ . }}
{{ end }}
{{ with .Networking.Cilium }}
image: "docker.io/cilium/cilium:{{ .Version }}"
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
httpGet:
host: '127.0.0.1'
path: /healthz
port: 9876
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 10
          # The initial delay for the liveness probe is intentionally large to
          # avoid an endless kill & restart cycle in the event that the initial
          # bootstrapping takes longer than expected.
initialDelaySeconds: 120
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
{{ if or .EnablePrometheusMetrics .Hubble.Metrics }}
ports:
{{ if .EnablePrometheusMetrics }}
- containerPort: {{ .AgentPrometheusPort }}
name: prometheus
protocol: TCP
{{ end }}
{{- if .Hubble.Metrics }}
- containerPort: 9091
hostPort: 9091
name: hubble-metrics
protocol: TCP
{{- end }}
{{ end }}
readinessProbe:
httpGet:
host: '127.0.0.1'
path: /healthz
port: 9876
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
mountPropagation: HostToContainer
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
{{ if .EtcdManaged }}
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: /var/lib/etcd-secrets
name: etcd-secrets
readOnly: true
{{ end }}
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
# Needed to be able to load kernel modules
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
{{ if CiliumSecret }}
- mountPath: /etc/ipsec
name: cilium-ipsec-secrets
{{ end }}
hostNetwork: true
initContainers:
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: CILIUM_WAIT_BPF_MOUNT
valueFrom:
configMapKeyRef:
key: wait-bpf-mount
name: cilium-config
optional: true
image: "docker.io/cilium/cilium:{{ .Version }}"
## end of ` + "`" + `with .Networking.Cilium` + "`" + `
#{{ end }}
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
memory: 100Mi
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
# To keep state between restarts / upgrades for bpf maps
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
# To install cilium cni plugin in the host
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
# To install cilium cni configuration in the host
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
# To be able to load kernel modules
- hostPath:
path: /lib/modules
name: lib-modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
# To read the clustermesh configuration
{{- if .Networking.Cilium.EtcdManaged }}
# To read the etcd config stored in config maps
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
# To read the Cilium etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: /etc/kubernetes/pki/cilium
type: Directory
{{- end }}
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
# To read the configuration from the config map
- configMap:
name: cilium-config
name: cilium-config-path
{{ if CiliumSecret }}
- name: cilium-ipsec-secrets
secret:
secretName: cilium-ipsec-keys
{{ end }}
updateStrategy:
type: OnDelete
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
name: cilium-operator
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
command:
- cilium-operator
env:
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: cilium-aws
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: cilium-aws
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
key: AWS_DEFAULT_REGION
name: cilium-aws
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "{{.MasterInternalName}}"
- name: KUBERNETES_SERVICE_PORT
value: "443"
{{ with .Networking.Cilium }}
image: "docker.io/cilium/operator:{{ .Version }}"
imagePullPolicy: IfNotPresent
name: cilium-operator
{{ if .EnablePrometheusMetrics }}
ports:
- containerPort: 6942
hostPort: 6942
name: prometheus
protocol: TCP
{{ end }}
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
{{- if .EtcdManaged }}
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: /var/lib/etcd-secrets
name: etcd-secrets
readOnly: true
{{- end }}
hostNetwork: true
priorityClassName: system-cluster-critical
restartPolicy: Always
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
volumes:
# To read the configuration from the config map
- configMap:
name: cilium-config
name: cilium-config-path
{{- if .EtcdManaged }}
# To read the etcd config stored in config maps
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: /etc/kubernetes/pki/cilium
type: Directory
{{- end }}
{{ if eq .Ipam "eni" }}
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
{{ end }}
{{ end }}
{{ if WithDefaultBool .Networking.Cilium.Hubble.Enabled false }}
---
# Source: cilium/charts/hubble-relay/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-relay
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
k8s-app: hubble-relay
spec:
type: ClusterIP
selector:
k8s-app: hubble-relay
ports:
- protocol: TCP
port: 80
targetPort: 4245
---
# Source: cilium/charts/hubble-relay/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hubble-relay
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
k8s-app: hubble-relay
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-relay
template:
metadata:
labels:
k8s-app: hubble-relay
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "k8s-app"
operator: In
values:
- cilium
topologyKey: "kubernetes.io/hostname"
containers:
- name: hubble-relay
image: "docker.io/cilium/hubble-relay:{{ .Networking.Cilium.Version }}"
imagePullPolicy: IfNotPresent
command:
- "hubble-relay"
args:
- "serve"
- "--peer-service=unix:///var/run/cilium/hubble.sock"
- "--listen-address=:4245"
ports:
- name: grpc
containerPort: 4245
readinessProbe:
tcpSocket:
port: grpc
livenessProbe:
tcpSocket:
port: grpc
volumeMounts:
- mountPath: /var/run/cilium
name: hubble-sock-dir
readOnly: true
restartPolicy: Always
terminationGracePeriodSeconds: 0
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /var/run/cilium
type: Directory
name: hubble-sock-dir
{{ end }}`)
func cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate = []byte(`{{- if CiliumSecret }}
apiVersion: v1
kind: Secret
metadata:
name: cilium-ipsec-keys
namespace: kube-system
stringData:
{{ CiliumSecret }}
---
{{- end }}
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
data:
{{ with .Networking.Cilium }}
{{- if .EtcdManaged }}
kvstore: etcd
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
etcd-config: |-
---
endpoints:
- https://{{ $.MasterInternalName }}:4003
trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt'
key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key'
cert-file: '/var/lib/etcd-secrets/etcd-client-cilium.crt'
{{ end }}
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# kubectl get ciliumid
# - "kvstore" stores identities in a kvstore, etcd or consul, that is
# configured below. Cilium versions before 1.6 supported only the kvstore
# backend. Upgrades from these older cilium versions should continue using
# the kvstore by commenting out the identity-allocation-mode below, or
# setting it to "kvstore".
identity-allocation-mode: crd
  # If you want to run cilium in debug mode, change this value to true
debug: "{{ .Debug }}"
{{ if .EnablePrometheusMetrics }}
  # If you want metrics enabled in all of your Cilium agents, set the port on
  # which the Cilium agents will expose their metrics.
# This option deprecates the "prometheus-serve-addr" in the
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
prometheus-serve-addr: ":{{ .AgentPrometheusPort }}"
{{ end }}
{{ if .EnableEncryption }}
enable-ipsec: "true"
ipsec-key-file: /etc/ipsec/keys
{{ end }}
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
enable-ipv4: "true"
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
enable-ipv6: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the less packets
# that will be seen in monitor output.
monitor-aggregation: "{{ .MonitorAggregation }}"
# ct-global-max-entries-* specifies the maximum number of connections
# supported across all endpoints, split by protocol: tcp or other. One pair
# of maps uses these values for IPv4 connections, and another pair of maps
# use these values for IPv6 connections.
#
# If these values are modified, then during the next Cilium startup the
# tracking of ongoing connections may be disrupted. This may lead to brief
# policy drops or a change in loadbalancing decisions for a connection.
#
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
# during the upgrade process, comment out these options.
bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}"
bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# This may lead to policy drops or a change in loadbalancing decisions for a
# connection for some time. Endpoints may need to be recreated to restore
# connectivity.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: "{{ .Tunnel }}"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: "{{ .ClusterName }}"
  # DNS response code for rejecting DNS requests;
  # available options are "nameError" and "refused"
tofqdns-dns-reject-response-code: "{{ .ToFqdnsDNSRejectResponseCode }}"
# This option is disabled by default starting from version 1.4.x in favor
  # of a more powerful DNS proxy-based implementation; see [0] for details.
# Enable this option if you want to use FQDN policies but do not want to use
# the DNS proxy.
#
# To ease upgrade, users may opt to set this option to "true".
# Otherwise please refer to the Upgrade Guide [1] which explains how to
# prepare policy rules for upgrade.
#
# [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
# [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
tofqdns-enable-poller: "{{- if .ToFqdnsEnablePoller -}}true{{- else -}}false{{- end -}}"
  # wait-bpf-mount makes the init container wait until the bpf filesystem is mounted
wait-bpf-mount: "false"
# Enable fetching of container-runtime specific metadata
#
# By default, the Kubernetes pod and namespace labels are retrieved and
# associated with endpoints for identification purposes. By integrating
# with the container runtime, container runtime specific labels can be
  # retrieved; such labels will be prefixed with container:
  #
  # CAUTION: The container runtime labels can include information such as pod
  # annotations, which may result in each pod being associated with a unique set
  # of labels, which can result in excessive security identities being allocated.
# Please review the labels filter when enabling container runtime labels.
#
# Supported values:
# - containerd
# - crio
# - docker
# - none
# - auto (automatically detect the container runtime)
#
container-runtime: "{{ .ContainerRuntimeLabels }}"
masquerade: "{{- if .DisableMasquerade -}}false{{- else -}}true{{- end -}}"
install-iptables-rules: "{{- if .IPTablesRulesNoinstall -}}false{{- else -}}true{{- end -}}"
auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}"
enable-node-port: "{{ .EnableNodePort }}"
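  # "strict" makes Cilium fully replace kube-proxy and refuse to start if the
  # kernel lacks the required BPF features; "partial" enables only the
  # individual features configured above.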
kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}"
enable-remote-node-identity: "{{- if .EnableRemoteNodeIdentity -}}true{{- else -}}false{{- end -}}"
{{ with .Ipam }}
ipam: {{ . }}
{{ if eq . "eni" }}
enable-endpoint-routes: "true"
auto-create-cilium-node-resource: "true"
blacklist-conflicting-routes: "false"
{{ else if eq . "hostscope" }}
k8s-require-ipv4-pod-cidr: "true"
{{ end }}
{{ end }}
{{ end }} # With .Networking.Cilium end
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- nodes
- endpoints
- componentstatuses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- create
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumidentities
- ciliumidentities/status
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
  # to automatically delete [core|kube]dns pods so that they start being
  # managed by Cilium
- pods
verbs:
- get
- list
- watch
- delete
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
  # to automatically read from k8s and import the node's pod CIDR into cilium's
  # etcd so all nodes know how to reach pods running on other nodes.
- nodes
# to perform the translation of a CNP that contains ` + "`" + `ToGroup` + "`" + ` to its endpoints
- services
- endpoints
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumidentities
- ciliumidentities/status
verbs:
- '*'
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: cilium-operator
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
role.kubernetes.io/networking: "1"
name: cilium
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
template:
metadata:
annotations:
        # This annotation plus the CriticalAddonsOnly toleration marks
        # cilium as a critical pod in the cluster, which ensures cilium
        # gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
spec:
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_FLANNEL_MASTER_DEVICE
valueFrom:
configMapKeyRef:
key: flannel-master-device
name: cilium-config
optional: true
- name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
valueFrom:
configMapKeyRef:
key: flannel-uninstall-on-exit
name: cilium-config
optional: true
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: CILIUM_CNI_CHAINING_MODE
valueFrom:
configMapKeyRef:
key: cni-chaining-mode
name: cilium-config
optional: true
- name: CILIUM_CUSTOM_CNI_CONF
valueFrom:
configMapKeyRef:
key: custom-cni-conf
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "{{.MasterInternalName}}"
- name: KUBERNETES_SERVICE_PORT
value: "443"
{{ with .Networking.Cilium.EnablePolicy }}
- name: CILIUM_ENABLE_POLICY
value: {{ . }}
{{ end }}
{{ with .Networking.Cilium }}
image: "docker.io/cilium/cilium:{{ .Version }}"
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
exec:
command:
- cilium
- status
- --brief
failureThreshold: 10
          # The initial delay for the liveness probe is intentionally large to
          # avoid an endless kill & restart cycle in the event that the initial
          # bootstrapping takes longer than expected.
initialDelaySeconds: 120
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
{{ if .EnablePrometheusMetrics }}
ports:
- containerPort: {{ .AgentPrometheusPort }}
hostPort: {{ .AgentPrometheusPort }}
name: prometheus
protocol: TCP
{{ end }}
readinessProbe:
exec:
command:
- cilium
- status
- --brief
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
mountPropagation: HostToContainer
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
{{ if .EtcdManaged }}
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: /var/lib/etcd-secrets
name: etcd-secrets
readOnly: true
{{ end }}
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
# Needed to be able to load kernel modules
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
{{ if CiliumSecret }}
- mountPath: /etc/ipsec
name: cilium-ipsec-secrets
{{ end }}
hostNetwork: true
initContainers:
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: CILIUM_WAIT_BPF_MOUNT
valueFrom:
configMapKeyRef:
key: wait-bpf-mount
name: cilium-config
optional: true
image: "docker.io/cilium/cilium:{{ .Version }}"
## end of ` + "`" + `with .Networking.Cilium` + "`" + `
#{{ end }}
imagePullPolicy: IfNotPresent
name: clean-cilium-state
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
# To keep state between restarts / upgrades for bpf maps
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
# To install cilium cni plugin in the host
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
# To install cilium cni configuration in the host
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
# To be able to load kernel modules
- hostPath:
path: /lib/modules
name: lib-modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
# To read the clustermesh configuration
{{- if .Networking.Cilium.EtcdManaged }}
# To read the etcd config stored in config maps
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
# To read the Cilium etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: /etc/kubernetes/pki/cilium
type: Directory
{{- end }}
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
# To read the configuration from the config map
- configMap:
name: cilium-config
name: cilium-config-path
{{ if CiliumSecret }}
- name: cilium-ipsec-secrets
secret:
secretName: cilium-ipsec-keys
{{ end }}
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
role.kubernetes.io/networking: "1"
name: cilium-operator
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- args:
- --debug=$(CILIUM_DEBUG)
- --identity-allocation-mode=$(CILIUM_IDENTITY_ALLOCATION_MODE)
{{ with .Networking.Cilium }}
{{ if .EnablePrometheusMetrics }}
- --enable-metrics
{{ end }}
{{ end }}
command:
- cilium-operator
env:
- name: CILIUM_IDENTITY_ALLOCATION_MODE
valueFrom:
configMapKeyRef:
key: identity-allocation-mode
name: cilium-config
optional: true
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_NAME
valueFrom:
configMapKeyRef:
key: cluster-name
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_ID
valueFrom:
configMapKeyRef:
key: cluster-id
name: cilium-config
optional: true
- name: CILIUM_IPAM
valueFrom:
configMapKeyRef:
key: ipam
name: cilium-config
optional: true
- name: CILIUM_DISABLE_ENDPOINT_CRD
valueFrom:
configMapKeyRef:
key: disable-endpoint-crd
name: cilium-config
optional: true
- name: CILIUM_KVSTORE
valueFrom:
configMapKeyRef:
key: kvstore
name: cilium-config
optional: true
- name: CILIUM_KVSTORE_OPT
valueFrom:
configMapKeyRef:
key: kvstore-opt
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: cilium-aws
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: cilium-aws
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
key: AWS_DEFAULT_REGION
name: cilium-aws
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "{{.MasterInternalName}}"
- name: KUBERNETES_SERVICE_PORT
value: "443"
{{ with .Networking.Cilium }}
image: "docker.io/cilium/operator:{{ .Version }}"
imagePullPolicy: IfNotPresent
name: cilium-operator
{{ if .EnablePrometheusMetrics }}
ports:
- containerPort: 6942
hostPort: 6942
name: prometheus
protocol: TCP
{{ end }}
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
{{- if .EtcdManaged }}
volumeMounts:
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: /var/lib/etcd-secrets
name: etcd-secrets
readOnly: true
{{- end }}
hostNetwork: true
priorityClassName: system-cluster-critical
restartPolicy: Always
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
{{- if .EtcdManaged }}
volumes:
# To read the etcd config stored in config maps
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
      # To read the Cilium etcd secrets in case TLS is enabled
- name: etcd-secrets
hostPath:
path: /etc/kubernetes/pki/cilium
type: Directory
{{- end }}
{{ if eq .Ipam "eni" }}
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
{{ end }}
{{ end }}
`)
func cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
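// A minimal usage sketch for the asset accessors above, assuming the standard
// go-bindata lookup helper (Asset) that this generated package defines
// elsewhere in the file; the asset name is one registered in this file:
//
//    data, err := Asset("cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template")
//    if err != nil {
//        // the named asset is not registered in this package
//    }
//    _ = data // raw Go-template bytes, rendered later against the cluster spec
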
var _cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate = []byte(`# Pulled and modified from: https://raw.githubusercontent.com/coreos/flannel/v0.13.0/Documentation/kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/dev/net"
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
k8s-app: flannel
tier: node
app: flannel
role.kubernetes.io/networking: "1"
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
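  # For illustration only: with kops defaults, net-conf.json below typically renders to
  #   {"Network": "100.64.0.0/10", "Backend": {"Type": "vxlan"}}
  # (actual values come from the cluster spec).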
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "{{ FlannelBackendType }}"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
k8s-app: flannel
tier: node
app: flannel
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
tier: node
app: flannel
role.kubernetes.io/networking: "1"
template:
metadata:
labels:
tier: node
app: flannel
role.kubernetes.io/networking: "1"
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.13.0
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.13.0
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
- --iptables-resync={{- or .Networking.Flannel.IptablesResyncSeconds "5" }}
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: dev-net
mountPath: /dev/net
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: dev-net
hostPath:
path: /dev/net
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
`)
func cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml = []byte(`apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kopeio-networking-agent
namespace: kube-system
labels:
k8s-addon: networking.kope.io
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
name: kopeio-networking-agent
role.kubernetes.io/networking: "1"
template:
metadata:
labels:
name: kopeio-networking-agent
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
hostPID: true
hostIPC: true
hostNetwork: true
containers:
- resources:
requests:
cpu: 50m
memory: 100Mi
limits:
memory: 100Mi
securityContext:
privileged: true
image: kopeio/networking-agent:1.0.20181028
name: networking-agent
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
serviceAccountName: kopeio-networking-agent
priorityClassName: system-node-critical
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kopeio-networking-agent
namespace: kube-system
labels:
k8s-addon: networking.kope.io
role.kubernetes.io/networking: "1"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
k8s-addon: networking.kope.io
name: kopeio:networking-agent
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: networking.kope.io
name: kopeio:networking-agent
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kopeio:networking-agent
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kopeio-networking-agent
`)
func cloudupResourcesAddonsNetworkingKopeIoK8s112YamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml, nil
}
func cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingKopeIoK8s112YamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate = []byte(`# Pulled and modified from https://raw.githubusercontent.com/cloudnativelabs/kube-router/v1.0.1/daemonset/kubeadm-kuberouter.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-router-cfg
namespace: kube-system
labels:
tier: node
k8s-app: kube-router
data:
cni-conf.json: |
{
"cniVersion":"0.3.0",
"name":"mynet",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
},
{
"type": "portmap",
"capabilities": {
"snat": true,
"portMappings": true
}
}
]
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: kube-router
tier: node
name: kube-router
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-router
tier: node
template:
metadata:
labels:
k8s-app: kube-router
tier: node
spec:
priorityClassName: system-node-critical
serviceAccountName: kube-router
containers:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router:v1.0.1
args:
- --run-router=true
- --run-firewall=true
- --run-service-proxy=true
- --bgp-graceful-restart=true
- --kubeconfig=/var/lib/kube-router/kubeconfig
- --metrics-port=12013
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_ROUTER_CNI_CONF_FILE
value: /etc/cni/net.d/10-kuberouter.conflist
livenessProbe:
httpGet:
path: /healthz
port: 20244
initialDelaySeconds: 10
periodSeconds: 3
resources:
requests:
cpu: 100m
memory: 250Mi
securityContext:
privileged: true
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kubeconfig
mountPath: /var/lib/kube-router/kubeconfig
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
initContainers:
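      # Installs the CNI config from the kube-router-cfg ConfigMap and removes any
      # stale .conf files so the kubelet picks up 10-kuberouter.conflist.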
- name: install-cni
image: docker.io/cloudnativelabs/kube-router:v1.0.1
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
            rm -f /etc/cni/net.d/*.conf;
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
fi
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni-conf-dir
- mountPath: /etc/kube-router
name: kube-router-cfg
hostNetwork: true
tolerations:
- operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kube-router-cfg
configMap:
name: kube-router-cfg
- name: kubeconfig
hostPath:
path: /var/lib/kube-router/kubeconfig
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-router
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-router
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
- services
- nodes
- endpoints
verbs:
- list
- get
- watch
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
- list
- get
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-router
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-router
subjects:
- kind: ServiceAccount
name: kube-router
namespace: kube-system
- kind: User
name: system:kube-router
`)
func cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.9/manifests/calico-typha.yaml
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
data:
# You must set a non-zero value for Typha replicas below.
typha_service_name: "{{- if .Networking.Calico.TyphaReplicas -}}calico-typha{{- else -}}none{{- end -}}"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use
{{- if .Networking.Calico.MTU }}
veth_mtu: "{{ .Networking.Calico.MTU }}"
{{- else }}
veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}"
{{- end }}
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
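  # The install-cni init container on each node substitutes __KUBERNETES_NODE_NAME__,
  # __CNI_MTU__, and __KUBECONFIG_FILEPATH__ in the config above when the pod starts.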
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
labels:
role.kubernetes.io/networking: "1"
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
labels:
role.kubernetes.io/networking: "1"
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
{{ if .Networking.Calico.TyphaReplicas -}}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
selector:
k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the calico-config ConfigMap above.
#
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
  # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
  # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrades.
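  # For example, at one replica per 100-200 nodes, a 300-node cluster would run 2 or 3 replicas.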
replicas: {{ or .Networking.Calico.TyphaReplicas "0" }}
revisionHistoryLimit: 2
selector:
matchLabels:
k8s-app: calico-typha
template:
metadata:
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
annotations:
# This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
# add-on, ensuring it gets priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
nodeSelector:
beta.kubernetes.io/os: linux
      # Since Calico can't network a pod until Typha is up, we need to run Typha itself
      # as a host-networked pod.
      hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-node
priorityClassName: system-cluster-critical
containers:
- image: calico/typha:v3.9.6
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
env:
# Enable "info" logging by default. Can be set to "debug" to increase verbosity.
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
# Disable logging to file and syslog since those don't make sense in Kubernetes.
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
# Monitor the Kubernetes API to find the number of running instances and rebalance
# connections.
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
          # These settings control Prometheus metrics. Since Typha is host-networked,
          # enabling metrics opens a port on the host, which may need to be secured.
- name: TYPHA_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.TyphaPrometheusMetricsEnabled "false" }}"
- name: TYPHA_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.TyphaPrometheusMetricsPort "9093" }}"
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
---
# This manifest creates a Pod Disruption Budget for Typha so that the K8s Cluster Autoscaler can evict Typha pods safely.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
{{- end -}}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: calico/cni:v3.9.6
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.9.6
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.9.6
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.9.6
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: calico-config
key: typha_service_name
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
# was value: "k8s,bgp"
value: "kops,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "{{- or .Networking.Calico.IPv4AutoDetectionMethod "first-found" }}"
- name: IP6_AUTODETECTION_METHOD
value: "{{- or .Networking.Calico.IPv6AutoDetectionMethod "first-found" }}"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}CrossSubnet{{- else -}} {{- or .Networking.Calico.IPIPMode "Always" -}} {{- end -}}"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within ` + "`" + `--cluster-cidr` + "`" + `.
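          # For illustration: with kops defaults this typically renders to 100.96.0.0/11.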
- name: CALICO_IPV4POOL_CIDR
value: "{{ .KubeControllerManager.ClusterCIDR }}"
# Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to the desired level
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}"
- name: FELIX_HEALTHENABLED
value: "true"
# kops additions
# Set Felix iptables binary variant, Legacy or NFT
- name: FELIX_IPTABLESBACKEND
value: "{{- or .Networking.Calico.IptablesBackend "Legacy" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
requests:
cpu: {{ or .Networking.Calico.CPURequest "100m" }}
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
- -bird-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.9.6
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the k8s-ec2-srcdst container, which disables src/dst IP
# checks so that BGP routing works between Calico hosts within the same subnet.
# This only applies to AWS environments.
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
name: k8s-ec2-srcdst
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.2
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs"
type: Directory
nodeSelector:
node-role.kubernetes.io/master: ""
{{- end -}}
`)
func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.17/manifests/calico-typha.yaml
{{- if .Networking.Calico.BPFEnabled }}
---
# Set these to the IP and port of your API server; in BPF mode, we need to connect directly to the
# API server because we take over kube-proxy's role.
kind: ConfigMap
apiVersion: v1
metadata:
name: kubernetes-services-endpoint
namespace: kube-system
data:
KUBERNETES_SERVICE_HOST: "{{ .MasterInternalName }}"
KUBERNETES_SERVICE_PORT: "443"
{{- end }}
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
data:
# You must set a non-zero value for Typha replicas below.
typha_service_name: "{{- if .Networking.Calico.TyphaReplicas -}}calico-typha{{- else -}}none{{- end -}}"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
{{- if .Networking.Calico.MTU }}
veth_mtu: "{{ .Networking.Calico.MTU }}"
{{- else }}
veth_mtu: "0"
{{- end }}
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: BGPConfiguration
listKind: BGPConfigurationList
plural: bgpconfigurations
singular: bgpconfiguration
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: BGPConfiguration contains the configuration for any BGP routing.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BGPConfigurationSpec contains the values of the BGP configuration.
properties:
asNumber:
description: 'ASNumber is the default AS number used by a node. [Default:
64512]'
format: int32
type: integer
communities:
description: Communities is a list of BGP community values and their
arbitrary names for tagging routes.
items:
description: Community contains standard or large community value
and its name.
properties:
name:
description: Name given to community value.
type: string
value:
description: Value must be of format ` + "`" + `aa:nn` + "`" + ` or ` + "`" + `aa:nn:mm` + "`" + `.
For standard community use ` + "`" + `aa:nn` + "`" + ` format, where ` + "`" + `aa` + "`" + ` and
` + "`" + `nn` + "`" + ` are 16 bit number. For large community use ` + "`" + `aa:nn:mm` + "`" + `
format, where ` + "`" + `aa` + "`" + `, ` + "`" + `nn` + "`" + ` and ` + "`" + `mm` + "`" + ` are 32 bit number. Where,
` + "`" + `aa` + "`" + ` is an AS Number, ` + "`" + `nn` + "`" + ` and ` + "`" + `mm` + "`" + ` are per-AS identifier.
pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$
type: string
type: object
type: array
listenPort:
description: ListenPort is the port where BGP protocol should listen.
Defaults to 179
maximum: 65535
minimum: 1
type: integer
logSeverityScreen:
description: 'LogSeverityScreen is the log severity above which logs
are sent to the stdout. [Default: INFO]'
type: string
nodeToNodeMeshEnabled:
description: 'NodeToNodeMeshEnabled sets whether full node to node
BGP mesh is enabled. [Default: true]'
type: boolean
prefixAdvertisements:
description: PrefixAdvertisements contains per-prefix advertisement
configuration.
items:
description: PrefixAdvertisement configures advertisement properties
for the specified CIDR.
properties:
cidr:
description: CIDR for which properties should be advertised.
type: string
communities:
description: Communities can be list of either community names
already defined in ` + "`" + `Specs.Communities` + "`" + ` or community value
of format ` + "`" + `aa:nn` + "`" + ` or ` + "`" + `aa:nn:mm` + "`" + `. For standard community use
` + "`" + `aa:nn` + "`" + ` format, where ` + "`" + `aa` + "`" + ` and ` + "`" + `nn` + "`" + ` are 16 bit number. For
large community use ` + "`" + `aa:nn:mm` + "`" + ` format, where ` + "`" + `aa` + "`" + `, ` + "`" + `nn` + "`" + ` and
` + "`" + `mm` + "`" + ` are 32 bit number. Where,` + "`" + `aa` + "`" + ` is an AS Number, ` + "`" + `nn` + "`" + ` and
` + "`" + `mm` + "`" + ` are per-AS identifier.
items:
type: string
type: array
type: object
type: array
serviceClusterIPs:
description: ServiceClusterIPs are the CIDR blocks from which service
cluster IPs are allocated. If specified, Calico will advertise these
blocks, as well as any cluster IPs within them.
items:
description: ServiceClusterIPBlock represents a single allowed ClusterIP
CIDR block.
properties:
cidr:
type: string
type: object
type: array
serviceExternalIPs:
description: ServiceExternalIPs are the CIDR blocks for Kubernetes
Service External IPs. Kubernetes Service ExternalIPs will only be
advertised if they are within one of these blocks.
items:
description: ServiceExternalIPBlock represents a single allowed
External IP CIDR block.
properties:
cidr:
type: string
type: object
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: BGPPeer
listKind: BGPPeerList
plural: bgppeers
singular: bgppeer
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BGPPeerSpec contains the specification for a BGPPeer resource.
properties:
asNumber:
description: The AS Number of the peer.
format: int32
type: integer
keepOriginalNextHop:
default: false
description: Option to keep the original nexthop field when routes
are sent to a BGP Peer. Setting "true" configures the selected BGP
Peers node to use the "next hop keep;" instead of "next hop self;"(default)
in the specific branch of the Node on "bird.cfg".
type: boolean
node:
description: The node name identifying the Calico node instance that
is targeted by this peer. If this is not set, and no nodeSelector
is specified, then this BGP peer selects all nodes in the cluster.
type: string
nodeSelector:
description: Selector for the nodes that should have this peering. When
this is set, the Node field must be empty.
type: string
password:
description: Optional BGP password for the peerings generated by this
BGPPeer resource.
properties:
secretKeyRef:
description: Selects a key of a secret in the node pod's namespace.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
type: object
peerIP:
description: The IP address of the peer followed by an optional port
number to peer with. If port number is given, format should be ` + "`" + `[<IPv6>]:port` + "`" + `
or ` + "`" + `<IPv4>:<port>` + "`" + ` for IPv4. If optional port number is not set,
and this peer IP and ASNumber belongs to a calico/node with ListenPort
set in BGPConfiguration, then we use that port to peer.
type: string
peerSelector:
description: Selector for the remote nodes to peer with. When this
is set, the PeerIP and ASNumber fields must be empty. For each
peering between the local node and selected remote nodes, we configure
an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified,
and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The
remote AS number comes from the remote node's NodeBGPSpec.ASNumber,
or the global default if that is not set.
type: string
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: BlockAffinity
listKind: BlockAffinityList
plural: blockaffinities
singular: blockaffinity
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BlockAffinitySpec contains the specification for a BlockAffinity
resource.
properties:
cidr:
type: string
deleted:
description: Deleted indicates that this block affinity is being deleted.
This field is a string for compatibility with older releases that
mistakenly treat this field as a string.
type: string
node:
type: string
state:
type: string
required:
- cidr
- deleted
- node
- state
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: ClusterInformation
listKind: ClusterInformationList
plural: clusterinformations
singular: clusterinformation
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ClusterInformation contains the cluster specific information.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ClusterInformationSpec contains the values of describing
the cluster.
properties:
calicoVersion:
description: CalicoVersion is the version of Calico that the cluster
is running
type: string
clusterGUID:
description: ClusterGUID is the GUID of the cluster
type: string
clusterType:
description: ClusterType describes the type of the cluster
type: string
datastoreReady:
description: DatastoreReady is used during significant datastore migrations
to signal to components such as Felix that it should wait before
accessing the datastore.
type: boolean
variant:
description: Variant declares which variant of Calico should be active.
type: string
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: FelixConfiguration
listKind: FelixConfigurationList
plural: felixconfigurations
singular: felixconfiguration
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: Felix Configuration contains the configuration for Felix.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: FelixConfigurationSpec contains the values of the Felix configuration.
properties:
allowIPIPPacketsFromWorkloads:
description: 'AllowIPIPPacketsFromWorkloads controls whether Felix
will add a rule to drop IPIP encapsulated traffic from workloads
[Default: false]'
type: boolean
allowVXLANPacketsFromWorkloads:
description: 'AllowVXLANPacketsFromWorkloads controls whether Felix
will add a rule to drop VXLAN encapsulated traffic from workloads
[Default: false]'
type: boolean
awsSrcDstCheck:
description: 'Set source-destination-check on AWS EC2 instances. Accepted
value must be one of "DoNothing", "Enabled" or "Disabled". [Default:
DoNothing]'
enum:
- DoNothing
- Enable
- Disable
type: string
bpfConnectTimeLoadBalancingEnabled:
description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode,
controls whether Felix installs the connection-time load balancer. The
connect-time load balancer is required for the host to be able to
reach Kubernetes services and it improves the performance of pod-to-service
connections. The only reason to disable it is for debugging purposes. [Default:
true]'
type: boolean
bpfDataIfacePattern:
description: BPFDataIfacePattern is a regular expression that controls
which interfaces Felix should attach BPF programs to in order to
catch traffic to/from the network. This needs to match the interfaces
that Calico workload traffic flows over as well as any interfaces
that handle incoming traffic to nodeports and services from outside
the cluster. It should not match the workload interfaces (usually
named cali...).
type: string
bpfDisableUnprivileged:
description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled
sysctl to disable unprivileged use of BPF. This ensures that unprivileged
users cannot access Calico''s BPF maps and cannot insert their own
BPF programs to interfere with Calico''s. [Default: true]'
type: boolean
bpfEnabled:
                description: 'BPFEnabled, if true, Felix will use the BPF dataplane.
                  [Default: false]'
type: boolean
bpfExternalServiceMode:
description: 'BPFExternalServiceMode in BPF mode, controls how connections
from outside the cluster to services (node ports and cluster IPs)
are forwarded to remote workloads. If set to "Tunnel" then both
request and response traffic is tunneled to the remote node. If
set to "DSR", the request traffic is tunneled but the response traffic
is sent directly from the remote node. In "DSR" mode, the remote
node appears to use the IP of the ingress node; this requires a
permissive L2 network. [Default: Tunnel]'
type: string
bpfKubeProxyEndpointSlicesEnabled:
description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls
whether Felix's embedded kube-proxy accepts EndpointSlices or not.
type: boolean
bpfKubeProxyIptablesCleanupEnabled:
description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF
mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s
iptables chains. Should only be enabled if kube-proxy is not running. [Default:
true]'
type: boolean
bpfKubeProxyMinSyncPeriod:
description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the
minimum time between updates to the dataplane for Felix''s embedded
kube-proxy. Lower values give reduced set-up latency. Higher values
reduce Felix CPU usage by batching up more work. [Default: 1s]'
type: string
bpfLogLevel:
description: 'BPFLogLevel controls the log level of the BPF programs
when in BPF dataplane mode. One of "Off", "Info", or "Debug". The
logs are emitted to the BPF trace pipe, accessible with the command
` + "`" + `tc exec bpf debug` + "`" + `. [Default: Off].'
type: string
chainInsertMode:
description: 'ChainInsertMode controls whether Felix hooks the kernel''s
top-level iptables chains by inserting a rule at the top of the
chain or by appending a rule at the bottom. insert is the safe default
since it prevents Calico''s rules from being bypassed. If you switch
to append mode, be sure that the other rules in the chains signal
acceptance by falling through to the Calico rules, otherwise the
Calico policy will be bypassed. [Default: insert]'
type: string
dataplaneDriver:
type: string
debugDisableLogDropping:
type: boolean
debugMemoryProfilePath:
type: string
debugSimulateCalcGraphHangAfter:
type: string
debugSimulateDataplaneHangAfter:
type: string
defaultEndpointToHostAction:
description: 'DefaultEndpointToHostAction controls what happens to
traffic that goes from a workload endpoint to the host itself (after
the traffic hits the endpoint egress policy). By default Calico
blocks traffic from workload endpoints to the host itself with an
iptables “DROP” action. If you want to allow some or all traffic
from endpoint to host, set this parameter to RETURN or ACCEPT. Use
RETURN if you have your own rules in the iptables “INPUT” chain;
Calico will insert its rules at the top of that chain, then “RETURN”
packets to the “INPUT” chain once it has completed processing workload
endpoint egress policy. Use ACCEPT to unconditionally accept packets
from workloads after processing workload endpoint egress policy.
[Default: Drop]'
type: string
deviceRouteProtocol:
                description: This defines the route protocol added to programmed device
                  routes; by default this will be RTPROT_BOOT when left blank.
type: integer
deviceRouteSourceAddress:
description: This is the source address to use on programmed device
routes. By default the source address is left blank, leaving the
kernel to choose the source address used.
type: string
disableConntrackInvalidCheck:
type: boolean
endpointReportingDelay:
type: string
endpointReportingEnabled:
type: boolean
externalNodesList:
                description: ExternalNodesCIDRList is a list of CIDRs of external,
                  non-Calico nodes which may source tunnel traffic and have the tunneled
                  traffic be accepted at Calico nodes.
items:
type: string
type: array
failsafeInboundHostPorts:
description: 'FailsafeInboundHostPorts is a comma-delimited list of
UDP/TCP ports that Felix will allow incoming traffic to host endpoints
on irrespective of the security policy. This is useful to avoid
accidentally cutting off a host with incorrect configuration. Each
port should be specified as tcp:<port-number> or udp:<port-number>.
For back-compatibility, if the protocol is not specified, it defaults
to “tcp”. To disable all inbound host ports, use the value none.
                  The default value allows SSH access, DHCP, BGP, etcd and access
                  to the Kubernetes API server. [Default: tcp:22, udp:68, tcp:179,
                  tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]'
items:
                  description: ProtoPort is a combination of protocol and port; both
                    must be specified.
properties:
port:
type: integer
protocol:
type: string
required:
- port
- protocol
type: object
type: array
failsafeOutboundHostPorts:
description: 'FailsafeOutboundHostPorts is a comma-delimited list
of UDP/TCP ports that Felix will allow outgoing traffic from host
endpoints to irrespective of the security policy. This is useful
to avoid accidentally cutting off a host with incorrect configuration.
Each port should be specified as tcp:<port-number> or udp:<port-number>.
For back-compatibility, if the protocol is not specified, it defaults
to “tcp”. To disable all outbound host ports, use the value none.
                  The default value opens etcd''s standard ports to ensure that Felix
                  does not get cut off from etcd, as well as allowing DHCP, DNS, BGP
                  and access to the Kubernetes API server. [Default: tcp:179, tcp:2379,
                  tcp:2380, tcp:6443, tcp:6666, tcp:6667, udp:53, udp:67]'
items:
                  description: ProtoPort is a combination of protocol and port; both
                    must be specified.
properties:
port:
type: integer
protocol:
type: string
required:
- port
- protocol
type: object
type: array
featureDetectOverride:
                description: FeatureDetectOverride is used to override the feature
                  detection. Values are specified in a comma-separated list with no
                  spaces; for example, "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=".
                  "true" or "false" will force the feature on or off; empty or omitted
                  values are auto-detected.
type: string
genericXDPEnabled:
description: 'GenericXDPEnabled enables Generic XDP so network cards
that don''t support XDP offload or driver modes can use XDP. This
is not recommended since it doesn''t provide better performance
than iptables. [Default: false]'
type: boolean
healthEnabled:
type: boolean
healthHost:
type: string
healthPort:
type: integer
interfaceExclude:
description: 'InterfaceExclude is a comma-separated list of interfaces
that Felix should exclude when monitoring for host endpoints. The
default value ensures that Felix ignores Kubernetes'' IPVS dummy
interface, which is used internally by kube-proxy. If you want to
exclude multiple interface names using a single value, the list
supports regular expressions. For regular expressions you must wrap
the value with ''/''. For example having values ''/^kube/,veth1''
will exclude all interfaces that begin with ''kube'' and also the
interface ''veth1''. [Default: kube-ipvs0]'
type: string
interfacePrefix:
description: 'InterfacePrefix is the interface name prefix that identifies
workload endpoints and so distinguishes them from host endpoint
interfaces. Note: in environments other than bare metal, the orchestrators
configure this appropriately. For example our Kubernetes and Docker
integrations set the ''cali'' value, and our OpenStack integration
sets the ''tap'' value. [Default: cali]'
type: string
interfaceRefreshInterval:
description: InterfaceRefreshInterval is the period at which Felix
rescans local interfaces to verify their state. The rescan can be
disabled by setting the interval to 0.
type: string
ipipEnabled:
type: boolean
ipipMTU:
description: 'IPIPMTU is the MTU to set on the tunnel device. See
Configuring MTU [Default: 1440]'
type: integer
ipsetsRefreshInterval:
description: 'IpsetsRefreshInterval is the period at which Felix re-checks
all iptables state to ensure that no other process has accidentally
broken Calico''s rules. Set to 0 to disable iptables refresh. [Default:
90s]'
type: string
iptablesBackend:
description: IptablesBackend specifies which backend of iptables will
be used. The default is legacy.
type: string
iptablesFilterAllowAction:
type: string
iptablesLockFilePath:
description: 'IptablesLockFilePath is the location of the iptables
lock file. You may need to change this if the lock file is not in
its standard location (for example if you have mapped it into Felix''s
container at a different path). [Default: /run/xtables.lock]'
type: string
iptablesLockProbeInterval:
description: 'IptablesLockProbeInterval is the time that Felix will
wait between attempts to acquire the iptables lock if it is not
available. Lower values make Felix more responsive when the lock
is contended, but use more CPU. [Default: 50ms]'
type: string
iptablesLockTimeout:
description: 'IptablesLockTimeout is the time that Felix will wait
                  for the iptables lock, or 0 to disable it. To use this feature, Felix
must share the iptables lock file with all other processes that
also take the lock. When running Felix inside a container, this
requires the /run directory of the host to be mounted into the calico/node
or calico/felix container. [Default: 0s disabled]'
type: string
iptablesMangleAllowAction:
type: string
iptablesMarkMask:
description: 'IptablesMarkMask is the mask that Felix selects its
IPTables Mark bits from. Should be a 32 bit hexadecimal number with
at least 8 bits set, none of which clash with any other mark bits
in use on the system. [Default: 0xff000000]'
format: int32
type: integer
iptablesNATOutgoingInterfaceFilter:
type: string
iptablesPostWriteCheckInterval:
description: 'IptablesPostWriteCheckInterval is the period after Felix
has done a write to the dataplane that it schedules an extra read
back in order to check the write was not clobbered by another process.
This should only occur if another application on the system doesn''t
respect the iptables lock. [Default: 1s]'
type: string
iptablesRefreshInterval:
description: 'IptablesRefreshInterval is the period at which Felix
re-checks the IP sets in the dataplane to ensure that no other process
has accidentally broken Calico''s rules. Set to 0 to disable IP
sets refresh. Note: the default for this value is lower than the
other refresh intervals as a workaround for a Linux kernel bug that
was fixed in kernel version 4.11. If you are using v4.11 or greater
                  you may want to set this to a higher value to reduce Felix CPU
usage. [Default: 10s]'
type: string
ipv6Support:
type: boolean
kubeNodePortRanges:
                description: 'KubeNodePortRanges holds a list of port ranges used
                  for service node ports. Only used if Felix detects kube-proxy running
                  in ipvs mode. Felix uses these ranges to separate host and workload
                  traffic. [Default: 30000:32767].'
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
logFilePath:
description: 'LogFilePath is the full path to the Felix log. Set to
none to disable file logging. [Default: /var/log/calico/felix.log]'
type: string
logPrefix:
description: 'LogPrefix is the log prefix that Felix uses when rendering
LOG rules. [Default: calico-packet]'
type: string
logSeverityFile:
description: 'LogSeverityFile is the log severity above which logs
are sent to the log file. [Default: Info]'
type: string
logSeverityScreen:
description: 'LogSeverityScreen is the log severity above which logs
are sent to the stdout. [Default: Info]'
type: string
logSeveritySys:
description: 'LogSeveritySys is the log severity above which logs
are sent to the syslog. Set to None for no logging to syslog. [Default:
Info]'
type: string
maxIpsetSize:
type: integer
metadataAddr:
description: 'MetadataAddr is the IP address or domain name of the
server that can answer VM queries for cloud-init metadata. In OpenStack,
this corresponds to the machine running nova-api (or in Ubuntu,
nova-api-metadata). A value of none (case insensitive) means that
Felix should not set up any NAT rule for the metadata path. [Default:
127.0.0.1]'
type: string
metadataPort:
description: 'MetadataPort is the port of the metadata server. This,
combined with global.MetadataAddr (if not ''None''), is used to
set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort.
In most cases this should not need to be changed [Default: 8775].'
type: integer
mtuIfacePattern:
description: MTUIfacePattern is a regular expression that controls
which interfaces Felix should scan in order to calculate the host's
MTU. This should not match workload interfaces (usually named cali...).
type: string
natOutgoingAddress:
description: NATOutgoingAddress specifies an address to use when performing
source NAT for traffic in a natOutgoing pool that is leaving the
network. By default the address used is an address on the interface
                  the traffic is leaving on (i.e. it uses the iptables MASQUERADE target).
type: string
natPortRange:
anyOf:
- type: integer
- type: string
description: NATPortRange specifies the range of ports that is used
for port mapping when doing outgoing NAT. When unset the default
behavior of the network stack is used.
pattern: ^.*
x-kubernetes-int-or-string: true
netlinkTimeout:
type: string
openstackRegion:
description: 'OpenstackRegion is the name of the region that a particular
Felix belongs to. In a multi-region Calico/OpenStack deployment,
this must be configured somehow for each Felix (here in the datamodel,
or in felix.cfg or the environment on each compute node), and must
match the [calico] openstack_region value configured in neutron.conf
on each node. [Default: Empty]'
type: string
policySyncPathPrefix:
                description: 'PolicySyncPathPrefix is used by Felix to communicate
policy changes to external services, like Application layer policy.
[Default: Empty]'
type: string
prometheusGoMetricsEnabled:
                description: 'PrometheusGoMetricsEnabled, when set to false, disables
                  Go runtime metrics collection, which the Prometheus client performs
                  by default. This reduces the number of metrics reported, reducing
                  Prometheus load. [Default: true]'
type: boolean
prometheusMetricsEnabled:
description: 'PrometheusMetricsEnabled enables the Prometheus metrics
server in Felix if set to true. [Default: false]'
type: boolean
prometheusMetricsHost:
description: 'PrometheusMetricsHost is the host that the Prometheus
metrics server should bind to. [Default: empty]'
type: string
prometheusMetricsPort:
description: 'PrometheusMetricsPort is the TCP port that the Prometheus
metrics server should bind to. [Default: 9091]'
type: integer
prometheusProcessMetricsEnabled:
                description: 'PrometheusProcessMetricsEnabled, when set to false,
                  disables process metrics collection, which the Prometheus client
                  performs by default. This reduces the number of metrics reported,
                  reducing Prometheus load. [Default: true]'
type: boolean
removeExternalRoutes:
description: Whether or not to remove device routes that have not
been programmed by Felix. Disabling this will allow external applications
                  to also add device routes. This is enabled by default, which means
                  externally added routes will be removed.
type: boolean
reportingInterval:
description: 'ReportingInterval is the interval at which Felix reports
its status into the datastore or 0 to disable. Must be non-zero
in OpenStack deployments. [Default: 30s]'
type: string
reportingTTL:
description: 'ReportingTTL is the time-to-live setting for process-wide
status reports. [Default: 90s]'
type: string
routeRefreshInterval:
description: 'RouteRefreshInterval is the period at which Felix re-checks
the routes in the dataplane to ensure that no other process has
accidentally broken Calico''s rules. Set to 0 to disable route refresh.
[Default: 90s]'
type: string
routeSource:
description: 'RouteSource configures where Felix gets its routing
information. - WorkloadIPs: use workload endpoints to construct
routes. - CalicoIPAM: the default - use IPAM data to construct routes.'
type: string
routeTableRange:
description: Calico programs additional Linux route tables for various
purposes. RouteTableRange specifies the indices of the route tables
that Calico should use.
properties:
max:
type: integer
min:
type: integer
required:
- max
- min
type: object
serviceLoopPrevention:
                description: 'When service IP advertisement is enabled, prevent routing
                  loops to service IPs that are not in use, by dropping or rejecting
                  packets that do not get DNAT''d by kube-proxy, unless set to "Disabled",
                  in which case such routing loops continue to be allowed. [Default:
                  Drop]'
type: string
sidecarAccelerationEnabled:
description: 'SidecarAccelerationEnabled enables experimental sidecar
acceleration [Default: false]'
type: boolean
usageReportingEnabled:
description: 'UsageReportingEnabled reports anonymous Calico version
number and cluster size to projectcalico.org. Logs warnings returned
by the usage server. For example, if a significant security vulnerability
has been discovered in the version of Calico being used. [Default:
true]'
type: boolean
usageReportingInitialDelay:
description: 'UsageReportingInitialDelay controls the minimum delay
before Felix makes a report. [Default: 300s]'
type: string
usageReportingInterval:
description: 'UsageReportingInterval controls the interval at which
Felix makes reports. [Default: 86400s]'
type: string
useInternalDataplaneDriver:
type: boolean
vxlanEnabled:
type: boolean
vxlanMTU:
description: 'VXLANMTU is the MTU to set on the tunnel device. See
Configuring MTU [Default: 1440]'
type: integer
vxlanPort:
type: integer
vxlanVNI:
type: integer
wireguardEnabled:
description: 'WireguardEnabled controls whether Wireguard is enabled.
[Default: false]'
type: boolean
wireguardInterfaceName:
description: 'WireguardInterfaceName specifies the name to use for
the Wireguard interface. [Default: wg.calico]'
type: string
wireguardListeningPort:
description: 'WireguardListeningPort controls the listening port used
by Wireguard. [Default: 51820]'
type: integer
wireguardMTU:
description: 'WireguardMTU controls the MTU on the Wireguard interface.
See Configuring MTU [Default: 1420]'
type: integer
wireguardRoutingRulePriority:
description: 'WireguardRoutingRulePriority controls the priority value
to use for the Wireguard routing rule. [Default: 99]'
type: integer
xdpEnabled:
description: 'XDPEnabled enables XDP acceleration for suitable untracked
incoming deny rules. [Default: true]'
type: boolean
xdpRefreshInterval:
description: 'XDPRefreshInterval is the period at which Felix re-checks
all XDP state to ensure that no other process has accidentally broken
Calico''s BPF maps or attached programs. Set to 0 to disable XDP
refresh. [Default: 90s]'
type: string
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
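# An illustrative FelixConfiguration (commented out so it is never applied).
# It sketches how a few of the fields defined above fit together; the values
# are examples, not recommendations. Felix reads cluster-wide settings from
# the instance named "default".
#
# apiVersion: crd.projectcalico.org/v1
# kind: FelixConfiguration
# metadata:
#   name: default
# spec:
#   logSeverityScreen: Info
#   reportingInterval: 30s
#   ipipEnabled: true
#   ipipMTU: 1440
#   prometheusMetricsEnabled: true
#   prometheusMetricsPort: 9091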
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkPolicy
listKind: GlobalNetworkPolicyList
plural: globalnetworkpolicies
singular: globalnetworkpolicy
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
properties:
applyOnForward:
description: ApplyOnForward indicates to apply the rules in this policy
on forward traffic.
type: boolean
doNotTrack:
description: DoNotTrack indicates whether packets matched by the rules
in this policy should go through the data plane's connection tracking,
such as Linux conntrack. If True, the rules in this policy are
applied before any data plane connection tracking, and packets allowed
by this policy are marked as not to be tracked.
type: boolean
egress:
description: The ordered set of egress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
                    a negated version, prefixed with “Not”. All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, ` + "`" + `global()` + "`" + ` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label “my_label”. \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label “my_label”.
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a ` + "`" + `exact` + "`" + ` or a ` + "`" + `prefix` + "`" + ` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, ` + "`" + `global()` + "`" + ` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label “my_label”. \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label “my_label”.
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
type: object
required:
- action
type: object
type: array
ingress:
description: The ordered set of ingress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
                    a negated version, prefixed with “Not”. All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, ` + "`" + `global()` + "`" + ` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label “my_label”. \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label “my_label”.
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a ` + "`" + `exact` + "`" + ` or a ` + "`" + `prefix` + "`" + ` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, ` + "`" + `global()` + "`" + ` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label “my_label”. \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label “my_label”.
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
type: object
required:
- action
type: object
type: array
namespaceSelector:
description: NamespaceSelector is an optional field for an expression
used to select a pod based on namespaces.
type: string
order:
description: Order is an optional field that specifies the order in
which the policy is applied. Policies with higher "order" are applied
after those with lower order. If the order is omitted, it may be
considered to be "infinite" - i.e. the policy will be applied last. Policies
with identical order will be applied in alphanumerical order based
on the Policy "Name".
type: number
preDNAT:
description: PreDNAT indicates to apply the rules in this policy before
any DNAT.
type: boolean
selector:
description: "The selector is an expression used to pick pick out
the endpoints that the policy should be applied to. \n Selector
expressions follow this syntax: \n \tlabel == \"string_literal\"
\ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
\ -> not equal; also matches if label is not present \tlabel in
{ \"a\", \"b\", \"c\", ... } -> true if the value of label X is
one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
... } -> true if the value of label X is not one of \"a\", \"b\",
\"c\" \thas(label_name) -> True if that label is present \t! expr
-> negation of expr \texpr && expr -> Short-circuit and \texpr
|| expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
or the empty selector -> matches all endpoints. \n Label names are
allowed to contain alphanumerics, -, _ and /. String literals are
more permissive but they do not support escape characters. \n Examples
(with made-up labels): \n \ttype == \"webserver\" && deployment
== \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
\"dev\" \t! has(label_name)"
type: string
serviceAccountSelector:
description: ServiceAccountSelector is an optional field for an expression
used to select a pod based on service accounts.
type: string
types:
description: "Types indicates whether this policy applies to ingress,
or to egress, or to both. When not explicitly specified (and so
the value on creation is empty or nil), Calico defaults Types according
to what Ingress and Egress rules are present in the policy. The
default is: \n - [ PolicyTypeIngress ], if there are no Egress rules
(including the case where there are also no Ingress rules) \n
- [ PolicyTypeEgress ], if there are Egress rules but no Ingress
rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are
both Ingress and Egress rules. \n When the policy is read back again,
Types will always be one of these values, never empty or nil."
items:
description: PolicyType enumerates the possible values of the PolicySpec
Types field.
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
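# An illustrative GlobalNetworkPolicy (commented out so it is never applied).
# It shows the shape of the spec above: a selector picking out endpoints, an
# ordered list of ingress rules, and the required per-rule action. The policy
# name, labels, and port are hypothetical.
#
# apiVersion: crd.projectcalico.org/v1
# kind: GlobalNetworkPolicy
# metadata:
#   name: allow-frontend-to-backend
# spec:
#   order: 100
#   selector: role == "backend"
#   types:
#   - Ingress
#   ingress:
#   - action: Allow
#     protocol: TCP
#     source:
#       selector: role == "frontend"
#     destination:
#       ports:
#       - 8080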
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkSet
listKind: GlobalNetworkSetList
plural: globalnetworksets
singular: globalnetworkset
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs
that share labels to allow rules to refer to them via selectors. The labels
of GlobalNetworkSet are not namespaced.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: GlobalNetworkSetSpec contains the specification for a NetworkSet
resource.
properties:
nets:
description: The list of IP networks that belong to this set.
items:
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
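# An illustrative GlobalNetworkSet (commented out so it is never applied).
# Its labels make the listed CIDRs selectable from policy rules; the name,
# label, and (documentation-range) addresses are hypothetical.
#
# apiVersion: crd.projectcalico.org/v1
# kind: GlobalNetworkSet
# metadata:
#   name: office-networks
#   labels:
#     role: office
# spec:
#   nets:
#   - 192.0.2.0/24
#   - 198.51.100.0/24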
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: HostEndpoint
listKind: HostEndpointList
plural: hostendpoints
singular: hostendpoint
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HostEndpointSpec contains the specification for a HostEndpoint
resource.
properties:
expectedIPs:
description: "The expected IP addresses (IPv4 and IPv6) of the endpoint.
If \"InterfaceName\" is not present, Calico will look for an interface
matching any of the IPs in the list and apply policy to that. Note:
\tWhen using the selector match criteria in an ingress or egress
security Policy \tor Profile, Calico converts the selector into
a set of IP addresses. For host \tendpoints, the ExpectedIPs field
is used for that purpose. (If only the interface \tname is specified,
Calico does not learn the IPs of the interface for use in match
\tcriteria.)"
items:
type: string
type: array
interfaceName:
description: "Either \"*\", or the name of a specific Linux interface
to apply policy to; or empty. \"*\" indicates that this HostEndpoint
governs all traffic to, from or through the default network namespace
of the host named by the \"Node\" field; entering and leaving that
namespace via any interface, including those from/to non-host-networked
local workloads. \n If InterfaceName is not \"*\", this HostEndpoint
only governs traffic that enters or leaves the host through the
specific interface named by InterfaceName, or - when InterfaceName
is empty - through the specific interface that has one of the IPs
in ExpectedIPs. Therefore, when InterfaceName is empty, at least
one expected IP must be specified. Only external interfaces (such
as “eth0”) are supported here; it isn't possible for a HostEndpoint
to protect traffic through a specific local workload interface.
\n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints;
initially just pre-DNAT policy. Please check Calico documentation
for the latest position."
type: string
node:
description: The node name identifying the Calico node instance.
type: string
ports:
description: Ports contains the endpoint's named ports, which may
be referenced in security policy rules.
items:
properties:
name:
type: string
port:
type: integer
protocol:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
required:
- name
- port
- protocol
type: object
type: array
profiles:
description: A list of identifiers of security Profile objects that
apply to this endpoint. Each profile is applied in the order that
they appear in this list. Profile rules are applied after the selector-based
security policy.
items:
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
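# An illustrative HostEndpoint (commented out so it is never applied). It
# attaches host-level policy to one interface of one node, per the spec
# above; the node, interface, and IP values are hypothetical.
#
# apiVersion: crd.projectcalico.org/v1
# kind: HostEndpoint
# metadata:
#   name: node-1-eth0
#   labels:
#     environment: production
# spec:
#   node: node-1
#   interfaceName: eth0
#   expectedIPs:
#   - 192.0.2.10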
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: IPAMBlock
listKind: IPAMBlockList
plural: ipamblocks
singular: ipamblock
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPAMBlockSpec contains the specification for an IPAMBlock
resource.
properties:
affinity:
type: string
allocations:
items:
type: integer
# TODO: This nullable is manually added in. We should update controller-gen
# to handle []*int properly itself.
nullable: true
type: array
attributes:
items:
properties:
handle_id:
type: string
secondary:
additionalProperties:
type: string
type: object
type: object
type: array
cidr:
type: string
deleted:
type: boolean
strictAffinity:
type: boolean
unallocated:
items:
type: integer
type: array
required:
- allocations
- attributes
- cidr
- strictAffinity
- unallocated
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
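# An illustrative IPAMBlock (commented out so it is never applied). Blocks are
# created and managed by Calico IPAM itself, never by hand; this sketch of a
# tiny /30 block (one address allocated, three free) only illustrates how the
# required fields relate. Allocation entries index into the attributes list,
# and unallocated entries are ordinal offsets within the block. All names and
# values are hypothetical.
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPAMBlock
# metadata:
#   name: 192-168-0-0-30
# spec:
#   cidr: 192.168.0.0/30
#   affinity: host:node-1
#   strictAffinity: false
#   allocations: [0, null, null, null]
#   unallocated: [1, 2, 3]
#   attributes:
#   - handle_id: k8s-pod-network.example
#     secondary:
#       node: node-1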
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: IPAMConfig
listKind: IPAMConfigList
plural: ipamconfigs
singular: ipamconfig
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPAMConfigSpec contains the specification for an IPAMConfig
resource.
properties:
autoAllocateBlocks:
type: boolean
maxBlocksPerHost:
description: MaxBlocksPerHost, if non-zero, is the max number of blocks
that can be affine to each host.
type: integer
strictAffinity:
type: boolean
required:
- autoAllocateBlocks
- strictAffinity
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
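# An illustrative IPAMConfig (commented out so it is never applied). Calico
# reads a single instance, conventionally named "default"; both required
# fields from the spec above are shown.
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPAMConfig
# metadata:
#   name: default
# spec:
#   strictAffinity: false
#   autoAllocateBlocks: true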
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: IPAMHandle
listKind: IPAMHandleList
plural: ipamhandles
singular: ipamhandle
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPAMHandleSpec contains the specification for an IPAMHandle
resource.
properties:
block:
additionalProperties:
type: integer
type: object
deleted:
type: boolean
handleID:
type: string
required:
- block
- handleID
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: IPPool
listKind: IPPoolList
plural: ippools
singular: ippool
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPPoolSpec contains the specification for an IPPool resource.
properties:
blockSize:
description: The block size to use for IP address assignments from
this pool. Defaults to 26 for IPv4 and 112 for IPv6.
type: integer
cidr:
description: The pool CIDR.
type: string
disabled:
description: When disabled is true, Calico IPAM will not assign addresses
from this pool.
type: boolean
ipip:
description: 'Deprecated: this field is only used for APIv1 backwards
compatibility. Setting this field is not allowed, this field is
for internal use only.'
properties:
enabled:
description: When enabled is true, ipip tunneling will be used
to deliver packets to destinations within this pool.
type: boolean
mode:
description: The IPIP mode. This can be one of "always" or "cross-subnet". A
mode of "always" will also use IPIP tunneling for routing to
destination IP addresses within this pool. A mode of "cross-subnet"
will only use IPIP tunneling when the destination node is on
a different subnet to the originating node. The default value
(if not specified) is "always".
type: string
type: object
ipipMode:
description: Contains configuration for IPIP tunneling for this pool.
If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling
is disabled).
type: string
nat-outgoing:
description: 'Deprecated: this field is only used for APIv1 backwards
compatibility. Setting this field is not allowed, this field is
for internal use only.'
type: boolean
natOutgoing:
description: When nat-outgoing is true, packets sent from Calico networked
containers in this pool to destinations outside of this pool will
be masqueraded.
type: boolean
nodeSelector:
description: Allows IPPool to allocate for a specific node by label
selector.
type: string
vxlanMode:
description: Contains configuration for VXLAN tunneling for this pool.
If not specified, then this is defaulted to "Never" (i.e. VXLAN
tunneling is disabled).
type: string
required:
- cidr
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
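# For reference, a minimal IPPool using the fields defined above (a
# commented-out sketch; the name and CIDR are placeholders, and the default
# pool is normally created by calico-node from CALICO_IPV4POOL_CIDR further
# below):
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPPool
# metadata:
#   name: example-ipv4-pool
# spec:
#   cidr: 100.96.0.0/11
#   blockSize: 26
#   ipipMode: CrossSubnet
#   natOutgoing: true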
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: (devel)
name: kubecontrollersconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: KubeControllersConfiguration
listKind: KubeControllersConfigurationList
plural: kubecontrollersconfigurations
singular: kubecontrollersconfiguration
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: KubeControllersConfigurationSpec contains the values of the
Kubernetes controllers configuration.
properties:
controllers:
description: Controllers enables and configures individual Kubernetes
controllers
properties:
namespace:
description: Namespace enables and configures the namespace controller.
Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
type: object
node:
description: Node enables and configures the node controller.
Enabled by default, set to nil to disable.
properties:
hostEndpoint:
description: HostEndpoint controls syncing nodes to host endpoints.
                          Disabled by default.
properties:
autoCreate:
description: 'AutoCreate enables automatic creation of
host endpoints for every node. [Default: Disabled]'
type: string
type: object
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
syncLabels:
description: 'SyncLabels controls whether to copy Kubernetes
node labels to Calico nodes. [Default: Enabled]'
type: string
type: object
policy:
description: Policy enables and configures the policy controller.
Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
type: object
serviceAccount:
description: ServiceAccount enables and configures the service
account controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
type: object
workloadEndpoint:
description: WorkloadEndpoint enables and configures the workload
endpoint controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
type: object
type: object
etcdV3CompactionPeriod:
description: 'EtcdV3CompactionPeriod is the period between etcdv3
compaction requests. Set to 0 to disable. [Default: 10m]'
type: string
healthChecks:
description: 'HealthChecks enables or disables support for health
checks [Default: Enabled]'
type: string
logSeverityScreen:
description: 'LogSeverityScreen is the log severity above which logs
are sent to the stdout. [Default: Info]'
type: string
required:
- controllers
type: object
status:
description: KubeControllersConfigurationStatus represents the status
of the configuration. It's useful for admins to be able to see the actual
config that was applied, which can be modified by environment variables
on the kube-controllers process.
properties:
environmentVars:
additionalProperties:
type: string
description: EnvironmentVars contains the environment variables on
the kube-controllers that influenced the RunningConfig.
type: object
runningConfig:
description: RunningConfig contains the effective config that is running
in the kube-controllers pod, after merging the API resource with
any environment variables.
properties:
controllers:
description: Controllers enables and configures individual Kubernetes
controllers
properties:
namespace:
description: Namespace enables and configures the namespace
controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
type: object
node:
description: Node enables and configures the node controller.
Enabled by default, set to nil to disable.
properties:
hostEndpoint:
description: HostEndpoint controls syncing nodes to host
                              endpoints. Disabled by default.
properties:
autoCreate:
description: 'AutoCreate enables automatic creation
of host endpoints for every node. [Default: Disabled]'
type: string
type: object
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
syncLabels:
description: 'SyncLabels controls whether to copy Kubernetes
node labels to Calico nodes. [Default: Enabled]'
type: string
type: object
policy:
description: Policy enables and configures the policy controller.
Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
type: object
serviceAccount:
description: ServiceAccount enables and configures the service
account controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
type: object
workloadEndpoint:
description: WorkloadEndpoint enables and configures the workload
endpoint controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
type: object
type: object
etcdV3CompactionPeriod:
description: 'EtcdV3CompactionPeriod is the period between etcdv3
compaction requests. Set to 0 to disable. [Default: 10m]'
type: string
healthChecks:
description: 'HealthChecks enables or disables support for health
checks [Default: Enabled]'
type: string
logSeverityScreen:
description: 'LogSeverityScreen is the log severity above which
logs are sent to the stdout. [Default: Info]'
type: string
required:
- controllers
type: object
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
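# For reference, a KubeControllersConfiguration matching the schema above (a
# commented-out sketch; kube-controllers itself creates a resource named
# "default" if none exists, per the RBAC rules further below):
#
# apiVersion: crd.projectcalico.org/v1
# kind: KubeControllersConfiguration
# metadata:
#   name: default
# spec:
#   logSeverityScreen: Info
#   healthChecks: Enabled
#   etcdV3CompactionPeriod: 10m
#   controllers:
#     node:
#       reconcilerPeriod: 5m
#       syncLabels: Enabled
#       hostEndpoint:
#         autoCreate: Disabled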
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: NetworkPolicy
listKind: NetworkPolicyList
plural: networkpolicies
singular: networkpolicy
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
properties:
egress:
description: The ordered set of egress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
                    a negated version, prefixed with “Not”. All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, ` + "`" + `global()` + "`" + ` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label “my_label”. \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label “my_label”.
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
                          ONLY specify either an ` + "`" + `exact` + "`" + ` or a ` + "`" + `prefix` + "`" + ` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, ` + "`" + `global()` + "`" + ` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label “my_label”. \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label “my_label”.
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
type: object
required:
- action
type: object
type: array
ingress:
description: The ordered set of ingress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
                    a negated version, prefixed with “Not”. All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, ` + "`" + `global()` + "`" + ` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label “my_label”. \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label “my_label”.
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
                          ONLY specify either an ` + "`" + `exact` + "`" + ` or a ` + "`" + `prefix` + "`" + ` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, ` + "`" + `global()` + "`" + ` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label “my_label”. \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label “my_label”.
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
type: object
required:
- action
type: object
type: array
order:
description: Order is an optional field that specifies the order in
which the policy is applied. Policies with higher "order" are applied
after those with lower order. If the order is omitted, it may be
considered to be "infinite" - i.e. the policy will be applied last. Policies
with identical order will be applied in alphanumerical order based
on the Policy "Name".
type: number
selector:
description: "The selector is an expression used to pick pick out
the endpoints that the policy should be applied to. \n Selector
expressions follow this syntax: \n \tlabel == \"string_literal\"
\ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
\ -> not equal; also matches if label is not present \tlabel in
{ \"a\", \"b\", \"c\", ... } -> true if the value of label X is
one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
... } -> true if the value of label X is not one of \"a\", \"b\",
\"c\" \thas(label_name) -> True if that label is present \t! expr
-> negation of expr \texpr && expr -> Short-circuit and \texpr
|| expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
or the empty selector -> matches all endpoints. \n Label names are
allowed to contain alphanumerics, -, _ and /. String literals are
more permissive but they do not support escape characters. \n Examples
(with made-up labels): \n \ttype == \"webserver\" && deployment
== \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
\"dev\" \t! has(label_name)"
type: string
serviceAccountSelector:
description: ServiceAccountSelector is an optional field for an expression
used to select a pod based on service accounts.
type: string
types:
description: "Types indicates whether this policy applies to ingress,
or to egress, or to both. When not explicitly specified (and so
the value on creation is empty or nil), Calico defaults Types according
to what Ingress and Egress are present in the policy. The default
is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including
the case where there are also no Ingress rules) \n - [ PolicyTypeEgress
], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress,
PolicyTypeEgress ], if there are both Ingress and Egress rules.
\n When the policy is read back again, Types will always be one
of these values, never empty or nil."
items:
description: PolicyType enumerates the possible values of the PolicySpec
Types field.
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
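# For reference, a NetworkPolicy exercising the rule and selector schema above
# (a commented-out sketch with made-up labels; not installed by this addon):
#
# apiVersion: crd.projectcalico.org/v1
# kind: NetworkPolicy
# metadata:
#   name: allow-frontend-to-backend
#   namespace: default
# spec:
#   order: 100
#   selector: type == "backend"
#   types:
#   - Ingress
#   ingress:
#   - action: Allow
#     protocol: TCP
#     source:
#       selector: type == "frontend"
#     destination:
#       ports:
#       - 8080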
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
group: crd.projectcalico.org
names:
kind: NetworkSet
listKind: NetworkSetList
plural: networksets
singular: networkset
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
      description: NetworkSet is the namespaced equivalent of GlobalNetworkSet.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NetworkSetSpec contains the specification for a NetworkSet
resource.
properties:
nets:
description: The list of IP networks that belong to this set.
items:
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
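# For reference, a NetworkSet whose label can be matched by policy selectors
# (a commented-out sketch; the name, label, and networks are placeholders):
#
# apiVersion: crd.projectcalico.org/v1
# kind: NetworkSet
# metadata:
#   name: office-networks
#   namespace: default
#   labels:
#     role: office
# spec:
#   nets:
#   - 198.51.100.0/28
#   - 203.0.113.0/24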
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
labels:
role.kubernetes.io/networking: "1"
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# kube-controllers manages hostendpoints.
- apiGroups: ["crd.projectcalico.org"]
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
# KubeControllersConfiguration is where it gets its config
- apiGroups: ["crd.projectcalico.org"]
resources:
- kubecontrollersconfigurations
verbs:
# read its own config
- get
# create a default if none exists
- create
# update status
- update
# watch for changes
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
labels:
role.kubernetes.io/networking: "1"
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
{{ if .Networking.Calico.TyphaReplicas -}}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
selector:
k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the calico-config ConfigMap above.
#
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
  # production, we recommend running at least 3 replicas to reduce the impact of a rolling upgrade.
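  #
  # For example, Typha can be enabled from the kops cluster spec, which feeds
  # this template (a sketch of that configuration, not part of this manifest):
  #
  #   networking:
  #     calico:
  #       typhaReplicas: 3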
replicas: {{ or .Networking.Calico.TyphaReplicas "0" }}
revisionHistoryLimit: 2
selector:
matchLabels:
k8s-app: calico-typha
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
serviceAccountName: calico-node
priorityClassName: system-cluster-critical
      # fsGroup allows using projected service account tokens, as described in kubernetes/kubernetes#82573.
securityContext:
fsGroup: 65534
containers:
- image: docker.io/calico/typha:v3.17.0
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Enable "info" logging by default. Can be set to "debug" to increase verbosity.
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
# Disable logging to file and syslog since those don't make sense in Kubernetes.
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
# Monitor the Kubernetes API to find the number of running instances and rebalance
# connections.
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
- name: TYPHA_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.TyphaPrometheusMetricsEnabled "false" }}"
- name: TYPHA_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.TyphaPrometheusMetricsPort "9093" }}"
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
---
# This manifest creates a Pod Disruption Budget for Typha so that the Kubernetes Cluster Autoscaler can safely evict its pods.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
{{- end }}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: docker.io/calico/cni:v3.17.0
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:v3.17.0
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.io/calico/pod2daemon-flexvol:v3.17.0
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:v3.17.0
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
{{- if .Networking.Calico.TyphaReplicas }}
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: calico-config
key: typha_service_name
{{- end }}
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
            # Cluster type, used to identify the deployment.
- name: CLUSTER_TYPE
value: "kops,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "{{- or .Networking.Calico.IPv4AutoDetectionMethod "first-found" }}"
- name: IP6_AUTODETECTION_METHOD
value: "{{- or .Networking.Calico.IPv6AutoDetectionMethod "first-found" }}"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}CrossSubnet{{- else -}} {{- or .Networking.Calico.IPIPMode "Always" -}} {{- end -}}"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
            # Set MTU for the tunnel device used if IPIP is enabled.
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within ` + "`" + `--cluster-cidr` + "`" + `.
- name: CALICO_IPV4POOL_CIDR
value: "{{ .KubeControllerManager.ClusterCIDR }}"
# Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}"
- name: FELIX_HEALTHENABLED
value: "true"
# kops additions
# Enable source/destination checks for AWS
- name: FELIX_AWSSRCDSTCHECK
value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}Disable{{- else -}} {{- or .Networking.Calico.AwsSrcDstCheck "DoNothing" -}} {{- end -}}"
# Enable eBPF dataplane mode
- name: FELIX_BPFENABLED
value: "{{ .Networking.Calico.BPFEnabled }}"
# Controls how traffic from outside the cluster to NodePorts and ClusterIPs is handled
- name: FELIX_BPFEXTERNALSERVICEMODE
value: "{{- or .Networking.Calico.BPFExternalServiceMode "Tunnel" }}"
# Controls whether Felix will clean up the iptables rules created by the Kubernetes kube-proxy
- name: FELIX_BPFKUBEPROXYIPTABLESCLEANUPENABLED
value: "{{- .Networking.Calico.BPFKubeProxyIptablesCleanupEnabled }}"
# Controls the log level used by the BPF programs
- name: FELIX_BPFLOGLEVEL
value: "{{- or .Networking.Calico.BPFLogLevel "Off" }}"
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Calico.ChainInsertMode "insert" }}"
# Set Felix iptables binary variant, Legacy or NFT
- name: FELIX_IPTABLESBACKEND
value: "{{- or .Networking.Calico.IptablesBackend "Auto" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
# Enable WireGuard encryption for all on-the-wire pod-to-pod traffic
- name: FELIX_WIREGUARDENABLED
value: "{{ .Networking.Calico.WireguardEnabled }}"
securityContext:
privileged: true
resources:
requests:
cpu: {{ or .Networking.Calico.CPURequest "100m" }}
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
- -bird-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:v3.17.0
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the "k8s-ec2-srcdst" container, which
# disables source/destination IP address checks for ENIs attached to
# EC2 instances hosting Kubernetes nodes.
#
# Disabling these checks allows Calico to send unencapsulated packets
# to and from pods within the same VPC subnet, where either a given
# packet's source address (originating from a pod) may not match the
# sending machine's address or the destination address (heading to a
# pod) may not match the receiving machine's address.
#
# This only applies for AWS environments.
# This is a deprecated setting, use awsSrcDstCheck instead
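# (The replacement maps to .Networking.Calico.AwsSrcDstCheck in the cluster
# spec and is applied above via the FELIX_AWSSRCDSTCHECK environment
# variable.)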
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
name: k8s-ec2-srcdst
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
  replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: k8s-ec2-srcdst
priorityClassName: system-cluster-critical
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.3.0
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs"
type: Directory
nodeSelector:
node-role.kubernetes.io/master: ""
{{ end -}}
`)

func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate, nil
}

func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplateBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
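
// The functions above follow go-bindata's per-asset accessor pattern. As a
// usage sketch (not part of the generated API), callers normally resolve an
// embedded template through the package-level Asset helper that go-bindata
// also generates in this file, keyed by the logical source path:
//
//	tmpl, err := Asset("cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template")
//	if err != nil {
//		// unknown asset name; the name must match the registered path exactly
//	}
//	_ = tmpl // raw Go-template bytes, rendered later against the cluster spec
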
var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.7/manifests/canal.yaml
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# MTU default is 1500, can be overridden
veth_mtu: "{{- or .Networking.Canal.MTU "1500" }}"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"mtu": __CNI_MTU__,
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
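  # Note on the "automatically populated" values above: the install-cni.sh
  # init container substitutes __CNI_MTU__ and __KUBERNETES_NODE_NAME__ from
  # its CNI_MTU and KUBERNETES_NODE_NAME environment variables, and replaces
  # __KUBECONFIG_FILEPATH__ with the path of the kubeconfig it writes out.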
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
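  # With the kube-subnet-mgr backend (see the kube-flannel container below),
  # flannel derives each node's subnet lease from the node's Kubernetes
  # PodCIDR; this file only supplies the overall network and backend type.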
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the canal serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
# Flannel ClusterRole
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/k8s-manifests/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- list
- watch
- apiGroups: [""]
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# Bind the Calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: canal
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure canal gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: canal
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
initContainers:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.7.5
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-canal.conflist"
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.7.5
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Configure route aggregation based on pod CIDR.
- name: USE_POD_CIDR
value: "true"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,canal"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# No IP address needed.
- name: IP
value: ""
# Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "INFO"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: {{ or .Networking.Canal.CPURequest "100m" }}
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
host: localhost
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
{{- if eq .Networking.Canal.DisableFlannelForwardRules true }}
- name: FLANNELD_IPTABLES_FORWARD_RULES
value: "false"
{{- end }}
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used by flannel.
- name: flannel-cfg
configMap:
name: canal-config
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
`)

func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate, nil
}

func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplateBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.12/manifests/canal.yaml
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
data:
  # Typha is enabled only when TyphaReplicas is set; otherwise the service
  # name is "none" and Typha stays disabled.
typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}"
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# Configure the MTU to use
{{- if .Networking.Canal.MTU }}
veth_mtu: "{{ .Networking.Canal.MTU }}"
{{- else }}
veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}"
{{- end }}
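  # (With no explicit MTU configured, this renders 1430 on OpenStack and 1440
  # on all other cloud providers.)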
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the canal serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico
labels:
role.kubernetes.io/networking: "1"
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
# Flannel ClusterRole
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- list
- watch
- apiGroups: [""]
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: canal-flannel
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-calico
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
{{ if .Networking.Canal.TyphaReplicas -}}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
selector:
k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the canal-config ConfigMap above.
#
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
# production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
replicas: {{ or .Networking.Canal.TyphaReplicas 0 }}
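  # (This whole section is guarded by "if .Networking.Canal.TyphaReplicas"
  # above, so the "or ... 0" fallback never actually renders 0 here.)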
revisionHistoryLimit: 2
selector:
matchLabels:
k8s-app: calico-typha
template:
metadata:
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
annotations:
# This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
# add-on, ensuring it gets priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
nodeSelector:
kubernetes.io/os: linux
kubernetes.io/role: master
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
serviceAccountName: canal
priorityClassName: system-cluster-critical
# fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
securityContext:
fsGroup: 65534
containers:
- image: calico/typha:v3.12.2
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
env:
# Enable "info" logging by default. Can be set to "debug" to increase verbosity.
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
# Disable logging to file and syslog since those don't make sense in Kubernetes.
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
# Monitor the Kubernetes API to find the number of running instances and rebalance
# connections.
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
- name: TYPHA_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}"
- name: TYPHA_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}"
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
---
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
{{- end }}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the canal container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: canal
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure canal gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: canal
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.12.2
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-canal.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.12.2
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs canal container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.12.2
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Configure route aggregation based on pod CIDR.
- name: USE_POD_CIDR
value: "true"
{{- if .Networking.Canal.TyphaReplicas }}
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: canal-config
key: typha_service_name
{{- end }}
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
# was value: "k8s,bgp"
value: "k8s,canal"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# No IP address needed.
- name: IP
value: ""
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "INFO"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
- name: FELIX_HEALTHENABLED
value: "true"
# kops additions
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set Felix iptables binary variant, Legacy or NFT
- name: FELIX_IPTABLESBACKEND
value: "{{- or .Networking.Canal.IptablesBackend "Auto" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
requests:
cpu: {{ or .Networking.Canal.CPURequest "100m" }}
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
host: localhost
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
{{- if eq .Networking.Canal.DisableFlannelForwardRules true }}
- name: FLANNELD_IPTABLES_FORWARD_RULES
value: "false"
{{- end }}
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by canal.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used by flannel.
- name: flannel-cfg
configMap:
name: canal-config
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
`)

func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplateBytes() ([]byte, error) {
	return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate, nil
}

func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate() (*asset, error) {
	bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplateBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
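
// Assuming the standard go-bindata layout, each constructor above is also
// registered in the generated _bindata map (logical path -> constructor)
// near the end of this file; that map backs the Asset, MustAsset, and
// AssetNames lookups by name.
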
var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.13/manifests/canal.yaml
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
data:
  # Typha is enabled only when TyphaReplicas is set; otherwise the service
  # name is "none" and Typha stays disabled.
typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}"
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# Configure the MTU to use
{{- if .Networking.Canal.MTU }}
veth_mtu: "{{ .Networking.Canal.MTU }}"
{{- else }}
veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}"
{{- end }}
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the canal serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico
labels:
role.kubernetes.io/networking: "1"
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
# Flannel ClusterRole
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- list
- watch
- apiGroups: [""]
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: canal-flannel
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-calico
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
{{ if .Networking.Canal.TyphaReplicas -}}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
selector:
k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the canal-config ConfigMap above.
#
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
# production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
replicas: {{ or .Networking.Canal.TyphaReplicas 0 }}
revisionHistoryLimit: 2
selector:
matchLabels:
k8s-app: calico-typha
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
serviceAccountName: canal
priorityClassName: system-cluster-critical
# fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
securityContext:
fsGroup: 65534
containers:
- image: calico/typha:v3.13.4
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
env:
# Enable "info" logging by default. Can be set to "debug" to increase verbosity.
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
# Disable logging to file and syslog since those don't make sense in Kubernetes.
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
# Monitor the Kubernetes API to find the number of running instances and rebalance
# connections.
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
- name: TYPHA_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}"
- name: TYPHA_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}"
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
---
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
{{- end }}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the canal container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: canal
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure canal gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: canal
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.13.4
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-canal.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.13.4
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs canal container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.13.4
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Configure route aggregation based on pod CIDR.
- name: USE_POD_CIDR
value: "true"
{{- if .Networking.Canal.TyphaReplicas }}
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: canal-config
key: typha_service_name
{{- end }}
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,canal"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# No IP address needed.
- name: IP
value: ""
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Canal.LogSeveritySys "info" }}"
- name: FELIX_HEALTHENABLED
value: "true"
# kops additions
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set Felix iptables binary variant, Legacy or NFT
- name: FELIX_IPTABLESBACKEND
value: "{{- or .Networking.Canal.IptablesBackend "Auto" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
requests:
cpu: {{ or .Networking.Canal.CPURequest "100m" }}
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
host: localhost
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
{{- if eq .Networking.Canal.DisableFlannelForwardRules true }}
- name: FLANNELD_IPTABLES_FORWARD_RULES
value: "false"
{{- end }}
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by canal.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used by flannel.
- name: flannel-cfg
configMap:
name: canal-config
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
`)
func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
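
// Example (illustrative, not part of the generated output): embedded
// manifests are looked up through the standard go-bindata accessor by their
// source-relative name, which must match the registry key exactly.
func exampleLoadCanalTemplate() ([]byte, error) {
	return Asset("cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template")
}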
var _cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate = []byte(`# Pulled and modified from: https://github.com/weaveworks/weave/releases/download/v2.7.0/weave-daemonset-k8s-1.11.yaml
{{- if WeaveSecret }}
apiVersion: v1
kind: Secret
metadata:
name: weave-net
namespace: kube-system
stringData:
network-password: {{ WeaveSecret }}
---
{{- end }}
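# WeaveSecret is a kops template function: it returns the cluster's Weave
# network password when one has been configured and an empty value otherwise,
# in which case the Secret above and the WEAVE_PASSWORD environment variable
# below are omitted from the rendered manifest.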
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: weave-net
labels:
name: weave-net
role.kubernetes.io/networking: "1"
namespace: kube-system
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- 'networking.k8s.io'
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- nodes/status
verbs:
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: weave-net
labels:
name: weave-net
role.kubernetes.io/networking: "1"
namespace: kube-system
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- weave-net
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: weave-net
labels:
name: weave-net
role.kubernetes.io/networking: "1"
namespace: kube-system
spec:
  selector:
    matchLabels:
      name: weave-net
      role.kubernetes.io/networking: "1"
  # Wait 5 seconds to let each pod connect before rolling the next one.
  minReadySeconds: 5
template:
metadata:
labels:
name: weave-net
role.kubernetes.io/networking: "1"
annotations:
prometheus.io/scrape: "true"
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: {{ .KubeControllerManager.ClusterCIDR }}
{{- if .Networking.Weave.MTU }}
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
{{- if .Networking.Weave.NoMasqLocal }}
- name: NO_MASQ_LOCAL
value: "{{ .Networking.Weave.NoMasqLocal }}"
{{- end }}
{{- if .Networking.Weave.ConnLimit }}
- name: CONN_LIMIT
value: "{{ .Networking.Weave.ConnLimit }}"
{{- end }}
{{- if .Networking.Weave.NetExtraArgs }}
- name: EXTRA_ARGS
value: "{{ .Networking.Weave.NetExtraArgs }}"
{{- end }}
{{- if WeaveSecret }}
- name: WEAVE_PASSWORD
valueFrom:
secretKeyRef:
name: weave-net
key: network-password
{{- end }}
image: 'weaveworks/weave-kube:{{ or .Networking.Weave.Version "2.7.0" }}'
ports:
- name: metrics
containerPort: 6782
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
resources:
requests:
cpu: {{ or .Networking.Weave.CPURequest "50m" }}
memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.CPULimit }}
cpu: {{ .Networking.Weave.CPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
{{- if .Networking.Weave.NPCExtraArgs }}
- name: EXTRA_ARGS
value: "{{ .Networking.Weave.NPCExtraArgs }}"
{{- end }}
image: 'weaveworks/weave-npc:{{ or .Networking.Weave.Version "2.7.0" }}'
ports:
- name: metrics
containerPort: 6781
resources:
requests:
cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }}
memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.NPCCPULimit }}
cpu: {{ .Networking.Weave.NPCCPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
priorityClassName: system-node-critical
updateStrategy:
type: RollingUpdate
`)
func cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
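
// The {{ or .Field "default" }} idiom used throughout these templates relies
// on text/template's builtin "or", which returns its first non-empty
// argument, so an unset cluster-spec field falls back to the quoted default.
// A minimal sketch (kept as a comment; this file does not import
// text/template):
//
//	tmpl := template.Must(template.New("cpu").Parse(`cpu: {{ or .CPURequest "50m" }}`))
//	var buf bytes.Buffer
//	_ = tmpl.Execute(&buf, map[string]string{})                     // renders: cpu: 50m
//	buf.Reset()
//	_ = tmpl.Execute(&buf, map[string]string{"CPURequest": "100m"}) // renders: cpu: 100m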
var _cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate = []byte(`{{- $proxy := .EgressProxy -}}
{{- $na := .NodeAuthorization.NodeAuthorizer -}}
{{- $name := "node-authorizer" -}}
{{- $namespace := "kube-system" -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $name }}
namespace: {{ $namespace }}
labels:
k8s-app: {{ $name }}
k8s-addon: {{ $name }}.addons.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kops:{{ $name }}:nodes-viewer
labels:
k8s-app: {{ $name }}
k8s-addon: {{ $name }}.addons.k8s.io
rules:
- apiGroups:
- "*"
resources:
- nodes
verbs:
- get
- list
---
# permits the nodes to create a CSR
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kops:{{ $name }}:system:bootstrappers
labels:
k8s-app: {{ $name }}
k8s-addon: {{ $name }}.addons.k8s.io
roleRef:
kind: ClusterRole
name: system:node-bootstrapper
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: Group
name: system:bootstrappers
apiGroup: rbac.authorization.k8s.io
---
# instructs the controller to auto-approve CSRs from this group
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kops:{{ $name }}:approval
labels:
k8s-app: {{ $name }}
k8s-addon: {{ $name }}.addons.k8s.io
roleRef:
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: Group
name: system:bootstrappers
apiGroup: rbac.authorization.k8s.io
---
# the service account needs permission to create the bootstrap tokens
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kops:{{ $namespace }}:{{ $name }}
namespace: {{ $namespace }}
labels:
k8s-app: {{ $name }}
k8s-addon: {{ $name }}.addons.k8s.io
rules:
- apiGroups:
- "*"
resources:
- secrets
verbs:
- create
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kops:{{ $namespace }}:{{ $name }}
namespace: {{ $namespace }}
labels:
k8s-app: {{ $name }}
k8s-addon: {{ $name }}.addons.k8s.io
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops:{{ $namespace }}:{{ $name }}
subjects:
- kind: ServiceAccount
name: {{ $name }}
namespace: {{ $namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kops:{{ $name }}:nodes-viewer
labels:
k8s-app: {{ $name }}
k8s-addon: {{ $name }}.addons.k8s.io
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:{{ $name }}:nodes-viewer
subjects:
- kind: ServiceAccount
name: {{ $name }}
namespace: {{ $namespace }}
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ $name }}
namespace: {{ $namespace }}
labels:
k8s-app: {{ $name }}
k8s-addon: {{ $name }}.addons.k8s.io
spec:
selector:
matchLabels:
k8s-app: {{ $name }}
template:
metadata:
labels:
k8s-app: {{ $name }}
annotations:
dns.alpha.kubernetes.io/internal: {{ $name }}-internal.{{ ClusterName }}
prometheus.io/port: "{{ $na.Port }}"
prometheus.io/scheme: "https"
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/role: master
priorityClassName: system-node-critical
serviceAccount: {{ $name }}
securityContext:
fsGroup: 1000
tolerations:
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
volumes:
- name: config
hostPath:
path: /srv/kubernetes/node-authorizer
type: DirectoryOrCreate
containers:
- name: {{ $name }}
image: {{ $na.Image }}
args:
- server
- --authorization-timeout={{ $na.Timeout.Duration }}
- --authorizer={{ $na.Authorizer }}
- --cluster-name={{ ClusterName }}
{{- range $na.Features }}
- --feature={{ . }}
{{- end }}
- --listen=0.0.0.0:{{ $na.Port }}
- --tls-cert=/config/tls.pem
- --tls-client-ca=/config/ca.pem
- --tls-private-key=/config/tls-key.pem
- --token-ttl={{ $na.TokenTTL.Duration }}
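        # The tls.pem, ca.pem, and tls-key.pem paths above resolve inside the
        # "config" hostPath volume (/srv/kubernetes/node-authorizer on the
        # master), mounted read-only at /config below.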
{{- if $proxy }}
env:
- name: http_proxy
value: {{ $proxy.HTTPProxy.Host }}:{{ $proxy.HTTPProxy.Port }}
{{- if $proxy.ProxyExcludes }}
- name: no_proxy
value: {{ $proxy.ProxyExcludes }}
{{- end }}
{{- end }}
resources:
limits:
cpu: 100m
memory: 64Mi
requests:
cpu: 10m
memory: 10Mi
volumeMounts:
- mountPath: /config
readOnly: true
name: config
`)
func cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate = []byte(`{{ with .NodeTerminationHandler }}
# Sourced from https://github.com/aws/aws-node-termination-handler/releases/download/v1.7.0/all-resources.yaml
---
# Source: aws-node-termination-handler/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: aws-node-termination-handler
namespace: kube-system
labels:
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/instance: aws-node-termination-handler
k8s-app: aws-node-termination-handler
app.kubernetes.io/version: "1.8.0"
---
# Source: aws-node-termination-handler/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: aws-node-termination-handler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- extensions
resources:
- daemonsets
verbs:
- get
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
---
# Source: aws-node-termination-handler/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: aws-node-termination-handler
subjects:
- kind: ServiceAccount
name: aws-node-termination-handler
namespace: kube-system
roleRef:
kind: ClusterRole
name: aws-node-termination-handler
apiGroup: rbac.authorization.k8s.io
---
# Source: aws-node-termination-handler/templates/daemonset.linux.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: aws-node-termination-handler
namespace: kube-system
labels:
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/instance: aws-node-termination-handler
k8s-app: aws-node-termination-handler
app.kubernetes.io/version: "1.8.0"
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/instance: aws-node-termination-handler
kubernetes.io/os: linux
template:
metadata:
labels:
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/instance: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kubernetes.io/os: linux
spec:
volumes:
- name: "uptime"
hostPath:
path: "/proc/uptime"
priorityClassName: "system-node-critical"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "kubernetes.io/os"
operator: In
values:
- linux
- key: "kubernetes.io/arch"
operator: In
values:
- amd64
- arm64
- arm
serviceAccountName: aws-node-termination-handler
hostNetwork: true
dnsPolicy: "ClusterFirstWithHostNet"
containers:
- name: aws-node-termination-handler
image: amazon/aws-node-termination-handler:v1.8.0
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
allowPrivilegeEscalation: false
volumeMounts:
- name: "uptime"
mountPath: "/proc/uptime"
readOnly: true
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SPOT_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DELETE_LOCAL_DATA
value: "true"
- name: IGNORE_DAEMON_SETS
value: "true"
- name: POD_TERMINATION_GRACE_PERIOD
value: "-1"
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "{{ .EnableSpotInterruptionDraining }}"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
value: "{{ .EnableScheduledEventDraining }}"
- name: JSON_LOGGING
value: "true"
- name: ENABLE_PROMETHEUS_SERVER
value: "{{ .EnablePrometheusMetrics }}"
resources:
limits:
memory: 128Mi
requests:
cpu: 50m
memory: 64Mi
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
{{ end }}`)
func cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate, nil
}
func cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate = []byte(`# Vendored from https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: node-local-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns-upstream
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNSUpstream"
spec:
ports:
- name: dns
port: 53
protocol: UDP
targetPort: 53
- name: dns-tcp
port: 53
protocol: TCP
targetPort: 53
selector:
k8s-app: kube-dns
---
apiVersion: v1
kind: ConfigMap
metadata:
name: node-local-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
Corefile: |
{{ KubeDNS.Domain }}:53 {
errors
cache {
success 9984 30
denial 9984 5
}
reload
loop
bind {{ KubeDNS.NodeLocalDNS.LocalIP }}
forward . {{ NodeLocalDNSClusterIP }} {
force_tcp
}
prometheus :9253
health {{ KubeDNS.NodeLocalDNS.LocalIP }}:{{ NodeLocalDNSHealthCheck }}
}
{{- if KubeDNS.NodeLocalDNS.ForwardToKubeDNS }}
.:53 {
errors
cache 30
reload
loop
bind {{ KubeDNS.NodeLocalDNS.LocalIP }}
forward . {{ NodeLocalDNSClusterIP }} {
force_tcp
}
prometheus :9253
}
{{- else }}
in-addr.arpa:53 {
errors
cache 30
reload
loop
bind {{ KubeDNS.NodeLocalDNS.LocalIP }}
forward . {{ NodeLocalDNSClusterIP }} {
force_tcp
}
prometheus :9253
}
ip6.arpa:53 {
errors
cache 30
reload
loop
bind {{ KubeDNS.NodeLocalDNS.LocalIP }}
forward . {{ NodeLocalDNSClusterIP }} {
force_tcp
}
prometheus :9253
}
.:53 {
errors
cache 30
reload
loop
bind {{ KubeDNS.NodeLocalDNS.LocalIP }}
forward . __PILLAR__UPSTREAM__SERVERS__
prometheus :9253
}
{{- end }}
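  # Illustrative rendering of the cluster-domain stanza above, assuming the
  # conventional link-local bind address 169.254.20.10, a cluster domain of
  # cluster.local, an upstream cluster-DNS address of 100.64.0.10, and a
  # health-check port of 8080 (real values come from the cluster spec):
  #
  #   cluster.local:53 {
  #       errors
  #       cache {
  #           success 9984 30
  #           denial 9984 5
  #       }
  #       reload
  #       loop
  #       bind 169.254.20.10
  #       forward . 100.64.0.10 {
  #           force_tcp
  #       }
  #       prometheus :9253
  #       health 169.254.20.10:8080
  #   }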
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: node-local-dns
namespace: kube-system
labels:
k8s-app: node-local-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 10%
selector:
matchLabels:
k8s-app: node-local-dns
template:
metadata:
labels:
k8s-app: node-local-dns
annotations:
prometheus.io/port: "9253"
prometheus.io/scrape: "true"
spec:
priorityClassName: system-node-critical
serviceAccountName: node-local-dns
hostNetwork: true
dnsPolicy: Default # Don't use cluster DNS.
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: "NoExecute"
operator: "Exists"
- effect: "NoSchedule"
operator: "Exists"
containers:
- name: node-cache
image: k8s.gcr.io/dns/k8s-dns-node-cache:1.15.14
resources:
requests:
cpu: {{ KubeDNS.NodeLocalDNS.CPURequest }}
memory: {{ KubeDNS.NodeLocalDNS.MemoryRequest }}
args:
- -localip={{ .KubeDNS.NodeLocalDNS.LocalIP }}
- -conf=/etc/Corefile
- -upstreamsvc=kube-dns-upstream
- -setupiptables=false
securityContext:
privileged: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9253
name: metrics
protocol: TCP
livenessProbe:
httpGet:
host: {{ .KubeDNS.NodeLocalDNS.LocalIP }}
path: /health
port: {{ NodeLocalDNSHealthCheck }}
initialDelaySeconds: 60
timeoutSeconds: 5
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- name: config-volume
mountPath: /etc/coredns
- name: kube-dns-config
mountPath: /etc/kube-dns
volumes:
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
- name: config-volume
configMap:
name: node-local-dns
items:
- key: Corefile
path: Corefile.base
`)
func cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/nodelocaldns.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate = []byte(`---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cloud-controller-manager
namespace: kube-system
labels:
k8s-app: openstack-cloud-provider
k8s-addon: openstack.addons.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:cloud-node-controller
labels:
k8s-app: openstack-cloud-provider
k8s-addon: openstack.addons.k8s.io
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-node-controller
subjects:
- kind: ServiceAccount
name: cloud-node-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:cloud-controller-manager
labels:
k8s-app: openstack-cloud-provider
k8s-addon: openstack.addons.k8s.io
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:cloud-controller-manager
labels:
k8s-app: openstack-cloud-provider
k8s-addon: openstack.addons.k8s.io
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- create
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- '*'
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- get
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- '*'
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- list
- get
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:cloud-node-controller
labels:
k8s-app: openstack-cloud-provider
k8s-addon: openstack.addons.k8s.io
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- '*'
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
namespace: kube-system
name: openstack-cloud-provider
labels:
k8s-app: openstack-cloud-provider
k8s-addon: openstack.addons.k8s.io
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
name: openstack-cloud-provider
template:
metadata:
labels:
name: openstack-cloud-provider
spec:
# run on the host network (don't depend on CNI)
hostNetwork: true
# run on each master node
nodeSelector:
node-role.kubernetes.io/master: ""
priorityClassName: system-node-critical
securityContext:
runAsUser: 1001
serviceAccountName: cloud-controller-manager
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
containers:
- name: openstack-cloud-controller-manager
image: "{{- if .ExternalCloudControllerManager.Image -}} {{ .ExternalCloudControllerManager.Image }} {{- else -}} docker.io/k8scloudprovider/openstack-cloud-controller-manager:{{OpenStackCCMTag}} {{- end -}}"
args:
- /bin/openstack-cloud-controller-manager
{{- range $arg := CloudControllerConfigArgv }}
- {{ $arg }}
{{- end }}
- --cloud-config=/etc/kubernetes/cloud.config
- --address=127.0.0.1
resources:
requests:
cpu: 200m
volumeMounts:
- mountPath: /etc/kubernetes/cloud.config
name: cloudconfig
readOnly: true
{{ if .UseHostCertificates }}
- mountPath: /etc/ssl/certs
name: etc-ssl-certs
readOnly: true
{{ end }}
volumes:
- hostPath:
path: /etc/kubernetes/cloud.config
name: cloudconfig
{{ if .UseHostCertificates }}
- hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
name: etc-ssl-certs
{{ end }}
`)
func cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate, nil
}
func cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.13.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplate = []byte(`---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
k8s-addon: podsecuritypolicy.addons.k8s.io
name: kube-system
spec:
allowedCapabilities:
- '*'
fsGroup:
rule: RunAsAny
hostPID: true
hostIPC: true
hostNetwork: true
hostPorts:
- min: 1
max: 65536
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- '*'
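# This policy is deliberately unrestricted: kube-system workloads such as CNI
# and proxy daemonsets need privileged containers, host namespaces, host
# ports, and arbitrary volume types.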
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
k8s-addon: podsecuritypolicy.addons.k8s.io
name: kops:kube-system:psp
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- kube-system
verbs:
- use
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kops:kube-system:psp
roleRef:
kind: ClusterRole
name: kops:kube-system:psp
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: Group
name: system:masters
apiGroup: rbac.authorization.k8s.io
# permit the kubelets to use this policy (required for static pod manifests)
- kind: User
name: kubelet
apiGroup: rbac.authorization.k8s.io
- kind: Group
name: system:nodes
apiGroup: rbac.authorization.k8s.io
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
annotations:
k8s-addon: podsecuritypolicy.addons.k8s.io
name: kops:kube-system:psp
namespace: kube-system
roleRef:
kind: ClusterRole
name: kops:kube-system:psp
apiGroup: rbac.authorization.k8s.io
subjects:
# permit all service accounts in the kube-system namespace to use this policy
- kind: Group
name: system:serviceaccounts:kube-system
apiGroup: rbac.authorization.k8s.io
`)
func cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplate, nil
}
func cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsRbacAddonsK8sIoK8s18Yaml = []byte(`# Source: https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/rbac/kubelet-binding.yaml
# The GKE environments don't have kubelets with certificates that
# identify the system:nodes group. They use the kubelet identity
# TODO: remove this once new nodes are granted individual identities and the
# NodeAuthorizer is enabled.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-cluster-admin
labels:
k8s-addon: rbac.addons.k8s.io
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet
`)
func cloudupResourcesAddonsRbacAddonsK8sIoK8s18YamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsRbacAddonsK8sIoK8s18Yaml, nil
}
func cloudupResourcesAddonsRbacAddonsK8sIoK8s18Yaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsRbacAddonsK8sIoK8s18YamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/rbac.addons.k8s.io/k8s-1.8.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsSchedulerAddonsK8sIoV170Yaml = []byte(`kind: ConfigMap
apiVersion: v1
metadata:
name: scheduler-policy
namespace: kube-system
labels:
k8s-addon: scheduler.addons.k8s.io
data:
policy.cfg: |
{
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "NoDiskConflict"},
{"name": "GeneralPredicates"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodeCondition"},
{"name": "PodToleratesNodeTaints"},
{"name": "NoVolumeNodeConflict"}
],
"priorities" : [
{"name": "SelectorSpreadPriority", "weight" : 1},
{"name": "LeastRequestedPriority", "weight" : 1},
{"name": "BalancedResourceAllocation", "weight" : 1},
{"name": "NodePreferAvoidPodsPriority", "weight" : 1},
{"name": "NodeAffinityPriority", "weight" : 1},
{"name": "TaintTolerationPriority", "weight" : 1},
{"name": "InterPodAffinityPriority", "weight" : 1}
],
"hardPodAffinitySymmetricWeight" : 1
}`)
func cloudupResourcesAddonsSchedulerAddonsK8sIoV170YamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsSchedulerAddonsK8sIoV170Yaml, nil
}
func cloudupResourcesAddonsSchedulerAddonsK8sIoV170Yaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsSchedulerAddonsK8sIoV170YamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/scheduler.addons.k8s.io/v1.7.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV1140YamlTemplate = []byte(`# ------------------------------------------------------------------------------
# Config Map
# ------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
name: spotinst-kubernetes-cluster-controller-config
namespace: kube-system
data:
spotinst.cluster-identifier: {{ ClusterName }}
---
# ------------------------------------------------------------------------------
# Secret
# ------------------------------------------------------------------------------
apiVersion: v1
kind: Secret
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
type: Opaque
data:
token: {{ SpotinstTokenBase64 }}
account: {{ SpotinstAccountBase64 }}
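  # Secret "data" values must be base64-encoded, hence the *Base64 template
  # functions here; the older v1.9.0 template later in this file stores the
  # raw values in a ConfigMap instead.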
---
# ------------------------------------------------------------------------------
# Service Account
# ------------------------------------------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
---
# ------------------------------------------------------------------------------
# Cluster Role
# ------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: spotinst-kubernetes-cluster-controller
rules:
# ----------------------------------------------------------------------------
# Required for functional operation (read-only).
# ----------------------------------------------------------------------------
- apiGroups: [""]
resources: ["pods", "nodes", "services", "namespaces", "replicationcontrollers", "limitranges", "events", "persistentvolumes", "persistentvolumeclaims"]
verbs: ["get", "list"]
- apiGroups: ["apps"]
resources: ["deployments", "daemonsets", "statefulsets", "replicasets"]
verbs: ["get","list"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list"]
- apiGroups: ["extensions"]
resources: ["replicasets", "daemonsets"]
verbs: ["get","list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["get", "list"]
- apiGroups: ["autoscaling"]
resources: ["horizontalpodautoscalers"]
verbs: ["get", "list"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list"]
- nonResourceURLs: ["/version/", "/version"]
verbs: ["get"]
# ----------------------------------------------------------------------------
# Required by the draining feature and for functional operation.
# ----------------------------------------------------------------------------
- apiGroups: [""]
resources: ["nodes"]
verbs: ["patch", "update"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
# ----------------------------------------------------------------------------
# Required by the Spotinst Cleanup feature.
# ----------------------------------------------------------------------------
- apiGroups: [""]
resources: ["nodes"]
verbs: ["delete"]
# ----------------------------------------------------------------------------
# Required by the Spotinst CSR Approval feature.
# ----------------------------------------------------------------------------
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests"]
verbs: ["get", "list"]
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests/approval"]
verbs: ["patch", "update"]
- apiGroups: ["certificates.k8s.io"]
resources: ["signers"]
resourceNames: ["kubernetes.io/kubelet-serving", "kubernetes.io/kube-apiserver-client-kubelet"]
verbs: ["approve"]
# ----------------------------------------------------------------------------
# Required by the Spotinst Auto Update feature.
# ----------------------------------------------------------------------------
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles"]
resourceNames: ["spotinst-kubernetes-cluster-controller"]
verbs: ["patch", "update", "escalate"]
- apiGroups: ["apps"]
resources: ["deployments"]
resourceNames: ["spotinst-kubernetes-cluster-controller"]
verbs: ["patch","update"]
# ----------------------------------------------------------------------------
# Required by the Spotinst Apply feature.
# ----------------------------------------------------------------------------
- apiGroups: ["apps"]
resources: ["deployments", "daemonsets"]
verbs: ["get", "list", "patch","update","create","delete"]
- apiGroups: ["extensions"]
resources: ["daemonsets"]
verbs: ["get", "list", "patch","update","create","delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "patch", "update", "create", "delete"]
# ----------------------------------------------------------------------------
# Required by Spotinst Wave.
# ----------------------------------------------------------------------------
- apiGroups: ["sparkoperator.k8s.io"]
resources: ["sparkapplications", "scheduledsparkapplications"]
verbs: ["get", "list"]
---
# ------------------------------------------------------------------------------
# Cluster Role Binding
# ------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: spotinst-kubernetes-cluster-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: spotinst-kubernetes-cluster-controller
subjects:
- kind: ServiceAccount
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
---
# ------------------------------------------------------------------------------
# Deployment
# ------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
template:
metadata:
labels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
spec:
priorityClassName: system-cluster-critical
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-addon
operator: In
values:
- spotinst-kubernetes-cluster-controller.addons.k8s.io
topologyKey: kubernetes.io/hostname
containers:
- name: spotinst-kubernetes-cluster-controller
imagePullPolicy: Always
image: spotinst/kubernetes-cluster-controller:1.0.68
livenessProbe:
httpGet:
path: /healthcheck
port: 4401
initialDelaySeconds: 300
periodSeconds: 20
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /healthcheck
port: 4401
initialDelaySeconds: 20
periodSeconds: 20
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 3
env:
- name: SPOTINST_TOKEN
valueFrom:
secretKeyRef:
name: spotinst-kubernetes-cluster-controller
key: token
optional: true
- name: SPOTINST_ACCOUNT
valueFrom:
secretKeyRef:
name: spotinst-kubernetes-cluster-controller
key: account
optional: true
- name: SPOTINST_TOKEN_LEGACY
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.token
optional: true
- name: SPOTINST_ACCOUNT_LEGACY
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.account
optional: true
- name: CLUSTER_IDENTIFIER
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.cluster-identifier
- name: DISABLE_AUTO_UPDATE
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: disable-auto-update
optional: true
- name: ENABLE_CSR_APPROVAL
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: enable-csr-approval
optional: true
- name: PROXY_URL
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: proxy-url
optional: true
- name: BASE_SPOTINST_URL
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: base-url
optional: true
- name: POD_ID
valueFrom:
fieldRef:
fieldPath: metadata.uid
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
serviceAccountName: spotinst-kubernetes-cluster-controller
tolerations:
- key: node.kubernetes.io/not-ready
effect: NoExecute
operator: Exists
tolerationSeconds: 150
- key: node.kubernetes.io/unreachable
effect: NoExecute
operator: Exists
tolerationSeconds: 150
- key: node-role.kubernetes.io/master
operator: Exists
`)
func cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV1140YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV1140YamlTemplate, nil
}
func cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV1140YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV1140YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.14.0.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV190YamlTemplate = []byte(`# ------------------------------------------
# Config Map
# ------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
name: spotinst-kubernetes-cluster-controller-config
namespace: kube-system
data:
spotinst.token: {{ SpotinstToken }}
spotinst.account: {{ SpotinstAccount }}
spotinst.cluster-identifier: {{ ClusterName }}
---
# ------------------------------------------
# Service Account
# ------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
---
# ------------------------------------------
# Cluster Role
# ------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["pods", "nodes", "replicationcontrollers", "events", "limitranges", "services", "persistentvolumes", "persistentvolumeclaims", "namespaces"]
verbs: ["get", "delete", "list", "patch", "update"]
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get","list","patch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get","list"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles"]
verbs: ["patch", "update", "escalate"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["list"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["list"]
- nonResourceURLs: ["/version/", "/version"]
verbs: ["get"]
---
# ------------------------------------------
# Cluster Role Binding
# ------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: spotinst-kubernetes-cluster-controller
subjects:
- kind: ServiceAccount
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
---
# ------------------------------------------
# Deployment
# ------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
template:
metadata:
labels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
spec:
containers:
- name: spotinst-kubernetes-cluster-controller
imagePullPolicy: Always
image: spotinst/kubernetes-cluster-controller:1.0.39
livenessProbe:
httpGet:
path: /healthcheck
port: 4401
initialDelaySeconds: 300
periodSeconds: 30
env:
- name: SPOTINST_TOKEN
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.token
- name: SPOTINST_ACCOUNT
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.account
- name: CLUSTER_IDENTIFIER
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.cluster-identifier
serviceAccountName: spotinst-kubernetes-cluster-controller
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
`)
func cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV190YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV190YamlTemplate, nil
}
func cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV190YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV190YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsStorageAwsAddonsK8sIoV1150Yaml = []byte(`apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: default
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gp2
annotations:
storageclass.beta.kubernetes.io/is-default-class: "false"
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: kops-ssd-1-17
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
encrypted: "true"
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
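# Of the three classes above, only kops-ssd-1-17 is annotated as the default
# class, so an unannotated PVC uses it; WaitForFirstConsumer defers volume
# creation until a pod is scheduled, so the encrypted gp2 volume is
# provisioned in that pod's availability zone.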
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system
`)
func cloudupResourcesAddonsStorageAwsAddonsK8sIoV1150YamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsStorageAwsAddonsK8sIoV1150Yaml, nil
}
func cloudupResourcesAddonsStorageAwsAddonsK8sIoV1150Yaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsStorageAwsAddonsK8sIoV1150YamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.15.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsStorageAwsAddonsK8sIoV170Yaml = []byte(`apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: default
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gp2
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
`)
func cloudupResourcesAddonsStorageAwsAddonsK8sIoV170YamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsStorageAwsAddonsK8sIoV170Yaml, nil
}
func cloudupResourcesAddonsStorageAwsAddonsK8sIoV170Yaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsStorageAwsAddonsK8sIoV170YamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.7.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsStorageGceAddonsK8sIoV170Yaml = []byte(`apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
k8s-addon: storage-gce.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard
`)
func cloudupResourcesAddonsStorageGceAddonsK8sIoV170YamlBytes() ([]byte, error) {
return _cloudupResourcesAddonsStorageGceAddonsK8sIoV170Yaml, nil
}
func cloudupResourcesAddonsStorageGceAddonsK8sIoV170Yaml() (*asset, error) {
bytes, err := cloudupResourcesAddonsStorageGceAddonsK8sIoV170YamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/storage-gce.addons.k8s.io/v1.7.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _cloudupResourcesAddonsStorageOpenstackAddonsK8sIoK8s116YamlTemplate = []byte(`# Sourced from https://github.com/kubernetes/cloud-provider-openstack/tree/master/manifests/cinder-csi-plugin
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cinder-controller-sa
namespace: kube-system
labels:
k8s-addon: storage-openstack.addons.k8s.io
---
# external attacher
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
labels:
k8s-addon: storage-openstack.addons.k8s.io
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-binding
labels:
k8s-addon: storage-openstack.addons.k8s.io
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-attacher-role
apiGroup: rbac.authorization.k8s.io
---
# external provisioner
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
labels:
k8s-addon: storage-openstack.addons.k8s.io
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-binding
labels:
k8s-addon: storage-openstack.addons.k8s.io
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# external snapshotter
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-role
labels:
k8s-addon: storage-openstack.addons.k8s.io
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-binding
labels:
k8s-addon: storage-openstack.addons.k8s.io
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
# external resizer
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role
labels:
k8s-addon: storage-openstack.addons.k8s.io
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-binding
labels:
k8s-addon: storage-openstack.addons.k8s.io
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-resizer-role
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: external-resizer-cfg
labels:
k8s-addon: storage-openstack.addons.k8s.io
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role-cfg
namespace: kube-system
labels:
k8s-addon: storage-openstack.addons.k8s.io
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: Role
name: external-resizer-cfg
apiGroup: rbac.authorization.k8s.io
---
# This YAML file contains the CSI controller plugin and its sidecars:
# external-attacher, external-provisioner, external-snapshotter and external-resizer
kind: Service
apiVersion: v1
metadata:
name: csi-cinder-controller-service
namespace: kube-system
labels:
app: csi-cinder-controllerplugin
k8s-addon: storage-openstack.addons.k8s.io
spec:
selector:
app: csi-cinder-controllerplugin
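# The StatefulSet below references this Service via spec.serviceName;
# it carries no real traffic, hence the "placeholder" port.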
ports:
- name: placeholder
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-cinder-controllerplugin
namespace: kube-system
labels:
k8s-addon: storage-openstack.addons.k8s.io
spec:
serviceName: "csi-cinder-controller-service"
replicas: 1
selector:
matchLabels:
app: csi-cinder-controllerplugin
template:
metadata:
labels:
app: csi-cinder-controllerplugin
k8s-addon: storage-openstack.addons.k8s.io
spec:
serviceAccount: csi-cinder-controller-sa
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v2.2.0
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.6.0
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
{{ if WithDefaultBool .CloudConfig.Openstack.BlockStorage.CSITopologySupport false }}
- --feature-gates=Topology=true
{{ end }}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-snapshotter
image: quay.io/k8scsi/csi-snapshotter:v1.2.2
args:
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: Always
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: csi-resizer
image: quay.io/k8scsi/csi-resizer:v0.4.0
args:
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: cinder-csi-plugin
image: "{{- if .CloudConfig.Openstack.BlockStorage.CSIPluginImage -}} {{ .CloudConfig.Openstack.BlockStorage.CSIPluginImage }} {{- else -}} docker.io/k8scloudprovider/cinder-csi-plugin:{{OpenStackCCMTag}} {{- end -}}"
args:
- /bin/cinder-csi-plugin
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
- "--cluster=$(CLUSTER_NAME)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/kubernetes/cloud.config
- name: CLUSTER_NAME
value: kubernetes
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- mountPath: /etc/kubernetes/cloud.config
name: cloudconfig
readOnly: true
volumes:
- name: socket-dir
emptyDir: {}
- hostPath:
path: /etc/kubernetes/cloud.config
type: ""
name: cloudconfig
---
# This YAML defines the API objects that create RBAC roles for the CSI node plugin.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cinder-node-sa
namespace: kube-system
labels:
k8s-addon: storage-openstack.addons.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-role
labels:
k8s-addon: storage-openstack.addons.k8s.io
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-binding
labels:
k8s-addon: storage-openstack.addons.k8s.io
subjects:
- kind: ServiceAccount
name: csi-cinder-node-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-nodeplugin-role
apiGroup: rbac.authorization.k8s.io
---
# This YAML file contains the driver-registrar and CSI driver node plugin API objects,
# which are required to run the CSI node plugin for Cinder.
kind: DaemonSet
apiVersion: apps/v1
metadata:
labels:
k8s-addon: storage-openstack.addons.k8s.io
name: csi-cinder-nodeplugin
namespace: kube-system
spec:
selector:
matchLabels:
app: csi-cinder-nodeplugin
template:
metadata:
labels:
app: csi-cinder-nodeplugin
k8s-addon: storage-openstack.addons.k8s.io
spec:
serviceAccount: csi-cinder-node-sa
tolerations:
- operator: Exists
hostNetwork: true
containers:
- name: node-driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
args:
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/cinder.csi.openstack.org /registration/cinder.csi.openstack.org-reg.sock"]
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/cinder.csi.openstack.org/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: cinder-csi-plugin
securityContext:
privileged: true
runAsNonRoot: false
runAsUser: 0
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "{{- if .CloudConfig.Openstack.BlockStorage.CSIPluginImage -}} {{ .CloudConfig.Openstack.BlockStorage.CSIPluginImage }} {{- else -}} docker.io/k8scloudprovider/cinder-csi-plugin:{{OpenStackCCMTag}} {{- end -}}"
args:
- /bin/cinder-csi-plugin
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/kubernetes/cloud.config
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: pods-cloud-data
mountPath: /var/lib/cloud/data
readOnly: true
- name: pods-probe-dir
mountPath: /dev
mountPropagation: "HostToContainer"
- mountPath: /etc/kubernetes/cloud.config
name: cloudconfig
readOnly: true
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/cinder.csi.openstack.org
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: pods-cloud-data
hostPath:
path: /var/lib/cloud/data
type: Directory
- name: pods-probe-dir
hostPath:
path: /dev
type: Directory
- hostPath:
path: /etc/kubernetes/cloud.config
type: ""
name: cloudconfig
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
labels:
k8s-addon: storage-openstack.addons.k8s.io
name: cinder.csi.openstack.org
spec:
attachRequired: true
podInfoOnMount: true
volumeLifecycleModes:
- Persistent
- Ephemeral
{{ if WithDefaultBool .CloudConfig.Openstack.BlockStorage.CreateStorageClass true }}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: default
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
k8s-addon: storage-openstack.addons.k8s.io
provisioner: cinder.csi.openstack.org
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
{{ end }}`)
func cloudupResourcesAddonsStorageOpenstackAddonsK8sIoK8s116YamlTemplateBytes() ([]byte, error) {
return _cloudupResourcesAddonsStorageOpenstackAddonsK8sIoK8s116YamlTemplate, nil
}
func cloudupResourcesAddonsStorageOpenstackAddonsK8sIoK8s116YamlTemplate() (*asset, error) {
bytes, err := cloudupResourcesAddonsStorageOpenstackAddonsK8sIoK8s116YamlTemplateBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cloudup/resources/addons/storage-openstack.addons.k8s.io/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
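// Rendering note: the template above is not plain YAML. Directives such as
// {{ if WithDefaultBool ... }} and {{ OpenStackCCMTag }} are custom template
// functions whose real implementations live elsewhere in kops and are
// registered before rendering. A minimal, illustrative sketch with
// hypothetical stand-ins for those functions:
//
//	tmpl := template.New("storage-openstack").Funcs(template.FuncMap{
//		// Hypothetical stand-ins, for illustration only.
//		"WithDefaultBool": func(v *bool, def bool) bool {
//			if v != nil {
//				return *v
//			}
//			return def
//		},
//		"OpenStackCCMTag": func() string { return "latest" },
//	})
//	name := "cloudup/resources/addons/storage-openstack.addons.k8s.io/k8s-1.16.yaml.template"
//	tmpl = template.Must(tmpl.Parse(string(MustAsset(name))))
//	var out bytes.Buffer
//	if err := tmpl.Execute(&out, clusterSpec); err != nil { // clusterSpec: hypothetical data
//		// handle the rendering error
//	}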
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
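// Illustrative usage (any key of _bindata is a valid name):
//
//	data, err := Asset("cloudup/resources/addons/OWNERS")
//	if err != nil {
//		// the asset is missing or could not be loaded
//	}
//	_ = data // raw file contents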
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
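// Illustrative usage, e.g. for the package-level initialization the doc
// comment above suggests:
//
//	var ownersFile = MustAsset("cloudup/resources/addons/OWNERS")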
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
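// Illustrative usage:
//
//	for _, name := range AssetNames() {
//		fmt.Println(name)
//	}
//
// Note that Go map iteration order is unspecified, so the returned slice is unsorted.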
// _bindata is a table mapping each asset name to its generator function.
var _bindata = map[string]func() (*asset, error){
"cloudup/resources/addons/OWNERS": cloudupResourcesAddonsOwners,
"cloudup/resources/addons/anonymous-issuer-discovery.addons.k8s.io/k8s-1.16.yaml.template": cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplate,
"cloudup/resources/addons/authentication.aws/k8s-1.12.yaml.template": cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplate,
"cloudup/resources/addons/authentication.kope.io/k8s-1.12.yaml": cloudupResourcesAddonsAuthenticationKopeIoK8s112Yaml,
"cloudup/resources/addons/cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml.template": cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate,
"cloudup/resources/addons/core.addons.k8s.io/addon.yaml": cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml,
"cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate,
"cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml": cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml,
"cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate,
"cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml.template": cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplate,
"cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplate,
"cloudup/resources/addons/external-dns.addons.k8s.io/README.md": cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd,
"cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate,
"cloudup/resources/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml.template": cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate,
"cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplate,
"cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml": cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml,
"cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml": cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml,
"cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml": cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml,
"cloudup/resources/addons/metadata-proxy.addons.k8s.io/addon.yaml": cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYaml,
"cloudup/resources/addons/metadata-proxy.addons.k8s.io/v0.1.12.yaml": cloudupResourcesAddonsMetadataProxyAddonsK8sIoV0112Yaml,
"cloudup/resources/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml.template": cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplate,
"cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate,
"cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template": cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplate,
"cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template": cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate,
"cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate,
"cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate,
"cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml": cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml,
"cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate,
"cloudup/resources/addons/networking.projectcalico.org/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate,
"cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate,
"cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate,
"cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate,
"cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate,
"cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate,
"cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate,
"cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template": cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate,
"cloudup/resources/addons/nodelocaldns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate,
"cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.13.yaml.template": cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate,
"cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplate,
"cloudup/resources/addons/rbac.addons.k8s.io/k8s-1.8.yaml": cloudupResourcesAddonsRbacAddonsK8sIoK8s18Yaml,
"cloudup/resources/addons/scheduler.addons.k8s.io/v1.7.0.yaml": cloudupResourcesAddonsSchedulerAddonsK8sIoV170Yaml,
"cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.14.0.yaml.template": cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV1140YamlTemplate,
"cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template": cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV190YamlTemplate,
"cloudup/resources/addons/storage-aws.addons.k8s.io/v1.15.0.yaml": cloudupResourcesAddonsStorageAwsAddonsK8sIoV1150Yaml,
"cloudup/resources/addons/storage-aws.addons.k8s.io/v1.7.0.yaml": cloudupResourcesAddonsStorageAwsAddonsK8sIoV170Yaml,
"cloudup/resources/addons/storage-gce.addons.k8s.io/v1.7.0.yaml": cloudupResourcesAddonsStorageGceAddonsK8sIoV170Yaml,
"cloudup/resources/addons/storage-openstack.addons.k8s.io/k8s-1.16.yaml.template": cloudupResourcesAddonsStorageOpenstackAddonsK8sIoK8s116YamlTemplate,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//
//	data/
//	  foo.txt
//	  img/
//	    a.png
//	    b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
canonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
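// Illustrative usage, listing the addon directories embedded above:
//
//	dirs, err := AssetDir("cloudup/resources/addons")
//	if err != nil {
//		// no such directory in the embedded tree
//	}
//	_ = dirs // e.g. "OWNERS", "coredns.addons.k8s.io", ...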
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"cloudup": {nil, map[string]*bintree{
"resources": {nil, map[string]*bintree{
"addons": {nil, map[string]*bintree{
"OWNERS": {cloudupResourcesAddonsOwners, map[string]*bintree{}},
"anonymous-issuer-discovery.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.16.yaml.template": {cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplate, map[string]*bintree{}},
}},
"authentication.aws": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplate, map[string]*bintree{}},
}},
"authentication.kope.io": {nil, map[string]*bintree{
"k8s-1.12.yaml": {cloudupResourcesAddonsAuthenticationKopeIoK8s112Yaml, map[string]*bintree{}},
}},
"cluster-autoscaler.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.15.yaml.template": {cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate, map[string]*bintree{}},
}},
"core.addons.k8s.io": {nil, map[string]*bintree{
"addon.yaml": {cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml, map[string]*bintree{}},
"k8s-1.12.yaml.template": {cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}},
"v1.4.0.yaml": {cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml, map[string]*bintree{}},
}},
"coredns.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}},
}},
"digitalocean-cloud-controller.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.8.yaml.template": {cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplate, map[string]*bintree{}},
}},
"dns-controller.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}},
}},
"external-dns.addons.k8s.io": {nil, map[string]*bintree{
"README.md": {cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd, map[string]*bintree{}},
"k8s-1.12.yaml.template": {cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}},
}},
"kops-controller.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.16.yaml.template": {cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate, map[string]*bintree{}},
}},
"kube-dns.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}},
}},
"kubelet-api.rbac.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.9.yaml": {cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml, map[string]*bintree{}},
}},
"limit-range.addons.k8s.io": {nil, map[string]*bintree{
"addon.yaml": {cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml, map[string]*bintree{}},
"v1.5.0.yaml": {cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml, map[string]*bintree{}},
}},
"metadata-proxy.addons.k8s.io": {nil, map[string]*bintree{
"addon.yaml": {cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYaml, map[string]*bintree{}},
"v0.1.12.yaml": {cloudupResourcesAddonsMetadataProxyAddonsK8sIoV0112Yaml, map[string]*bintree{}},
}},
"metrics-server.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.11.yaml.template": {cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplate, map[string]*bintree{}},
}},
"networking.amazon-vpc-routed-eni": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate, map[string]*bintree{}},
"k8s-1.16.yaml.template": {cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplate, map[string]*bintree{}},
}},
"networking.cilium.io": {nil, map[string]*bintree{
"k8s-1.12-v1.8.yaml.template": {cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate, map[string]*bintree{}},
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate, map[string]*bintree{}},
}},
"networking.flannel": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate, map[string]*bintree{}},
}},
"networking.kope.io": {nil, map[string]*bintree{
"k8s-1.12.yaml": {cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml, map[string]*bintree{}},
}},
"networking.kuberouter": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate, map[string]*bintree{}},
}},
"networking.projectcalico.org": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate, map[string]*bintree{}},
"k8s-1.16.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate, map[string]*bintree{}},
}},
"networking.projectcalico.org.canal": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate, map[string]*bintree{}},
"k8s-1.15.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate, map[string]*bintree{}},
"k8s-1.16.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate, map[string]*bintree{}},
}},
"networking.weave": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate, map[string]*bintree{}},
}},
"node-authorizer.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}},
}},
"node-termination-handler.aws": {nil, map[string]*bintree{
"k8s-1.11.yaml.template": {cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate, map[string]*bintree{}},
}},
"nodelocaldns.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}},
}},
"openstack.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.13.yaml.template": {cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate, map[string]*bintree{}},
}},
"podsecuritypolicy.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.12.yaml.template": {cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}},
}},
"rbac.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.8.yaml": {cloudupResourcesAddonsRbacAddonsK8sIoK8s18Yaml, map[string]*bintree{}},
}},
"scheduler.addons.k8s.io": {nil, map[string]*bintree{
"v1.7.0.yaml": {cloudupResourcesAddonsSchedulerAddonsK8sIoV170Yaml, map[string]*bintree{}},
}},
"spotinst-kubernetes-cluster-controller.addons.k8s.io": {nil, map[string]*bintree{
"v1.14.0.yaml.template": {cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV1140YamlTemplate, map[string]*bintree{}},
"v1.9.0.yaml.template": {cloudupResourcesAddonsSpotinstKubernetesClusterControllerAddonsK8sIoV190YamlTemplate, map[string]*bintree{}},
}},
"storage-aws.addons.k8s.io": {nil, map[string]*bintree{
"v1.15.0.yaml": {cloudupResourcesAddonsStorageAwsAddonsK8sIoV1150Yaml, map[string]*bintree{}},
"v1.7.0.yaml": {cloudupResourcesAddonsStorageAwsAddonsK8sIoV170Yaml, map[string]*bintree{}},
}},
"storage-gce.addons.k8s.io": {nil, map[string]*bintree{
"v1.7.0.yaml": {cloudupResourcesAddonsStorageGceAddonsK8sIoV170Yaml, map[string]*bintree{}},
}},
"storage-openstack.addons.k8s.io": {nil, map[string]*bintree{
"k8s-1.16.yaml.template": {cloudupResourcesAddonsStorageOpenstackAddonsK8sIoK8s116YamlTemplate, map[string]*bintree{}},
}},
}},
}},
}},
}}
// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores assets under the given directory recursively.
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// AssetDir fails when name refers to a file; restore the single asset directly.
if err != nil {
return RestoreAsset(dir, name)
}
// Otherwise name is a directory; recurse into each child.
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
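// Illustrative usage, materializing the whole embedded tree on disk
// (the directory layout mirrors the asset names):
//
//	if err := RestoreAssets("/tmp/kops-models", "cloudup"); err != nil {
//		// handle the write failure
//	}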
func _filePath(dir, name string) string {
canonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}