mirror of https://github.com/kubernetes/kops.git
Merge pull request #2337 from justinsb/calico_16_configuration
Split calico configuration into 1.5 and 1.6
This commit is contained in commit e36c055cd8
@@ -0,0 +1,327 @@
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The calico-etcd PetSet service IP:port
  etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
                   {{- range $j, $member := $cluster.Members -}}
                       {{- if $j }},{{ end -}}
                       http://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
                   {{- end }}"
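  # For illustration only (names assumed, not part of this commit): with a cluster
  # named "example.k8s.local" and a single etcd member "a", the template above would
  # render roughly as:
  #   etcd_endpoints: "http://etcd-a.internal.example.k8s.local:4001"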

  # True enables BGP networking, false tells Calico to enforce
  # policy only, using native networking.
  enable_bgp: "true"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
        "name": "k8s-pod-network",
        "type": "calico",
        "etcd_endpoints": "__ETCD_ENDPOINTS__",
        "log_level": "info",
        "ipam": {
            "type": "calico-ipam"
        },
        "policy": {
            "type": "k8s",
            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
        },
        "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
        }
    }

  # The default IP Pool to be created for the cluster.
  # Pod IP addresses will be assigned from this pool.
  ippool.yaml: |
    apiVersion: v1
    kind: ipPool
    metadata:
      cidr: {{ .NonMasqueradeCIDR }}
    spec:
      ipip:
        enabled: true
      nat-outgoing: true
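  # For illustration only (value assumed, not part of this commit): with the
  # cluster spec's nonMasqueradeCIDR set to 100.64.0.0/10, the pool above
  # renders with:
  #   metadata:
  #     cidr: 100.64.0.0/10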

---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
    role.kubernetes.io/networking: "1"
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
        role.kubernetes.io/networking: "1"
    spec:
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v1.1.1
          resources:
            requests:
              cpu: 10m
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Enable BGP. Disable to enforce policy only.
            - name: CALICO_NETWORKING
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: enable_bgp
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Don't configure a default pool. This is done by the Job
            # below.
            - name: NO_DEFAULT_POOLS
              value: "true"
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v1.6.1
          resources:
            requests:
              cpu: 10m
          imagePullPolicy: Always
          command: ["/install-cni.sh"]
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d

---

# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
    role.kubernetes.io/networking: "1"
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: calico-policy-controller
          image: calico/kube-policy-controller:v0.5.4
          resources:
            requests:
              cpu: 10m
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The location of the Kubernetes API. Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"

---

# This manifest deploys a Job which performs one time
# configuration of Calico
apiVersion: batch/v1
kind: Job
metadata:
  name: configure-calico
  namespace: kube-system
  labels:
    k8s-app: calico
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      name: configure-calico
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      restartPolicy: OnFailure
      containers:
        # Writes basic configuration to datastore.
        - name: configure-calico
          image: calico/ctl:v1.1.1
          args:
            - apply
            - -f
            - /etc/config/calico/ippool.yaml
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
      volumes:
        - name: config-volume
          configMap:
            name: calico-config
            items:
              - key: ippool.yaml
                path: calico/ippool.yaml

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico
  labels:
    role.kubernetes.io/networking: "1"
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - namespaces
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"

---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico
  labels:
    role.kubernetes.io/networking: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
  - kind: ServiceAccount
    name: calico
    namespace: kube-system
@@ -294,16 +294,35 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 		key := "networking.projectcalico.org"
 		version := "2.1.1"
 
-		location := key + "/v" + version + ".yaml"
-
-		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-			Name:     fi.String(key),
-			Version:  fi.String(version),
-			Selector: map[string]string{"role.kubernetes.io/networking": "1"},
-			Manifest: fi.String(location),
-		})
-
-		manifests[key] = "addons/" + location
+		{
+			location := key + "/pre-k8s-1.6.yaml"
+			id := "pre-k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: "<1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
+
+		{
+			location := key + "/k8s-1.6.yaml"
+			id := "k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: ">=1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
 	}
 
 	if b.cluster.Spec.Networking.Canal != nil {
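The effect of the new KubernetesVersion and Id fields is that the Calico addon now ships two manifests, and only the one whose semver range matches the cluster's Kubernetes version is applied. The sketch below is illustrative only, not the kops implementation; the use of github.com/blang/semver, the hard-coded version, and the variable names are assumptions made for the example.

// Illustrative sketch: selecting the manifest whose version range (e.g. "<1.6.0"
// or ">=1.6.0") matches an assumed cluster Kubernetes version.
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	clusterVersion := semver.MustParse("1.6.2") // assumed cluster version

	// Manifest paths taken from the diff above; the ranges mirror the
	// KubernetesVersion fields in the two AddonSpec entries.
	manifests := map[string]string{
		"addons/networking.projectcalico.org/pre-k8s-1.6.yaml": "<1.6.0",
		"addons/networking.projectcalico.org/k8s-1.6.yaml":     ">=1.6.0",
	}

	for manifest, constraint := range manifests {
		// Range in blang/semver is a func(Version) bool.
		if semver.MustParseRange(constraint)(clusterVersion) {
			fmt.Println("would apply:", manifest)
		}
	}
}

With a 1.6.2 cluster this prints only the k8s-1.6.yaml path; a 1.5.x cluster would select the pre-k8s-1.6.yaml manifest instead.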