mirror of https://github.com/kubernetes/kops.git
Merge pull request #4041 from rajansandeep/corednskops
CoreDNS in Kops as an addon
Commit cc25a5a977
@@ -266,6 +26,26 @@ Will make kube-scheduler use the scheduler policy from configmap "scheduler-poli

Note that as of Kubernetes 1.8.0, kube-scheduler does not reload its configuration from the configmap automatically. You will need to SSH into the master instance and restart the Docker container manually.

### kubeDNS

This block contains configurations for `kube-dns`.

```yaml
spec:
  kubeDNS:
    provider: KubeDNS
```

Specifying `KubeDNS` installs kube-dns as the default service discovery provider.

```yaml
spec:
  kubeDNS:
    provider: CoreDNS
```

Specifying `CoreDNS` installs [CoreDNS](https://coredns.io/) instead of kube-dns.
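For orientation, the `provider` field sits under `spec.kubeDNS` in the cluster spec (e.g. edited via `kops edit cluster`). A minimal sketch; the surrounding names and values here are illustrative only, not part of this change:

```yaml
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
  name: example.k8s.local       # hypothetical cluster name
spec:
  kubernetesVersion: 1.8.6      # illustrative version
  kubeDNS:
    provider: CoreDNS           # or KubeDNS; kube-dns remains the default when omitted
```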
### kubeControllerManager

This block contains configurations for the `controller-manager`.
@@ -300,6 +300,8 @@ type KubeDNSConfig struct {
	CacheMaxSize int `json:"cacheMaxSize,omitempty"`
	// CacheMaxConcurrent is the maximum number of concurrent queries for dnsmasq
	CacheMaxConcurrent int `json:"cacheMaxConcurrent,omitempty"`
	// Provider indicates whether CoreDNS or kube-dns will be the default service discovery.
	Provider string `json:"provider,omitempty"`
}

// ExternalDNSConfig are options of the dns-controller
@@ -299,6 +299,8 @@ type KubeDNSConfig struct {
	CacheMaxSize int `json:"cacheMaxSize,omitempty"`
	// CacheMaxConcurrent is the maximum number of concurrent queries for dnsmasq
	CacheMaxConcurrent int `json:"cacheMaxConcurrent,omitempty"`
	// Provider indicates whether CoreDNS or kube-dns will be the default service discovery.
	Provider string `json:"provider,omitempty"`
}

// ExternalDNSConfig are options of the dns-controller
@@ -2099,6 +2099,7 @@ func autoConvert_v1alpha1_KubeDNSConfig_To_kops_KubeDNSConfig(in *KubeDNSConfig,
	out.ServerIP = in.ServerIP
	out.CacheMaxSize = in.CacheMaxSize
	out.CacheMaxConcurrent = in.CacheMaxConcurrent
	out.Provider = in.Provider
	return nil
}
@@ -2114,6 +2115,7 @@ func autoConvert_kops_KubeDNSConfig_To_v1alpha1_KubeDNSConfig(in *kops.KubeDNSCo
	out.ServerIP = in.ServerIP
	out.CacheMaxSize = in.CacheMaxSize
	out.CacheMaxConcurrent = in.CacheMaxConcurrent
	out.Provider = in.Provider
	return nil
}
@@ -297,6 +297,8 @@ type KubeDNSConfig struct {
	CacheMaxSize int `json:"cacheMaxSize,omitempty"`
	// CacheMaxConcurrent is the maximum number of concurrent queries for dnsmasq
	CacheMaxConcurrent int `json:"cacheMaxConcurrent,omitempty"`
	// Provider indicates whether CoreDNS or kube-dns will be the default service discovery.
	Provider string `json:"provider,omitempty"`
}

// ExternalDNSConfig are options of the dns-controller
@@ -2363,6 +2363,7 @@ func autoConvert_v1alpha2_KubeDNSConfig_To_kops_KubeDNSConfig(in *KubeDNSConfig,
	out.ServerIP = in.ServerIP
	out.CacheMaxSize = in.CacheMaxSize
	out.CacheMaxConcurrent = in.CacheMaxConcurrent
	out.Provider = in.Provider
	return nil
}
@@ -2378,6 +2379,7 @@ func autoConvert_kops_KubeDNSConfig_To_v1alpha2_KubeDNSConfig(in *kops.KubeDNSCo
	out.ServerIP = in.ServerIP
	out.CacheMaxSize = in.CacheMaxSize
	out.CacheMaxConcurrent = in.CacheMaxConcurrent
	out.Provider = in.Provider
	return nil
}
@@ -33,7 +33,7 @@ import (
 
 // legacy contains validation functions that don't match the apimachinery style
 
-// ValidateCluster is responsible for checking the validitity of the Cluster spec
+// ValidateCluster is responsible for checking the validity of the Cluster spec
 func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
 	fieldSpec := field.NewPath("Spec")
 	var err error
@@ -270,24 +270,22 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
 	// Check KubeDNS.ServerIP
 	if c.Spec.KubeDNS != nil {
 		serverIPString := c.Spec.KubeDNS.ServerIP
 		if serverIPString == "" {
 			return field.Required(fieldSpec.Child("KubeDNS", "ServerIP"), "Cluster did not have KubeDNS.ServerIP set")
 		}
-		if serverIPString != "" {
-			dnsServiceIP := net.ParseIP(serverIPString)
-			if dnsServiceIP == nil {
-				return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "Cluster had an invalid KubeDNS.ServerIP")
-			}
+		dnsServiceIP := net.ParseIP(serverIPString)
+		if dnsServiceIP == nil {
+			return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "Cluster had an invalid KubeDNS.ServerIP")
+		}
 
-			if !serviceClusterIPRange.Contains(dnsServiceIP) {
-				return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, serverIPString))
-			}
+		if !serviceClusterIPRange.Contains(dnsServiceIP) {
+			return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, serverIPString))
+		}
 
-			if c.Spec.Kubelet != nil && c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
-				return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "Kubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
-			}
-			if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
-				return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "MasterKubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
-			}
-		}
+		if c.Spec.Kubelet != nil && c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
+			return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "Kubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
+		}
+		if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
+			return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "MasterKubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
+		}
 	}
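To see what this validation enforces in practice, here is a sketch of a consistent spec. The CIDR and IP shown are the usual kops defaults, used for illustration only and not mandated by this change:

```yaml
spec:
  serviceClusterIPRange: 100.64.0.0/13   # kubeDNS.serverIP must fall inside this range
  kubeDNS:
    serverIP: 100.64.0.10
  kubelet:
    clusterDNS: 100.64.0.10              # must match kubeDNS.serverIP
  masterKubelet:
    clusterDNS: 100.64.0.10              # must match kubeDNS.serverIP
```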
@@ -0,0 +1,158 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    k8s-addon: coredns.addons.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    k8s-addon: coredns.addons.k8s.io
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    k8s-addon: coredns.addons.k8s.io
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa {
          pods insecure
          upstream
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    k8s-addon: coredns.addons.k8s.io
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.0.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-addon: coredns.addons.k8s.io
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ KubeDNS.ServerIP }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
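Two details of the manifest above are worth noting: the Service keeps the `kube-dns` name, labels, and cluster IP, so switching to CoreDNS is transparent to pods already resolving against the cluster DNS address, and the Corefile is templated. As a rough sketch, assuming the common default cluster domain `cluster.local` is substituted for `{{ KubeDNS.Domain }}` (an assumption for illustration), the rendered ConfigMap data would look like:

```yaml
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        kubernetes cluster.local. in-addr.arpa ip6.arpa {
          pods insecure
          upstream
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
    }
```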
@@ -131,38 +131,64 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 		}
 	}
 
-	{
-		key := "kube-dns.addons.k8s.io"
-		version := "1.14.10"
-
-		{
-			location := key + "/pre-k8s-1.6.yaml"
-			id := "pre-k8s-1.6"
-
-			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-				Name:              fi.String(key),
-				Version:           fi.String(version),
-				Selector:          map[string]string{"k8s-addon": key},
-				Manifest:          fi.String(location),
-				KubernetesVersion: "<1.6.0",
-				Id:                id,
-			})
-			manifests[key+"-"+id] = "addons/" + location
-		}
-
-		{
-			location := key + "/k8s-1.6.yaml"
-			id := "k8s-1.6"
-
-			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-				Name:              fi.String(key),
-				Version:           fi.String(version),
-				Selector:          map[string]string{"k8s-addon": key},
-				Manifest:          fi.String(location),
-				KubernetesVersion: ">=1.6.0",
-				Id:                id,
-			})
-			manifests[key+"-"+id] = "addons/" + location
-		}
-	}
+	kubeDNS := b.cluster.Spec.KubeDNS
+	if kubeDNS.Provider == "KubeDNS" || kubeDNS.Provider == "" {
+
+		{
+			key := "kube-dns.addons.k8s.io"
+			version := "1.14.10"
+
+			{
+				location := key + "/pre-k8s-1.6.yaml"
+				id := "pre-k8s-1.6"
+
+				addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+					Name:              fi.String(key),
+					Version:           fi.String(version),
+					Selector:          map[string]string{"k8s-addon": key},
+					Manifest:          fi.String(location),
+					KubernetesVersion: "<1.6.0",
+					Id:                id,
+				})
+				manifests[key+"-"+id] = "addons/" + location
+			}
+
+			{
+				location := key + "/k8s-1.6.yaml"
+				id := "k8s-1.6"
+
+				addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+					Name:              fi.String(key),
+					Version:           fi.String(version),
+					Selector:          map[string]string{"k8s-addon": key},
+					Manifest:          fi.String(location),
+					KubernetesVersion: ">=1.6.0",
+					Id:                id,
+				})
+				manifests[key+"-"+id] = "addons/" + location
+			}
+		}
+	}
+
+	if kubeDNS.Provider == "CoreDNS" {
+		{
+			key := "coredns.addons.k8s.io"
+			version := "1.0.6"
+
+			{
+				location := key + "/k8s-1.6.yaml"
+				id := "k8s-1.6"
+
+				addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+					Name:              fi.String(key),
+					Version:           fi.String(version),
+					Selector:          map[string]string{"k8s-addon": key},
+					Manifest:          fi.String(location),
+					KubernetesVersion: ">=1.6.0",
+					Id:                id,
+				})
+				manifests[key+"-"+id] = "addons/" + location
+			}
+		}
+	}
 
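Read together with the spec change above, this selection registers exactly one DNS addon in the bootstrap channel: the existing kube-dns manifests when `provider` is `KubeDNS` or empty, and the new CoreDNS manifest when it is `CoreDNS`. Serialized into the addons channel, the CoreDNS entry would look roughly like the sketch below; the field casing and the `Addons` wrapper are assumptions inferred from the `AddonSpec` fields above, not taken from this diff:

```yaml
kind: Addons
metadata:
  name: bootstrap              # illustrative wrapper, not part of this diff
spec:
  addons:
  - name: coredns.addons.k8s.io
    version: 1.0.6
    selector:
      k8s-addon: coredns.addons.k8s.io
    manifest: coredns.addons.k8s.io/k8s-1.6.yaml
    kubernetesVersion: ">=1.6.0"
    id: k8s-1.6
```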