From eaac019e34599dcb12702ecabd362a78ca5594aa Mon Sep 17 00:00:00 2001
From: Sandeep Rajan
Date: Mon, 11 Dec 2017 14:47:24 -0500
Subject: [PATCH] CoreDNS in kops as an addon

fix test
---
 docs/cluster_spec.md                          |  20 +++
 pkg/apis/kops/cluster.go                      |   2 +
 pkg/apis/kops/v1alpha1/cluster.go             |   2 +
 .../kops/v1alpha1/zz_generated.conversion.go  |   2 +
 pkg/apis/kops/v1alpha2/cluster.go             |   2 +
 .../kops/v1alpha2/zz_generated.conversion.go  |   2 +
 pkg/apis/kops/validation/legacy.go            |  32 ++--
 .../k8s-1.6.yaml.template                     | 158 ++++++++++++++++++
 .../pkg/fi/cloudup/bootstrapchannelbuilder.go |  76 ++++++---
 9 files changed, 254 insertions(+), 42 deletions(-)
 create mode 100644 upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template

diff --git a/docs/cluster_spec.md b/docs/cluster_spec.md
index d798ea2487..4eecbce0b4 100644
--- a/docs/cluster_spec.md
+++ b/docs/cluster_spec.md
@@ -266,6 +266,26 @@ Will make kube-scheduler use the scheduler policy from configmap "scheduler-poli
 
 Note that as of Kubernetes 1.8.0 kube-scheduler does not reload its configuration from configmap automatically. You will need to ssh into the master instance and restart the Docker container manually.
 
+### kubeDNS
+
+This block contains configurations for `kube-dns`.
+
+```yaml
+spec:
+  kubeDNS:
+    provider: KubeDNS
+```
+
+Specifying KubeDNS will install kube-dns as the default service discovery.
+
+```yaml
+spec:
+  kubeDNS:
+    provider: CoreDNS
+```
+
+This will install [CoreDNS](https://coredns.io/) instead of kube-dns.
+
 ### kubeControllerManager
 
 This block contains configurations for the `controller-manager`.
diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go
index 6e9010b967..b7b8443748 100644
--- a/pkg/apis/kops/cluster.go
+++ b/pkg/apis/kops/cluster.go
@@ -300,6 +300,8 @@ type KubeDNSConfig struct {
     CacheMaxSize int `json:"cacheMaxSize,omitempty"`
     // CacheMaxConcurrent is the maximum number of concurrent queries for dnsmasq
     CacheMaxConcurrent int `json:"cacheMaxConcurrent,omitempty"`
+    // Provider indicates whether CoreDNS or kube-dns will be the default service discovery.
+    Provider string `json:"provider,omitempty"`
 }
 
 // ExternalDNSConfig are options of the dns-controller
diff --git a/pkg/apis/kops/v1alpha1/cluster.go b/pkg/apis/kops/v1alpha1/cluster.go
index 5456110e7a..16e734ea50 100644
--- a/pkg/apis/kops/v1alpha1/cluster.go
+++ b/pkg/apis/kops/v1alpha1/cluster.go
@@ -299,6 +299,8 @@ type KubeDNSConfig struct {
     CacheMaxSize int `json:"cacheMaxSize,omitempty"`
     // CacheMaxConcurrent is the maximum number of concurrent queries for dnsmasq
     CacheMaxConcurrent int `json:"cacheMaxConcurrent,omitempty"`
+    // Provider indicates whether CoreDNS or kube-dns will be the default service discovery.
+    Provider string `json:"provider,omitempty"`
 }
 
 // ExternalDNSConfig are options of the dns-controller
diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
index bba196e2de..ac3992042e 100644
--- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
@@ -2097,6 +2097,7 @@ func autoConvert_v1alpha1_KubeDNSConfig_To_kops_KubeDNSConfig(in *KubeDNSConfig,
     out.ServerIP = in.ServerIP
     out.CacheMaxSize = in.CacheMaxSize
     out.CacheMaxConcurrent = in.CacheMaxConcurrent
+    out.Provider = in.Provider
     return nil
 }
 
@@ -2112,6 +2113,7 @@ func autoConvert_kops_KubeDNSConfig_To_v1alpha1_KubeDNSCo
     out.ServerIP = in.ServerIP
     out.CacheMaxSize = in.CacheMaxSize
     out.CacheMaxConcurrent = in.CacheMaxConcurrent
+    out.Provider = in.Provider
     return nil
 }
 
diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go
index 2318f03adc..97a13666dc 100644
--- a/pkg/apis/kops/v1alpha2/cluster.go
+++ b/pkg/apis/kops/v1alpha2/cluster.go
@@ -297,6 +297,8 @@ type KubeDNSConfig struct {
     CacheMaxSize int `json:"cacheMaxSize,omitempty"`
     // CacheMaxConcurrent is the maximum number of concurrent queries for dnsmasq
     CacheMaxConcurrent int `json:"cacheMaxConcurrent,omitempty"`
+    // Provider indicates whether CoreDNS or kube-dns will be the default service discovery.
+    Provider string `json:"provider,omitempty"`
 }
 
 // ExternalDNSConfig are options of the dns-controller
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
index 4896e016c5..2b446dee8f 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
@@ -2361,6 +2361,7 @@ func autoConvert_v1alpha2_KubeDNSConfig_To_kops_KubeDNSConfig(in *KubeDNSConfig,
     out.ServerIP = in.ServerIP
     out.CacheMaxSize = in.CacheMaxSize
     out.CacheMaxConcurrent = in.CacheMaxConcurrent
+    out.Provider = in.Provider
     return nil
 }
 
@@ -2376,6 +2377,7 @@ func autoConvert_kops_KubeDNSConfig_To_v1alpha2_KubeDNSCo
     out.ServerIP = in.ServerIP
     out.CacheMaxSize = in.CacheMaxSize
     out.CacheMaxConcurrent = in.CacheMaxConcurrent
+    out.Provider = in.Provider
     return nil
 }
 
diff --git a/pkg/apis/kops/validation/legacy.go b/pkg/apis/kops/validation/legacy.go
index bed5061f90..495da49c9d 100644
--- a/pkg/apis/kops/validation/legacy.go
+++ b/pkg/apis/kops/validation/legacy.go
@@ -33,7 +33,7 @@ import (
 
 // legacy contains validation functions that don't match the apimachinery style
 
-// ValidateCluster is responsible for checking the validitity of the Cluster spec
+// ValidateCluster is responsible for checking the validity of the Cluster spec
 func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
     fieldSpec := field.NewPath("Spec")
     var err error
@@ -263,24 +263,22 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
     // Check KubeDNS.ServerIP
     if c.Spec.KubeDNS != nil {
         serverIPString := c.Spec.KubeDNS.ServerIP
-        if serverIPString == "" {
-            return field.Required(fieldSpec.Child("KubeDNS", "ServerIP"), "Cluster did not have KubeDNS.ServerIP set")
-        }
+        if serverIPString != "" {
+            dnsServiceIP := net.ParseIP(serverIPString)
+            if dnsServiceIP == nil {
+                return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "Cluster had an invalid KubeDNS.ServerIP")
+            }
 
-        dnsServiceIP := net.ParseIP(serverIPString)
-        if dnsServiceIP == nil {
-            return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "Cluster had an invalid KubeDNS.ServerIP")
-        }
+            if !serviceClusterIPRange.Contains(dnsServiceIP) {
+                return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, serverIPString))
+            }
 
-        if !serviceClusterIPRange.Contains(dnsServiceIP) {
-            return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, serverIPString))
-        }
-
-        if c.Spec.Kubelet != nil && c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
-            return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "Kubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
-        }
-        if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
-            return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "MasterKubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
+            if c.Spec.Kubelet != nil && c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
+                return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "Kubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
+            }
+            if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
+                return field.Invalid(fieldSpec.Child("KubeDNS", "ServerIP"), serverIPString, "MasterKubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
+            }
         }
     }
 
diff --git a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template
new file mode 100644
index 0000000000..54978883e0
--- /dev/null
+++ b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template
@@ -0,0 +1,158 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    k8s-addon: coredns.addons.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+    k8s-addon: coredns.addons.k8s.io
+  name: system:coredns
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  - services
+  - pods
+  - namespaces
+  verbs:
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+    k8s-addon: coredns.addons.k8s.io
+  name: system:coredns
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:coredns
+subjects:
+- kind: ServiceAccount
+  name: coredns
+  namespace: kube-system
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+data:
+  Corefile: |
+    .:53 {
+        errors
+        log
+        health
+        kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa {
+          pods insecure
+          upstream
+          fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        proxy . /etc/resolv.conf
+        cache 30
+    }
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    k8s-addon: coredns.addons.k8s.io
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: 2
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 1
+  selector:
+    matchLabels:
+      k8s-app: kube-dns
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns
+    spec:
+      serviceAccountName: coredns
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      containers:
+      - name: coredns
+        image: coredns/coredns:1.0.6
+        imagePullPolicy: IfNotPresent
+        resources:
+          limits:
+            memory: 170Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        args: [ "-conf", "/etc/coredns/Corefile" ]
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/coredns
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+      dnsPolicy: Default
+      volumes:
+      - name: config-volume
+        configMap:
+          name: coredns
+          items:
+          - key: Corefile
+            path: Corefile
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    k8s-addon: coredns.addons.k8s.io
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "CoreDNS"
+spec:
+  selector:
+    k8s-app: kube-dns
+  clusterIP: {{ KubeDNS.ServerIP }}
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
index e4b03e4238..2a95ad8ec4 100644
--- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
+++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
@@ -131,38 +131,64 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
         }
     }
 
-    {
-        key := "kube-dns.addons.k8s.io"
-        version := "1.14.10"
+    kubeDNS := b.cluster.Spec.KubeDNS
+    if kubeDNS.Provider == "KubeDNS" || kubeDNS.Provider == "" {
         {
-            location := key + "/pre-k8s-1.6.yaml"
-            id := "pre-k8s-1.6"
+            key := "kube-dns.addons.k8s.io"
+            version := "1.14.10"
 
-            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-                Name:              fi.String(key),
-                Version:           fi.String(version),
-                Selector:          map[string]string{"k8s-addon": key},
-                Manifest:          fi.String(location),
-                KubernetesVersion: "<1.6.0",
-                Id:                id,
-            })
-            manifests[key+"-"+id] = "addons/" + location
+            {
+                location := key + "/pre-k8s-1.6.yaml"
+                id := "pre-k8s-1.6"
+
+                addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+                    Name:              fi.String(key),
+                    Version:           fi.String(version),
+                    Selector:          map[string]string{"k8s-addon": key},
+                    Manifest:          fi.String(location),
+                    KubernetesVersion: "<1.6.0",
+                    Id:                id,
+                })
+                manifests[key+"-"+id] = "addons/" + location
+            }
+
+            {
+                location := key + "/k8s-1.6.yaml"
+                id := "k8s-1.6"
+
+                addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+                    Name:              fi.String(key),
+                    Version:           fi.String(version),
+                    Selector:          map[string]string{"k8s-addon": key},
+                    Manifest:          fi.String(location),
+                    KubernetesVersion: ">=1.6.0",
+                    Id:                id,
+                })
+                manifests[key+"-"+id] = "addons/" + location
+            }
         }
+    }
 
+    if kubeDNS.Provider == "CoreDNS" {
         {
-            location := key + "/k8s-1.6.yaml"
-            id := "k8s-1.6"
+            key := "coredns.addons.k8s.io"
+            version := "1.0.6"
 
-            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-                Name:              fi.String(key),
-                Version:           fi.String(version),
-                Selector:          map[string]string{"k8s-addon": key},
-                Manifest:          fi.String(location),
-                KubernetesVersion: ">=1.6.0",
-                Id:                id,
-            })
-            manifests[key+"-"+id] = "addons/" + location
+            {
+                location := key + "/k8s-1.6.yaml"
+                id := "k8s-1.6"
+
+                addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+                    Name:              fi.String(key),
+                    Version:           fi.String(version),
+                    Selector:          map[string]string{"k8s-addon": key},
+                    Manifest:          fi.String(location),
+                    KubernetesVersion: ">=1.6.0",
+                    Id:                id,
+                })
+                manifests[key+"-"+id] = "addons/" + location
+            }
         }
     }
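
Usage sketch (illustrative, not part of the patch): a minimal cluster spec fragment exercising the new `kubeDNS.provider` field alongside the existing `KubeDNSConfig` fields that the CoreDNS template above reads as `{{ KubeDNS.Domain }}` and `{{ KubeDNS.ServerIP }}`. The `domain`/`serverIP` key casing and the example values are assumptions for illustration only, not something this patch defines.

```yaml
# Hypothetical spec fragment -- example values, not defaults asserted by this patch.
spec:
  kubeDNS:
    provider: CoreDNS      # "KubeDNS" or empty keeps the kube-dns addon
    domain: cluster.local  # rendered into the template as {{ KubeDNS.Domain }}
    serverIP: 100.64.0.10  # rendered as {{ KubeDNS.ServerIP }}; must sit inside serviceClusterIPRange when set
```

With the relaxed check in legacy.go above, `serverIP` may also be left unset; it is only validated when non-empty.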