Merge pull request #3229 from gambol99/manifest_files

Automatic merge from submit-queue.

Manifest files

The current implementation embeds the flags into the command line rather than passing them as arguments, which would be more consistent with the other manifests and with kubeadm. Logging currently pushes all the logs onto the host, which isn't ideal with an ephemeral host model; users should be encouraged to run a proper remote logging stack to retain logs rather than ssh-ing to boxes.

- changed the manifests to use arguments rather than embedded option flags
- changed the logging to use the journal rather than relying on host log files

```shell
core@ip-10-250-34-21 ~ $ cat /etc/kubernetes/manifests/kube-apiserver.manifest  | grep -A 6 args
  - args:
    - --address=127.0.0.1
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota,PodTolerationRestriction
    - --allow-privileged=true
    - --anonymous-auth=false
    - --apiserver-count=3
    - --authorization-mode=RBAC
```
Merged by Kubernetes Submit Queue on 2017-09-27 09:55:48 -07:00, committed by GitHub.
Commit 383a37ad16
16 changed files with 200 additions and 224 deletions

View File

@@ -19,6 +19,7 @@ package model
 import (
     "fmt"
     "path/filepath"
+    "sort"
     "strconv"
 
     "k8s.io/client-go/pkg/api/v1"
@@ -143,3 +144,30 @@ func buildPrivateKeyRequest(c *fi.ModelBuilderContext, b *NodeupModelContext, na
     return nil
 }
+
+// sortedStrings is a small helper that sorts the list in place and returns it
+func sortedStrings(list []string) []string {
+    sort.Strings(list)
+    return list
+}
+
+// addHostPathMapping is shorthand for mapping a host path into a container
+func addHostPathMapping(pod *v1.Pod, container *v1.Container, name, path string) *v1.VolumeMount {
+    pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
+        Name: name,
+        VolumeSource: v1.VolumeSource{
+            HostPath: &v1.HostPathVolumeSource{
+                Path: path,
+            },
+        },
+    })
+
+    container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
+        Name:      name,
+        MountPath: path,
+        ReadOnly:  true,
+    })
+
+    return &container.VolumeMounts[len(container.VolumeMounts)-1]
+}
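For orientation, here is a minimal, hypothetical sketch (not part of the diff) of how the nodeup builders below are expected to call this helper; the `exampleMounts` function name and the `/srv/kubernetes` path are illustrative only:

```go
package model

import "k8s.io/client-go/pkg/api/v1"

// exampleMounts shows the calling pattern: mounts default to read-only, and a
// builder flips ReadOnly on the returned *v1.VolumeMount for writable paths
// such as the component log file.
func exampleMounts(pod *v1.Pod, container *v1.Container) {
	// read-only mount, e.g. for a certificate directory (illustrative path)
	addHostPathMapping(pod, container, "srvkubernetes", "/srv/kubernetes")

	// writable mount so the component can append to its log file
	addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log").ReadOnly = false
}
```

Returning a pointer into container.VolumeMounts is what lets a builder adjust a mount after it has been added.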

View File

@@ -21,15 +21,16 @@ import (
     "path/filepath"
     "strings"
 
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
+    "k8s.io/client-go/pkg/api/v1"
     "k8s.io/kops/pkg/apis/kops"
     "k8s.io/kops/pkg/flagbuilder"
     "k8s.io/kops/pkg/kubeconfig"
     "k8s.io/kops/upup/pkg/fi"
     "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
-
-    "k8s.io/apimachinery/pkg/api/resource"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/util/intstr"
-    "k8s.io/client-go/pkg/api/v1"
 )
 
 const PathAuthnConfig = "/etc/kubernetes/authn.config"
@@ -47,8 +48,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
         return nil
     }
 
-    err := b.writeAuthenticationConfig(c)
-    if err != nil {
+    if err := b.writeAuthenticationConfig(c); err != nil {
         return err
     }
@@ -80,12 +80,11 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
             return fmt.Errorf("error marshalling manifest to yaml: %v", err)
         }
 
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:     "/etc/kubernetes/manifests/kube-apiserver.manifest",
             Contents: fi.NewBytesResource(manifest),
             Type:     nodetasks.FileType_File,
-        }
-        c.AddTask(t)
+        })
     }
 
     // @check if we are using secure client certificates for kubelet and grab the certificates
@@ -99,17 +98,13 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
         }
     }
 
-    // Touch log file, so that docker doesn't create a directory instead
-    {
-        t := &nodetasks.File{
-            Path:        "/var/log/kube-apiserver.log",
-            Contents:    fi.NewStringResource(""),
-            Type:        nodetasks.FileType_File,
-            Mode:        s("0400"),
-            IfNotExists: true,
-        }
-        c.AddTask(t)
-    }
+    c.AddTask(&nodetasks.File{
+        Path:        "/var/log/kube-apiserver.log",
+        Contents:    fi.NewStringResource(""),
+        Type:        nodetasks.FileType_File,
+        Mode:        s("0400"),
+        IfNotExists: true,
+    })
 
     return nil
 }
@@ -150,12 +145,11 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte
         return fmt.Errorf("error marshalling authentication config to yaml: %v", err)
     }
 
-    t := &nodetasks.File{
+    c.AddTask(&nodetasks.File{
         Path:     PathAuthnConfig,
         Contents: fi.NewBytesResource(manifest),
         Type:     nodetasks.FileType_File,
-    }
-    c.AddTask(t)
+    })
 
     return nil
 }
@@ -195,18 +189,14 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
     }
 
     // build the kube-apiserver flags for the service
-    flags, err := flagbuilder.BuildFlags(b.Cluster.Spec.KubeAPIServer)
+    flags, err := flagbuilder.BuildFlagsList(b.Cluster.Spec.KubeAPIServer)
     if err != nil {
         return nil, fmt.Errorf("error building kube-apiserver flags: %v", err)
     }
 
-    // Add cloud config file if needed
+    // add cloud config file if needed
     if b.Cluster.Spec.CloudConfig != nil {
-        flags += " --cloud-config=" + CloudConfigFilePath
-    }
-
-    redirectCommand := []string{
-        "/bin/sh", "-c", "/usr/local/bin/kube-apiserver " + flags + " 1>>/var/log/kube-apiserver.log 2>&1",
+        flags = append(flags, fmt.Sprintf("--cloud-config=%s", CloudConfigFilePath))
     }
 
     pod := &v1.Pod{
@@ -242,12 +232,11 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
     container := &v1.Container{
         Name:  "kube-apiserver",
         Image: b.Cluster.Spec.KubeAPIServer.Image,
-        Resources: v1.ResourceRequirements{
-            Requests: v1.ResourceList{
-                v1.ResourceCPU: resource.MustParse("150m"),
-            },
+        Command: []string{
+            "/bin/sh", "-c",
+            "/usr/local/bin/kube-apiserver " + strings.Join(sortedStrings(flags), " ") + " 2>&1 | /bin/tee -a /var/log/kube-apiserver.log",
         },
-        Command: redirectCommand,
+        Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
         LivenessProbe: &v1.Probe{
             Handler: v1.Handler{
                 HTTPGet: probeAction,
@@ -267,7 +256,11 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
                 HostPort:      8080,
             },
         },
-        Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
+        Resources: v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("150m"),
+            },
+        },
     }
 
     for _, path := range b.SSLHostPaths() {
@@ -275,6 +268,8 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
         addHostPathMapping(pod, container, name, path)
     }
 
+    addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log").ReadOnly = false
+
     // Add cloud config file if needed
     if b.Cluster.Spec.CloudConfig != nil {
         addHostPathMapping(pod, container, "cloudconfig", CloudConfigFilePath)
@@ -290,8 +285,6 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
         addHostPathMapping(pod, container, "srvsshproxy", pathSrvSshproxy)
     }
 
-    addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log").ReadOnly = false
-
     auditLogPath := b.Cluster.Spec.KubeAPIServer.AuditLogPath
     if auditLogPath != nil {
         // Mount the directory of the path instead, as kube-apiserver rotates the log by renaming the file.
@@ -312,30 +305,12 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
     return pod, nil
 }
 
-func addHostPathMapping(pod *v1.Pod, container *v1.Container, name string, path string) *v1.VolumeMount {
-    pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
-        Name: name,
-        VolumeSource: v1.VolumeSource{
-            HostPath: &v1.HostPathVolumeSource{
-                Path: path,
-            },
-        },
-    })
-
-    container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
-        Name:      name,
-        MountPath: path,
-        ReadOnly:  true,
-    })
-
-    return &container.VolumeMounts[len(container.VolumeMounts)-1]
-}
-
 func (b *KubeAPIServerBuilder) buildAnnotations() map[string]string {
     annotations := make(map[string]string)
 
     annotations["dns.alpha.kubernetes.io/internal"] = b.Cluster.Spec.MasterInternalName
     if b.Cluster.Spec.API != nil && b.Cluster.Spec.API.DNS != nil {
         annotations["dns.alpha.kubernetes.io/external"] = b.Cluster.Spec.MasterPublicName
     }
 
     return annotations
 }

View File

@@ -21,13 +21,14 @@ import (
     "path/filepath"
     "strings"
 
-    "k8s.io/kops/pkg/flagbuilder"
-    "k8s.io/kops/upup/pkg/fi"
-    "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/pkg/api/v1"
+
+    "k8s.io/kops/pkg/flagbuilder"
+    "k8s.io/kops/upup/pkg/fi"
+    "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
 )
 
 // KubeControllerManagerBuilder install kube-controller-manager (just the manifest at the moment)
@@ -37,13 +38,14 @@ type KubeControllerManagerBuilder struct {
 var _ fi.ModelBuilder = &KubeControllerManagerBuilder{}
 
+// Build is responsible for configuring the kube-controller-manager
 func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
     if !b.IsMaster {
         return nil
     }
 
     // If we're using the CertificateSigner, include the CA Key
-    // TODO: use a per-machine key? use KMS?
+    // @TODO: use a per-machine key? use KMS?
     if b.useCertificateSigner() {
         ca, err := b.KeyStore.PrivateKey(fi.CertificateId_CA, false)
         if err != nil {
@@ -55,12 +57,11 @@ func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
             return err
         }
 
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:     filepath.Join(b.PathSrvKubernetes(), "ca.key"),
             Contents: fi.NewStringResource(serialized),
             Type:     nodetasks.FileType_File,
-        }
-        c.AddTask(t)
+        })
     }
 
     {
@@ -82,33 +83,29 @@ func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
         c.AddTask(t)
     }
 
-    // Add kubeconfig
     {
-        // TODO: Change kubeconfig to be https
-        kubeconfig, err := b.buildPKIKubeconfig("kube-controller-manager")
-        if err != nil {
-            return err
-        }
-
-        t := &nodetasks.File{
-            Path:     "/var/lib/kube-controller-manager/kubeconfig",
-            Contents: fi.NewStringResource(kubeconfig),
-            Type:     nodetasks.FileType_File,
-            Mode:     s("0400"),
-        }
-        c.AddTask(t)
-    }
-
-    // Touch log file, so that docker doesn't create a directory instead
-    {
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:        "/var/log/kube-controller-manager.log",
             Contents:    fi.NewStringResource(""),
             Type:        nodetasks.FileType_File,
             Mode:        s("0400"),
             IfNotExists: true,
+        })
+    }
+
+    // Add kubeconfig
+    {
+        // @TODO: Change kubeconfig to be https
+        kubeconfig, err := b.buildPKIKubeconfig("kube-controller-manager")
+        if err != nil {
+            return err
         }
-        c.AddTask(t)
+        c.AddTask(&nodetasks.File{
+            Path:     "/var/lib/kube-controller-manager/kubeconfig",
+            Contents: fi.NewStringResource(kubeconfig),
+            Type:     nodetasks.FileType_File,
+            Mode:     s("0400"),
+        })
     }
 
     return nil
@@ -121,32 +118,27 @@ func (b *KubeControllerManagerBuilder) useCertificateSigner() bool {
 func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
     kcm := b.Cluster.Spec.KubeControllerManager
     kcm.RootCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
     kcm.ServiceAccountPrivateKeyFile = filepath.Join(b.PathSrvKubernetes(), "server.key")
 
-    flags, err := flagbuilder.BuildFlags(kcm)
+    flags, err := flagbuilder.BuildFlagsList(kcm)
     if err != nil {
         return nil, fmt.Errorf("error building kube-controller-manager flags: %v", err)
     }
 
     // Add cloud config file if needed
     if b.Cluster.Spec.CloudConfig != nil {
-        flags += " --cloud-config=" + CloudConfigFilePath
+        flags = append(flags, "--cloud-config="+CloudConfigFilePath)
     }
 
     // Add kubeconfig flag
-    flags += " --kubeconfig=" + "/var/lib/kube-controller-manager/kubeconfig"
+    flags = append(flags, "--kubeconfig="+"/var/lib/kube-controller-manager/kubeconfig")
 
     // Configure CA certificate to be used to sign keys, if we are using CSRs
     if b.useCertificateSigner() {
-        flags += " --cluster-signing-cert-file=" + filepath.Join(b.PathSrvKubernetes(), "ca.crt")
-        flags += " --cluster-signing-key-file=" + filepath.Join(b.PathSrvKubernetes(), "ca.key")
-    }
-
-    redirectCommand := []string{
-        "/bin/sh", "-c", "/usr/local/bin/kube-controller-manager " + flags + " 1>>/var/log/kube-controller-manager.log 2>&1",
+        flags = append(flags, []string{
+            "--cluster-signing-cert-file=" + filepath.Join(b.PathSrvKubernetes(), "ca.crt"),
+            "--cluster-signing-key-file=" + filepath.Join(b.PathSrvKubernetes(), "ca.key")}...)
     }
 
     pod := &v1.Pod{
@@ -169,12 +161,11 @@ func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
     container := &v1.Container{
         Name:  "kube-controller-manager",
         Image: b.Cluster.Spec.KubeControllerManager.Image,
-        Resources: v1.ResourceRequirements{
-            Requests: v1.ResourceList{
-                v1.ResourceCPU: resource.MustParse("100m"),
-            },
+        Command: []string{
+            "/bin/sh", "-c",
+            "/usr/local/bin/kube-controller-manager " + strings.Join(sortedStrings(flags), " ") + " 2>&1 | /bin/tee -a /var/log/kube-controller-manager.log",
         },
-        Command: redirectCommand,
+        Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
         LivenessProbe: &v1.Probe{
             Handler: v1.Handler{
                 HTTPGet: &v1.HTTPGetAction{
@@ -186,12 +177,15 @@ func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
             InitialDelaySeconds: 15,
             TimeoutSeconds:      15,
         },
-        Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
+        Resources: v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("100m"),
+            },
+        },
     }
 
     for _, path := range b.SSLHostPaths() {
         name := strings.Replace(path, "/", "", -1)
         addHostPathMapping(pod, container, name, path)
     }

View File

@@ -18,14 +18,17 @@ package model
 import (
     "fmt"
+    "strings"
 
-    "github.com/golang/glog"
-
-    "k8s.io/apimachinery/pkg/api/resource"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/client-go/pkg/api/v1"
     "k8s.io/kops/pkg/dns"
     "k8s.io/kops/pkg/flagbuilder"
     "k8s.io/kops/upup/pkg/fi"
     "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
+
+    "github.com/golang/glog"
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/pkg/api/v1"
 )
 
 // KubeProxyBuilder installs kube-proxy
@@ -35,6 +38,8 @@ type KubeProxyBuilder struct {
 var _ fi.ModelBuilder = &KubeAPIServerBuilder{}
 
+// Build is responsible for building the kube-proxy manifest
+// @TODO we should probably change this to a daemonset in the future and follow the kubeadm path
 func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error {
     if b.IsMaster {
         // If this is a master that is not isolated, run it as a normal node also (start kube-proxy etc)
@@ -56,44 +61,40 @@ func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error {
             return fmt.Errorf("error marshalling manifest to yaml: %v", err)
         }
 
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:     "/etc/kubernetes/manifests/kube-proxy.manifest",
             Contents: fi.NewBytesResource(manifest),
             Type:     nodetasks.FileType_File,
-        }
-        c.AddTask(t)
+        })
     }
 
+    // Add kubeconfig
     {
         kubeconfig, err := b.buildPKIKubeconfig("kube-proxy")
         if err != nil {
             return err
         }
 
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:     "/var/lib/kube-proxy/kubeconfig",
             Contents: fi.NewStringResource(kubeconfig),
             Type:     nodetasks.FileType_File,
             Mode:     s("0400"),
-        }
-        c.AddTask(t)
+        })
     }
 
-    // Touch log file, so that docker doesn't create a directory instead
     {
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:        "/var/log/kube-proxy.log",
             Contents:    fi.NewStringResource(""),
             Type:        nodetasks.FileType_File,
             Mode:        s("0400"),
             IfNotExists: true,
-        }
-        c.AddTask(t)
+        })
     }
 
     return nil
 }
 
+// buildPod is responsible for constructing the pod spec
 func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
     c := b.Cluster.Spec.KubeProxy
     if c == nil {
@@ -120,45 +121,39 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
         return nil, fmt.Errorf("Error parsing CPURequest=%q", c.CPURequest)
     }
 
-    flags, err := flagbuilder.BuildFlags(c)
+    flags, err := flagbuilder.BuildFlagsList(c)
     if err != nil {
         return nil, fmt.Errorf("error building kubeproxy flags: %v", err)
     }
 
     image := c.Image
 
-    cmd := "echo -998 > /proc/$$$/oom_score_adj && kube-proxy --kubeconfig=/var/lib/kube-proxy/kubeconfig --conntrack-max-per-core=131072 --resource-container=\"\" "
-    cmd += flags
-
-    // cmd += " 1>>/var/log/kube-proxy.log 2>&1"
-    cmd += " 2>&1 | /usr/bin/tee /var/log/kube-proxy.log"
-
-    glog.V(2).Infof("built kube-proxy command: %s", cmd)
-
-    command := []string{
-        "/bin/sh", "-c",
-        cmd,
-    }
+    flags = append(flags, []string{
+        "--conntrack-max-per-core=131072",
+        "--kubeconfig=/var/lib/kube-proxy/kubeconfig",
+        "--oom-score-adj=-998",
+        `--resource-container=""`}...)
 
     container := &v1.Container{
         Name:  "kube-proxy",
         Image: image,
-        ImagePullPolicy: v1.PullIfNotPresent,
-        Command:         command,
-        SecurityContext: &v1.SecurityContext{
-            Privileged: fi.Bool(true),
+        Command: []string{
+            "/bin/sh", "-c",
+            "/usr/local/bin/kube-proxy " + strings.Join(sortedStrings(flags), " ") + " 2>&1 | /usr/bin/tee -a /var/log/kube-proxy.log",
         },
         Resources: v1.ResourceRequirements{
             Requests: v1.ResourceList{
                 "cpu": cpuRequest,
             },
         },
+        SecurityContext: &v1.SecurityContext{
+            Privileged: fi.Bool(true),
+        },
     }
 
     pod := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "kube-proxy",
             Namespace: "kube-system",
             Labels: map[string]string{
                 "k8s-app": "kube-proxy",
                 "tier":    "node",
@@ -179,12 +174,8 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
     }
 
     {
-        // Map /var/log to host
-        varlog := addHostPathMapping(pod, container, "varlog", "/var/log")
-        varlog.ReadOnly = false
-
-        // Map kubeconfig from host
         addHostPathMapping(pod, container, "kubeconfig", "/var/lib/kube-proxy/kubeconfig")
+        addHostPathMapping(pod, container, "logfile", "/var/log/kube-proxy.log").ReadOnly = false
 
         // Map SSL certs from host: /usr/share/ca-certificates -> /etc/ssl/certs
         sslCertsHost := addHostPathMapping(pod, container, "ssl-certs-hosts", "/usr/share/ca-certificates")

View File

@@ -18,13 +18,16 @@ package model
 import (
     "fmt"
+    "strings"
 
-    "k8s.io/kops/pkg/flagbuilder"
-    "k8s.io/kops/upup/pkg/fi"
-    "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/pkg/api/v1"
+
+    "k8s.io/kops/pkg/flagbuilder"
+    "k8s.io/kops/upup/pkg/fi"
+    "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
 )
 
 // KubeSchedulerBuilder install kube-scheduler
@@ -34,6 +37,7 @@ type KubeSchedulerBuilder struct {
 var _ fi.ModelBuilder = &KubeSchedulerBuilder{}
 
+// Build is responsible for building the manifest for the kube-scheduler
 func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
     if !b.IsMaster {
         return nil
@@ -50,58 +54,50 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
             return fmt.Errorf("error marshalling pod to yaml: %v", err)
         }
 
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:     "/etc/kubernetes/manifests/kube-scheduler.manifest",
             Contents: fi.NewBytesResource(manifest),
             Type:     nodetasks.FileType_File,
-        }
-        c.AddTask(t)
+        })
     }
 
+    // Add kubeconfig
     {
         kubeconfig, err := b.buildPKIKubeconfig("kube-scheduler")
         if err != nil {
             return err
         }
 
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:     "/var/lib/kube-scheduler/kubeconfig",
             Contents: fi.NewStringResource(kubeconfig),
             Type:     nodetasks.FileType_File,
             Mode:     s("0400"),
-        }
-        c.AddTask(t)
+        })
     }
 
-    // Touch log file, so that docker doesn't create a directory instead
     {
-        t := &nodetasks.File{
+        c.AddTask(&nodetasks.File{
             Path:        "/var/log/kube-scheduler.log",
             Contents:    fi.NewStringResource(""),
             Type:        nodetasks.FileType_File,
             Mode:        s("0400"),
             IfNotExists: true,
-        }
-        c.AddTask(t)
+        })
     }
 
     return nil
 }
 
+// buildPod is responsible for constructing the pod specification
 func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) {
     c := b.Cluster.Spec.KubeScheduler
 
-    flags, err := flagbuilder.BuildFlags(c)
+    flags, err := flagbuilder.BuildFlagsList(c)
     if err != nil {
         return nil, fmt.Errorf("error building kube-scheduler flags: %v", err)
     }
 
     // Add kubeconfig flag
-    flags += " --kubeconfig=" + "/var/lib/kube-scheduler/kubeconfig"
-
-    redirectCommand := []string{
-        "/bin/sh", "-c", "/usr/local/bin/kube-scheduler " + flags + " 1>>/var/log/kube-scheduler.log 2>&1",
-    }
+    flags = append(flags, "--kubeconfig="+"/var/lib/kube-scheduler/kubeconfig")
 
     pod := &v1.Pod{
         TypeMeta: metav1.TypeMeta{
@@ -123,12 +119,11 @@ func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) {
     container := &v1.Container{
         Name:  "kube-scheduler",
         Image: c.Image,
-        Resources: v1.ResourceRequirements{
-            Requests: v1.ResourceList{
-                v1.ResourceCPU: resource.MustParse("100m"),
-            },
+        Command: []string{
+            "/bin/sh", "-c",
+            "/usr/local/bin/kube-scheduler " + strings.Join(sortedStrings(flags), " ") + " 2>&1 | /bin/tee -a /var/log/kube-scheduler.log",
         },
-        Command: redirectCommand,
+        Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
         LivenessProbe: &v1.Probe{
             Handler: v1.Handler{
                 HTTPGet: &v1.HTTPGetAction{
@@ -140,11 +135,13 @@ func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) {
             InitialDelaySeconds: 15,
             TimeoutSeconds:      15,
         },
-        Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
+        Resources: v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("100m"),
+            },
+        },
     }
 
     addHostPathMapping(pod, container, "varlibkubescheduler", "/var/lib/kube-scheduler")
     addHostPathMapping(pod, container, "logfile", "/var/log/kube-scheduler.log").ReadOnly = false
 
     pod.Spec.Containers = append(pod.Spec.Containers, *container)

View File

@@ -18,12 +18,13 @@ package model
 import (
     "fmt"
+    "strings"
 
     "github.com/golang/glog"
 
     "k8s.io/kops/nodeup/pkg/distros"
     "k8s.io/kops/pkg/apis/kops/util"
     "k8s.io/kops/upup/pkg/fi"
     "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
-    "strings"
 )
 
 // LogrotateBuilder installs logrotate.d and configures log rotation for kubernetes logs

View File

@@ -61,8 +61,7 @@ type ClusterSpec struct {
     MasterPublicName string `json:"masterPublicName,omitempty"`
     // MasterInternalName is the internal DNS name for the master nodes
     MasterInternalName string `json:"masterInternalName,omitempty"`
-    // The CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s
-    // This is a real CIDR, not the internal k8s network
+    // NetworkCIDR is used by the AWS VPC / GCE Network, or otherwise allocated to k8s. This is a real CIDR, not the internal k8s network
     NetworkCIDR string `json:"networkCIDR,omitempty"`
     // NetworkID is an identifier of a network, if we want to reuse/share an existing network (e.g. an AWS VPC)
     NetworkID string `json:"networkID,omitempty"`
@@ -144,11 +143,8 @@ type ClusterSpec struct {
     CloudLabels map[string]string `json:"cloudLabels,omitempty"`
     // Hooks for custom actions e.g. on first installation
     Hooks []HookSpec `json:"hooks,omitempty"`
-    // Alternative locations for files and containers
-    // This API component is under construction, will remove this comment
-    // once this API is fully functional.
+    // Assets is alternative locations for files and containers; the API is under construction, will remove this comment once this API is fully functional.
     Assets *Assets `json:"assets,omitempty"`
     // IAM field adds control over the IAM security policies applied to resources
     IAM *IAMSpec `json:"iam,omitempty"`
     // EncryptionConfig controls if encryption is enabled
@@ -169,9 +165,12 @@ type FileAssetSpec struct {
     IsBase64 bool `json:"isBase64,omitempty"`
 }
 
+// Assets defines the privately hosted assets
 type Assets struct {
+    // ContainerRegistry is a url for a docker registry
     ContainerRegistry *string `json:"containerRegistry,omitempty"`
+    // FileRepository is the url for a private file serving repository
     FileRepository *string `json:"fileRepository,omitempty"`
 }
 
 // IAMSpec adds control over the IAM security policies applied to resources

View File

@@ -41,7 +41,7 @@ type ClusterList struct {
 // ClusterSpec defines the configuration for a cluster
 type ClusterSpec struct {
-    // The Channel we are following
+    // Channel we are following
     Channel string `json:"channel,omitempty"`
     // ConfigBase is the path where we store configuration for the cluster
     // This might be different that the location when the cluster spec itself is stored,
@@ -165,9 +165,12 @@ type FileAssetSpec struct {
     IsBase64 bool `json:"isBase64,omitempty"`
 }
 
+// Assets defines the privately hosted assets
 type Assets struct {
+    // ContainerRegistry is a url for a docker registry
     ContainerRegistry *string `json:"containerRegistry,omitempty"`
+    // FileRepository is the url for a private file serving repository
     FileRepository *string `json:"fileRepository,omitempty"`
 }
 
 // IAMSpec adds control over the IAM security policies applied to resources

View File

@@ -29,6 +29,7 @@ type Cluster struct {
     Spec ClusterSpec `json:"spec,omitempty"`
 }
 
+// ClusterList is a list of clusters
 type ClusterList struct {
     metav1.TypeMeta `json:",inline"`
     metav1.ListMeta `json:"metadata,omitempty"`
@@ -36,52 +37,42 @@ type ClusterList struct {
     Items []Cluster `json:"items"`
 }
 
+// ClusterSpec defines the configuration for a cluster
 type ClusterSpec struct {
     // The Channel we are following
     Channel string `json:"channel,omitempty"`
     // ConfigBase is the path where we store configuration for the cluster
     // This might be different that the location when the cluster spec itself is stored,
     // both because this must be accessible to the cluster,
     // and because it might be on a different cloud or storage system (etcd vs S3)
     ConfigBase string `json:"configBase,omitempty"`
     // The CloudProvider to use (aws or gce)
     CloudProvider string `json:"cloudProvider,omitempty"`
     // The version of kubernetes to install (optional, and can be a "spec" like stable)
     KubernetesVersion string `json:"kubernetesVersion,omitempty"`
     // Configuration of subnets we are targeting
     Subnets []ClusterSubnetSpec `json:"subnets,omitempty"`
     // Project is the cloud project we should use, required on GCE
     Project string `json:"project,omitempty"`
     // MasterPublicName is the external DNS name for the master nodes
     MasterPublicName string `json:"masterPublicName,omitempty"`
     // MasterInternalName is the internal DNS name for the master nodes
     MasterInternalName string `json:"masterInternalName,omitempty"`
     // The CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s
     // This is a real CIDR, not the internal k8s network
     NetworkCIDR string `json:"networkCIDR,omitempty"`
     // NetworkID is an identifier of a network, if we want to reuse/share an existing network (e.g. an AWS VPC)
     NetworkID string `json:"networkID,omitempty"`
     // Topology defines the type of network topology to use on the cluster - default public
     // This is heavily weighted towards AWS for the time being, but should also be agnostic enough
     // to port out to GCE later if needed
     Topology *TopologySpec `json:"topology,omitempty"`
     // SecretStore is the VFS path to where secrets are stored
     SecretStore string `json:"secretStore,omitempty"`
     // KeyStore is the VFS path to where SSL keys and certificates are stored
     KeyStore string `json:"keyStore,omitempty"`
     // ConfigStore is the VFS path to where the configuration (Cluster, InstanceGroups etc) is stored
     ConfigStore string `json:"configStore,omitempty"`
     // DNSZone is the DNS zone we should use when configuring DNS
     // This is because some clouds let us define a managed zone foo.bar, and then have
     // kubernetes.dev.foo.bar, without needing to define dev.foo.bar as a hosted zone.
@@ -89,41 +80,26 @@ type ClusterSpec struct {
     // Note that DNSZone can either by the host name of the zone (containing dots),
     // or can be an identifier for the zone.
     DNSZone string `json:"dnsZone,omitempty"`
     // ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local)
     ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"`
-    //InstancePrefix string `json:",omitempty"`
-
-    // ClusterName is a unique identifier for the cluster, and currently must be a DNS name
-    //ClusterName string `json:",omitempty"`
-
-    //ClusterIPRange string `json:",omitempty"`
     // ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
     ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
     //MasterIPRange string `json:",omitempty"`
     // NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
     // It cannot overlap ServiceClusterIPRange
     NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
     // SSHAccess determines the permitted access to SSH
-    // Currently only a single CIDR is supported (though a richer grammar could be added in future)
     SSHAccess []string `json:"sshAccess,omitempty"`
     // NodePortAccess is a list of the CIDRs that can access the node ports range (30000-32767).
     NodePortAccess []string `json:"nodePortAccess,omitempty"`
     // HTTPProxy defines connection information to support use of a private cluster behind an forward HTTP Proxy
     EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"`
     // SSHKeyName specifies a preexisting SSH key to use
     SSHKeyName string `json:"sshKeyName,omitempty"`
     // KubernetesAPIAccess determines the permitted access to the API endpoints (master HTTPS)
     // Currently only a single CIDR is supported (though a richer grammar could be added in future)
     KubernetesAPIAccess []string `json:"kubernetesApiAccess,omitempty"`
     // IsolatesMasters determines whether we should lock down masters so that they are not on the pod network.
     // true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master
     // if they have hostNetwork=true.
@@ -132,19 +108,15 @@ type ClusterSpec struct {
     // * run kube-proxy on the master
     // * enable debugging handlers on the master, so kubectl logs works
     IsolateMasters *bool `json:"isolateMasters,omitempty"`
     // UpdatePolicy determines the policy for applying upgrades automatically.
     // Valid values:
     //   'external' do not apply updates automatically - they are applied manually or by an external system
     //   missing: default policy (currently OS security upgrades that do not require a reboot)
     UpdatePolicy *string `json:"updatePolicy,omitempty"`
     // Additional policies to add for roles
     AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
     // A collection of files assets for deployed cluster wide
     FileAssets []FileAssetSpec `json:"fileAssets,omitempty"`
     // EtcdClusters stores the configuration for each cluster
     EtcdClusters []*EtcdClusterSpec `json:"etcdClusters,omitempty"`
@@ -174,7 +146,6 @@ type ClusterSpec struct {
     Hooks []HookSpec `json:"hooks,omitempty"`
     // Alternative locations for files and containers
     Assets *Assets `json:"assets,omitempty"`
-
     // IAM field adds control over the IAM security policies applied to resources
     IAM *IAMSpec `json:"iam,omitempty"`
     // EncryptionConfig holds the encryption config
@@ -195,9 +166,12 @@ type FileAssetSpec struct {
     IsBase64 bool `json:"isBase64,omitempty"`
 }
 
+// Assets defines the privately hosted assets
 type Assets struct {
+    // ContainerRegistry is a url for a docker registry
     ContainerRegistry *string `json:"containerRegistry,omitempty"`
+    // FileRepository is the url for a private file serving repository
     FileRepository *string `json:"fileRepository,omitempty"`
 }
 
 // IAMSpec adds control over the IAM security policies applied to resources

View File

@@ -29,8 +29,19 @@ import (
     "github.com/golang/glog"
 )
 
-// BuildFlags builds flag arguments based on "flag" tags on the structure
+// BuildFlags returns a space separated list of arguments
+// @deprecated: please use BuildFlagsList
 func BuildFlags(options interface{}) (string, error) {
+    flags, err := BuildFlagsList(options)
+    if err != nil {
+        return "", err
+    }
+
+    return strings.Join(flags, " "), nil
+}
+
+// BuildFlagsList reflects the options interface and extracts the flags from struct tags
+func BuildFlagsList(options interface{}) ([]string, error) {
     var flags []string
 
     walker := func(path string, field *reflect.StructField, val reflect.Value) error {
@@ -171,20 +182,20 @@ func BuildFlags(options interface{}) (string, error) {
             }
         default:
-            return fmt.Errorf("BuildFlags of value type not handled: %T %s=%v", v, path, v)
+            return fmt.Errorf("BuildFlagsList of value type not handled: %T %s=%v", v, path, v)
         }
 
         if flag != "" {
             flags = append(flags, flag)
         }
+        // Nothing more to do here
         return utils.SkipReflection
     }
 
     err := utils.ReflectRecursive(reflect.ValueOf(options), walker)
     if err != nil {
-        return "", err
+        return nil, fmt.Errorf("BuildFlagsList to reflect value: %s", err)
     }
 
     // Sort so that the order is stable across runs
     sort.Strings(flags)
-    return strings.Join(flags, " "), nil
+    return flags, nil
 }

View File

@@ -17,11 +17,12 @@ limitations under the License.
 package flagbuilder
 
 import (
+    "testing"
+    "time"
+
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kops/pkg/apis/kops"
     "k8s.io/kops/upup/pkg/fi"
-    "testing"
-    "time"
 )
 
 func stringPointer(s string) *string {

View File

@@ -43,7 +43,9 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
                 v1.ResourceCPU: c.CPURequest,
             },
         },
-        Command: []string{"/bin/sh", "-c", "/usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log"},
+        Command: []string{
+            "/bin/sh", "-c", "/usr/local/bin/etcd 2>&1 | /bin/tee -a /var/log/etcd.log",
+        },
     }
 
     // build the the environment variables for etcd service
     container.Env = buildEtcdEnvironmentOptions(c)

View File

@@ -33,7 +33,7 @@ spec:
   - command:
     - /bin/sh
     - -c
-    - /usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log
+    - /usr/local/bin/etcd 2>&1 | /bin/tee -a /var/log/etcd.log
     env:
     - name: ETCD_NAME
       value: node0

View File

@@ -39,7 +39,7 @@ spec:
   - command:
     - /bin/sh
    - -c
-    - /usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log
+    - /usr/local/bin/etcd 2>&1 | /bin/tee -a /var/log/etcd.log
     env:
     - name: ETCD_NAME
       value: node0