Merge pull request #3229 from gambol99/manifest_files

Automatic merge from submit-queue.

Manifest files

The current implementation embeds the flags into the command line rather than passing them as arguments, which would be more consistent with the other manifests and with kubeadm. Logging currently pushes all the logs onto the host, which isn't really ideal with an ephemeral host model; users should be pushed to implement a proper remote logging stack to retain logs, not to ssh into boxes.

- changed the manifests to use argument lists rather than flags embedded in a single command string
- changed the logging to pipe through `tee`, so output also reaches the container's stdout — and hence the journal via Docker's log driver — rather than relying solely on host log files

```shell
core@ip-10-250-34-21 ~ $ cat /etc/kubernetes/manifests/kube-apiserver.manifest  | grep -A 6 args
  - args:
    - --address=127.0.0.1
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota,PodTolerationRestriction
    - --allow-privileged=true
    - --anonymous-auth=false
    - --apiserver-count=3
    - --authorization-mode=RBAC
```
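For comparison, the shape of the change inside the builders (an illustrative fragment with made-up flag values, not the exact kops code): the old approach joined the flags into one string and redirected everything into a host file, while the new one keeps a sorted `[]string` until the last moment and pipes through `tee -a` so the stream stays visible on stdout.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	flags := []string{"--allow-privileged=true", "--address=127.0.0.1"}

	// Old style: a single command string, with all output redirected into a
	// host file -- the container itself emits nothing on stdout.
	oldCmd := "/usr/local/bin/kube-apiserver " + strings.Join(flags, " ") +
		" 1>>/var/log/kube-apiserver.log 2>&1"

	// New style: sort for stable manifests across runs, and tee -a so output
	// reaches both the legacy log file and stdout (and hence the journal,
	// when Docker runs with the journald log driver).
	sort.Strings(flags)
	newCmd := "/usr/local/bin/kube-apiserver " + strings.Join(flags, " ") +
		" 2>&1 | /bin/tee -a /var/log/kube-apiserver.log"

	fmt.Println(oldCmd)
	fmt.Println(newCmd)
}
```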
Kubernetes Submit Queue, 2017-09-27 09:55:48 -07:00, committed by GitHub
commit 383a37ad16
16 changed files with 200 additions and 224 deletions

View File

@ -19,6 +19,7 @@ package model
import (
"fmt"
"path/filepath"
"sort"
"strconv"
"k8s.io/client-go/pkg/api/v1"
@ -143,3 +144,30 @@ func buildPrivateKeyRequest(c *fi.ModelBuilderContext, b *NodeupModelContext, na
return nil
}
// sortedStrings is a one-liner helper method
func sortedStrings(list []string) []string {
sort.Strings(list)
return list
}
// addHostPathMapping is shorthand for mapping a host path into a container
func addHostPathMapping(pod *v1.Pod, container *v1.Container, name, path string) *v1.VolumeMount {
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: path,
},
},
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: name,
MountPath: path,
ReadOnly: true,
})
return &container.VolumeMounts[len(container.VolumeMounts)-1]
}
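
One detail worth calling out in `addHostPathMapping`: it returns a pointer into the container's `VolumeMounts` slice so that callers can flip fields such as `ReadOnly` on the mount just added. That is safe only because the pointer is used before any further append (which could reallocate the backing array). A minimal sketch of the pattern, with toy types standing in for the client-go ones:

```go
package main

import "fmt"

// Toy stand-ins for the client-go types, used only to illustrate the pattern.
type volumeMount struct {
	Name      string
	MountPath string
	ReadOnly  bool
}

type container struct {
	VolumeMounts []volumeMount
}

// addMount appends a read-only mount and returns a pointer to the new slice
// element, mirroring addHostPathMapping above.
func addMount(c *container, name, path string) *volumeMount {
	c.VolumeMounts = append(c.VolumeMounts, volumeMount{
		Name:      name,
		MountPath: path,
		ReadOnly:  true,
	})
	return &c.VolumeMounts[len(c.VolumeMounts)-1]
}

func main() {
	c := &container{}
	// The log file mount must be writable, so the caller flips ReadOnly.
	addMount(c, "logfile", "/var/log/kube-apiserver.log").ReadOnly = false
	fmt.Printf("%+v\n", c.VolumeMounts)
}
```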

View File

@ -21,15 +21,16 @@ import (
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/pkg/api/v1"
)
const PathAuthnConfig = "/etc/kubernetes/authn.config"
@ -47,8 +48,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
return nil
}
err := b.writeAuthenticationConfig(c)
if err != nil {
if err := b.writeAuthenticationConfig(c); err != nil {
return err
}
@ -80,12 +80,11 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
return fmt.Errorf("error marshalling manifest to yaml: %v", err)
}
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/etc/kubernetes/manifests/kube-apiserver.manifest",
Contents: fi.NewBytesResource(manifest),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
})
}
// @check if we are using secure client certificates for kubelet and grab the certificates
@ -99,17 +98,13 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
}
}
// Touch log file, so that docker doesn't create a directory instead
{
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/var/log/kube-apiserver.log",
Contents: fi.NewStringResource(""),
Type: nodetasks.FileType_File,
Mode: s("0400"),
IfNotExists: true,
}
c.AddTask(t)
}
})
return nil
}
@ -150,12 +145,11 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte
return fmt.Errorf("error marshalling authentication config to yaml: %v", err)
}
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: PathAuthnConfig,
Contents: fi.NewBytesResource(manifest),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
})
return nil
}
@ -195,18 +189,14 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
}
// build the kube-apiserver flags for the service
flags, err := flagbuilder.BuildFlags(b.Cluster.Spec.KubeAPIServer)
flags, err := flagbuilder.BuildFlagsList(b.Cluster.Spec.KubeAPIServer)
if err != nil {
return nil, fmt.Errorf("error building kube-apiserver flags: %v", err)
}
// Add cloud config file if needed
// add cloud config file if needed
if b.Cluster.Spec.CloudConfig != nil {
flags += " --cloud-config=" + CloudConfigFilePath
}
redirectCommand := []string{
"/bin/sh", "-c", "/usr/local/bin/kube-apiserver " + flags + " 1>>/var/log/kube-apiserver.log 2>&1",
flags = append(flags, fmt.Sprintf("--cloud-config=%s", CloudConfigFilePath))
}
pod := &v1.Pod{
@ -242,12 +232,11 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
container := &v1.Container{
Name: "kube-apiserver",
Image: b.Cluster.Spec.KubeAPIServer.Image,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("150m"),
Command: []string{
"/bin/sh", "-c",
"/usr/local/bin/kube-apiserver " + strings.Join(sortedStrings(flags), " ") + " 2>&1 | /bin/tee -a /var/log/kube-apiserver.log",
},
},
Command: redirectCommand,
Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: probeAction,
@ -267,7 +256,11 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
HostPort: 8080,
},
},
Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("150m"),
},
},
}
for _, path := range b.SSLHostPaths() {
@ -275,6 +268,8 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
addHostPathMapping(pod, container, name, path)
}
addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log").ReadOnly = false
// Add cloud config file if needed
if b.Cluster.Spec.CloudConfig != nil {
addHostPathMapping(pod, container, "cloudconfig", CloudConfigFilePath)
@ -290,8 +285,6 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
addHostPathMapping(pod, container, "srvsshproxy", pathSrvSshproxy)
}
addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log").ReadOnly = false
auditLogPath := b.Cluster.Spec.KubeAPIServer.AuditLogPath
if auditLogPath != nil {
// Mount the directory of the path instead, as kube-apiserver rotates the log by renaming the file.
@ -312,30 +305,12 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
return pod, nil
}
func addHostPathMapping(pod *v1.Pod, container *v1.Container, name string, path string) *v1.VolumeMount {
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: path,
},
},
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: name,
MountPath: path,
ReadOnly: true,
})
return &container.VolumeMounts[len(container.VolumeMounts)-1]
}
func (b *KubeAPIServerBuilder) buildAnnotations() map[string]string {
annotations := make(map[string]string)
annotations["dns.alpha.kubernetes.io/internal"] = b.Cluster.Spec.MasterInternalName
if b.Cluster.Spec.API != nil && b.Cluster.Spec.API.DNS != nil {
annotations["dns.alpha.kubernetes.io/external"] = b.Cluster.Spec.MasterPublicName
}
return annotations
}

View File

@ -21,13 +21,14 @@ import (
"path/filepath"
"strings"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)
// KubeControllerManagerBuilder installs kube-controller-manager (just the manifest at the moment)
@ -37,13 +38,14 @@ type KubeControllerManagerBuilder struct {
var _ fi.ModelBuilder = &KubeControllerManagerBuilder{}
// Build is responsible for configuring the kube-controller-manager
func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
if !b.IsMaster {
return nil
}
// If we're using the CertificateSigner, include the CA Key
// TODO: use a per-machine key? use KMS?
// @TODO: use a per-machine key? use KMS?
if b.useCertificateSigner() {
ca, err := b.KeyStore.PrivateKey(fi.CertificateId_CA, false)
if err != nil {
@ -55,12 +57,11 @@ func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
return err
}
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: filepath.Join(b.PathSrvKubernetes(), "ca.key"),
Contents: fi.NewStringResource(serialized),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
})
}
{
@ -82,33 +83,29 @@ func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(t)
}
// Add kubeconfig
{
// TODO: Change kubeconfig to be https
kubeconfig, err := b.buildPKIKubeconfig("kube-controller-manager")
if err != nil {
return err
}
t := &nodetasks.File{
Path: "/var/lib/kube-controller-manager/kubeconfig",
Contents: fi.NewStringResource(kubeconfig),
Type: nodetasks.FileType_File,
Mode: s("0400"),
}
c.AddTask(t)
}
// Touch log file, so that docker doesn't create a directory instead
{
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/var/log/kube-controller-manager.log",
Contents: fi.NewStringResource(""),
Type: nodetasks.FileType_File,
Mode: s("0400"),
IfNotExists: true,
})
}
c.AddTask(t)
// Add kubeconfig
{
// @TODO: Change kubeconfig to be https
kubeconfig, err := b.buildPKIKubeconfig("kube-controller-manager")
if err != nil {
return err
}
c.AddTask(&nodetasks.File{
Path: "/var/lib/kube-controller-manager/kubeconfig",
Contents: fi.NewStringResource(kubeconfig),
Type: nodetasks.FileType_File,
Mode: s("0400"),
})
}
return nil
@ -121,32 +118,27 @@ func (b *KubeControllerManagerBuilder) useCertificateSigner() bool {
func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
kcm := b.Cluster.Spec.KubeControllerManager
kcm.RootCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
kcm.ServiceAccountPrivateKeyFile = filepath.Join(b.PathSrvKubernetes(), "server.key")
flags, err := flagbuilder.BuildFlags(kcm)
flags, err := flagbuilder.BuildFlagsList(kcm)
if err != nil {
return nil, fmt.Errorf("error building kube-controller-manager flags: %v", err)
}
// Add cloud config file if needed
if b.Cluster.Spec.CloudConfig != nil {
flags += " --cloud-config=" + CloudConfigFilePath
flags = append(flags, "--cloud-config="+CloudConfigFilePath)
}
// Add kubeconfig flag
flags += " --kubeconfig=" + "/var/lib/kube-controller-manager/kubeconfig"
flags = append(flags, "--kubeconfig="+"/var/lib/kube-controller-manager/kubeconfig")
// Configure CA certificate to be used to sign keys, if we are using CSRs
if b.useCertificateSigner() {
flags += " --cluster-signing-cert-file=" + filepath.Join(b.PathSrvKubernetes(), "ca.crt")
flags += " --cluster-signing-key-file=" + filepath.Join(b.PathSrvKubernetes(), "ca.key")
}
redirectCommand := []string{
"/bin/sh", "-c", "/usr/local/bin/kube-controller-manager " + flags + " 1>>/var/log/kube-controller-manager.log 2>&1",
flags = append(flags, []string{
"--cluster-signing-cert-file=" + filepath.Join(b.PathSrvKubernetes(), "ca.crt"),
"--cluster-signing-key-file=" + filepath.Join(b.PathSrvKubernetes(), "ca.key")}...)
}
pod := &v1.Pod{
@ -169,12 +161,11 @@ func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
container := &v1.Container{
Name: "kube-controller-manager",
Image: b.Cluster.Spec.KubeControllerManager.Image,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
Command: []string{
"/bin/sh", "-c",
"/usr/local/bin/kube-controller-manager " + strings.Join(sortedStrings(flags), " ") + " 2>&1 | /bin/tee -a /var/log/kube-controller-manager.log",
},
},
Command: redirectCommand,
Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
@ -186,12 +177,15 @@ func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
},
Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
},
}
for _, path := range b.SSLHostPaths() {
name := strings.Replace(path, "/", "", -1)
addHostPathMapping(pod, container, name, path)
}

View File

@ -18,14 +18,17 @@ package model
import (
"fmt"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/pkg/api/v1"
"strings"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/pkg/api/v1"
)
// KubeProxyBuilder installs kube-proxy
@ -35,6 +38,8 @@ type KubeProxyBuilder struct {
var _ fi.ModelBuilder = &KubeAPIServerBuilder{}
// Build is responsible for building the kube-proxy manifest
// @TODO we should probably change this to a daemonset in the future and follow the kubeadm path
func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error {
if b.IsMaster {
// If this is a master that is not isolated, run it as a normal node also (start kube-proxy etc)
@ -56,44 +61,40 @@ func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error {
return fmt.Errorf("error marshalling manifest to yaml: %v", err)
}
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/etc/kubernetes/manifests/kube-proxy.manifest",
Contents: fi.NewBytesResource(manifest),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
})
}
// Add kubeconfig
{
kubeconfig, err := b.buildPKIKubeconfig("kube-proxy")
if err != nil {
return err
}
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/var/lib/kube-proxy/kubeconfig",
Contents: fi.NewStringResource(kubeconfig),
Type: nodetasks.FileType_File,
Mode: s("0400"),
}
c.AddTask(t)
})
}
// Touch log file, so that docker doesn't create a directory instead
{
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/var/log/kube-proxy.log",
Contents: fi.NewStringResource(""),
Type: nodetasks.FileType_File,
Mode: s("0400"),
IfNotExists: true,
}
c.AddTask(t)
})
}
return nil
}
// buildPod is responsible for constructing the pod spec
func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
c := b.Cluster.Spec.KubeProxy
if c == nil {
@ -120,45 +121,39 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
return nil, fmt.Errorf("Error parsing CPURequest=%q", c.CPURequest)
}
flags, err := flagbuilder.BuildFlags(c)
flags, err := flagbuilder.BuildFlagsList(c)
if err != nil {
return nil, fmt.Errorf("error building kubeproxy flags: %v", err)
}
image := c.Image
cmd := "echo -998 > /proc/$$$/oom_score_adj && kube-proxy --kubeconfig=/var/lib/kube-proxy/kubeconfig --conntrack-max-per-core=131072 --resource-container=\"\" "
cmd += flags
// cmd += " 1>>/var/log/kube-proxy.log 2>&1"
cmd += " 2>&1 | /usr/bin/tee /var/log/kube-proxy.log"
glog.V(2).Infof("built kube-proxy command: %s", cmd)
command := []string{
"/bin/sh", "-c",
cmd,
}
flags = append(flags, []string{
"--conntrack-max-per-core=131072",
"--kubeconfig=/var/lib/kube-proxy/kubeconfig",
"--oom-score-adj=-998",
`--resource-container=""`}...)
container := &v1.Container{
Name: "kube-proxy",
Image: image,
ImagePullPolicy: v1.PullIfNotPresent,
Command: command,
SecurityContext: &v1.SecurityContext{
Privileged: fi.Bool(true),
Command: []string{
"/bin/sh", "-c",
"/usr/local/bin/kube-proxy " + strings.Join(sortedStrings(flags), " ") + " 2>&1 | /usr/bin/tee -a /var/log/kube-proxy.log",
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": cpuRequest,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: fi.Bool(true),
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "kube-proxy",
"tier": "node",
@ -179,12 +174,8 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
}
{
// Map /var/log to host
varlog := addHostPathMapping(pod, container, "varlog", "/var/log")
varlog.ReadOnly = false
// Map kubeconfig from host
addHostPathMapping(pod, container, "kubeconfig", "/var/lib/kube-proxy/kubeconfig")
addHostPathMapping(pod, container, "logfile", "/var/log/kube-proxy.log").ReadOnly = false
// Map SSL certs from host: /usr/share/ca-certificates -> /etc/ssl/certs
sslCertsHost := addHostPathMapping(pod, container, "ssl-certs-hosts", "/usr/share/ca-certificates")

View File

@ -18,13 +18,16 @@ package model
import (
"fmt"
"strings"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)
// KubeSchedulerBuilder installs kube-scheduler
@ -34,6 +37,7 @@ type KubeSchedulerBuilder struct {
var _ fi.ModelBuilder = &KubeSchedulerBuilder{}
// Build is responsible for building the manifest for the kube-scheduler
func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
if !b.IsMaster {
return nil
@ -50,58 +54,50 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
return fmt.Errorf("error marshalling pod to yaml: %v", err)
}
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/etc/kubernetes/manifests/kube-scheduler.manifest",
Contents: fi.NewBytesResource(manifest),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
})
}
// Add kubeconfig
{
kubeconfig, err := b.buildPKIKubeconfig("kube-scheduler")
if err != nil {
return err
}
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/var/lib/kube-scheduler/kubeconfig",
Contents: fi.NewStringResource(kubeconfig),
Type: nodetasks.FileType_File,
Mode: s("0400"),
}
c.AddTask(t)
})
}
// Touch log file, so that docker doesn't create a directory instead
{
t := &nodetasks.File{
c.AddTask(&nodetasks.File{
Path: "/var/log/kube-scheduler.log",
Contents: fi.NewStringResource(""),
Type: nodetasks.FileType_File,
Mode: s("0400"),
IfNotExists: true,
}
c.AddTask(t)
})
}
return nil
}
// buildPod is responsible for constructing the pod specification
func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) {
c := b.Cluster.Spec.KubeScheduler
flags, err := flagbuilder.BuildFlags(c)
flags, err := flagbuilder.BuildFlagsList(c)
if err != nil {
return nil, fmt.Errorf("error building kube-scheduler flags: %v", err)
}
// Add kubeconfig flag
flags += " --kubeconfig=" + "/var/lib/kube-scheduler/kubeconfig"
redirectCommand := []string{
"/bin/sh", "-c", "/usr/local/bin/kube-scheduler " + flags + " 1>>/var/log/kube-scheduler.log 2>&1",
}
flags = append(flags, "--kubeconfig="+"/var/lib/kube-scheduler/kubeconfig")
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@ -123,12 +119,11 @@ func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) {
container := &v1.Container{
Name: "kube-scheduler",
Image: c.Image,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
Command: []string{
"/bin/sh", "-c",
"/usr/local/bin/kube-scheduler " + strings.Join(sortedStrings(flags), " ") + " 2>&1 | /bin/tee -a /var/log/kube-scheduler.log",
},
},
Command: redirectCommand,
Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
@ -140,11 +135,13 @@ func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) {
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
},
Env: getProxyEnvVars(b.Cluster.Spec.EgressProxy),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
},
}
addHostPathMapping(pod, container, "varlibkubescheduler", "/var/lib/kube-scheduler")
addHostPathMapping(pod, container, "logfile", "/var/log/kube-scheduler.log").ReadOnly = false
pod.Spec.Containers = append(pod.Spec.Containers, *container)

View File

@ -18,12 +18,13 @@ package model
import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"strings"
)
// LogrotateBuilder installs logrotate.d and configures log rotation for kubernetes logs

View File

@ -61,8 +61,7 @@ type ClusterSpec struct {
MasterPublicName string `json:"masterPublicName,omitempty"`
// MasterInternalName is the internal DNS name for the master nodes
MasterInternalName string `json:"masterInternalName,omitempty"`
// The CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s
// This is a real CIDR, not the internal k8s network
// NetworkCIDR is used by the AWS VPC / GCE Network, or otherwise allocated to k8s. This is a real CIDR, not the internal k8s network
NetworkCIDR string `json:"networkCIDR,omitempty"`
// NetworkID is an identifier of a network, if we want to reuse/share an existing network (e.g. an AWS VPC)
NetworkID string `json:"networkID,omitempty"`
@ -144,11 +143,8 @@ type ClusterSpec struct {
CloudLabels map[string]string `json:"cloudLabels,omitempty"`
// Hooks for custom actions e.g. on first installation
Hooks []HookSpec `json:"hooks,omitempty"`
// Alternative locations for files and containers
// This API component is under construction, will remove this comment
// once this API is fully functional.
// Assets are alternative locations for files and containers; this API is under construction, and this comment will be removed once the API is fully functional.
Assets *Assets `json:"assets,omitempty"`
// IAM field adds control over the IAM security policies applied to resources
IAM *IAMSpec `json:"iam,omitempty"`
// EncryptionConfig controls if encryption is enabled
@ -169,8 +165,11 @@ type FileAssetSpec struct {
IsBase64 bool `json:"isBase64,omitempty"`
}
// Assets defines the privately hosted assets
type Assets struct {
// ContainerRegistry is a url to a docker registry
ContainerRegistry *string `json:"containerRegistry,omitempty"`
// FileRepository is the url for a private file serving repository
FileRepository *string `json:"fileRepository,omitempty"`
}

View File

@ -41,7 +41,7 @@ type ClusterList struct {
// ClusterSpec defines the configuration for a cluster
type ClusterSpec struct {
// The Channel we are following
// Channel we are following
Channel string `json:"channel,omitempty"`
// ConfigBase is the path where we store configuration for the cluster
// This might be different that the location when the cluster spec itself is stored,
@ -165,8 +165,11 @@ type FileAssetSpec struct {
IsBase64 bool `json:"isBase64,omitempty"`
}
// Assets defines the privately hosted assets
type Assets struct {
// ContainerRegistry is a url to a docker registry
ContainerRegistry *string `json:"containerRegistry,omitempty"`
// FileRepository is the url for a private file serving repository
FileRepository *string `json:"fileRepository,omitempty"`
}

View File

@ -29,6 +29,7 @@ type Cluster struct {
Spec ClusterSpec `json:"spec,omitempty"`
}
// ClusterList is a list of clusters
type ClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
@ -36,52 +37,42 @@ type ClusterList struct {
Items []Cluster `json:"items"`
}
// ClusterSpec defines the configuration for a cluster
type ClusterSpec struct {
// The Channel we are following
Channel string `json:"channel,omitempty"`
// ConfigBase is the path where we store configuration for the cluster
// This might be different that the location when the cluster spec itself is stored,
// both because this must be accessible to the cluster,
// and because it might be on a different cloud or storage system (etcd vs S3)
ConfigBase string `json:"configBase,omitempty"`
// The CloudProvider to use (aws or gce)
CloudProvider string `json:"cloudProvider,omitempty"`
// The version of kubernetes to install (optional, and can be a "spec" like stable)
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
// Configuration of subnets we are targeting
Subnets []ClusterSubnetSpec `json:"subnets,omitempty"`
// Project is the cloud project we should use, required on GCE
Project string `json:"project,omitempty"`
// MasterPublicName is the external DNS name for the master nodes
MasterPublicName string `json:"masterPublicName,omitempty"`
// MasterInternalName is the internal DNS name for the master nodes
MasterInternalName string `json:"masterInternalName,omitempty"`
// The CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s
// This is a real CIDR, not the internal k8s network
NetworkCIDR string `json:"networkCIDR,omitempty"`
// NetworkID is an identifier of a network, if we want to reuse/share an existing network (e.g. an AWS VPC)
NetworkID string `json:"networkID,omitempty"`
// Topology defines the type of network topology to use on the cluster - default public
// This is heavily weighted towards AWS for the time being, but should also be agnostic enough
// to port out to GCE later if needed
Topology *TopologySpec `json:"topology,omitempty"`
// SecretStore is the VFS path to where secrets are stored
SecretStore string `json:"secretStore,omitempty"`
// KeyStore is the VFS path to where SSL keys and certificates are stored
KeyStore string `json:"keyStore,omitempty"`
// ConfigStore is the VFS path to where the configuration (Cluster, InstanceGroups etc) is stored
ConfigStore string `json:"configStore,omitempty"`
// DNSZone is the DNS zone we should use when configuring DNS
// This is because some clouds let us define a managed zone foo.bar, and then have
// kubernetes.dev.foo.bar, without needing to define dev.foo.bar as a hosted zone.
@ -89,41 +80,26 @@ type ClusterSpec struct {
// Note that DNSZone can either by the host name of the zone (containing dots),
// or can be an identifier for the zone.
DNSZone string `json:"dnsZone,omitempty"`
// ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local)
ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"`
//InstancePrefix string `json:",omitempty"`
// ClusterName is a unique identifier for the cluster, and currently must be a DNS name
//ClusterName string `json:",omitempty"`
//ClusterIPRange string `json:",omitempty"`
// ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
//MasterIPRange string `json:",omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
// SSHAccess determines the permitted access to SSH
// Currently only a single CIDR is supported (though a richer grammar could be added in future)
SSHAccess []string `json:"sshAccess,omitempty"`
// NodePortAccess is a list of the CIDRs that can access the node ports range (30000-32767).
NodePortAccess []string `json:"nodePortAccess,omitempty"`
// HTTPProxy defines connection information to support use of a private cluster behind an forward HTTP Proxy
EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"`
// SSHKeyName specifies a preexisting SSH key to use
SSHKeyName string `json:"sshKeyName,omitempty"`
// KubernetesAPIAccess determines the permitted access to the API endpoints (master HTTPS)
// Currently only a single CIDR is supported (though a richer grammar could be added in future)
KubernetesAPIAccess []string `json:"kubernetesApiAccess,omitempty"`
// IsolatesMasters determines whether we should lock down masters so that they are not on the pod network.
// true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master
// if they have hostNetwork=true.
@ -132,19 +108,15 @@ type ClusterSpec struct {
// * run kube-proxy on the master
// * enable debugging handlers on the master, so kubectl logs works
IsolateMasters *bool `json:"isolateMasters,omitempty"`
// UpdatePolicy determines the policy for applying upgrades automatically.
// Valid values:
// 'external' do not apply updates automatically - they are applied manually or by an external system
// missing: default policy (currently OS security upgrades that do not require a reboot)
UpdatePolicy *string `json:"updatePolicy,omitempty"`
// Additional policies to add for roles
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
// A collection of files assets for deployed cluster wide
FileAssets []FileAssetSpec `json:"fileAssets,omitempty"`
// EtcdClusters stores the configuration for each cluster
EtcdClusters []*EtcdClusterSpec `json:"etcdClusters,omitempty"`
@ -174,7 +146,6 @@ type ClusterSpec struct {
Hooks []HookSpec `json:"hooks,omitempty"`
// Alternative locations for files and containers
Assets *Assets `json:"assets,omitempty"`
// IAM field adds control over the IAM security policies applied to resources
IAM *IAMSpec `json:"iam,omitempty"`
// EncryptionConfig holds the encryption config
@ -195,8 +166,11 @@ type FileAssetSpec struct {
IsBase64 bool `json:"isBase64,omitempty"`
}
// Assets defines the privately hosted assets
type Assets struct {
// ContainerRegistry is a url to a docker registry
ContainerRegistry *string `json:"containerRegistry,omitempty"`
// FileRepository is the url for a private file serving repository
FileRepository *string `json:"fileRepository,omitempty"`
}

View File

@ -29,8 +29,19 @@ import (
"github.com/golang/glog"
)
// BuildFlags builds flag arguments based on "flag" tags on the structure
// BuildFlags returns a space-separated list of arguments
// @deprecated: please use BuildFlagsList
func BuildFlags(options interface{}) (string, error) {
flags, err := BuildFlagsList(options)
if err != nil {
return "", err
}
return strings.Join(flags, " "), nil
}
// BuildFlagsList reflects the options interface and extracts the flags from struct tags
func BuildFlagsList(options interface{}) ([]string, error) {
var flags []string
walker := func(path string, field *reflect.StructField, val reflect.Value) error {
@ -171,20 +182,20 @@ func BuildFlags(options interface{}) (string, error) {
}
default:
return fmt.Errorf("BuildFlags of value type not handled: %T %s=%v", v, path, v)
return fmt.Errorf("BuildFlagsList of value type not handled: %T %s=%v", v, path, v)
}
if flag != "" {
flags = append(flags, flag)
}
// Nothing more to do here
return utils.SkipReflection
}
err := utils.ReflectRecursive(reflect.ValueOf(options), walker)
if err != nil {
return "", err
return nil, fmt.Errorf("BuildFlagsList to reflect value: %s", err)
}
// Sort so that the order is stable across runs
sort.Strings(flags)
return strings.Join(flags, " "), nil
return flags, nil
}
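
To make the new contract concrete, here is a toy sketch of the `BuildFlagsList` idea — walk a struct's `flag` tags, emit `--name=value` strings, and sort them for stable output — with the caller joining the list itself, exactly as the deprecated `BuildFlags` wrapper above now does. The option struct and helper here are illustrative, not kops's real types:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
)

// toyOptions is a hypothetical stand-in for a kops component config struct.
type toyOptions struct {
	Address         string `flag:"address"`
	AllowPrivileged bool   `flag:"allow-privileged"`
}

// buildFlagsList walks the struct fields, turning every `flag` tag into a
// "--name=value" argument; the real version recurses and handles pointers,
// maps, durations, and more.
func buildFlagsList(options interface{}) []string {
	var flags []string
	v := reflect.ValueOf(options)
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		name := t.Field(i).Tag.Get("flag")
		if name == "" {
			continue
		}
		flags = append(flags, fmt.Sprintf("--%s=%v", name, v.Field(i).Interface()))
	}
	// Sort so that the generated manifests are stable across runs.
	sort.Strings(flags)
	return flags
}

func main() {
	flags := buildFlagsList(toyOptions{Address: "127.0.0.1", AllowPrivileged: true})
	fmt.Println(strings.Join(flags, " ")) // --address=127.0.0.1 --allow-privileged=true
}
```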

View File

@ -17,11 +17,12 @@ limitations under the License.
package flagbuilder
import (
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
"testing"
"time"
)
func stringPointer(s string) *string {

View File

@ -43,7 +43,9 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
v1.ResourceCPU: c.CPURequest,
},
},
Command: []string{"/bin/sh", "-c", "/usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log"},
Command: []string{
"/bin/sh", "-c", "/usr/local/bin/etcd 2>&1 | /bin/tee -a /var/log/etcd.log",
},
}
// build the environment variables for the etcd service
container.Env = buildEtcdEnvironmentOptions(c)

View File

@ -33,7 +33,7 @@ spec:
- command:
- /bin/sh
- -c
- /usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log
- /usr/local/bin/etcd 2>&1 | /bin/tee -a /var/log/etcd.log
env:
- name: ETCD_NAME
value: node0

View File

@ -39,7 +39,7 @@ spec:
- command:
- /bin/sh
- -c
- /usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log
- /usr/local/bin/etcd 2>&1 | /bin/tee -a /var/log/etcd.log
env:
- name: ETCD_NAME
value: node0