Merge pull request #1523 from justinsb/remove_kubelet

Refactor kubelet to code
Chris Love 2017-01-19 08:10:23 -08:00 committed by GitHub
commit 645bf798b6
15 changed files with 296 additions and 94 deletions


@@ -19,10 +19,36 @@ package model
import (
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
)
type NodeupModelContext struct {
Cluster *kops.Cluster
Architecture Architecture
Distribution distros.Distribution
Cluster *kops.Cluster
InstanceGroup *kops.InstanceGroup
Architecture Architecture
Distribution distros.Distribution
IsMaster bool
UsesCNI bool
Assets *fi.AssetStore
KeyStore fi.CAStore
SecretStore fi.SecretStore
}
func (c *NodeupModelContext) SSLHostPaths() []string {
paths := []string{"/etc/ssl", "/etc/pki/tls", "/etc/pki/ca-trust"}
switch c.Distribution {
case distros.DistributionCoreOS:
// Because /usr is read-only on CoreOS, we can't have any new directories; docker will try (and fail) to create them
// TODO: Just check if the directories exist?
paths = append(paths, "/usr/share/ca-certificates")
default:
paths = append(paths, "/usr/share/ssl", "/usr/ssl", "/usr/lib/ssl", "/usr/local/openssl", "/var/ssl", "/etc/openssl")
}
return paths
}
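For illustration only (not part of this diff): a builder embedding NodeupModelContext could turn SSLHostPaths into read-only bind mounts for a containerized component. The dockerRunArgs helper below is hypothetical, a minimal sketch of one way the new method might be consumed.

// Hypothetical sketch (not from this change): expose each host SSL/CA path
// read-only at the same location inside a container via docker -v arguments.
func dockerRunArgs(c *NodeupModelContext) []string {
	var args []string
	for _, p := range c.SSLHostPaths() {
		args = append(args, "-v", p+":"+p+":ro")
	}
	return args
}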


@@ -18,6 +18,7 @@ package model
import (
"io/ioutil"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/diff"
"k8s.io/kops/upup/pkg/fi"
@@ -55,7 +56,7 @@ func runDockerBuilderTest(t *testing.T, key string) {
nodeUpModelContext := &NodeupModelContext{
Cluster: cluster,
Architecture: "amd64",
Distribution: DistributionXenial,
Distribution: distros.DistributionXenial,
}
builder := DockerBuilder{NodeupModelContext: nodeUpModelContext}

nodeup/pkg/model/kubelet.go (new file, 238 lines)

@@ -0,0 +1,238 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"github.com/golang/glog"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/pkg/systemd"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)
// KubeletBuilder installs the kubelet
type KubeletBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &KubeletBuilder{}
func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
kubeletConfig, err := b.buildKubeletConfig()
if err != nil {
return fmt.Errorf("error building kubelet config: %v", err)
}
// Add sysconfig file
{
// TODO: Dump this - just complexity!
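// The rendered flags become DAEMON_ARGS; the systemd unit built in
// buildSystemdService sources this file and passes $DAEMON_ARGS to the kubelet.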
flags, err := flagbuilder.BuildFlags(kubeletConfig)
if err != nil {
return fmt.Errorf("error building kubelet flags: %v", err)
}
sysconfig := "DAEMON_ARGS=\"" + flags + "\"\n"
t := &nodetasks.File{
Path: "/etc/sysconfig/kubelet",
Contents: fi.NewStringResource(sysconfig),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
}
// Add kubelet file itself (as an asset)
{
// TODO: Extract to common function?
assetName := "kubelet"
assetPath := ""
asset, err := b.Assets.Find(assetName, assetPath)
if err != nil {
return fmt.Errorf("error trying to locate asset %q: %v", assetName, err)
}
if asset == nil {
return fmt.Errorf("unable to locate asset %q", assetName)
}
t := &nodetasks.File{
Path: b.kubeletPath(),
Contents: asset,
Type: nodetasks.FileType_File,
Mode: s("0755"),
}
c.AddTask(t)
}
// Add kubeconfig
{
kubeconfig, err := b.buildKubeconfig()
if err != nil {
return err
}
t := &nodetasks.File{
Path: "/var/lib/kubelet/kubeconfig",
Contents: fi.NewStringResource(kubeconfig),
Type: nodetasks.FileType_File,
Mode: s("0400"),
}
c.AddTask(t)
}
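// Ensure the CNI config directory exists so network plugin configuration can be written into it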
if b.UsesCNI {
t := &nodetasks.File{
Path: "/etc/cni/net.d/",
Type: nodetasks.FileType_Directory,
}
c.AddTask(t)
}
c.AddTask(b.buildSystemdService())
return nil
}
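// kubeletPath returns the path at which the kubelet binary is installed;
// /usr is read-only on CoreOS, so the binary lives under /opt there.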
func (b *KubeletBuilder) kubeletPath() string {
kubeletCommand := "/usr/local/bin/kubelet"
if b.Distribution == distros.DistributionCoreOS {
kubeletCommand = "/opt/kubernetes/bin/kubelet"
}
return kubeletCommand
}
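// buildSystemdService builds the kubelet.service unit; the service is installed
// but left stopped, because protokube is responsible for starting the kubelet.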
func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
kubeletCommand := b.kubeletPath()
manifest := &systemd.Manifest{}
manifest.Set("Unit", "Description", "Kubernetes Kubelet Server")
manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kubernetes")
manifest.Set("Unit", "After", "docker.service")
manifest.Set("Service", "EnvironmentFile", "/etc/sysconfig/kubelet")
manifest.Set("Service", "ExecStart", kubeletCommand+" \"$DAEMON_ARGS\"")
manifest.Set("Service", "Restart", "always")
manifest.Set("Service", "RestartSec", "2s")
manifest.Set("Service", "StartLimitInterval", "0")
manifest.Set("Service", "KillMode", "process")
manifestString := manifest.Render()
glog.V(8).Infof("Built service manifest %q\n%s", "kubelet", manifestString)
service := &nodetasks.Service{
Name: "kubelet.service",
Definition: s(manifestString),
}
// To avoid going into backoff, we wait for protokube to start us
service.Running = fi.Bool(false)
service.InitDefaults()
return service
}
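// buildKubeconfig renders the kubeconfig the kubelet uses to reach the API server,
// embedding the CA certificate and the kubelet client certificate and key from the keystore.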
func (b *KubeletBuilder) buildKubeconfig() (string, error) {
caCertificate, err := b.KeyStore.Cert(fi.CertificateId_CA)
if err != nil {
return "", fmt.Errorf("error fetching CA certificate from keystore: %v", err)
}
kubeletCertificate, err := b.KeyStore.Cert("kubelet")
if err != nil {
return "", fmt.Errorf("error fetching kubelet certificate from keystore: %v", err)
}
kubeletPrivateKey, err := b.KeyStore.PrivateKey("kubelet")
if err != nil {
return "", fmt.Errorf("error fetching kubelet private key from keystore: %v", err)
}
user := kubeconfig.KubectlUser{}
user.ClientCertificateData, err = kubeletCertificate.AsBytes()
if err != nil {
return "", fmt.Errorf("error encoding kubelet certificate: %v", err)
}
user.ClientKeyData, err = kubeletPrivateKey.AsBytes()
if err != nil {
return "", fmt.Errorf("error encoding kubelet private key: %v", err)
}
cluster := kubeconfig.KubectlCluster{}
cluster.CertificateAuthorityData, err = caCertificate.AsBytes()
if err != nil {
return "", fmt.Errorf("error encoding CA certificate: %v", err)
}
config := &kubeconfig.KubectlConfig{
ApiVersion: "v1",
Kind: "Config",
Users: []*kubeconfig.KubectlUserWithName{
{
Name: "kubelet",
User: user,
},
},
Clusters: []*kubeconfig.KubectlClusterWithName{
{
Name: "local",
Cluster: cluster,
},
},
Contexts: []*kubeconfig.KubectlContextWithName{
{
Name: "service-account-context",
Context: kubeconfig.KubectlContext{
Cluster: "local",
User: "kubelet",
},
},
},
CurrentContext: "service-account-context",
}
yaml, err := kops.ToRawYaml(config)
if err != nil {
return "", fmt.Errorf("error marshalling kubeconfig to yaml: %v", err)
}
return string(yaml), nil
}
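// buildKubeletConfig derives the KubeletConfigSpec for this node from the cluster
// spec and the instance group, synthesizing a group for old clusters that did not export one.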
func (b *KubeletBuilder) buildKubeletConfig() (*kops.KubeletConfigSpec, error) {
instanceGroup := b.InstanceGroup
if instanceGroup == nil {
// Old clusters might not have exported instance groups
// in that case we build a synthetic instance group with the information that BuildKubeletConfigSpec needs
// TODO: Remove this once we have a stable release
glog.Warningf("Building a synthetic instance group")
instanceGroup = &kops.InstanceGroup{}
instanceGroup.ObjectMeta.Name = "synthetic"
if b.IsMaster {
instanceGroup.Spec.Role = kops.InstanceGroupRoleMaster
} else {
instanceGroup.Spec.Role = kops.InstanceGroupRoleNode
}
//b.InstanceGroup = instanceGroup
}
kubeletConfigSpec, err := kops.BuildKubeletConfigSpec(b.Cluster, instanceGroup)
if err != nil {
return nil, fmt.Errorf("error building kubelet config: %v", err)
}
// TODO: Memoize if we reuse this
return kubeletConfigSpec, nil
}
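Mirroring the runDockerBuilderTest pattern shown earlier, a minimal sketch of how KubeletBuilder might be exercised in a test; it assumes the same imports as docker_test.go, and the empty cluster and missing asset/key/secret store fixtures mean Build would not succeed end to end as written.

func runKubeletBuilderSketch(t *testing.T) {
	// Sketch only: a real test needs a populated cluster spec plus Assets,
	// KeyStore and SecretStore fixtures for Build to complete successfully.
	cluster := &kops.Cluster{}
	nodeUpModelContext := &NodeupModelContext{
		Cluster:      cluster,
		Architecture: "amd64",
		Distribution: distros.DistributionXenial,
	}
	builder := KubeletBuilder{NodeupModelContext: nodeUpModelContext}

	context := &fi.ModelBuilderContext{
		Tasks: make(map[string]fi.Task),
	}
	if err := builder.Build(context); err != nil {
		t.Fatalf("error from KubeletBuilder Build: %v", err)
	}
	// On success, context.Tasks should contain the sysconfig file, the kubelet
	// binary, the kubeconfig and the kubelet.service tasks added above.
}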


@@ -1 +0,0 @@
# place holder file to create the default directory for CNI plugins


@@ -1 +0,0 @@
DAEMON_ARGS="{{ BuildFlags KubeletConfig }}"


@@ -1,3 +0,0 @@
{
"mode": "0755"
}


@@ -1,17 +0,0 @@
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: {{ Base64Encode (Certificate "kubelet").AsString }}
client-key-data: {{ Base64Encode (PrivateKey "kubelet").AsString }}
clusters:
- name: local
cluster:
certificate-authority-data: {{ Base64Encode CACertificate.AsString }}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context


@@ -1,3 +0,0 @@
{
"mode": "0400"
}


@@ -1,12 +0,0 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
[Service]
EnvironmentFile=/etc/sysconfig/kubelet
ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS"
Restart=always
RestartSec=2s
StartLimitInterval=0
KillMode=process


@@ -1,3 +0,0 @@
{
"running": false
}


@@ -34,6 +34,7 @@ import (
"k8s.io/kops/upup/pkg/fi/nodeup/cloudinit"
"k8s.io/kops/upup/pkg/fi/nodeup/local"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kops/upup/pkg/fi/nodeup/tags"
"k8s.io/kops/upup/pkg/fi/utils"
"k8s.io/kops/util/pkg/vfs"
"k8s.io/kubernetes/pkg/util/sets"
@@ -181,27 +182,35 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
osTags := distribution.BuildTags()
tags := sets.NewString()
tags.Insert(osTags...)
tags.Insert(c.config.Tags...)
nodeTags := sets.NewString()
nodeTags.Insert(osTags...)
nodeTags.Insert(c.config.Tags...)
glog.Infof("Config tags: %v", c.config.Tags)
glog.Infof("OS tags: %v", osTags)
modelContext := &model.NodeupModelContext{
Cluster: c.cluster,
Distribution: distribution,
Architecture: model.ArchitectureAmd64,
}
loader := NewLoader(c.config, c.cluster, assets, tags)
loader.Builders = append(loader.Builders, &model.DockerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.SysctlBuilder{NodeupModelContext: modelContext})
tf, err := newTemplateFunctions(c.config, c.cluster, c.instanceGroup, tags)
tf, err := newTemplateFunctions(c.config, c.cluster, c.instanceGroup, nodeTags)
if err != nil {
return fmt.Errorf("error initializing: %v", err)
}
modelContext := &model.NodeupModelContext{
Cluster: c.cluster,
Distribution: distribution,
Architecture: model.ArchitectureAmd64,
InstanceGroup: c.instanceGroup,
IsMaster: nodeTags.Has(TagMaster),
UsesCNI: nodeTags.Has(tags.TagCNI),
Assets: assets,
KeyStore: tf.keyStore,
SecretStore: tf.secretStore,
}
loader := NewLoader(c.config, c.cluster, assets, nodeTags)
loader.Builders = append(loader.Builders, &model.DockerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeletBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.SysctlBuilder{NodeupModelContext: modelContext})
tf.populate(loader.TemplateFunctions)
taskMap, err := loader.Build(c.ModelDir)
@@ -232,13 +241,13 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
case "direct":
target = &local.LocalTarget{
CacheDir: c.CacheDir,
Tags: tags,
Tags: nodeTags,
}
case "dryrun":
target = fi.NewDryRunTarget(out)
case "cloudinit":
checkExisting = false
target = cloudinit.NewCloudInitTarget(out, tags)
target = cloudinit.NewCloudInitTarget(out, nodeTags)
default:
return fmt.Errorf("unsupported target type %q", c.Target)
}


@@ -21,6 +21,8 @@ const (
TagOSFamilyDebian = "_debian_family"
TagSystemd = "_systemd"
TagCNI = "_networking_cni"
)
type HasTags interface {


@@ -50,9 +50,6 @@ type templateFunctions struct {
secretStore fi.SecretStore
tags sets.String
// kubeletConfig is the kubelet config for the current node
kubeletConfig *api.KubeletConfigSpec
}
// newTemplateFunctions is the constructor for templateFunctions
@@ -88,29 +85,6 @@ func newTemplateFunctions(nodeupConfig *NodeUpConfig, cluster *api.Cluster, inst
return nil, fmt.Errorf("KeyStore not set")
}
{
instanceGroup := t.instanceGroup
if instanceGroup == nil {
// Old clusters might not have exported instance groups
// in that case we build a synthetic instance group with the information that BuildKubeletConfigSpec needs
// TODO: Remove this once we have a stable release
glog.Warningf("Building a synthetic instance group")
instanceGroup = &api.InstanceGroup{}
instanceGroup.ObjectMeta.Name = "synthetic"
if t.IsMaster() {
instanceGroup.Spec.Role = api.InstanceGroupRoleMaster
} else {
instanceGroup.Spec.Role = api.InstanceGroupRoleNode
}
t.instanceGroup = instanceGroup
}
kubeletConfigSpec, err := api.BuildKubeletConfigSpec(cluster, instanceGroup)
if err != nil {
return nil, fmt.Errorf("error building kubelet config: %v", err)
}
t.kubeletConfig = kubeletConfigSpec
}
return t, nil
}
@@ -145,9 +119,6 @@ func (t *templateFunctions) populate(dest template.FuncMap) {
return t.cluster.Spec.KubeControllerManager
}
dest["KubeProxy"] = t.KubeProxyConfig
dest["KubeletConfig"] = func() *api.KubeletConfigSpec {
return t.kubeletConfig
}
dest["ClusterName"] = func() string {
return t.cluster.ObjectMeta.Name