mirror of https://github.com/kubernetes/kops.git
Merge pull request #561 from justinsb/more_protokube
Protokube: master tainting and channel creation
commit e0fd286e69

Makefile | 43
@@ -2,7 +2,7 @@ all: kops
 
 .PHONY: channels
 
-DOCKER_REGISTRY?=gcr.io/must-override/
+DOCKER_REGISTRY?=gcr.io/must-override
 S3_BUCKET?=s3://must-override/
 GCS_LOCATION?=gs://must-override
 GCS_URL=$(GCS_LOCATION:gs://%=https://storage.googleapis.com/%)
@@ -59,21 +59,6 @@ codegen: gobindata
 test:
 	go test k8s.io/kops/upup/pkg/... -args -v=1 -logtostderr
-
-godeps:
-	# I think strip-vendor is the workaround for 25572
-	glide install --strip-vendor --strip-vcs
-
-gofmt:
-	gofmt -w -s cmd/
-	gofmt -w -s channels/
-	gofmt -w -s util/
-	gofmt -w -s cmd/
-	gofmt -w -s upup/pkg/
-	gofmt -w -s protokube/cmd
-	gofmt -w -s protokube/pkg
-	gofmt -w -s dns-controller/cmd
-	gofmt -w -s dns-controller/pkg
 
 crossbuild:
 	mkdir -p .build/dist/
 	GOOS=darwin GOARCH=amd64 go build -a ${EXTRA_BUILDFLAGS} -o .build/dist/darwin/amd64/kops -ldflags "${EXTRA_LDFLAGS} -X main.BuildVersion=${VERSION}" k8s.io/kops/cmd/kops/...
@@ -174,18 +159,36 @@ dns-controller-image: dns-controller-build-in-docker
 dns-controller-push: dns-controller-image
 	docker push ${DOCKER_REGISTRY}/dns-controller:${TAG}
 
 
 # --------------------------------------------------
 # development targets
 
 copydeps:
 	rsync -avz _vendor/ vendor/ --delete --exclude vendor/ --exclude .git
 
+gofmt:
+	gofmt -w -s cmd/
+	gofmt -w -s channels/
+	gofmt -w -s util/
+	gofmt -w -s cmd/
+	gofmt -w -s upup/pkg/
+	gofmt -w -s protokube/cmd
+	gofmt -w -s protokube/pkg
+	gofmt -w -s dns-controller/cmd
+	gofmt -w -s dns-controller/pkg
+
+
+# --------------------------------------------------
+# Continuous integration targets
+
+ci: kops nodeup-gocode test
+	echo "Done"
+
+
 # --------------------------------------------------
 # channel tool
 
-channels:
-	go install ${EXTRA_BUILDFLAGS} -ldflags "-X main.BuildVersion=${VERSION} ${EXTRA_LDFLAGS}" k8s.io/kops/channels/cmd/...
+channels: channels-gocode
+
+channels-gocode:
+	go install ${EXTRA_BUILDFLAGS} -ldflags "-X main.BuildVersion=${VERSION} ${EXTRA_LDFLAGS}" k8s.io/kops/channels/cmd/channels
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -ex
 
 mkdir -p /go
 export GOPATH=/go
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -ex
 
 mkdir -p /go
 export GOPATH=/go
@@ -13,3 +13,14 @@ make protokube-gocode
 
 mkdir -p /src/.build/artifacts/
 cp /go/bin/protokube /src/.build/artifacts/
+
+# Applying channels calls out to the channels tool
+make channels-gocode
+cp /go/bin/channels /src/.build/artifacts/
+
+# channels uses protokube
+cd /src/.build/artifacts/
+curl -O https://storage.googleapis.com/kubernetes-release/release/v1.3.7/bin/linux/amd64/kubectl
+chmod +x kubectl
@@ -4,9 +4,13 @@ FROM debian:jessie
 # e2fsprogs: Needed to mount / format ext4 filesytems
 RUN apt-get update && apt-get install --yes ca-certificates e2fsprogs
 
+COPY /.build/artifacts/kubectl /usr/bin/kubectl
+
 COPY protokube/model/ /model/
 COPY protokube/templates/ /templates/
 
 COPY /.build/artifacts/protokube /usr/bin/protokube
+COPY /.build/artifacts/channels /usr/bin/channels
 
 CMD /usr/bin/protokube
@@ -26,6 +26,9 @@ func main() {
 	clusterID := ""
 	flag.StringVar(&clusterID, "cluster-id", clusterID, "Cluster ID")
 
+	flagChannels := ""
+	flag.StringVar(&flagChannels, "channels", flagChannels, "channels to install")
+
 	flag.Set("logtostderr", "true")
 	flag.Parse()
@@ -85,6 +88,11 @@ func main() {
 	modelDir := "model/etcd"
 
+	var channels []string
+	if flagChannels != "" {
+		channels = strings.Split(flagChannels, ",")
+	}
+
 	k := &protokube.KubeBoot{
 		Master:            master,
 		InternalDNSSuffix: dnsInternalSuffix,
@@ -94,6 +102,10 @@ func main() {
 		ModelDir: modelDir,
 		DNS:      dns,
 
+		Channels: channels,
+
 		Kubernetes: protokube.NewKubernetesContext(),
 	}
 	k.Init(volumes)
@@ -0,0 +1,109 @@
+package protokube
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"io/ioutil"
+	"k8s.io/kops/channels/pkg/channels"
+	"k8s.io/kops/util/pkg/vfs"
+	"net/url"
+	"os"
+	"os/exec"
+	"path"
+	"strings"
+)
+
+func execChannels(args ...string) (string, error) {
+	kubectlPath := "channels" // Assume in PATH
+	cmd := exec.Command(kubectlPath, args...)
+	env := os.Environ()
+	cmd.Env = env
+
+	human := strings.Join(cmd.Args, " ")
+	glog.V(2).Infof("Running command: %s", human)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		glog.Infof("error running %s:", human)
+		glog.Info(string(output))
+		return string(output), fmt.Errorf("error running channels: %v", err)
+	}
+
+	return string(output), err
+}
+
+func ApplyChannel(channel string) error {
+	// We don't embed the channels code because we expect this will eventually be part of kubectl
+	glog.Infof("checking channel: %q", channel)
+
+	// We copy the channel to a temp file because it is likely e.g. an s3 URL, which kubectl can't read
+	location, err := url.Parse(channel)
+	if err != nil {
+		return fmt.Errorf("error parsing channel location: %v", err)
+	}
+	data, err := vfs.Context.ReadFile(channel)
+	if err != nil {
+		return fmt.Errorf("error reading channel: %v", err)
+	}
+
+	addons, err := channels.ParseAddons(location, data)
+	if err != nil {
+		return fmt.Errorf("error parsing adddons: %v", err)
+	}
+	all, err := addons.All()
+	if err != nil {
+		return fmt.Errorf("error processing adddons: %v", err)
+	}
+
+	tmpDir, err := ioutil.TempDir("", "channel")
+	if err != nil {
+		return fmt.Errorf("error creating temp dir: %v", err)
+	}
+
+	defer func() {
+		if err := os.RemoveAll(tmpDir); err != nil {
+			glog.Warningf("error deleting temp dir: %v", err)
+		}
+	}()
+
+	localChannelFile := path.Join(tmpDir, "channel.yaml")
+	if err := ioutil.WriteFile(localChannelFile, data, 0600); err != nil {
+		return fmt.Errorf("error writing temp file: %v", err)
+	}
+
+	for _, addon := range all {
+		if addon.Spec.Manifest == nil {
+			continue
+		}
+		manifest := *addon.Spec.Manifest
+		manifestURL, err := url.Parse(manifest)
+		if err != nil {
+			return fmt.Errorf("error parsing manifest location: %v", manifest)
+		}
+		if manifestURL.IsAbs() {
+			// Hopefully http or https!
+			continue
+		}
+
+		dest := path.Join(tmpDir, manifest)
+		src := location.ResolveReference(manifestURL)
+
+		b, err := vfs.Context.ReadFile(src.String())
+		if err != nil {
+			return fmt.Errorf("error reading source manifest %q: %v", src, err)
+		}
+
+		parent := path.Dir(dest)
+		if err := os.MkdirAll(parent, 0700); err != nil {
+			return fmt.Errorf("error creating directories %q: %v", parent, err)
+		}
+
+		if err := ioutil.WriteFile(dest, b, 0600); err != nil {
+			return fmt.Errorf("error copying channel to temp file: %v", err)
+		}
+	}
+
+	out, err := execChannels("apply", "channel", localChannelFile, "--v=4", "--yes")
+	glog.V(4).Infof("apply channel output was: %v", out)
+	return err
+}
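ApplyChannel above mirrors the channel file and any relative manifests into a local temp directory, resolving each manifest path against the channel URL before handing the local copy to the channels CLI. A minimal standalone sketch (not part of this commit) of that resolution step; the bucket path and manifest name below are hypothetical:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical channel location; any VFS-style URL behaves the same way.
	base, _ := url.Parse("s3://example-bucket/kops/addons/stable")
	// A relative manifest path, as it might appear in an addons list.
	ref, _ := url.Parse("monitoring/v1.3.0.yaml")

	// ResolveReference replaces the last path element of the base URL,
	// which is why ApplyChannel can mirror the same relative layout on disk.
	fmt.Println(base.ResolveReference(ref))
	// Output: s3://example-bucket/kops/addons/monitoring/v1.3.0.yaml
}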
@@ -19,6 +19,10 @@ type KubeBoot struct {
 	DNS DNSProvider
 
 	ModelDir string
 
+	Channels []string
+
 	Kubernetes *KubernetesContext
 }
 
 func (k *KubeBoot) Init(volumesProvider Volumes) {
@@ -87,5 +91,19 @@ func (k *KubeBoot) syncOnce() error {
 		// TODO: Should we set up symlinks here?
 	}
 
+	if k.Master {
+		err := ApplyMasterTaints(k.Kubernetes)
+		if err != nil {
+			glog.Warningf("error updating master taints: %v", err)
+		}
+	}
+
+	for _, channel := range k.Channels {
+		err := ApplyChannel(channel)
+		if err != nil {
+			glog.Warningf("error applying channel %q: %v", channel, err)
+		}
+	}
+
 	return nil
 }
@@ -0,0 +1,48 @@
+package protokube
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
+	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
+	"sync"
+)
+
+type KubernetesContext struct {
+	mutex  sync.Mutex
+	client *release_1_3.Clientset
+}
+
+func NewKubernetesContext() *KubernetesContext {
+	return &KubernetesContext{}
+}
+
+func (c *KubernetesContext) KubernetesClient() (*release_1_3.Clientset, error) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if c.client == nil {
+		config := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+			clientcmd.NewDefaultClientConfigLoadingRules(),
+			&clientcmd.ConfigOverrides{})
+		clientConfig, err := config.ClientConfig()
+		if err != nil {
+			if clientcmd.IsEmptyConfig(err) {
+				glog.V(2).Infof("No client config found; will use default config")
+				clientConfig, err = clientcmd.DefaultClientConfig.ClientConfig()
+				if err != nil {
+					return nil, fmt.Errorf("cannot build default kube config settings: %v", err)
+				}
+			} else {
+				return nil, fmt.Errorf("cannot load kubecfg settings: %v", err)
+			}
+		}
+
+		k8sClient, err := release_1_3.NewForConfig(clientConfig)
+		if err != nil {
+			return nil, fmt.Errorf("cannot build kube client: %v", err)
+		}
+		c.client = k8sClient
+	}
+	return c.client, nil
+}
@@ -0,0 +1,85 @@
+package protokube
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/labels"
+)
+
+type nodePatch struct {
+	Metadata *nodePatchMetadata `json:"metadata,omitempty"`
+	Spec     *nodePatchSpec     `json:"spec,omitempty"`
+}
+
+type nodePatchMetadata struct {
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+type nodePatchSpec struct {
+	Unschedulable *bool `json:"unschedulable,omitempty"`
+}
+
+// ApplyMasterTaints finds masters that have not yet been tainted, and applies the master taint
+// Once the kubelet support --taints (like --labels) this can probably go away entirely.
+// It also sets the unschedulable flag to false, so pods (with a toleration) can target the node
+func ApplyMasterTaints(kubeContext *KubernetesContext) error {
+	client, err := kubeContext.KubernetesClient()
+	if err != nil {
+		return err
+	}
+
+	options := api.ListOptions{
+		LabelSelector: labels.SelectorFromSet(labels.Set{"kubernetes.io/role": "master"}),
+	}
+	glog.V(2).Infof("Querying k8s for nodes with selector %q", options.LabelSelector)
+	nodes, err := client.Core().Nodes().List(options)
+	if err != nil {
+		return fmt.Errorf("error querying nodes: %v", err)
+	}
+
+	taint := []api.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}}
+	taintJSON, err := json.Marshal(taint)
+	if err != nil {
+		return fmt.Errorf("error serializing taint: %v", err)
+	}
+
+	for i := range nodes.Items {
+		node := &nodes.Items[i]
+
+		nodeTaintJSON := node.Annotations[api.TaintsAnnotationKey]
+		if nodeTaintJSON != "" {
+			if nodeTaintJSON != string(taintJSON) {
+				glog.Infof("Node %q had unexpected taint: %v", node.Name, nodeTaintJSON)
+			}
+			continue
+		}
+
+		nodePatchMetadata := &nodePatchMetadata{
+			Annotations: map[string]string{api.TaintsAnnotationKey: string(taintJSON)},
+		}
+		unschedulable := false
+		nodePatchSpec := &nodePatchSpec{
+			Unschedulable: &unschedulable,
+		}
+		nodePatch := &nodePatch{
+			Metadata: nodePatchMetadata,
+			Spec:     nodePatchSpec,
+		}
+		nodePatchJson, err := json.Marshal(nodePatch)
+		if err != nil {
+			return fmt.Errorf("error building node patch: %v", err)
+		}
+
+		glog.V(2).Infof("sending patch for node %q: %q", node.Name, string(nodePatchJson))
+
+		_, err = client.Nodes().Patch(node.Name, api.StrategicMergePatchType, nodePatchJson)
+		if err != nil {
+			// TODO: Should we keep going?
+			return fmt.Errorf("error applying patch to node: %v", err)
+		}
+	}
+
+	return nil
+}
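ApplyMasterTaints sends one strategic merge patch per untainted master. A standalone sketch (not part of this commit) of the payload it builds, with the unexported patch structs restated locally; it assumes api.TaintsAnnotationKey resolves to "scheduler.alpha.kubernetes.io/taints" in the 1.3-era API:

package main

import (
	"encoding/json"
	"fmt"
)

// Local restatements of the patch structs above, only to show the wire format.
type nodePatchMetadata struct {
	Annotations map[string]string `json:"annotations,omitempty"`
}

type nodePatchSpec struct {
	Unschedulable *bool `json:"unschedulable,omitempty"`
}

type nodePatch struct {
	Metadata *nodePatchMetadata `json:"metadata,omitempty"`
	Spec     *nodePatchSpec     `json:"spec,omitempty"`
}

func main() {
	// Serialized form of []api.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}}.
	taintJSON := `[{"key":"dedicated","value":"master","effect":"NoSchedule"}]`
	unschedulable := false

	patch := &nodePatch{
		// Assumption: api.TaintsAnnotationKey == "scheduler.alpha.kubernetes.io/taints".
		Metadata: &nodePatchMetadata{Annotations: map[string]string{
			"scheduler.alpha.kubernetes.io/taints": taintJSON,
		}},
		Spec: &nodePatchSpec{Unschedulable: &unschedulable},
	}

	b, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	// Prints the patch body: the taint list is embedded as an escaped JSON
	// string inside the annotation, and spec.unschedulable is set to false.
	fmt.Println(string(b))
}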
@@ -1,5 +1 @@
-{{ if HasTag "_kubernetes_master" }}
-DAEMON_ARGS="--dns-zone-name={{ .DNSZone }} --master=true --containerized --v=8"
-{{ else }}
-DAEMON_ARGS="--dns-zone-name={{ .DNSZone }} --master=false --containerized --v=8"
-{{ end }}
+DAEMON_ARGS="{{ BuildFlags ProtokubeFlags }}"
@@ -6,7 +6,7 @@ After=docker.service
 [Service]
 EnvironmentFile=/etc/sysconfig/protokube
 ExecStartPre=/usr/bin/docker pull {{ ProtokubeImage }}
-ExecStart=/usr/bin/docker run -v /:/rootfs/ --privileged {{ ProtokubeImage }} /usr/bin/protokube "$DAEMON_ARGS"
+ExecStart=/usr/bin/docker run -v /:/rootfs/ --net=host --privileged {{ ProtokubeImage }} /usr/bin/protokube "$DAEMON_ARGS"
 Restart=always
 RestartSec=2s
 StartLimitInterval=0
@@ -30,7 +30,10 @@ type NodeUpConfig struct {
 	ClusterName string `json:",omitempty"`
 
 	// ProtokubeImage is the docker image to load for protokube (bootstrapping)
-	ProtokubeImage *Image `json:"protokubeImage"`
+	ProtokubeImage *Image `json:"protokubeImage,omitempty"`
+
+	// Channels is a list of channels that we should apply
+	Channels []string `json:"channels,omitempty"`
 }
 
 // Image is a docker image we should pre-load
@@ -0,0 +1,10 @@
+package nodeup
+
+type ProtokubeFlags struct {
+	DNSZoneName   *string `json:"dnsZoneName,omitempty" flag:"dns-zone-name"`
+	Master        *bool   `json:"master,omitempty" flag:"master"`
+	Containerized *bool   `json:"containerized,omitempty" flag:"containerized"`
+	LogLevel      *int    `json:"logLevel,omitempty" flag:"v"`
+
+	Channels []string `json:"channels,omitempty" flag:"channels"`
+}
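The flag tags above drive the rendered DAEMON_ARGS line in the sysconfig template earlier in this diff. Below is a rough, self-contained sketch of how such tags can be turned into a flag string; the buildFlags helper and all example values (DNS zone, channel URL) are illustrative stand-ins, not the kops BuildFlags implementation, which may differ in ordering and slice handling.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// ProtokubeFlags restated locally so the sketch is self-contained.
type ProtokubeFlags struct {
	DNSZoneName   *string  `json:"dnsZoneName,omitempty" flag:"dns-zone-name"`
	Master        *bool    `json:"master,omitempty" flag:"master"`
	Containerized *bool    `json:"containerized,omitempty" flag:"containerized"`
	LogLevel      *int     `json:"logLevel,omitempty" flag:"v"`
	Channels      []string `json:"channels,omitempty" flag:"channels"`
}

// buildFlags renders non-empty fields as --name=value arguments using the
// `flag` struct tags; an illustrative stand-in for the real template function.
func buildFlags(options interface{}) string {
	var args []string
	v := reflect.ValueOf(options)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		name := t.Field(i).Tag.Get("flag")
		if name == "" {
			continue
		}
		f := v.Field(i)
		switch f.Kind() {
		case reflect.Ptr:
			if !f.IsNil() {
				args = append(args, fmt.Sprintf("--%s=%v", name, f.Elem().Interface()))
			}
		case reflect.Slice:
			if f.Len() > 0 {
				var elems []string
				for j := 0; j < f.Len(); j++ {
					elems = append(elems, fmt.Sprintf("%v", f.Index(j).Interface()))
				}
				args = append(args, fmt.Sprintf("--%s=%s", name, strings.Join(elems, ",")))
			}
		}
	}
	return strings.Join(args, " ")
}

func main() {
	master := true
	containerized := true
	logLevel := 8
	zone := "example.com" // hypothetical DNS zone
	f := &ProtokubeFlags{
		DNSZoneName:   &zone,
		Master:        &master,
		Containerized: &containerized,
		LogLevel:      &logLevel,
		// Hypothetical channel location, matching the --channels flag wired up in main.go above.
		Channels: []string{"s3://example-bucket/cluster/addons/bootstrap-channel.yaml"},
	}
	fmt.Println(buildFlags(f))
	// --dns-zone-name=example.com --master=true --containerized=true --v=8 --channels=s3://example-bucket/cluster/addons/bootstrap-channel.yaml
}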
@@ -12,7 +12,7 @@ import (
 
 const TagMaster = "_kubernetes_master"
 
-const DefaultProtokubeImage = "kope/protokube:1.3"
+const DefaultProtokubeImage = "kope/protokube:1.4"
 
 // templateFunctions is a simple helper-class for the functions accessible to templates
 type templateFunctions struct {
@@ -131,6 +131,8 @@ func (t *templateFunctions) populate(dest template.FuncMap) {
 	}
 
 	dest["ProtokubeImage"] = t.ProtokubeImage
+
+	dest["ProtokubeFlags"] = t.ProtokubeFlags
 }
 
 // IsMaster returns true if we are tagged as a master
@@ -221,6 +223,26 @@ func (t *templateFunctions) ProtokubeImage() string {
 	return image
 }
 
+// ProtokubeFlags returns the flags object for protokube
+func (t *templateFunctions) ProtokubeFlags() *ProtokubeFlags {
+	f := &ProtokubeFlags{}
+
+	master := t.IsMaster()
+
+	f.Master = fi.Bool(master)
+	if master {
+		f.Channels = t.nodeupConfig.Channels
+	}
+
+	f.LogLevel = fi.Int(8)
+	f.Containerized = fi.Bool(true)
+	if t.cluster.Spec.DNSZone != "" {
+		f.DNSZoneName = fi.String(t.cluster.Spec.DNSZone)
+	}
+
+	return f
+}
+
 // KubeProxyConfig builds the KubeProxyConfig configuration object
 func (t *templateFunctions) KubeProxyConfig() *api.KubeProxyConfig {
 	config := &api.KubeProxyConfig{}