mirror of https://github.com/kubernetes/kops.git

Merge pull request #2104 from justinsb/container_optimized_os

Initial Container-Optimized OS support

commit bdf0d04b0a

Makefile
@@ -183,7 +183,8 @@ push-aws-dry: push
	ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --dryrun --v=8

push-gce-run: push
-	ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=metadata://gce/config --v=8
+	ssh ${TARGET} sudo cp /tmp/nodeup /home/kubernetes/bin/nodeup
+	ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /home/kubernetes/bin/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8

# -t is for CentOS http://unix.stackexchange.com/questions/122616/why-do-i-need-a-tty-to-run-sudo-if-i-can-sudo-without-a-password
push-aws-run: push
@@ -7,6 +7,8 @@ spec:
  - name: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
    providerID: aws
    kubernetesVersion: ">=1.5.0"
+ - providerID: gce
+   name: "cos-cloud/cos-stable-56-9000-84-2"
  cluster:
    kubernetesVersion: v1.5.4
    networking:
@@ -28,6 +28,7 @@ k8s.io/kops/pkg/apis/kops/util
k8s.io/kops/pkg/apis/kops/v1alpha1
k8s.io/kops/pkg/apis/kops/v1alpha2
k8s.io/kops/pkg/apis/kops/validation
+k8s.io/kops/pkg/apis/nodeup
k8s.io/kops/pkg/client/simple
k8s.io/kops/pkg/client/simple/vfsclientset
k8s.io/kops/pkg/diff
@@ -24,11 +24,12 @@ import (
type Distribution string

var (
-	DistributionJessie  Distribution = "jessie"
-	DistributionXenial  Distribution = "xenial"
-	DistributionRhel7   Distribution = "rhel7"
-	DistributionCentos7 Distribution = "centos7"
-	DistributionCoreOS  Distribution = "coreos"
+	DistributionJessie      Distribution = "jessie"
+	DistributionXenial      Distribution = "xenial"
+	DistributionRhel7       Distribution = "rhel7"
+	DistributionCentos7     Distribution = "centos7"
+	DistributionCoreOS      Distribution = "coreos"
+	DistributionContainerOS Distribution = "containeros"
)

func (d Distribution) BuildTags() []string {
@@ -45,6 +46,8 @@ func (d Distribution) BuildTags() []string {
		t = []string{"_rhel7"}
	case DistributionCoreOS:
		t = []string{"_coreos"}
+	case DistributionContainerOS:
+		t = []string{"_containeros"}
	default:
		glog.Fatalf("unknown distribution: %s", d)
		return nil
@@ -67,7 +70,7 @@ func (d Distribution) IsDebianFamily() bool {
	switch d {
	case DistributionJessie, DistributionXenial:
		return true
-	case DistributionCentos7, DistributionRhel7, DistributionCoreOS:
+	case DistributionCentos7, DistributionRhel7, DistributionCoreOS, DistributionContainerOS:
		return false
	default:
		glog.Fatalf("unknown distribution: %s", d)
@@ -79,7 +82,7 @@ func (d Distribution) IsRHELFamily() bool {
	switch d {
	case DistributionCentos7, DistributionRhel7:
		return true
-	case DistributionJessie, DistributionXenial, DistributionCoreOS:
+	case DistributionJessie, DistributionXenial, DistributionCoreOS, DistributionContainerOS:
		return false
	default:
		glog.Fatalf("unknown distribution: %s", d)
@@ -95,6 +98,8 @@ func (d Distribution) IsSystemd() bool {
		return true
	case DistributionCoreOS:
		return true
+	case DistributionContainerOS:
+		return true
	default:
		glog.Fatalf("unknown distribution: %s", d)
		return false
@@ -87,5 +87,21 @@ func FindDistribution(rootfs string) (Distribution, error) {
		glog.Warningf("error reading /usr/lib/os-release: %v", err)
	}

+	// ContainerOS uses /etc/os-release
+	{
+		osRelease, err := ioutil.ReadFile(path.Join(rootfs, "etc/os-release"))
+		if err == nil {
+			for _, line := range strings.Split(string(osRelease), "\n") {
+				line = strings.TrimSpace(line)
+				if line == "ID=cos" {
+					return DistributionContainerOS, nil
+				}
+			}
+			glog.Warningf("unhandled /etc/os-release info %q", string(osRelease))
+		} else if !os.IsNotExist(err) {
+			glog.Warningf("error reading /etc/os-release: %v", err)
+		}
+	}

	return "", fmt.Errorf("cannot identify distro")
}
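The detection above keys solely on the ID field of /etc/os-release; the other fields are not relied on. For illustration only (values vary by image release, and only ID=cos is assumed by the code), a Container-Optimized OS os-release contains lines such as:

    NAME="Container-Optimized OS"
    ID=cos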
@@ -19,10 +19,13 @@ package model

import (
	"k8s.io/kops/nodeup/pkg/distros"
	"k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/apis/nodeup"
	"k8s.io/kops/upup/pkg/fi"
)

type NodeupModelContext struct {
+	NodeupConfig *nodeup.NodeUpConfig
+
	Cluster       *kops.Cluster
	InstanceGroup *kops.InstanceGroup
	Architecture  Architecture
@@ -46,9 +49,39 @@ func (c *NodeupModelContext) SSLHostPaths() []string {

		paths = append(paths, "/usr/share/ca-certificates")

+	case distros.DistributionContainerOS:
+		paths = append(paths, "/usr/share/ca-certificates")
+
	default:
		paths = append(paths, "/usr/share/ssl", "/usr/ssl", "/usr/lib/ssl", "/usr/local/openssl", "/var/ssl", "/etc/openssl")
	}

	return paths
}
+
+func (c *NodeupModelContext) PathSrvKubernetes() string {
+	switch c.Distribution {
+	case distros.DistributionContainerOS:
+		return "/etc/srv/kubernetes"
+	default:
+		return "/srv/kubernetes"
+	}
+}
+
+func (c *NodeupModelContext) PathSrvSshproxy() string {
+	switch c.Distribution {
+	case distros.DistributionContainerOS:
+		return "/etc/srv/sshproxy"
+	default:
+		return "/srv/sshproxy"
+	}
+}
+
+func (c *NodeupModelContext) NetworkPluginDir() string {
+	switch c.Distribution {
+	case distros.DistributionContainerOS:
+		return "/home/kubernetes/bin/"
+	default:
+		return "/opt/cni/bin/"
+	}
+}
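These helpers let the rest of the nodeup model stay distribution-agnostic about where Kubernetes state and CNI binaries live. A minimal sketch of how a builder consumes them (the wrapper below is illustrative and not part of the change; SecretBuilder later in this diff does the equivalent inline):

    // caPath is a hypothetical convenience wrapper, shown only to illustrate usage.
    func (b *SecretBuilder) caPath() string {
    	// Resolves to /etc/srv/kubernetes/ca.crt on Container-Optimized OS,
    	// and to /srv/kubernetes/ca.crt on other distributions.
    	return filepath.Join(b.PathSrvKubernetes(), "ca.crt")
    }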
@@ -0,0 +1,50 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package model

import (
	"k8s.io/kops/nodeup/pkg/distros"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)

// DirectoryBuilder creates required directories
type DirectoryBuilder struct {
	*NodeupModelContext
}

var _ fi.ModelBuilder = &DirectoryBuilder{}

func (b *DirectoryBuilder) Build(c *fi.ModelBuilderContext) error {
	if b.Distribution == distros.DistributionContainerOS {
		dir := "/home/kubernetes/bin"

		t := &nodetasks.File{
			Path: dir,
			Type: nodetasks.FileType_Directory,
			Mode: s("0755"),

			OnChangeExecute: [][]string{
				{"/bin/mount", "--bind", "/home/kubernetes/bin", "/home/kubernetes/bin"},
				{"/bin/mount", "-o", "remount,exec", "/home/kubernetes/bin"},
			},
		}
		c.AddTask(t)
	}

	return nil
}
@@ -227,9 +227,14 @@ func (d *dockerVersion) matches(arch Architecture, dockerVersion string, distro
}

func (b *DockerBuilder) Build(c *fi.ModelBuilderContext) error {
-	if b.Distribution == distros.DistributionCoreOS {
+	switch b.Distribution {
+	case distros.DistributionCoreOS:
		glog.Infof("Detected CoreOS; won't install Docker")
		return nil
+
+	case distros.DistributionContainerOS:
+		glog.Infof("Detected ContainerOS; won't install Docker")
+		return nil
	}

	// Add Apache2 license
@@ -35,9 +35,14 @@ func (b *EtcdBuilder) Build(c *fi.ModelBuilderContext) error {
		return nil
	}

-	if b.Distribution == distros.DistributionCoreOS {
+	switch b.Distribution {
+	case distros.DistributionCoreOS:
		glog.Infof("Detected CoreOS; skipping etcd user installation")
		return nil
+
+	case distros.DistributionContainerOS:
+		glog.Infof("Detected ContainerOS; skipping etcd user installation")
+		return nil
	}

	// TODO: Do we actually use the user anywhere?
@@ -0,0 +1,100 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); full header as in directories.go above.
*/

package model

import (
	"github.com/golang/glog"
	"k8s.io/kops/nodeup/pkg/distros"
	"k8s.io/kops/pkg/systemd"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)

// FirewallBuilder configures the firewall (iptables)
type FirewallBuilder struct {
	*NodeupModelContext
}

var _ fi.ModelBuilder = &FirewallBuilder{}

func (b *FirewallBuilder) Build(c *fi.ModelBuilderContext) error {
	if b.Distribution == distros.DistributionContainerOS {
		c.AddTask(b.buildFirewallScript())

		c.AddTask(b.buildSystemdService())
	}

	return nil
}

func (b *FirewallBuilder) buildSystemdService() *nodetasks.Service {
	manifest := &systemd.Manifest{}
	manifest.Set("Unit", "Description", "Configure iptables for kubernetes")
	manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")
	manifest.Set("Unit", "Before", "network.target")

	manifest.Set("Service", "Type", "oneshot")
	manifest.Set("Service", "RemainAfterExit", "yes")
	manifest.Set("Service", "ExecStart", "/home/kubernetes/bin/iptables-setup")

	manifest.Set("Install", "WantedBy", "basic.target")

	manifestString := manifest.Render()
	glog.V(8).Infof("Built service manifest %q\n%s", "kubernetes-iptables-setup", manifestString)

	service := &nodetasks.Service{
		Name:       "kubernetes-iptables-setup.service",
		Definition: s(manifestString),
	}

	service.InitDefaults()

	return service
}

func (b *FirewallBuilder) buildFirewallScript() *nodetasks.File {
	// TODO: Do we want to rely on running nodeup on every boot, or do we want to install systemd units?

	// TODO: The if statement in the script doesn't make it idempotent

	// This is borrowed from gce/gci/configure-helper.sh
	script := `#!/bin/bash
# Built by kops - do not edit

# The GCI image has host firewall which drop most inbound/forwarded packets.
# We need to add rules to accept all TCP/UDP/ICMP packets.
if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
  echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
  iptables -A INPUT -w -p TCP -j ACCEPT
  iptables -A INPUT -w -p UDP -j ACCEPT
  iptables -A INPUT -w -p ICMP -j ACCEPT
fi
if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
  echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
  iptables -A FORWARD -w -p TCP -j ACCEPT
  iptables -A FORWARD -w -p UDP -j ACCEPT
  iptables -A FORWARD -w -p ICMP -j ACCEPT
fi
`
	t := &nodetasks.File{
		Path:     "/home/kubernetes/bin/iptables-setup",
		Contents: fi.NewStringResource(script),
		Type:     nodetasks.FileType_File,
		Mode:     s("0755"),
	}
	return t
}
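For reference, the manifest assembled in buildSystemdService renders to roughly the following unit (reconstructed from the Set calls above; treat it as an approximation rather than the literal generated file):

    [Unit]
    Description=Configure iptables for kubernetes
    Documentation=https://github.com/kubernetes/kops
    Before=network.target

    [Service]
    Type=oneshot
    RemainAfterExit=yes
    ExecStart=/home/kubernetes/bin/iptables-setup

    [Install]
    WantedBy=basic.target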
@@ -18,6 +18,7 @@ package model

import (
	"fmt"
+	"path/filepath"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
@@ -64,6 +65,15 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
}

func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
+	kubeAPIServer := b.Cluster.Spec.KubeAPIServer
+
+	kubeAPIServer.ClientCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
+	kubeAPIServer.TLSCertFile = filepath.Join(b.PathSrvKubernetes(), "server.cert")
+	kubeAPIServer.TLSPrivateKeyFile = filepath.Join(b.PathSrvKubernetes(), "server.key")
+
+	kubeAPIServer.BasicAuthFile = filepath.Join(b.PathSrvKubernetes(), "basic_auth.csv")
+	kubeAPIServer.TokenAuthFile = filepath.Join(b.PathSrvKubernetes(), "known_tokens.csv")
+
	flags, err := flagbuilder.BuildFlags(b.Cluster.Spec.KubeAPIServer)
	if err != nil {
		return nil, fmt.Errorf("error building kube-apiserver flags: %v", err)
@@ -141,12 +151,14 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
		addHostPathMapping(pod, container, "cloudconfig", CloudConfigFilePath, true)
	}

-	if b.Cluster.Spec.KubeAPIServer.PathSrvKubernetes != "" {
-		addHostPathMapping(pod, container, "srvkube", b.Cluster.Spec.KubeAPIServer.PathSrvKubernetes, true)
+	pathSrvKubernetes := b.PathSrvKubernetes()
+	if pathSrvKubernetes != "" {
+		addHostPathMapping(pod, container, "srvkube", pathSrvKubernetes, true)
	}

-	if b.Cluster.Spec.KubeAPIServer.PathSrvSshproxy != "" {
-		addHostPathMapping(pod, container, "srvsshproxy", b.Cluster.Spec.KubeAPIServer.PathSrvSshproxy, false)
+	pathSrvSshproxy := b.PathSrvSshproxy()
+	if pathSrvSshproxy != "" {
+		addHostPathMapping(pod, container, "srvsshproxy", pathSrvSshproxy, false)
	}

	addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log", false)
@@ -18,6 +18,9 @@ package model

import (
	"fmt"
+	"path/filepath"
+	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
@@ -25,7 +28,6 @@ import (
	"k8s.io/kops/pkg/flagbuilder"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
-	"strings"
)

// KubeControllerManagerBuilder install kube-controller-manager (just the manifest at the moment)
@@ -63,7 +65,12 @@ func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
}

func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
-	flags, err := flagbuilder.BuildFlags(b.Cluster.Spec.KubeControllerManager)
+	kcm := b.Cluster.Spec.KubeControllerManager
+
+	kcm.RootCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
+	kcm.ServiceAccountPrivateKeyFile = filepath.Join(b.PathSrvKubernetes(), "server.key")
+
+	flags, err := flagbuilder.BuildFlags(kcm)
	if err != nil {
		return nil, fmt.Errorf("error building kube-controller-manager flags: %v", err)
	}
@@ -127,8 +134,9 @@ func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
		addHostPathMapping(pod, container, "cloudconfig", CloudConfigFilePath, true)
	}

-	if b.Cluster.Spec.KubeControllerManager.PathSrvKubernetes != "" {
-		addHostPathMapping(pod, container, "srvkube", b.Cluster.Spec.KubeControllerManager.PathSrvKubernetes, true)
+	pathSrvKubernetes := b.PathSrvKubernetes()
+	if pathSrvKubernetes != "" {
+		addHostPathMapping(pod, container, "srvkube", pathSrvKubernetes, true)
	}

	addHostPathMapping(pod, container, "logfile", "/var/log/kube-controller-manager.log", false)
@@ -66,5 +66,8 @@ func (b *KubectlBuilder) kubectlPath() string {
	if b.Distribution == distros.DistributionCoreOS {
		kubeletCommand = "/opt/bin/kubectl"
	}
+	if b.Distribution == distros.DistributionContainerOS {
+		kubeletCommand = "/home/kubernetes/bin/kubectl"
+	}
	return kubeletCommand
}
@@ -57,6 +57,8 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
		flags += " --cloud-config=" + CloudConfigFilePath
	}

+	flags += " --network-plugin-dir=" + b.NetworkPluginDir()
+
	sysconfig := "DAEMON_ARGS=\"" + flags + "\"\n"

	t := &nodetasks.File{
@@ -126,6 +128,9 @@ func (b *KubeletBuilder) kubeletPath() string {
	if b.Distribution == distros.DistributionCoreOS {
		kubeletCommand = "/opt/kubernetes/bin/kubelet"
	}
+	if b.Distribution == distros.DistributionContainerOS {
+		kubeletCommand = "/home/kubernetes/bin/kubelet"
+	}
	return kubeletCommand
}
@@ -138,7 +143,7 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
	manifest.Set("Unit", "After", "docker.service")

	if b.Distribution == distros.DistributionCoreOS {
-		// We add /opt/kubernetes/bin for our utilities
+		// We add /opt/kubernetes/bin for our utilities (socat)
		manifest.Set("Service", "Environment", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kubernetes/bin")
	}
@@ -150,7 +155,7 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
	manifest.Set("Service", "KillMode", "process")

	manifestString := manifest.Render()
-	glog.V(8).Infof("Built service manifest %q\n%s", "docker", manifestString)
+	glog.V(8).Infof("Built service manifest %q\n%s", "kubelet", manifestString)

	service := &nodetasks.Service{
		Name: "kubelet.service",
@@ -36,6 +36,11 @@ func (b *LogrotateBuilder) Build(c *fi.ModelBuilderContext) error {
		return nil
	}

+	if b.Distribution == distros.DistributionContainerOS {
+		glog.Infof("Detected ContainerOS; won't install logrotate")
+		return nil
+	}
+
	c.AddTask(&nodetasks.Package{Name: "logrotate"})

	return nil
@@ -0,0 +1,85 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); full header as in directories.go above.
*/

package model

import (
	"fmt"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
	"path/filepath"
)

// NetworkBuilder writes CNI assets
type NetworkBuilder struct {
	*NodeupModelContext
}

var _ fi.ModelBuilder = &NetworkBuilder{}

func (b *NetworkBuilder) Build(c *fi.ModelBuilderContext) error {
	var assetNames []string

	networking := b.Cluster.Spec.Networking
	if networking == nil || networking.Classic != nil {
	} else if networking.Kubenet != nil {
		assetNames = append(assetNames, "bridge", "host-local", "loopback")
	} else if networking.External != nil {
		// external is based on kubenet
		assetNames = append(assetNames, "bridge", "host-local", "loopback")
	} else if networking.CNI != nil || networking.Weave != nil || networking.Flannel != nil || networking.Calico != nil || networking.Canal != nil {
		assetNames = append(assetNames, "bridge", "host-local", "loopback", "ptp")
		// Do we need tuning?

		// TODO: Only when using flannel ?
		assetNames = append(assetNames, "flannel")
	} else if networking.Kopeio != nil {
		// TODO combine with External
		// Kopeio is based on kubenet / external
		assetNames = append(assetNames, "bridge", "host-local", "loopback")
	} else {
		return fmt.Errorf("No networking mode set")
	}

	for _, assetName := range assetNames {
		if err := b.addAsset(c, assetName); err != nil {
			return err
		}
	}

	return nil
}

func (b *NetworkBuilder) addAsset(c *fi.ModelBuilderContext, assetName string) error {
	assetPath := ""
	asset, err := b.Assets.Find(assetName, assetPath)
	if err != nil {
		return fmt.Errorf("error trying to locate asset %q: %v", assetName, err)
	}
	if asset == nil {
		return fmt.Errorf("unable to locate asset %q", assetName)
	}

	t := &nodetasks.File{
		Path:     filepath.Join(b.NetworkPluginDir(), assetName),
		Contents: asset,
		Type:     nodetasks.FileType_File,
		Mode:     s("0755"),
	}
	c.AddTask(t)

	return nil
}
@@ -0,0 +1,182 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); full header as in directories.go above.
*/

package model

import (
	"fmt"
	"github.com/blang/semver"
	"github.com/golang/glog"
	"k8s.io/kops"
	"k8s.io/kops/pkg/apis/kops/util"
	"k8s.io/kops/pkg/flagbuilder"
	"k8s.io/kops/pkg/systemd"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
	"strings"
)

// ProtokubeBuilder configures protokube
type ProtokubeBuilder struct {
	*NodeupModelContext
}

var _ fi.ModelBuilder = &ProtokubeBuilder{}

func (b *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
	// TODO: Should we run _protokube on the nodes?
	service, err := b.buildSystemdService()
	if err != nil {
		return err
	}
	c.AddTask(service)

	return nil
}

func (b *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
	k8sVersion, err := util.ParseKubernetesVersion(b.Cluster.Spec.KubernetesVersion)
	if err != nil || k8sVersion == nil {
		return nil, fmt.Errorf("unable to parse KubernetesVersion %q", b.Cluster.Spec.KubernetesVersion)
	}

	protokubeFlags := b.ProtokubeFlags(*k8sVersion)
	protokubeFlagsArgs, err := flagbuilder.BuildFlags(protokubeFlags)
	if err != nil {
		return nil, err
	}

	protokubeCommand := "/usr/bin/docker run -v /:/rootfs/ -v /var/run/dbus:/var/run/dbus -v /run/systemd:/run/systemd --net=host --privileged "
	protokubeCommand += b.ProtokubeImageName() + " /usr/bin/protokube "
	protokubeCommand += protokubeFlagsArgs

	manifest := &systemd.Manifest{}
	manifest.Set("Unit", "Description", "Kubernetes Protokube Service")
	manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")

	//manifest.Set("Service", "EnvironmentFile", "/etc/sysconfig/protokube")
	manifest.Set("Service", "ExecStartPre", b.ProtokubeImagePullCommand())
	manifest.Set("Service", "ExecStart", protokubeCommand)
	manifest.Set("Service", "Restart", "always")
	manifest.Set("Service", "RestartSec", "2s")
	manifest.Set("Service", "StartLimitInterval", "0")

	manifest.Set("Install", "WantedBy", "multi-user.target")

	manifestString := manifest.Render()
	glog.V(8).Infof("Built service manifest %q\n%s", "protokube", manifestString)

	service := &nodetasks.Service{
		Name:       "protokube.service",
		Definition: s(manifestString),
	}

	service.InitDefaults()

	return service, nil
}

// ProtokubeImageName returns the docker image for protokube
func (t *ProtokubeBuilder) ProtokubeImageName() string {
	name := ""
	if t.NodeupConfig.ProtokubeImage != nil && t.NodeupConfig.ProtokubeImage.Name != "" {
		name = t.NodeupConfig.ProtokubeImage.Name
	}
	if name == "" {
		// use current default corresponding to this version of nodeup
		name = kops.DefaultProtokubeImageName()
	}
	return name
}

// ProtokubeImagePullCommand returns the command to pull the image
func (t *ProtokubeBuilder) ProtokubeImagePullCommand() string {
	source := ""
	if t.NodeupConfig.ProtokubeImage != nil {
		source = t.NodeupConfig.ProtokubeImage.Source
	}
	if source == "" {
		// Nothing to pull; return dummy value
		return "/bin/true"
	}
	if strings.HasPrefix(source, "http:") || strings.HasPrefix(source, "https:") || strings.HasPrefix(source, "s3:") {
		// We preloaded the image; return a dummy value
		return "/bin/true"
	}
	return "/usr/bin/docker pull " + t.NodeupConfig.ProtokubeImage.Source
}

type ProtokubeFlags struct {
	Master        *bool  `json:"master,omitempty" flag:"master"`
	Containerized *bool  `json:"containerized,omitempty" flag:"containerized"`
	LogLevel      *int32 `json:"logLevel,omitempty" flag:"v"`

	DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`

	Zone []string `json:"zone,omitempty" flag:"zone"`

	Channels []string `json:"channels,omitempty" flag:"channels"`

	DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
	Cloud             *string `json:"cloud,omitempty" flag:"cloud"`
}

// ProtokubeFlags returns the flags object for protokube
func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeFlags {
	f := &ProtokubeFlags{}

	master := t.IsMaster

	f.Master = fi.Bool(master)
	if master {
		f.Channels = t.NodeupConfig.Channels
	}

	f.LogLevel = fi.Int32(4)
	f.Containerized = fi.Bool(true)

	zone := t.Cluster.Spec.DNSZone
	if zone != "" {
		if strings.Contains(zone, ".") {
			// match by name
			f.Zone = append(f.Zone, zone)
		} else {
			// match by id
			f.Zone = append(f.Zone, "*/"+zone)
		}
	} else {
		glog.Warningf("DNSZone not specified; protokube won't be able to update DNS")
		// TODO: Should we permit wildcard updates if zone is not specified?
		//argv = append(argv, "--zone=*/*")
	}

	if t.Cluster.Spec.CloudProvider != "" {
		f.Cloud = fi.String(t.Cluster.Spec.CloudProvider)

		switch fi.CloudProviderID(t.Cluster.Spec.CloudProvider) {
		case fi.CloudProviderAWS:
			f.DNSProvider = fi.String("aws-route53")
		case fi.CloudProviderGCE:
			f.DNSProvider = fi.String("google-clouddns")
		default:
			glog.Warningf("Unknown cloudprovider %q; won't set DNS provider", t.Cluster.Spec.CloudProvider)
		}
	}

	f.DNSInternalSuffix = fi.String(".internal." + t.Cluster.ObjectMeta.Name)

	return f
}
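To make the flag plumbing concrete, here is a sketch (not part of the change) of the flags a GCE master might end up with; the cluster name is hypothetical and the exact ordering of the rendered flags depends on flagbuilder:

    f := &ProtokubeFlags{
    	Master:            fi.Bool(true),
    	Containerized:     fi.Bool(true),
    	LogLevel:          fi.Int32(4),
    	Cloud:             fi.String("gce"),
    	DNSProvider:       fi.String("google-clouddns"),
    	DNSInternalSuffix: fi.String(".internal.example-cluster.k8s.local"),
    }
    // flagbuilder.BuildFlags(f) then yields roughly:
    //   --master=true --containerized=true --v=4 --cloud=gce --dns=google-clouddns --dns-internal-suffix=.internal.example-cluster.k8s.local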
@@ -0,0 +1,154 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); full header as in directories.go above.
*/

package model

import (
	"fmt"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
	"path/filepath"
	"strings"
)

// SecretBuilder writes secrets
type SecretBuilder struct {
	*NodeupModelContext
}

var _ fi.ModelBuilder = &SecretBuilder{}

func (b *SecretBuilder) Build(c *fi.ModelBuilderContext) error {
	if b.KeyStore == nil {
		return fmt.Errorf("KeyStore not set")
	}

	{
		ca, err := b.KeyStore.CertificatePool(fi.CertificateId_CA)
		if err != nil {
			return err
		}

		serialized, err := ca.AsString()
		if err != nil {
			return err
		}

		t := &nodetasks.File{
			Path:     filepath.Join(b.PathSrvKubernetes(), "ca.crt"),
			Contents: fi.NewStringResource(serialized),
			Type:     nodetasks.FileType_File,
		}
		c.AddTask(t)
	}

	{
		cert, err := b.KeyStore.Cert("master")
		if err != nil {
			return err
		}

		serialized, err := cert.AsString()
		if err != nil {
			return err
		}

		t := &nodetasks.File{
			Path:     filepath.Join(b.PathSrvKubernetes(), "server.cert"),
			Contents: fi.NewStringResource(serialized),
			Type:     nodetasks.FileType_File,
		}
		c.AddTask(t)
	}

	{
		k, err := b.KeyStore.PrivateKey("master")
		if err != nil {
			return err
		}

		serialized, err := k.AsString()
		if err != nil {
			return err
		}

		t := &nodetasks.File{
			Path:     filepath.Join(b.PathSrvKubernetes(), "server.key"),
			Contents: fi.NewStringResource(serialized),
			Type:     nodetasks.FileType_File,
		}
		c.AddTask(t)
	}

	if b.SecretStore != nil {
		key := "kube"
		token, err := b.SecretStore.FindSecret(key)
		if err != nil {
			return err
		}
		if token == nil {
			return fmt.Errorf("token not found: %q", key)
		}
		csv := string(token.Data) + ",admin,admin"

		t := &nodetasks.File{
			Path:     filepath.Join(b.PathSrvKubernetes(), "basic_auth.csv"),
			Contents: fi.NewStringResource(csv),
			Type:     nodetasks.FileType_File,
			Mode:     s("0600"),
		}
		c.AddTask(t)
	}

	if b.SecretStore != nil {
		allTokens, err := b.allTokens()
		if err != nil {
			return err
		}

		var lines []string
		for id, token := range allTokens {
			lines = append(lines, token+","+id+","+id)
		}
		csv := strings.Join(lines, "\n")

		t := &nodetasks.File{
			Path:     filepath.Join(b.PathSrvKubernetes(), "known_tokens.csv"),
			Contents: fi.NewStringResource(csv),
			Type:     nodetasks.FileType_File,
			Mode:     s("0600"),
		}
		c.AddTask(t)
	}

	return nil
}

// allTokens returns a map of all tokens
func (b *SecretBuilder) allTokens() (map[string]string, error) {
	tokens := make(map[string]string)
	ids, err := b.SecretStore.ListSecrets()
	if err != nil {
		return nil, err
	}
	for _, id := range ids {
		token, err := b.SecretStore.FindSecret(id)
		if err != nil {
			return nil, err
		}
		tokens[id] = string(token.Data)
	}
	return tokens, nil
}
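In effect SecretBuilder replaces the nodeup template files removed later in this diff. A summary sketch of what it writes, all rooted at PathSrvKubernetes() (so /etc/srv/kubernetes on Container-Optimized OS, /srv/kubernetes elsewhere):

    // Summary only, mirroring the Build method above.
    //   ca.crt           - CA certificate pool
    //   server.cert      - "master" certificate
    //   server.key       - "master" private key
    //   basic_auth.csv   - "<kube token>,admin,admin", mode 0600
    //   known_tokens.csv - one "<token>,<id>,<id>" line per secret, mode 0600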
@@ -123,7 +123,7 @@ func (b *SysctlBuilder) Build(c *fi.ModelBuilderContext) error {
		Path:     "/etc/sysctl.d/99-k8s-general.conf",
		Contents: fi.NewStringResource(strings.Join(sysctls, "\n")),
		Type:     nodetasks.FileType_File,
-		OnChangeExecute: []string{"sysctl", "--system"},
+		OnChangeExecute: [][]string{{"sysctl", "--system"}},
	}
	c.AddTask(t)
@@ -377,9 +377,12 @@ type KubeProxyConfig struct {
}

type KubeAPIServerConfig struct {
+	// TODO: Remove PathSrvKubernetes - unused
	PathSrvKubernetes string `json:"pathSrvKubernetes,omitempty"`
-	PathSrvSshproxy string `json:"pathSrvSshproxy,omitempty"`
-	Image string `json:"image,omitempty"`
+	// TODO: Remove PathSrvSshProxy - unused
+	PathSrvSshproxy string `json:"pathSrvSshproxy,omitempty"`
+
+	Image string `json:"image,omitempty"`

	LogLevel int32 `json:"logLevel,omitempty" flag:"v"`
@@ -390,13 +393,22 @@ type KubeAPIServerConfig struct {
	EtcdServersOverrides []string `json:"etcdServersOverrides,omitempty" flag:"etcd-servers-overrides"`
	AdmissionControl []string `json:"admissionControl,omitempty" flag:"admission-control"`
	ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty" flag:"service-cluster-ip-range"`
-	ClientCAFile string `json:"clientCAFile,omitempty" flag:"client-ca-file"`
-	BasicAuthFile string `json:"basicAuthFile,omitempty" flag:"basic-auth-file"`
-	TLSCertFile string `json:"tlsCertFile,omitempty" flag:"tls-cert-file"`
-	TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty" flag:"tls-private-key-file"`
-	TokenAuthFile string `json:"tokenAuthFile,omitempty" flag:"token-auth-file"`
-	AllowPrivileged *bool `json:"allowPrivileged,omitempty" flag:"allow-privileged"`
-	APIServerCount *int32 `json:"apiServerCount,omitempty" flag:"apiserver-count"`
+
+	// TODO: Remove unused BasicAuthFile
+	BasicAuthFile string `json:"basicAuthFile,omitempty" flag:"basic-auth-file"`
+
+	// TODO: Remove unused ClientCAFile
+	ClientCAFile string `json:"clientCAFile,omitempty" flag:"client-ca-file"`
+	// TODO: Remove unused TLSCertFile
+	TLSCertFile string `json:"tlsCertFile,omitempty" flag:"tls-cert-file"`
+	// TODO: Remove unused TLSPrivateKeyFile
+	TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty" flag:"tls-private-key-file"`
+
+	// TODO: Remove unused TokenAuthFile
+	TokenAuthFile string `json:"tokenAuthFile,omitempty" flag:"token-auth-file"`
+
+	AllowPrivileged *bool `json:"allowPrivileged,omitempty" flag:"allow-privileged"`
+	APIServerCount *int32 `json:"apiServerCount,omitempty" flag:"apiserver-count"`
	// keys and values in RuntimeConfig are parsed into the `--runtime-config` parameter
	// for KubeAPIServer, concatenated with commas. ex: `--runtime-config=key1=value1,key2=value2`.
	// Use this to enable alpha resources on kube-apiserver
@@ -445,10 +457,12 @@ type KubeControllerManagerConfig struct {
	Master string `json:"master,omitempty" flag:"master"`
	LogLevel int32 `json:"logLevel,omitempty" flag:"v" flag-empty:"0"`

+	// TODO: Remove as unused
	ServiceAccountPrivateKeyFile string `json:"serviceAccountPrivateKeyFile,omitempty" flag:"service-account-private-key-file"`

	Image string `json:"image,omitempty"`

+	// TODO: Remove PathSrvKubernetes - unused
	PathSrvKubernetes string `json:"pathSrvKubernetes,omitempty"`

	// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
@@ -563,9 +577,12 @@ type KubeControllerManagerConfig struct {
	// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
	// to be configured on the cloud provider.
	ConfigureCloudRoutes *bool `json:"configureCloudRoutes,omitempty" flag:"configure-cloud-routes"`

+	// TODO: Remove as unused
	// rootCAFile is the root certificate authority will be included in service
	// account's token secret. This must be a valid PEM-encoded CA bundle.
	RootCAFile string `json:"rootCAFile,omitempty" flag:"root-ca-file"`

	//// contentType is contentType of requests sent to apiserver.
	//ContentType string `json:"contentType"`
	//// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
@@ -18,17 +18,16 @@ package model

import (
	"k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/apis/nodeup"
	"k8s.io/kops/pkg/model/resources"
	"k8s.io/kops/upup/pkg/fi"
-	"k8s.io/kops/upup/pkg/fi/nodeup"
	"text/template"
)

// BootstrapScript creates the bootstrap script
type BootstrapScript struct {
-	NodeUpSource string
-	NodeUpSourceHash string
-
+	NodeUpSource        string
+	NodeUpSourceHash    string
+	NodeUpConfigBuilder func(ig *kops.InstanceGroup) (*nodeup.NodeUpConfig, error)
}
@@ -108,10 +108,6 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
		return fmt.Errorf("unknown cloud provider %q", clusterSpec.CloudProvider)
	}

-	kcm.PathSrvKubernetes = "/srv/kubernetes"
-	kcm.RootCAFile = "/srv/kubernetes/ca.crt"
-	kcm.ServiceAccountPrivateKeyFile = "/srv/kubernetes/server.key"
-
	kcm.Master = "127.0.0.1:8080"
	kcm.LogLevel = 2
@@ -40,6 +40,10 @@ NODEUP_HASH={{ NodeUpSourceHash }}

function ensure-install-dir() {
  INSTALL_DIR="/var/cache/kubernetes-install"
+  # On ContainerOS, we install to /var/lib/toolbox (because of noexec)
+  if [[ -d /var/lib/toolbox ]]; then
+    INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
+  fi
  mkdir -p ${INSTALL_DIR}
  cd ${INSTALL_DIR}
}
@@ -122,7 +126,7 @@ function download-release() {

  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
-  ( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8 )
+  ( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
}

####################################################################################
@@ -73,7 +73,7 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
	container.Env = append(container.Env, v1.EnvVar{Name: "ETCD_INITIAL_CLUSTER", Value: strings.Join(initialCluster, ",")})

	container.LivenessProbe = &v1.Probe{
-		InitialDelaySeconds: 600,
+		InitialDelaySeconds: 15,
		TimeoutSeconds:      15,
	}
	container.LivenessProbe.HTTPGet = &v1.HTTPGetAction{
@@ -58,6 +58,17 @@ func (k *VolumeMountController) mountMasterVolumes() ([]*Volume, error) {
		glog.V(2).Infof("Master volume %q is attached at %q", v.ID, v.LocalDevice)

		mountpoint := "/mnt/master-" + v.ID

+		// On ContainerOS, we mount to /mnt/disks instead (/mnt is readonly)
+		_, err := os.Stat(PathFor("/mnt/disks"))
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return nil, fmt.Errorf("error checking for /mnt/disks: %v", err)
+			}
+		} else {
+			mountpoint = "/mnt/disks/master-" + v.ID
+		}
+
		glog.Infof("Doing safe-format-and-mount of %s to %s", v.LocalDevice, mountpoint)
		fstype := ""
		err = k.safeFormatAndMount(v.LocalDevice, mountpoint, fstype)
@@ -37,7 +37,7 @@ spec:
        host: 127.0.0.1
        path: /health
        port: 4001
-      initialDelaySeconds: 600
+      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: etcd-container
    ports:
@@ -63,4 +63,4 @@ spec:
  - hostPath:
      path: /var/log/main.log
    name: varlogetcd
-status: {}
+status: {}

File diff suppressed because one or more lines are too long
@@ -1,18 +1,11 @@
KubeAPIServer:
  SecurePort: 443
-  PathSrvKubernetes: /srv/kubernetes
-  PathSrvSshproxy: /srv/sshproxy
  Address: 127.0.0.1
  EtcdServers:
  - http://127.0.0.1:4001
  EtcdServersOverrides:
  - /events#http://127.0.0.1:4002
  ServiceClusterIPRange: {{ .ServiceClusterIPRange }}
-  ClientCAFile: /srv/kubernetes/ca.crt
-  BasicAuthFile: /srv/kubernetes/basic_auth.csv
-  TLSCertFile: /srv/kubernetes/server.cert
-  TLSPrivateKeyFile: /srv/kubernetes/server.key
-  TokenAuthFile: /srv/kubernetes/known_tokens.csv
  LogLevel: 2
  AllowPrivileged: true
  Image: {{ Image "kube-apiserver" }}
@@ -1 +0,0 @@
{{ CACertificatePool.AsString }}

@@ -1 +0,0 @@
{{ (Certificate "master").AsString }}

@@ -1 +0,0 @@
{{ (PrivateKey "master").AsString }}
@@ -1,68 +0,0 @@
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name":"etcd-server-events",
    "namespace": "kube-system",
    "labels": {
      "k8s-app" : "etcd-server-events"
    }
  },
  "spec":{
    "hostNetwork": true,
    "containers":[
      {
        "name": "etcd-container",
        "image": "gcr.io/google_containers/etcd:2.2.1",
        "resources": {
          "requests": {
            "cpu": "100m"
          }
        },
        "command": [
          "/bin/sh",
          "-c",
          "/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2381 --addr 127.0.0.1:4002 --bind-addr 127.0.0.1:4002 --data-dir /var/etcd/data-events 1>>/var/log/etcd-events.log 2>&1"
        ],
        "livenessProbe": {
          "httpGet": {
            "host": "127.0.0.1",
            "port": 4002,
            "path": "/health"
          },
          "initialDelaySeconds": 15,
          "timeoutSeconds": 15
        },
        "ports":[
          { "name": "serverport",
            "containerPort": 2381,
            "hostPort": 2381
          },{
            "name": "clientport",
            "containerPort": 4002,
            "hostPort": 4002
          }
        ],
        "volumeMounts": [
          {"name": "varetcd",
           "mountPath": "/var/etcd",
           "readOnly": false
          },
          {"name": "varlogetcd",
           "mountPath": "/var/log/etcd-events.log",
           "readOnly": false
          }
        ]
      }
    ],
    "volumes":[
      { "name": "varetcd",
        "hostPath": {
          "path": "/mnt/master-pd/var/etcd"}
      },
      { "name": "varlogetcd",
        "hostPath": {
          "path": "/var/log/etcd-events.log"}
      }
    ]
}}
@@ -1,68 +0,0 @@
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name":"etcd-server",
    "namespace": "kube-system",
    "labels": {
      "k8s-app" : "etcd-server"
    }
  },
  "spec":{
    "hostNetwork": true,
    "containers":[
      {
        "name": "etcd-container",
        "image": "gcr.io/google_containers/etcd:2.2.1",
        "resources": {
          "requests": {
            "cpu": "200m"
          }
        },
        "command": [
          "/bin/sh",
          "-c",
          "/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2380 --addr 127.0.0.1:4001 --bind-addr 127.0.0.1:4001 --data-dir /var/etcd/data 1>>/var/log/etcd.log 2>&1"
        ],
        "livenessProbe": {
          "httpGet": {
            "host": "127.0.0.1",
            "port": 4001,
            "path": "/health"
          },
          "initialDelaySeconds": 15,
          "timeoutSeconds": 15
        },
        "ports":[
          { "name": "serverport",
            "containerPort": 2380,
            "hostPort": 2380
          },{
            "name": "clientport",
            "containerPort": 4001,
            "hostPort": 4001
          }
        ],
        "volumeMounts": [
          {"name": "varetcd",
           "mountPath": "/var/etcd",
           "readOnly": false
          },
          {"name": "varlogetcd",
           "mountPath": "/var/log/etcd.log",
           "readOnly": false
          }
        ]
      }
    ],
    "volumes":[
      { "name": "varetcd",
        "hostPath": {
          "path": "/mnt/master-pd/var/etcd"}
      },
      { "name": "varlogetcd",
        "hostPath": {
          "path": "/var/log/etcd.log"}
      }
    ]
}}
@@ -1,3 +0,0 @@
{
  "ifNotExists": true
}

@@ -1,3 +0,0 @@
{
  "ifNotExists": true
}

@@ -1 +0,0 @@
{{ GetToken "kube" }},admin,admin

@@ -1,3 +0,0 @@
{
  "mode": "0600"
}

@@ -1,3 +0,0 @@
{{ range $id, $token := AllTokens }}
{{ $token }},{{ $id }},{{ $id }}
{{ end }}

@@ -1,3 +0,0 @@
{
  "mode": "0600"
}

@@ -1 +0,0 @@
DAEMON_ARGS="{{ BuildFlags ProtokubeFlags }}"

@@ -1,15 +0,0 @@
[Unit]
Description=Kubernetes Protokube Service
Documentation=https://github.com/kubernetes/kube-deploy/protokube
After=docker.service

[Service]
EnvironmentFile=/etc/sysconfig/protokube
ExecStartPre={{ ProtokubeImagePullCommand }}
ExecStart=/usr/bin/docker run -v /:/rootfs/ -v /var/run/dbus:/var/run/dbus -v /run/systemd:/run/systemd --net=host --privileged {{ ProtokubeImageName }} /usr/bin/protokube "$DAEMON_ARGS"
Restart=always
RestartSec=2s
StartLimitInterval=0

[Install]
WantedBy=multi-user.target

@@ -1,2 +0,0 @@
{
}

@@ -1,3 +0,0 @@
{
  "mode": "0755"
}

@@ -1,2 +0,0 @@
{
}

@@ -1,3 +0,0 @@
{
  "mode": "0755"
}

@@ -1,2 +0,0 @@
{
}

@@ -1,3 +0,0 @@
{
  "mode": "0755"
}

@@ -1,2 +0,0 @@
{
}

@@ -1,3 +0,0 @@
{
  "mode": "0755"
}

@@ -1,2 +0,0 @@
{
}

@@ -1,3 +0,0 @@
{
  "mode": "0755"
}
@@ -30,6 +30,7 @@ import (
	"k8s.io/kops/pkg/apis/kops/registry"
	"k8s.io/kops/pkg/apis/kops/util"
	"k8s.io/kops/pkg/apis/kops/validation"
+	"k8s.io/kops/pkg/apis/nodeup"
	"k8s.io/kops/pkg/client/simple"
	"k8s.io/kops/pkg/featureflag"
	"k8s.io/kops/pkg/model"

@@ -44,7 +45,6 @@ import (
	"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
	"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
	"k8s.io/kops/upup/pkg/fi/fitasks"
-	"k8s.io/kops/upup/pkg/fi/nodeup"
	"k8s.io/kops/util/pkg/hashing"
	"k8s.io/kops/util/pkg/vfs"
)
@@ -116,9 +116,6 @@ func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTa
	case api.InstanceGroupRoleNode:
		tags.Insert("_kubernetes_pool")

-		// TODO: Should we run _protokube on the nodes?
-		tags.Insert("_protokube")
-
	case api.InstanceGroupRoleMaster:
		tags.Insert("_kubernetes_master")

@@ -127,8 +124,6 @@ func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTa
			tags.Insert("_kubernetes_pool")
		}

-		tags.Insert("_protokube")
-
	case api.InstanceGroupRoleBastion:
		// No tags

@@ -136,12 +131,6 @@ func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTa
		return nil, fmt.Errorf("Unrecognized role: %v", role)
	}

-	// TODO: Replace with list of CNI plugins ?
-	if usesCNI(cluster) {
-		tags.Insert("_cni_bridge", "_cni_host_local", "_cni_loopback", "_cni_ptp", "_cni_flannel")
-		//tags.Insert("_cni_tuning")
-	}
-
	switch fi.StringValue(cluster.Spec.UpdatePolicy) {
	case "": // default
		tags.Insert("_automatic_upgrades")
@@ -31,6 +31,7 @@ import (
	"k8s.io/kops/nodeup/pkg/model"
	api "k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/apis/kops/registry"
+	"k8s.io/kops/pkg/apis/nodeup"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/cloudinit"
	"k8s.io/kops/upup/pkg/fi/nodeup/local"

@@ -44,7 +45,7 @@ import (
const MaxTaskDuration = 365 * 24 * time.Hour

type NodeUpCommand struct {
-	config         *NodeUpConfig
+	config         *nodeup.NodeUpConfig
	cluster        *api.Cluster
	instanceGroup  *api.InstanceGroup
	ConfigLocation string

@@ -195,6 +196,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
	}

	modelContext := &model.NodeupModelContext{
+		NodeupConfig: c.config,
		Cluster:      c.cluster,
		Distribution: distribution,
		Architecture: model.ArchitectureAmd64,

@@ -207,12 +209,18 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
	}

	loader := NewLoader(c.config, c.cluster, assets, nodeTags)
+	loader.Builders = append(loader.Builders, &model.DirectoryBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.DockerBuilder{NodeupModelContext: modelContext})
+	loader.Builders = append(loader.Builders, &model.ProtokubeBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.CloudConfigBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.KubeletBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.KubectlBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.EtcdBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.LogrotateBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.PackagesBuilder{NodeupModelContext: modelContext})
+	loader.Builders = append(loader.Builders, &model.SecretBuilder{NodeupModelContext: modelContext})
+	loader.Builders = append(loader.Builders, &model.FirewallBuilder{NodeupModelContext: modelContext})
+	loader.Builders = append(loader.Builders, &model.NetworkBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.SysctlBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.KubeAPIServerBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.KubeControllerManagerBuilder{NodeupModelContext: modelContext})
@@ -23,6 +23,7 @@ import (
	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/util/sets"
	api "k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/apis/nodeup"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/loader"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"

@@ -35,7 +36,7 @@ type Loader struct {
	Builders []fi.ModelBuilder

	templates []*template.Template
-	config    *NodeUpConfig
+	config    *nodeup.NodeUpConfig
	cluster   *api.Cluster

	assets *fi.AssetStore

@@ -45,7 +46,7 @@ type Loader struct {
	TemplateFunctions template.FuncMap
}

-func NewLoader(config *NodeUpConfig, cluster *api.Cluster, assets *fi.AssetStore, tags sets.String) *Loader {
+func NewLoader(config *nodeup.NodeUpConfig, cluster *api.Cluster, assets *fi.AssetStore, tags sets.String) *Loader {
	l := &Loader{}
	l.assets = assets
	l.tasks = make(map[string]fi.Task)
@@ -42,7 +42,7 @@ type File struct {
	Mode        *string `json:"mode,omitempty"`
	IfNotExists bool    `json:"ifNotExists,omitempty"`

-	OnChangeExecute []string `json:"onChangeExecute,omitempty"`
+	OnChangeExecute [][]string `json:"onChangeExecute,omitempty"`

	Symlink *string `json:"symlink,omitempty"`
	Owner   *string `json:"owner,omitempty"`
@@ -96,6 +96,23 @@ func (f *File) GetDependencies(tasks map[string]fi.Task) []fi.Task {
		}
	}

+	// Files depend on parent directories
+	for _, v := range tasks {
+		dir, ok := v.(*File)
+		if !ok {
+			continue
+		}
+		if dir.Type == FileType_Directory {
+			dirPath := dir.Path
+			if !strings.HasSuffix(dirPath, "/") {
+				dirPath += "/"
+			}
+			if strings.HasPrefix(f.Path, dirPath) {
+				deps = append(deps, v)
+			}
+		}
+	}
+
	return deps
}
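The new parent-directory dependency matters on Container-Optimized OS: files written under /home/kubernetes/bin are now rendered after the DirectoryBuilder task that bind-mounts that directory exec. A sketch of the effect (task wiring simplified, file names illustrative):

    dirTask := &nodetasks.File{Path: "/home/kubernetes/bin", Type: nodetasks.FileType_Directory, Mode: s("0755")}
    binTask := &nodetasks.File{Path: "/home/kubernetes/bin/kubectl", Type: nodetasks.FileType_File, Mode: s("0755")}
    // binTask.GetDependencies(tasks) now includes dirTask, because binTask.Path
    // has dirTask.Path + "/" as a prefix.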
@@ -261,15 +278,16 @@ func (_ *File) RenderLocal(t *local.LocalTarget, a, e, changes *File) error {
	}

	if changed && e.OnChangeExecute != nil {
-		args := e.OnChangeExecute
-		human := strings.Join(args, " ")
+		for _, args := range e.OnChangeExecute {
+			human := strings.Join(args, " ")

-		glog.Infof("Changed; will execute OnChangeExecute command: %q", human)
+			glog.Infof("Changed; will execute OnChangeExecute command: %q", human)

-		cmd := exec.Command(args[0], args[1:]...)
-		output, err := cmd.CombinedOutput()
-		if err != nil {
-			return fmt.Errorf("error executing command %q: %v\nOutput: %s", human, err, output)
+			cmd := exec.Command(args[0], args[1:]...)
+			output, err := cmd.CombinedOutput()
+			if err != nil {
+				return fmt.Errorf("error executing command %q: %v\nOutput: %s", human, err, output)
+			}
		}
	}
@@ -303,7 +321,8 @@ func (_ *File) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *File
	}

	if e.OnChangeExecute != nil {
-		t.AddCommand(cloudinit.Always, e.OnChangeExecute...)
+		return fmt.Errorf("OnChangeExecute not supported with CloudInit")
+		//t.AddCommand(cloudinit.Always, e.OnChangeExecute...)
	}

	return nil
@@ -42,6 +42,8 @@ const (
	centosSystemdSystemPath = "/usr/lib/systemd/system"

	coreosSystemdSystemPath = "/etc/systemd/system"
+
+	containerosSystemdSystemPath = "/etc/systemd/system"
)

type Service struct {

@@ -146,6 +148,8 @@ func (e *Service) systemdSystemPath(target tags.HasTags) (string, error) {
		return centosSystemdSystemPath, nil
	} else if target.HasTag("_coreos") {
		return coreosSystemdSystemPath, nil
+	} else if target.HasTag("_containeros") {
+		return containerosSystemdSystemPath, nil
	} else {
		return "", fmt.Errorf("unsupported systemd system")
	}
@ -1,32 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodeup

type ProtokubeFlags struct {
Master *bool `json:"master,omitempty" flag:"master"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`

DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`

Zone []string `json:"zone,omitempty" flag:"zone"`

Channels []string `json:"channels,omitempty" flag:"channels"`

DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
}

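The ProtokubeFlags struct deleted above relies on `flag:"..."` struct tags that k8s.io/kops/pkg/flagbuilder turns into command-line arguments (see the dest["BuildFlags"] registration later in this diff). The sketch below is a rough illustrative stand-in for that tag-driven pattern, not the actual flagbuilder implementation:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// buildFlags walks the struct fields, reads the `flag` tag, and emits
// --name=value for every non-nil pointer field and every slice element.
func buildFlags(options interface{}) string {
	var flags []string
	v := reflect.ValueOf(options).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		name := t.Field(i).Tag.Get("flag")
		if name == "" {
			continue
		}
		f := v.Field(i)
		switch f.Kind() {
		case reflect.Ptr:
			if !f.IsNil() {
				flags = append(flags, fmt.Sprintf("--%s=%v", name, f.Elem().Interface()))
			}
		case reflect.Slice:
			for j := 0; j < f.Len(); j++ {
				flags = append(flags, fmt.Sprintf("--%s=%v", name, f.Index(j).Interface()))
			}
		}
	}
	return strings.Join(flags, " ")
}

// protokubeFlags is a trimmed-down illustrative copy of the struct above.
type protokubeFlags struct {
	Master        *bool    `flag:"master"`
	Containerized *bool    `flag:"containerized"`
	LogLevel      *int32   `flag:"v"`
	Zone          []string `flag:"zone"`
}

func main() {
	master, containerized, logLevel := true, true, int32(4)
	f := &protokubeFlags{
		Master:        &master,
		Containerized: &containerized,
		LogLevel:      &logLevel,
		Zone:          []string{"*/example"},
	}
	fmt.Println(buildFlags(f))
	// Prints something like: --master=true --containerized=true --v=4 --zone=*/example
}
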
@ -20,13 +20,12 @@ import (
"encoding/base64"
"fmt"
"runtime"
"strings"
"text/template"

"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kops"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/secrets"

@ -37,7 +36,7 @@ const TagMaster = "_kubernetes_master"

// templateFunctions is a simple helper-class for the functions accessible to templates
type templateFunctions struct {
- nodeupConfig *NodeUpConfig
+ nodeupConfig *nodeup.NodeUpConfig

// cluster is populated with the current cluster
cluster *api.Cluster

@ -53,7 +52,7 @@ type templateFunctions struct {
}

// newTemplateFunctions is the constructor for templateFunctions
- func newTemplateFunctions(nodeupConfig *NodeUpConfig, cluster *api.Cluster, instanceGroup *api.InstanceGroup, tags sets.String) (*templateFunctions, error) {
+ func newTemplateFunctions(nodeupConfig *nodeup.NodeUpConfig, cluster *api.Cluster, instanceGroup *api.InstanceGroup, tags sets.String) (*templateFunctions, error) {
t := &templateFunctions{
nodeupConfig: nodeupConfig,
cluster: cluster,

@ -93,11 +92,9 @@ func (t *templateFunctions) populate(dest template.FuncMap) {
return runtime.GOARCH
}

- dest["CACertificatePool"] = t.CACertificatePool
dest["CACertificate"] = t.CACertificate
dest["PrivateKey"] = t.PrivateKey
dest["Certificate"] = t.Certificate
- dest["AllTokens"] = t.AllTokens
dest["GetToken"] = t.GetToken

dest["BuildFlags"] = flagbuilder.BuildFlags

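For context on the dest[...] registrations in this hunk: they populate a text/template FuncMap, so templates rendered by nodeup can call these helpers by name. A minimal sketch of that standard-library mechanism (the function name and value here are only illustrative):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Register helpers under the names templates will use.
	funcs := template.FuncMap{
		"ClusterName": func() string { return "example.cluster.local" },
	}
	tmpl := template.Must(template.New("unit").Funcs(funcs).Parse("cluster: {{ ClusterName }}\n"))
	_ = tmpl.Execute(os.Stdout, nil)
	// Output: cluster: example.cluster.local
}
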
@ -123,31 +120,6 @@ func (t *templateFunctions) populate(dest template.FuncMap) {
dest["ClusterName"] = func() string {
return t.cluster.ObjectMeta.Name
}

- dest["ProtokubeImageName"] = t.ProtokubeImageName
- dest["ProtokubeImagePullCommand"] = t.ProtokubeImagePullCommand
-
- dest["ProtokubeFlags"] = t.ProtokubeFlags
}

- // CACertificatePool returns the set of valid CA certificates for the cluster
- func (t *templateFunctions) CACertificatePool() (*fi.CertificatePool, error) {
- if t.keyStore != nil {
- return t.keyStore.CertificatePool(fi.CertificateId_CA)
- }
-
- // Fallback to direct properties
- glog.Infof("Falling back to direct configuration for keystore")
- cert, err := t.CACertificate()
- if err != nil {
- return nil, err
- }
- if cert == nil {
- return nil, fmt.Errorf("CA certificate not found (with fallback)")
- }
- pool := &fi.CertificatePool{}
- pool.Primary = cert
- return pool, nil
- }

// CACertificate returns the primary CA certificate for the cluster

@ -165,23 +137,6 @@ func (t *templateFunctions) Certificate(id string) (*fi.Certificate, error) {
return t.keyStore.Cert(id)
}

- // AllTokens returns a map of all tokens
- func (t *templateFunctions) AllTokens() (map[string]string, error) {
- tokens := make(map[string]string)
- ids, err := t.secretStore.ListSecrets()
- if err != nil {
- return nil, err
- }
- for _, id := range ids {
- token, err := t.secretStore.FindSecret(id)
- if err != nil {
- return nil, err
- }
- tokens[id] = string(token.Data)
- }
- return tokens, nil
- }

// GetToken returns the specified token
func (t *templateFunctions) GetToken(key string) (string, error) {
token, err := t.secretStore.FindSecret(key)

@ -194,36 +149,6 @@ func (t *templateFunctions) GetToken(key string) (string, error) {
return string(token.Data), nil
}

- // ProtokubeImageName returns the docker image for protokube
- func (t *templateFunctions) ProtokubeImageName() string {
- name := ""
- if t.nodeupConfig.ProtokubeImage != nil && t.nodeupConfig.ProtokubeImage.Name != "" {
- name = t.nodeupConfig.ProtokubeImage.Name
- }
- if name == "" {
- // use current default corresponding to this version of nodeup
- name = kops.DefaultProtokubeImageName()
- }
- return name
- }
-
- // ProtokubeImagePullCommand returns the command to pull the image
- func (t *templateFunctions) ProtokubeImagePullCommand() string {
- source := ""
- if t.nodeupConfig.ProtokubeImage != nil {
- source = t.nodeupConfig.ProtokubeImage.Source
- }
- if source == "" {
- // Nothing to pull; return dummy value
- return "/bin/true"
- }
- if strings.HasPrefix(source, "http:") || strings.HasPrefix(source, "https:") || strings.HasPrefix(source, "s3:") {
- // We preloaded the image; return a dummy value
- return "/bin/true"
- }
- return "/usr/bin/docker pull " + t.nodeupConfig.ProtokubeImage.Source
- }

// IsMaster returns true if we are tagged as a master
func (t *templateFunctions) isMaster() bool {
return t.hasTag(TagMaster)

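The removed ProtokubeImagePullCommand above returns /bin/true when there is nothing to pull at runtime — either no source is set, or the image was preloaded from an http/https/s3 source — and otherwise shells out to docker pull. Restated as a small standalone sketch (illustrative only; example image names are made up):

package main

import (
	"fmt"
	"strings"
)

// pullCommand mirrors the decision in the removed function: no-op when the
// image needs no runtime pull, docker pull otherwise.
func pullCommand(source string) string {
	if source == "" {
		return "/bin/true" // nothing to pull
	}
	if strings.HasPrefix(source, "http:") || strings.HasPrefix(source, "https:") || strings.HasPrefix(source, "s3:") {
		return "/bin/true" // image was preloaded onto the host
	}
	return "/usr/bin/docker pull " + source
}

func main() {
	fmt.Println(pullCommand(""))                           // /bin/true
	fmt.Println(pullCommand("https://example.com/image"))  // /bin/true
	fmt.Println(pullCommand("example/protokube:latest"))   // /usr/bin/docker pull example/protokube:latest
}
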
@ -235,53 +160,6 @@ func (t *templateFunctions) hasTag(tag string) bool {
return found
}

- // ProtokubeFlags returns the flags object for protokube
- func (t *templateFunctions) ProtokubeFlags() *ProtokubeFlags {
- f := &ProtokubeFlags{}
-
- master := t.isMaster()
-
- f.Master = fi.Bool(master)
- if master {
- f.Channels = t.nodeupConfig.Channels
- }
-
- f.LogLevel = fi.Int32(4)
- f.Containerized = fi.Bool(true)
-
- zone := t.cluster.Spec.DNSZone
- if zone != "" {
- if strings.Contains(zone, ".") {
- // match by name
- f.Zone = append(f.Zone, zone)
- } else {
- // match by id
- f.Zone = append(f.Zone, "*/"+zone)
- }
- } else {
- glog.Warningf("DNSZone not specified; protokube won't be able to update DNS")
- // TODO: Should we permit wildcard updates if zone is not specified?
- //argv = append(argv, "--zone=*/*")
- }
-
- if t.cluster.Spec.CloudProvider != "" {
- f.Cloud = fi.String(t.cluster.Spec.CloudProvider)
-
- switch fi.CloudProviderID(t.cluster.Spec.CloudProvider) {
- case fi.CloudProviderAWS:
- f.DNSProvider = fi.String("aws-route53")
- case fi.CloudProviderGCE:
- f.DNSProvider = fi.String("google-clouddns")
- default:
- glog.Warningf("Unknown cloudprovider %q; won't set DNS provider")
- }
- }
-
- f.DNSInternalSuffix = fi.String(".internal." + t.cluster.ObjectMeta.Name)
-
- return f
- }

// KubeProxyConfig builds the KubeProxyConfig configuration object
func (t *templateFunctions) KubeProxyConfig() *api.KubeProxyConfig {
config := &api.KubeProxyConfig{}

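One convention worth noting in the removed ProtokubeFlags function above: a DNS zone containing a dot is passed to protokube as a zone name, while anything else is treated as a zone ID and prefixed with "*/". A tiny standalone sketch of that convention (illustrative names and values):

package main

import (
	"fmt"
	"strings"
)

// protokubeZoneArg mirrors the zone-matching convention: values with a dot
// are matched by zone name, everything else by zone ID ("*/<id>").
func protokubeZoneArg(zone string) string {
	if strings.Contains(zone, ".") {
		return zone
	}
	return "*/" + zone
}

func main() {
	fmt.Println(protokubeZoneArg("example.com")) // match by name
	fmt.Println(protokubeZoneArg("Z1234567890")) // match by id -> */Z1234567890
}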