Merge pull request #1813 from aledbf/coreos

Initial (experimental) CoreOS support
This commit is contained in:
Justin Santa Barbara 2017-02-14 11:08:40 -05:00 committed by GitHub
commit 1c7818833a
26 changed files with 730 additions and 280 deletions

View File

@ -19,15 +19,20 @@ package main // import "k8s.io/kops/cmd/nodeup"
import (
"flag"
"fmt"
"github.com/golang/glog"
"k8s.io/kops"
"k8s.io/kops/upup/models"
"k8s.io/kops/upup/pkg/fi/nodeup"
"os"
"time"
"github.com/golang/glog"
"k8s.io/kops"
"k8s.io/kops/nodeup/pkg/bootstrap"
"k8s.io/kops/upup/models"
"k8s.io/kops/upup/pkg/fi/nodeup"
)
const retryInterval = 30 * time.Second
const (
retryInterval = 30 * time.Second
procSelfExe = "/proc/self/exe"
)
func main() {
gitVersion := ""
@ -50,6 +55,9 @@ func main() {
target := "direct"
flag.StringVar(&target, "target", target, "Target - direct, cloudinit")
installSystemdUnit := false
flag.BoolVar(&installSystemdUnit, "install-systemd-unit", installSystemdUnit, "If true, will install a systemd unit instead of running directly")
if dryrun {
target = "dryrun"
}
@ -64,17 +72,58 @@ func main() {
retries := flagRetries
for {
cmd := &nodeup.NodeUpCommand{
ConfigLocation: flagConf,
Target: target,
CacheDir: flagCacheDir,
FSRoot: flagRootFS,
ModelDir: models.NewAssetPath("nodeup"),
}
err := cmd.Run(os.Stdout)
if err == nil {
fmt.Printf("success")
os.Exit(0)
var err error
if installSystemdUnit {
// create a systemd unit to bootstrap kops
// using the same args as we were called with
var command []string
for i := 0; i < len(os.Args); i++ {
s := os.Args[i]
if s == "-install-systemd-unit" || s == "--install-systemd-unit" {
continue
}
if i == 0 {
// We could also try to evaluate based on cwd
if _, err := os.Stat(procSelfExe); os.IsNotExist(err) {
glog.Fatalf("file %v does not exists", procSelfExe)
}
fi, err := os.Lstat(procSelfExe)
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
glog.Fatalf("file %v is not a symlink", procSelfExe)
}
s, err = os.Readlink(procSelfExe)
if err != nil {
glog.Fatalf("error reading %v link: %v", procSelfExe, err)
}
}
command = append(command, s)
}
i := bootstrap.Installation{
MaxTaskDuration: 5 * time.Minute,
CacheDir: flagCacheDir,
Command: command,
FSRoot: flagRootFS,
}
err = i.Run()
if err == nil {
fmt.Printf("service installed")
os.Exit(0)
}
} else {
cmd := &nodeup.NodeUpCommand{
ConfigLocation: flagConf,
Target: target,
CacheDir: flagCacheDir,
FSRoot: flagRootFS,
ModelDir: models.NewAssetPath("nodeup"),
}
err = cmd.Run(os.Stdout)
if err == nil {
fmt.Printf("success")
os.Exit(0)
}
}
if retries == 0 {

View File

@ -85,9 +85,21 @@ RHEL7 support is still experimental, but should work. Please report any issues.
The following steps are known:
* Redhat AMIs can be found using `aws ec2 describe-images --region=us-east-1 --owner=309956199498 --filters Name=virtualization-type,Values=hvm`
* You can specify the name using the owner alias, for example `redhat.com/RHEL-7.2_HVM-20161025-x86_64-1-Hourly2-GP2`
* You can specify the name using the `redhat.com` owner alias, for example `redhat.com/RHEL-7.2_HVM-20161025-x86_64-1-Hourly2-GP2`
Be aware of the following limitations:
* RHEL 7.2 is the recommended minimum version
* RHEL7 AMIs are running an older kernel than we prefer to run elsewhere
## CoreOS
CoreOS support is highly experimental. Please report any issues.
The following steps are known:
* CoreOS AMIs can be found using `aws ec2 describe-images --region=us-east-1 --owner=595879546273 --filters Name=virtualization-type,Values=hvm`
* You can specify the name using the `coreos.com` owner alias, for example `coreos.com/CoreOS-stable-1235.6.0-hvm`

View File

@ -17,6 +17,7 @@ k8s.io/kops/federation
k8s.io/kops/federation/model
k8s.io/kops/federation/targets/kubernetes
k8s.io/kops/federation/tasks
k8s.io/kops/nodeup/pkg/bootstrap
k8s.io/kops/nodeup/pkg/distros
k8s.io/kops/nodeup/pkg/model
k8s.io/kops/nodeup/pkg/model/resources

View File

@ -0,0 +1,127 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"fmt"
"github.com/golang/glog"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/systemd"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/local"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kops/util/pkg/vfs"
"k8s.io/kubernetes/pkg/util/sets"
"strings"
"time"
)
// Installation holds the configuration for installing nodeup as a
// systemd service (see buildSystemdJob) that runs the captured command.
type Installation struct {
	// FSRoot is the root of the filesystem to operate on (typically "/").
	FSRoot string
	// CacheDir is the directory used by the local target for cached artifacts.
	CacheDir string
	// MaxTaskDuration bounds how long the installation tasks may run.
	MaxTaskDuration time.Duration
	// Command is the full command line the installed systemd unit will execute.
	Command []string
}
// Run performs the installation: it detects the OS distribution, builds
// the task list, and applies the tasks against the local machine.
func (i *Installation) Run() error {
	distribution, err := distros.FindDistribution(i.FSRoot)
	if err != nil {
		return fmt.Errorf("error determining OS distribution: %v", err)
	}

	tags := sets.NewString(distribution.BuildTags()...)

	tasks := map[string]fi.Task{}
	i.Build(&fi.ModelBuilderContext{Tasks: tasks})

	// If any task installs a package, we must refresh the package indexes first.
	packageTaskFound := false
	for _, t := range tasks {
		if _, ok := t.(*nodetasks.Package); ok {
			packageTaskFound = true
			break
		}
	}
	if packageTaskFound {
		glog.Infof("Package task found; adding UpdatePackages task")
		tasks["UpdatePackages"] = nodetasks.NewUpdatePackages()
	} else {
		glog.Infof("No package task found; won't update packages")
	}

	target := &local.LocalTarget{
		CacheDir: i.CacheDir,
		Tags:     tags,
	}

	// When bootstrapping locally we have no cloud, keystore, secret store
	// or config base; the context tolerates nil values for these.
	var cloud fi.Cloud
	var keyStore fi.Keystore
	var secretStore fi.SecretStore
	var configBase vfs.Path

	context, err := fi.NewContext(target, cloud, keyStore, secretStore, configBase, true, tasks)
	if err != nil {
		return fmt.Errorf("error building context: %v", err)
	}
	defer context.Close()

	if err := context.RunTasks(i.MaxTaskDuration); err != nil {
		return fmt.Errorf("error running tasks: %v", err)
	}

	if err := target.Finish(tasks); err != nil {
		return fmt.Errorf("error finishing target: %v", err)
	}

	return nil
}
// Build adds the installation tasks (currently just the systemd unit) to the context.
func (i *Installation) Build(c *fi.ModelBuilderContext) {
	c.AddTask(i.buildSystemdJob())
}
// buildSystemdJob renders the kops-configuration systemd unit, which
// re-invokes nodeup with the command line captured in i.Command.
func (i *Installation) buildSystemdJob() *nodetasks.Service {
	serviceName := "kops-configuration.service"

	manifest := &systemd.Manifest{}
	manifest.Set("Unit", "Description", "Run kops bootstrap (nodeup)")
	manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")
	// NOTE(review): arguments are joined with plain spaces and are not
	// shell-quoted — verify no argument can contain whitespace.
	manifest.Set("Service", "ExecStart", strings.Join(i.Command, " "))
	manifest.Set("Service", "Type", "oneshot")
	manifest.Set("Install", "WantedBy", "multi-user.target")

	manifestString := manifest.Render()
	glog.V(8).Infof("Built service manifest %q\n%s", serviceName, manifestString)

	service := &nodetasks.Service{
		Name:       serviceName,
		Definition: fi.String(manifestString),
	}
	service.InitDefaults()

	return service
}

View File

@ -227,6 +227,11 @@ func (d *dockerVersion) matches(arch Architecture, dockerVersion string, distro
}
func (b *DockerBuilder) Build(c *fi.ModelBuilderContext) error {
if b.Distribution == distros.DistributionCoreOS {
glog.Infof("Detected CoreOS; won't install Docker")
return nil
}
// Add Apache2 license
{
t := &nodetasks.File{

53
nodeup/pkg/model/etcd.go Normal file
View File

@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"github.com/golang/glog"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)
// EtcdBuilder installs etcd (currently just the etcd user account).
type EtcdBuilder struct {
	*NodeupModelContext
}

// Compile-time check that EtcdBuilder implements fi.ModelBuilder.
// (Fixed: this previously asserted against LogrotateBuilder, a copy-paste slip.)
var _ fi.ModelBuilder = &EtcdBuilder{}
// Build adds the task that creates the etcd user. It is a no-op on
// non-masters and on CoreOS, where user installation is skipped.
func (b *EtcdBuilder) Build(c *fi.ModelBuilderContext) error {
	// etcd only runs on masters.
	if !b.IsMaster {
		return nil
	}

	if b.Distribution == distros.DistributionCoreOS {
		glog.Infof("Detected CoreOS; skipping etcd user installation")
		return nil
	}

	// TODO: Do we actually use the user anywhere?
	// NOTE(review): the user name "user" looks suspicious for an etcd
	// account — verify it shouldn't be "etcd".
	task := &nodetasks.UserTask{
		// TODO: Should we set a consistent UID in case we remount?
		Name:  "user",
		Shell: "/sbin/nologin",
		Home:  "/var/etcd",
	}
	c.AddTask(task)

	return nil
}

View File

@ -0,0 +1,173 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"strings"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/intstr"
)
// KubeAPIServerBuilder installs kube-apiserver (just the static pod manifest at the moment).
type KubeAPIServerBuilder struct {
	*NodeupModelContext
}

// KubeAPIServerBuilder implements fi.ModelBuilder.
var _ fi.ModelBuilder = &KubeAPIServerBuilder{}
// Build writes the kube-apiserver static pod manifest; only masters run
// the API server, so this is a no-op elsewhere.
func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
	if !b.IsMaster {
		return nil
	}

	pod, err := b.buildPod()
	if err != nil {
		return fmt.Errorf("error building kube-apiserver manifest: %v", err)
	}

	manifest, err := ToVersionedYaml(pod)
	if err != nil {
		return fmt.Errorf("error marshalling manifest to yaml: %v", err)
	}

	c.AddTask(&nodetasks.File{
		Path:     "/etc/kubernetes/manifests/kube-apiserver.manifest",
		Contents: fi.NewBytesResource(manifest),
		Type:     nodetasks.FileType_File,
	})

	return nil
}
// buildPod assembles the kube-apiserver static pod specification from the
// cluster's KubeAPIServer config.
func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
	apiServer := b.Cluster.Spec.KubeAPIServer

	flags, err := flagbuilder.BuildFlags(apiServer)
	if err != nil {
		return nil, fmt.Errorf("error building kube-apiserver flags: %v", err)
	}

	// Run under a shell so stdout/stderr can be appended to the log file.
	command := []string{
		"/bin/sh", "-c", "/usr/local/bin/kube-apiserver " + flags + " 1>>/var/log/kube-apiserver.log 2>&1",
	}

	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: v1.ObjectMeta{
			Name:        "kube-apiserver",
			Namespace:   "kube-system",
			Annotations: b.buildAnnotations(),
			Labels: map[string]string{
				"k8s-app": "kube-apiserver",
			},
		},
		Spec: v1.PodSpec{
			HostNetwork: true,
		},
	}

	probe := &v1.Probe{
		Handler: v1.Handler{
			HTTPGet: &v1.HTTPGetAction{
				Host: "127.0.0.1",
				Path: "/healthz",
				Port: intstr.FromInt(8080),
			},
		},
		InitialDelaySeconds: 15,
		TimeoutSeconds:      15,
	}

	container := &v1.Container{
		Name:  "kube-apiserver",
		Image: apiServer.Image,
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("150m"),
			},
		},
		Command:       command,
		LivenessProbe: probe,
		Ports: []v1.ContainerPort{
			{
				Name:          "https",
				ContainerPort: apiServer.SecurePort,
				HostPort:      apiServer.SecurePort,
			},
			{
				Name:          "local",
				ContainerPort: 8080,
				HostPort:      8080,
			},
		},
	}

	// Mount the host's SSL certificate directories read-only.
	for _, path := range b.SSLHostPaths() {
		name := strings.Replace(path, "/", "", -1)
		addHostPathMapping(pod, container, name, path, true)
	}

	if apiServer.PathSrvKubernetes != "" {
		addHostPathMapping(pod, container, "srvkube", apiServer.PathSrvKubernetes, true)
	}
	if apiServer.PathSrvSshproxy != "" {
		addHostPathMapping(pod, container, "srvsshproxy", apiServer.PathSrvSshproxy, false)
	}

	// The log file must be writable by the container.
	addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log", false)

	pod.Spec.Containers = append(pod.Spec.Containers, *container)

	return pod, nil
}
// addHostPathMapping mounts the given host path into the container at the
// same path, registering both the pod volume and the container volume mount.
func addHostPathMapping(pod *v1.Pod, container *v1.Container, name string, path string, readOnly bool) {
	volume := v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{Path: path},
		},
	}
	mount := v1.VolumeMount{
		Name:      name,
		MountPath: path,
		ReadOnly:  readOnly,
	}

	pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
	container.VolumeMounts = append(container.VolumeMounts, mount)
}
// buildAnnotations returns the dns-controller annotations for the
// API server pod; the external name is added only when API DNS is configured.
func (b *KubeAPIServerBuilder) buildAnnotations() map[string]string {
	annotations := map[string]string{
		"dns.alpha.kubernetes.io/internal": b.Cluster.Spec.MasterInternalName,
	}
	if b.Cluster.Spec.API != nil && b.Cluster.Spec.API.DNS != nil {
		annotations["dns.alpha.kubernetes.io/external"] = b.Cluster.Spec.MasterPublicName
	}
	return annotations
}

View File

@ -0,0 +1,129 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"strings"
)
// KubeControllerManagerBuilder installs kube-controller-manager (just the static pod manifest at the moment).
type KubeControllerManagerBuilder struct {
	*NodeupModelContext
}

// KubeControllerManagerBuilder implements fi.ModelBuilder.
var _ fi.ModelBuilder = &KubeControllerManagerBuilder{}
// Build writes the kube-controller-manager static pod manifest; only
// masters run the controller manager, so this is a no-op elsewhere.
func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
	if !b.IsMaster {
		return nil
	}

	pod, err := b.buildPod()
	if err != nil {
		return fmt.Errorf("error building kube-controller-manager pod: %v", err)
	}

	manifest, err := ToVersionedYaml(pod)
	if err != nil {
		return fmt.Errorf("error marshalling pod to yaml: %v", err)
	}

	c.AddTask(&nodetasks.File{
		Path:     "/etc/kubernetes/manifests/kube-controller-manager.manifest",
		Contents: fi.NewBytesResource(manifest),
		Type:     nodetasks.FileType_File,
	})

	return nil
}
// buildPod assembles the kube-controller-manager static pod specification
// from the cluster's KubeControllerManager config.
func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
	kcm := b.Cluster.Spec.KubeControllerManager

	flags, err := flagbuilder.BuildFlags(kcm)
	if err != nil {
		return nil, fmt.Errorf("error building kube-controller-manager flags: %v", err)
	}

	// Run under a shell so stdout/stderr can be appended to the log file.
	command := []string{
		"/bin/sh", "-c", "/usr/local/bin/kube-controller-manager " + flags + " 1>>/var/log/kube-controller-manager.log 2>&1",
	}

	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: v1.ObjectMeta{
			Name:      "kube-controller-manager",
			Namespace: "kube-system",
			Labels: map[string]string{
				"k8s-app": "kube-controller-manager",
			},
		},
		Spec: v1.PodSpec{
			HostNetwork: true,
		},
	}

	probe := &v1.Probe{
		Handler: v1.Handler{
			HTTPGet: &v1.HTTPGetAction{
				Host: "127.0.0.1",
				Path: "/healthz",
				Port: intstr.FromInt(10252),
			},
		},
		InitialDelaySeconds: 15,
		TimeoutSeconds:      15,
	}

	container := &v1.Container{
		Name:  "kube-controller-manager",
		Image: kcm.Image,
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("100m"),
			},
		},
		Command:       command,
		LivenessProbe: probe,
	}

	// Mount the host's SSL certificate directories read-only.
	for _, path := range b.SSLHostPaths() {
		name := strings.Replace(path, "/", "", -1)
		addHostPathMapping(pod, container, name, path, true)
	}

	if kcm.PathSrvKubernetes != "" {
		addHostPathMapping(pod, container, "srvkube", kcm.PathSrvKubernetes, true)
	}

	// The log file must be writable by the container.
	addHostPathMapping(pod, container, "logfile", "/var/log/kube-controller-manager.log", false)

	pod.Spec.Containers = append(pod.Spec.Containers, *container)

	return pod, nil
}

View File

@ -0,0 +1,70 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)
// KubectlBuilder installs kubectl.
type KubectlBuilder struct {
	*NodeupModelContext
}

// KubectlBuilder implements fi.ModelBuilder.
var _ fi.ModelBuilder = &KubectlBuilder{}
// Build installs the kubectl binary from the assets on masters only.
func (b *KubectlBuilder) Build(c *fi.ModelBuilderContext) error {
	if !b.IsMaster {
		// We don't have the configuration on the machines, so it only works on the master anyway
		return nil
	}

	// Add kubectl file as an asset.
	// TODO: Extract to common function?
	assetName := "kubectl"
	asset, err := b.Assets.Find(assetName, "")
	if err != nil {
		return fmt.Errorf("error trying to locate asset %q: %v", assetName, err)
	}
	if asset == nil {
		return fmt.Errorf("unable to locate asset %q", assetName)
	}

	c.AddTask(&nodetasks.File{
		Path:     b.kubectlPath(),
		Contents: asset,
		Type:     nodetasks.FileType_File,
		Mode:     s("0755"),
	})

	return nil
}
// kubectlPath returns the path at which the kubectl binary is installed;
// on CoreOS it goes under /opt/bin instead of /usr/local/bin.
func (b *KubectlBuilder) kubectlPath() string {
	// Renamed local from "kubeletCommand" — a copy-paste from the kubelet
	// builder; this path is for kubectl.
	path := "/usr/local/bin/kubectl"
	if b.Distribution == distros.DistributionCoreOS {
		path = "/opt/bin/kubectl"
	}
	return path
}

View File

@ -0,0 +1,42 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"github.com/golang/glog"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)
// LogrotateBuilder installs the logrotate package.
// (Comment previously said "install kubectl" — a copy-paste slip.)
type LogrotateBuilder struct {
	*NodeupModelContext
}

// LogrotateBuilder implements fi.ModelBuilder.
var _ fi.ModelBuilder = &LogrotateBuilder{}

// Build adds a task to install the logrotate package; CoreOS is skipped
// because we don't install packages there.
func (b *LogrotateBuilder) Build(c *fi.ModelBuilderContext) error {
	if b.Distribution == distros.DistributionCoreOS {
		glog.Infof("Detected CoreOS; won't install logrotate")
		return nil
	}

	c.AddTask(&nodetasks.Package{Name: "logrotate"})

	return nil
}

View File

@ -272,16 +272,6 @@ type KubeDNSConfig struct {
ServerIP string `json:"serverIP,omitempty"`
}
//
//type MasterConfig struct {
// Name string `json:",omitempty"`
//
// Image string `json:",omitempty"`
// Zone string `json:",omitempty"`
// MachineType string `json:",omitempty"`
//}
//
type EtcdClusterSpec struct {
// Name is the name of the etcd cluster (main, events etc)
Name string `json:"name,omitempty"`

View File

@ -18,11 +18,13 @@ package model
import (
"fmt"
"strings"
"github.com/blang/semver"
"github.com/golang/glog"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"strings"
)
type KopsModelContext struct {

View File

@ -120,14 +120,9 @@ function download-release() {
echo "Couldn't download release. Retrying..."
done
echo "Running release install script"
# We run in the background to work around https://github.com/docker/docker/issues/23793
run-nodeup &
}
function run-nodeup() {
sleep 1
( cd ${INSTALL_DIR}; ./nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8 )
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8 )
}
####################################################################################

View File

@ -1,5 +0,0 @@
# TODO: Should we set a consistent UID in case we remount?
shell: /sbin/nologin
home: /var/etcd

View File

@ -1,109 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
annotations:
{{ range $k, $v := BuildAPIServerAnnotations }}
{{ $k }}: {{ $v }}
{{ end }}
name: kube-apiserver
namespace: kube-system
labels:
k8s-app: kube-apiserver
spec:
hostNetwork: true
containers:
- name: kube-apiserver
image: "{{ KubeAPIServer.Image }}"
resources:
requests:
cpu: 150m
command:
- "/bin/sh"
- "-c"
- "/usr/local/bin/kube-apiserver {{ BuildFlags KubeAPIServer }} 1>>/var/log/kube-apiserver.log 2>&1"
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 8080
initialDelaySeconds: 15
timeoutSeconds: 15
ports:
- name: https
containerPort: {{ KubeAPIServer.SecurePort }}
hostPort: {{ KubeAPIServer.SecurePort }}
- name: local
containerPort: 8080
hostPort: 8080
volumeMounts:
- mountPath: /usr/share/ssl
name: usrsharessl
readOnly: true
- mountPath: /usr/ssl
name: usrssl
readOnly: true
- mountPath: /usr/lib/ssl
name: usrlibssl
readOnly: true
- mountPath: /usr/local/openssl
name: usrlocalopenssl
readOnly: true
- mountPath: "{{ KubeAPIServer.PathSrvKubernetes }}"
name: srvkube
readOnly: true
- mountPath: /var/log/kube-apiserver.log
name: logfile
- mountPath: /etc/ssl
name: etcssl
readOnly: true
- mountPath: /var/ssl
name: varssl
readOnly: true
- mountPath: /etc/openssl
name: etcopenssl
readOnly: true
- mountPath: /etc/pki/tls
name: etcpkitls
readOnly: true
- mountPath: /etc/pki/ca-trust
name: etcpkicatrust
readOnly: true
- mountPath: "{{ KubeAPIServer.PathSrvSshproxy }}"
name: srvsshproxy
volumes:
- hostPath:
path: /usr/share/ssl
name: usrsharessl
- hostPath:
path: /usr/ssl
name: usrssl
- hostPath:
path: /usr/lib/ssl
name: usrlibssl
- hostPath:
path: /usr/local/openssl
name: usrlocalopenssl
- hostPath:
path: "{{ KubeAPIServer.PathSrvKubernetes }}"
name: srvkube
- hostPath:
path: /var/log/kube-apiserver.log
name: logfile
- hostPath:
path: /etc/ssl
name: etcssl
- hostPath:
path: /var/ssl
name: varssl
- hostPath:
path: /etc/openssl
name: etcopenssl
- hostPath:
path: /etc/pki/tls
name: etcpkitls
- hostPath:
path: /etc/pki/ca-trust
name: etcpkicatrust
- hostPath:
path: "{{ KubeAPIServer.PathSrvSshproxy }}"
name: srvsshproxy

View File

@ -1,94 +0,0 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-controller-manager",
"namespace": "kube-system",
"labels": {
"k8s-app" : "kube-controller-manager"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-controller-manager",
"image": "{{ KubeControllerManager.Image }}",
"resources": {
"requests": {
"cpu": "100m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-controller-manager {{ BuildFlags KubeControllerManager }} 1>>/var/log/kube-controller-manager.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10252,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{"name": "usrsharessl","mountPath": "/usr/share/ssl", "readOnly": true}, {"name": "usrssl","mountPath": "/usr/ssl", "readOnly": true}, {"name": "usrlibssl","mountPath": "/usr/lib/ssl", "readOnly": true}, {"name": "usrlocalopenssl","mountPath": "/usr/local/openssl", "readOnly": true},
{ "name": "srvkube",
"mountPath": "{{ KubeControllerManager.PathSrvKubernetes }}",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-controller-manager.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpkitls",
"mountPath": "/etc/pki/tls",
"readOnly": true},
{ "name": "etcpkicatrust",
"mountPath": "/etc/pki/ca-trust",
"readOnly": true}
]
}
],
"volumes":[
{"name": "usrsharessl","hostPath": {"path": "/usr/share/ssl"}}, {"name": "usrssl","hostPath": {"path": "/usr/ssl"}}, {"name": "usrlibssl","hostPath": {"path": "/usr/lib/ssl"}}, {"name": "usrlocalopenssl","hostPath": {"path": "/usr/local/openssl"}},
{ "name": "srvkube",
"hostPath": {
"path": "{{ KubeControllerManager.PathSrvKubernetes }}"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-controller-manager.log"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpkitls",
"hostPath": {
"path": "/etc/pki/tls"}
},
{ "name": "etcpkicatrust",
"hostPath": {
"path": "/etc/pki/ca-trust"}
}
]
}}

View File

@ -1 +0,0 @@
TODO - where is this sourced from?

View File

@ -56,7 +56,11 @@ const DeleteTagsLogInterval = 10 // this is in "retry intervals"
const TagClusterName = "KubernetesCluster"
const WellKnownAccountKopeio = "383156758163"
const (
WellKnownAccountKopeio = "383156758163"
WellKnownAccountRedhat = "309956199498"
WellKnownAccountCoreOS = "595879546273"
)
type AWSCloud interface {
fi.Cloud
@ -575,8 +579,10 @@ func resolveImage(ec2Client ec2iface.EC2API, name string) (*ec2.Image, error) {
switch owner {
case "kope.io":
owner = WellKnownAccountKopeio
case "coreos.com":
owner = WellKnownAccountCoreOS
case "redhat.com":
owner = "309956199498"
owner = WellKnownAccountRedhat
}
request.Owners = []*string{&owner}

View File

@ -209,8 +209,12 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
loader := NewLoader(c.config, c.cluster, assets, nodeTags)
loader.Builders = append(loader.Builders, &model.DockerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeletBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.PackagesBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubectlBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.EtcdBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.LogrotateBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.SysctlBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeAPIServerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeControllerManagerBuilder{NodeupModelContext: modelContext})
tf.populate(loader.TemplateFunctions)

View File

@ -40,6 +40,8 @@ const (
// But we use it in two ways: we update the docker manifest, and we install our own
// package (protokube, kubelet). Maybe we should have the idea of a "system" package.
centosSystemdSystemPath = "/usr/lib/systemd/system"
coreosSystemdSystemPath = "/etc/systemd/system"
)
type Service struct {
@ -142,6 +144,8 @@ func (e *Service) systemdSystemPath(target tags.HasTags) (string, error) {
return debianSystemdSystemPath, nil
} else if target.HasTag(tags.TagOSFamilyRHEL) {
return centosSystemdSystemPath, nil
} else if target.HasTag("_coreos") {
return coreosSystemdSystemPath, nil
} else {
return "", fmt.Errorf("unsupported systemd system")
}

View File

@ -24,6 +24,7 @@ import (
"k8s.io/kops/upup/pkg/fi/nodeup/local"
"k8s.io/kops/upup/pkg/fi/utils"
"os/exec"
"strings"
)
// UserTask is responsible for creating a user, by calling useradd
@ -40,6 +41,16 @@ func (e *UserTask) String() string {
return fmt.Sprintf("User: %s", e.Name)
}
// UserTask implements fi.HasName, keyed by the user name.
// (Fixed: the assertion previously checked &File{}, a copy-paste slip;
// the methods below are defined on UserTask.)
var _ fi.HasName = &UserTask{}

// GetName returns the name of the user managed by this task.
func (e *UserTask) GetName() *string {
	return &e.Name
}

// SetName is not supported for users; the name is fixed at construction.
func (e *UserTask) SetName(name string) {
	glog.Fatalf("SetName not supported for User task")
}
func NewUserTask(name string, contents string, meta string) (fi.Task, error) {
s := &UserTask{Name: name}
@ -94,6 +105,7 @@ func (_ *UserTask) RenderLocal(t *local.LocalTarget, a, e, changes *UserTask) er
args := buildUseraddArgs(e)
glog.Infof("Creating user %q", e.Name)
cmd := exec.Command("useradd", args...)
glog.V(2).Infof("running command: useradd %s", strings.Join(args, " "))
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("error creating user: %v\nOutput: %s", err, output)
@ -112,6 +124,7 @@ func (_ *UserTask) RenderLocal(t *local.LocalTarget, a, e, changes *UserTask) er
args = append(args, e.Name)
glog.Infof("Reconfiguring user %q", e.Name)
cmd := exec.Command("usermod", args...)
glog.V(2).Infof("running command: usermod %s", strings.Join(args, " "))
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("error reconfiguring user: %v\nOutput: %s", err, output)

View File

@ -89,7 +89,9 @@ func newTemplateFunctions(nodeupConfig *NodeUpConfig, cluster *api.Cluster, inst
}
func (t *templateFunctions) populate(dest template.FuncMap) {
dest["Arch"] = func() string { return runtime.GOARCH }
dest["Arch"] = func() string {
return runtime.GOARCH
}
dest["CACertificatePool"] = t.CACertificatePool
dest["CACertificate"] = t.CACertificate
@ -102,8 +104,6 @@ func (t *templateFunctions) populate(dest template.FuncMap) {
dest["Base64Encode"] = func(s string) string {
return base64.StdEncoding.EncodeToString([]byte(s))
}
dest["HasTag"] = t.HasTag
dest["IsMaster"] = t.IsMaster
// TODO: We may want to move these to a nodeset / masterset specific thing
dest["KubeDNS"] = func() *api.KubeDNSConfig {
@ -128,19 +128,6 @@ func (t *templateFunctions) populate(dest template.FuncMap) {
dest["ProtokubeImagePullCommand"] = t.ProtokubeImagePullCommand
dest["ProtokubeFlags"] = t.ProtokubeFlags
dest["BuildAPIServerAnnotations"] = t.BuildAPIServerAnnotations
}
// IsMaster returns true if we are tagged as a master
func (t *templateFunctions) IsMaster() bool {
return t.HasTag(TagMaster)
}
// Tag returns true if we are tagged with the specified tag
func (t *templateFunctions) HasTag(tag string) bool {
_, found := t.tags[tag]
return found
}
// CACertificatePool returns the set of valid CA certificates for the cluster
@ -237,11 +224,22 @@ func (t *templateFunctions) ProtokubeImagePullCommand() string {
return "/usr/bin/docker pull " + t.nodeupConfig.ProtokubeImage.Source
}
// IsMaster returns true if we are tagged as a master
func (t *templateFunctions) isMaster() bool {
return t.hasTag(TagMaster)
}
// Tag returns true if we are tagged with the specified tag
func (t *templateFunctions) hasTag(tag string) bool {
_, found := t.tags[tag]
return found
}
// ProtokubeFlags returns the flags object for protokube
func (t *templateFunctions) ProtokubeFlags() *ProtokubeFlags {
f := &ProtokubeFlags{}
master := t.IsMaster()
master := t.isMaster()
f.Master = fi.Bool(master)
if master {
@ -277,19 +275,10 @@ func (t *templateFunctions) KubeProxyConfig() *api.KubeProxyConfig {
// As a special case, if this is the master, we point kube-proxy to the local IP
// This prevents a circular dependency where kube-proxy can't come up until DNS comes up,
// which would mean that DNS can't rely on API to come up
if t.IsMaster() {
if t.isMaster() {
glog.Infof("kube-proxy running on the master; setting API endpoint to localhost")
config.Master = "http://127.0.0.1:8080"
}
return config
}
func (t *templateFunctions) BuildAPIServerAnnotations() map[string]string {
annotations := make(map[string]string)
annotations["dns.alpha.kubernetes.io/internal"] = t.cluster.Spec.MasterInternalName
if t.cluster.Spec.API != nil && t.cluster.Spec.API.DNS != nil {
annotations["dns.alpha.kubernetes.io/external"] = t.cluster.Spec.MasterPublicName
}
return annotations
}