Merge pull request #3114 from gambol99/etcd_tls

Automatic merge from submit-queue

Etcd TLS Options

The current implementation does not put any transport security on the etcd cluster. This PR provides an optional flag to enable TLS for the etcd cluster (a minimal sketch of the new spec field follows the list below).

- cleaned up and fixed any formatting issues on the journey
- added two new certificates (server/client) for the etcd peers, and a client certificate for the kube-apiserver and potentially others (calico, perhaps?)
- disabled the protokube service on nodes entirely, as it is not required; note this was first raised in https://github.com/kubernetes/kops/pull/3091, but figured it would be easier to place in here given how related the changes are
- updated the protokube codebase to reflect the changes, removing the master option as it's no longer required
- added additional integration tests for the protokube manifests
- note: still need to add documentation, but opening the PR to get feedback
- one outstanding issue is the migration from http -> https for pre-existing clusters; I'm going to ask on the CoreOS board for the best options
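For context, a minimal sketch of the new spec field expressed against the API types in this diff (illustrative only; the type and field names are taken from the diff, and in practice the option is set through the cluster spec YAML as enableEtcdTLS: true):

package example

import "k8s.io/kops/pkg/apis/kops"

// illustrative: the validation change below rejects a mix, so either every
// etcd cluster enables TLS or none do
var spec = kops.ClusterSpec{
	EtcdClusters: []*kops.EtcdClusterSpec{
		{Name: "main", EnableEtcdTLS: true},
		{Name: "events", EnableEtcdTLS: true},
	},
}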
Kubernetes Submit Queue 2017-08-06 14:02:06 -07:00 committed by GitHub
commit 5d5945cb00
46 changed files with 1017 additions and 657 deletions

View File

@ -19,32 +19,31 @@ package model
import (
"fmt"
"github.com/blang/semver"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/fi"
"github.com/blang/semver"
)
// NodeupModelContext is the context supplied to the nodeup tasks
type NodeupModelContext struct {
NodeupConfig *nodeup.Config
Cluster *kops.Cluster
InstanceGroup *kops.InstanceGroup
Architecture Architecture
Distribution distros.Distribution
IsMaster bool
Assets *fi.AssetStore
KeyStore fi.CAStore
SecretStore fi.SecretStore
Architecture Architecture
Assets *fi.AssetStore
Cluster *kops.Cluster
Distribution distros.Distribution
InstanceGroup *kops.InstanceGroup
IsMaster bool
KeyStore fi.CAStore
KubernetesVersion semver.Version
NodeupConfig *nodeup.Config
SecretStore fi.SecretStore
}
// SSLHostPaths returns the TLS paths for the distribution
func (c *NodeupModelContext) SSLHostPaths() []string {
paths := []string{"/etc/ssl", "/etc/pki/tls", "/etc/pki/ca-trust"}
@ -52,12 +51,9 @@ func (c *NodeupModelContext) SSLHostPaths() []string {
case distros.DistributionCoreOS:
// Because /usr is read-only on CoreOS, we can't have any new directories; docker will try (and fail) to create them
// TODO: Just check if the directories exist?
paths = append(paths, "/usr/share/ca-certificates")
case distros.DistributionContainerOS:
paths = append(paths, "/usr/share/ca-certificates")
default:
paths = append(paths, "/usr/share/ssl", "/usr/ssl", "/usr/lib/ssl", "/usr/local/openssl", "/var/ssl", "/etc/openssl")
}
@ -65,6 +61,7 @@ func (c *NodeupModelContext) SSLHostPaths() []string {
return paths
}
// PathSrvKubernetes returns the path for the kubernetes service files
func (c *NodeupModelContext) PathSrvKubernetes() string {
switch c.Distribution {
case distros.DistributionContainerOS:
@ -74,6 +71,7 @@ func (c *NodeupModelContext) PathSrvKubernetes() string {
}
}
// PathSrvSshproxy returns the path for the SSH proxy
func (c *NodeupModelContext) PathSrvSshproxy() string {
switch c.Distribution {
case distros.DistributionContainerOS:
@ -83,6 +81,7 @@ func (c *NodeupModelContext) PathSrvSshproxy() string {
}
}
// CNIBinDir returns the path for the CNI binaries
func (c *NodeupModelContext) CNIBinDir() string {
switch c.Distribution {
case distros.DistributionContainerOS:
@ -92,10 +91,12 @@ func (c *NodeupModelContext) CNIBinDir() string {
}
}
// CNIConfDir returns the CNI config directory
func (c *NodeupModelContext) CNIConfDir() string {
return "/etc/cni/net.d/"
}
// buildPKIKubeconfig generates a kubeconfig
func (c *NodeupModelContext) buildPKIKubeconfig(id string) (string, error) {
caCertificate, err := c.KeyStore.Cert(fi.CertificateId_CA)
if err != nil {
@ -172,10 +173,24 @@ func (c *NodeupModelContext) buildPKIKubeconfig(id string) (string, error) {
return string(yaml), nil
}
// IsKubernetesGTE checks if the Kubernetes version is greater than or equal to the given version
func (c *NodeupModelContext) IsKubernetesGTE(version string) bool {
return util.IsKubernetesGTE(version, c.KubernetesVersion)
}
// UseEtcdTLS checks if the etcd clusters have TLS enabled
func (c *NodeupModelContext) UseEtcdTLS() bool {
// @note: because we enforce that 'both' have to be enabled for TLS we only need to check one here.
for _, x := range c.Cluster.Spec.EtcdClusters {
if x.EnableEtcdTLS {
return true
}
}
return false
}
// UsesCNI checks if the cluster has CNI configured
func (c *NodeupModelContext) UsesCNI() bool {
networking := c.Cluster.Spec.Networking
if networking == nil || networking.Classic != nil {

View File

@ -27,3 +27,8 @@ func s(v string) *string {
func i64(v int64) *int64 {
return fi.Int64(v)
}
// b returns a pointer to a boolean
func b(v bool) *bool {
return fi.Bool(v)
}

View File

@ -17,10 +17,11 @@ limitations under the License.
package model
import (
"github.com/golang/glog"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"github.com/golang/glog"
)
// EtcdBuilder installs etcd
@ -30,6 +31,7 @@ type EtcdBuilder struct {
var _ fi.ModelBuilder = &EtcdBuilder{}
// Build is responsible for creating the etcd user
func (b *EtcdBuilder) Build(c *fi.ModelBuilderContext) error {
if !b.IsMaster {
return nil

View File

@ -41,6 +41,7 @@ type KubeAPIServerBuilder struct {
var _ fi.ModelBuilder = &KubeAPIServerBuilder{}
// Build is responsible for generating the kubernetes api manifest
func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
if !b.IsMaster {
return nil
@ -127,22 +128,29 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte
Type: nodetasks.FileType_File,
}
c.AddTask(t)
return nil
} else {
return fmt.Errorf("Unrecognized authentication config %v", b.Cluster.Spec.Authentication)
}
return fmt.Errorf("Unrecognized authentication config %v", b.Cluster.Spec.Authentication)
}
func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
kubeAPIServer := b.Cluster.Spec.KubeAPIServer
kubeAPIServer.ClientCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
kubeAPIServer.TLSCertFile = filepath.Join(b.PathSrvKubernetes(), "server.cert")
kubeAPIServer.TLSPrivateKeyFile = filepath.Join(b.PathSrvKubernetes(), "server.key")
kubeAPIServer.BasicAuthFile = filepath.Join(b.PathSrvKubernetes(), "basic_auth.csv")
kubeAPIServer.TokenAuthFile = filepath.Join(b.PathSrvKubernetes(), "known_tokens.csv")
if b.UseEtcdTLS() {
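// @note: etcd keeps the legacy client ports (4001 for main, 4002 for events); only the scheme changes to https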
kubeAPIServer.EtcdCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
kubeAPIServer.EtcdCertFile = filepath.Join(b.PathSrvKubernetes(), "etcd-client.pem")
kubeAPIServer.EtcdKeyFile = filepath.Join(b.PathSrvKubernetes(), "etcd-client-key.pem")
kubeAPIServer.EtcdServers = []string{"https://127.0.0.1:4001"}
kubeAPIServer.EtcdServersOverrides = []string{"/events#https://127.0.0.1:4002"}
}
flags, err := flagbuilder.BuildFlags(b.Cluster.Spec.KubeAPIServer)
if err != nil {
return nil, fmt.Errorf("error building kube-apiserver flags: %v", err)

View File

@ -18,10 +18,12 @@ package model
import (
"fmt"
"github.com/golang/glog"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"github.com/golang/glog"
)
// KubectlBuilder installs kubectl
@ -31,13 +33,12 @@ type KubectlBuilder struct {
var _ fi.ModelBuilder = &KubectlBuilder{}
// Build is responsible for managing kubectl on the masters
func (b *KubectlBuilder) Build(c *fi.ModelBuilderContext) error {
if !b.IsMaster {
// We don't have the configuration on the machines, so it only works on the master anyway
return nil
}
// Add kubectl file as an asset
{
// TODO: Extract to common function?
assetName := "kubectl"
@ -59,7 +60,6 @@ func (b *KubectlBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(t)
}
// Add kubeconfig
{
kubeconfig, err := b.buildPKIKubeconfig("kubecfg")
if err != nil {

View File

@ -18,8 +18,7 @@ package model
import (
"fmt"
"github.com/blang/semver"
"github.com/golang/glog"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/apis/kops"
@ -29,6 +28,9 @@ import (
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kops/upup/pkg/fi/utils"
"github.com/blang/semver"
"github.com/golang/glog"
)
// KubeletBuilder installs kubelet
@ -38,6 +40,7 @@ type KubeletBuilder struct {
var _ fi.ModelBuilder = &KubeletBuilder{}
// Build is responsible for generating the kubelet config
func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
kubeletConfig, err := b.buildKubeletConfig()
if err != nil {
@ -52,12 +55,11 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(t)
}
// Add kubelet file itself (as an asset)
{
// TODO: Extract to common function?
// @TODO Extract to common function?
assetName := "kubelet"
assetPath := ""
// TODO make Find call to an interface, we cannot mock out this function because it finds a file on disk
// @TODO make Find call to an interface, we cannot mock out this function because it finds a file on disk
asset, err := b.Assets.Find(assetName, assetPath)
if err != nil {
return fmt.Errorf("error trying to locate asset %q: %v", assetName, err)
@ -75,10 +77,8 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(t)
}
// Add kubeconfig
{
// TODO: Change kubeconfig to be https
// @TODO Change kubeconfig to be https
kubeconfig, err := b.buildPKIKubeconfig("kubelet")
if err != nil {
return err
@ -109,6 +109,7 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
return nil
}
// kubeletPath returns the path of the kubelet based on distro
func (b *KubeletBuilder) kubeletPath() string {
kubeletCommand := "/usr/local/bin/kubelet"
if b.Distribution == distros.DistributionCoreOS {
@ -120,6 +121,7 @@ func (b *KubeletBuilder) kubeletPath() string {
return kubeletCommand
}
// buildSystemdEnvironmentFile renders the environment file for the kubelet
func (b *KubeletBuilder) buildSystemdEnvironmentFile(kubeletConfig *kops.KubeletConfigSpec) (*nodetasks.File, error) {
// TODO: Dump the separate file for flags - just complexity!
flags, err := flagbuilder.BuildFlags(kubeletConfig)
@ -157,6 +159,7 @@ func (b *KubeletBuilder) buildSystemdEnvironmentFile(kubeletConfig *kops.Kubelet
return t, nil
}
// buildSystemdService is responsible for generating the kubelet systemd unit
func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
kubeletCommand := b.kubeletPath()
@ -176,8 +179,8 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
manifest.Set("Service", "RestartSec", "2s")
manifest.Set("Service", "StartLimitInterval", "0")
manifest.Set("Service", "KillMode", "process")
manifestString := manifest.Render()
glog.V(8).Infof("Built service manifest %q\n%s", "kubelet", manifestString)
service := &nodetasks.Service{
@ -185,8 +188,10 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
Definition: s(manifestString),
}
// To avoid going in to backoff, we wait for protokube to start us
service.Running = fi.Bool(false)
// @check: if we are a master, leave kubelet stopped so that protokube can start it
if b.IsMaster {
service.Running = fi.Bool(false)
}
service.InitDefaults()
@ -201,9 +206,9 @@ func (b *KubeletBuilder) buildKubeletConfig() (*kops.KubeletConfigSpec, error) {
if err != nil {
return nil, fmt.Errorf("error building kubelet config: %v", err)
}
// TODO: Memoize if we reuse this
return kubeletConfigSpec, nil
}
func (b *KubeletBuilder) addStaticUtils(c *fi.ModelBuilderContext) error {

View File

@ -20,6 +20,7 @@ import (
"bytes"
"fmt"
"os"
"path/filepath"
"strings"
kopsbase "k8s.io/kops"
@ -42,9 +43,18 @@ type ProtokubeBuilder struct {
var _ fi.ModelBuilder = &ProtokubeBuilder{}
func (b *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
if b.IsMaster {
kubeconfig, err := b.buildPKIKubeconfig("kops")
// Build is responsible for generating the options for protokube
func (t *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
useGossip := dns.IsGossipHostname(t.Cluster.Spec.MasterInternalName)
// skip if this is not a master and we are not using gossip (https://github.com/kubernetes/kops/pull/3091)
if !t.IsMaster && !useGossip {
glog.V(2).Infof("skipping the provisioning of protokube on the nodes")
return nil
}
if t.IsMaster {
kubeconfig, err := t.buildPKIKubeconfig("kops")
if err != nil {
return err
}
@ -55,10 +65,23 @@ func (b *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
Type: nodetasks.FileType_File,
Mode: s("0400"),
})
// retrieve the etcd peer certificates and private keys from the keystore
if t.UseEtcdTLS() {
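// @note: the files land under PathSrvKubernetes() (typically /srv/kubernetes) as <name>.pem and <name>-key.pem, the same paths wired into ProtokubeFlags below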
for _, x := range []string{"etcd", "etcd-client"} {
if err := t.buildCertificateTask(c, x, fmt.Sprintf("%s.pem", x)); err != nil {
return err
}
}
for _, x := range []string{"etcd", "etcd-client"} {
if err := t.buildPrivateTask(c, x, fmt.Sprintf("%s-key.pem", x)); err != nil {
return err
}
}
}
}
// TODO: Should we run protokube on the nodes?
service, err := b.buildSystemdService()
service, err := t.buildSystemdService()
if err != nil {
return err
}
@ -67,29 +90,28 @@ func (b *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
return nil
}
func (b *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
k8sVersion, err := util.ParseKubernetesVersion(b.Cluster.Spec.KubernetesVersion)
// buildSystemdService generates the manifest for the protokube service
func (t *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
k8sVersion, err := util.ParseKubernetesVersion(t.Cluster.Spec.KubernetesVersion)
if err != nil || k8sVersion == nil {
return nil, fmt.Errorf("unable to parse KubernetesVersion %q", b.Cluster.Spec.KubernetesVersion)
return nil, fmt.Errorf("unable to parse KubernetesVersion %q", t.Cluster.Spec.KubernetesVersion)
}
protokubeFlags := b.ProtokubeFlags(*k8sVersion)
protokubeFlags := t.ProtokubeFlags(*k8sVersion)
protokubeFlagsArgs, err := flagbuilder.BuildFlags(protokubeFlags)
if err != nil {
return nil, err
}
dockerArgs := []string{
"/usr/bin/docker",
"run",
"/usr/bin/docker", "run",
"-v", "/:/rootfs/",
"-v", "/var/run/dbus:/var/run/dbus",
"-v", "/run/systemd:/run/systemd",
"--net=host",
"--privileged",
"--net=host", "--privileged",
"--env", "KUBECONFIG=/rootfs/var/lib/kops/kubeconfig",
b.ProtokubeEnvironmentVariables(),
b.ProtokubeImageName(),
t.ProtokubeEnvironmentVariables(),
t.ProtokubeImageName(),
"/usr/bin/protokube",
}
protokubeCommand := strings.Join(dockerArgs, " ") + " " + protokubeFlagsArgs
@ -97,14 +119,11 @@ func (b *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
manifest := &systemd.Manifest{}
manifest.Set("Unit", "Description", "Kubernetes Protokube Service")
manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")
//manifest.Set("Service", "EnvironmentFile", "/etc/sysconfig/protokube")
manifest.Set("Service", "ExecStartPre", b.ProtokubeImagePullCommand())
manifest.Set("Service", "ExecStartPre", t.ProtokubeImagePullCommand())
manifest.Set("Service", "ExecStart", protokubeCommand)
manifest.Set("Service", "Restart", "always")
manifest.Set("Service", "RestartSec", "2s")
manifest.Set("Service", "StartLimitInterval", "0")
manifest.Set("Install", "WantedBy", "multi-user.target")
manifestString := manifest.Render()
@ -147,51 +166,56 @@ func (t *ProtokubeBuilder) ProtokubeImagePullCommand() string {
// We preloaded the image; return a dummy value
return "/bin/true"
}
return "/usr/bin/docker pull " + t.NodeupConfig.ProtokubeImage.Source
}
// ProtokubeFlags are the flags for protokube
type ProtokubeFlags struct {
Master *bool `json:"master,omitempty" flag:"master"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`
InitializeRBAC *bool `json:"initializeRBAC,omitempty" flag:"initialize-rbac"`
DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`
Zone []string `json:"zone,omitempty" flag:"zone"`
Channels []string `json:"channels,omitempty" flag:"channels"`
DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
ApplyTaints *bool `json:"applyTaints,omitempty" flag:"apply-taints"`
// ClusterId flag is required only for vSphere cloud type, to pass cluster id information to protokube. AWS and GCE workflows ignore this flag.
ClusterId *string `json:"cluster-id,omitempty" flag:"cluster-id"`
DNSServer *string `json:"dns-server,omitempty" flag:"dns-server"`
ApplyTaints *bool `json:"applyTaints,omitempty" flag:"apply-taints"`
Channels []string `json:"channels,omitempty" flag:"channels"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
// ClusterID flag is required only for vSphere cloud type, to pass cluster id information to protokube. AWS and GCE workflows ignore this flag.
ClusterID *string `json:"cluster-id,omitempty" flag:"cluster-id"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`
DNSServer *string `json:"dns-server,omitempty" flag:"dns-server"`
InitializeRBAC *bool `json:"initializeRBAC,omitempty" flag:"initialize-rbac"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`
Master *bool `json:"master,omitempty" flag:"master"`
PeerTLSCaFile *string `json:"peer-ca,omitempty" flag:"peer-ca"`
PeerTLSCertFile *string `json:"peer-cert,omitempty" flag:"peer-cert"`
PeerTLSKeyFile *string `json:"peer-key,omitempty" flag:"peer-key"`
TLSCAFile *string `json:"tls-ca,omitempty" flag:"tls-ca"`
TLSCertFile *string `json:"tls-cert,omitempty" flag:"tls-cert"`
TLSKeyFile *string `json:"tls-key,omitempty" flag:"tls-key"`
Zone []string `json:"zone,omitempty" flag:"zone"`
}
// ProtokubeFlags returns the flags object for protokube
// ProtokubeFlags is responsible for building the command line flags for protokube
func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeFlags {
f := &ProtokubeFlags{}
master := t.IsMaster
f.Master = fi.Bool(master)
if master {
f.Channels = t.NodeupConfig.Channels
f := &ProtokubeFlags{
Channels: t.NodeupConfig.Channels,
Containerized: fi.Bool(true),
LogLevel: fi.Int32(4),
Master: b(t.IsMaster),
}
// initialize RBAC on Kubernetes >= 1.6
if k8sVersion.Major == 1 && k8sVersion.Minor >= 6 {
if master {
f.InitializeRBAC = fi.Bool(true)
}
f.InitializeRBAC = fi.Bool(true)
}
f.LogLevel = fi.Int32(4)
f.Containerized = fi.Bool(true)
// check if we are using TLS and add the options to protokube
if t.UseEtcdTLS() {
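// @note: the tls-* flags cover etcd client traffic and the peer-* flags cover peer traffic; both currently share the cluster CA (ca.crt) and the etcd server keypair (etcd.pem / etcd-key.pem)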
f.PeerTLSCaFile = s(filepath.Join(t.PathSrvKubernetes(), "ca.crt"))
f.PeerTLSCertFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd.pem"))
f.PeerTLSKeyFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-key.pem"))
f.TLSCAFile = s(filepath.Join(t.PathSrvKubernetes(), "ca.crt"))
f.TLSCertFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd.pem"))
f.TLSKeyFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-key.pem"))
}
zone := t.Cluster.Spec.DNSZone
if zone != "" {
@ -204,7 +228,7 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeF
}
} else {
glog.Warningf("DNSZone not specified; protokube won't be able to update DNS")
// TODO: Should we permit wildcard updates if zone is not specified?
// @TODO: Should we permit wildcard updates if zone is not specified?
//argv = append(argv, "--zone=*/*")
}
@ -212,7 +236,7 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeF
glog.Warningf("MasterInternalName %q implies gossip DNS", t.Cluster.Spec.MasterInternalName)
f.DNSProvider = fi.String("gossip")
/// TODO: This is hacky, but we want it so that we can have a different internal & external name
// @TODO: This is hacky, but we want it so that we can have a different internal & external name
internalSuffix := t.Cluster.Spec.MasterInternalName
internalSuffix = strings.TrimPrefix(internalSuffix, "api.")
f.DNSInternalSuffix = fi.String(internalSuffix)
@ -229,7 +253,7 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeF
f.DNSProvider = fi.String("google-clouddns")
case kops.CloudProviderVSphere:
f.DNSProvider = fi.String("coredns")
f.ClusterId = fi.String(t.Cluster.ObjectMeta.Name)
f.ClusterID = fi.String(t.Cluster.ObjectMeta.Name)
f.DNSServer = fi.String(*t.Cluster.Spec.CloudConfig.VSphereCoreDNSServer)
default:
glog.Warningf("Unknown cloudprovider %q; won't set DNS provider", t.Cluster.Spec.CloudProvider)
@ -248,6 +272,7 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeF
return f
}
// ProtokubeEnvironmentVariables generates the environment variables for docker
func (t *ProtokubeBuilder) ProtokubeEnvironmentVariables() string {
var buffer bytes.Buffer
@ -283,3 +308,47 @@ func (t *ProtokubeBuilder) ProtokubeEnvironmentVariables() string {
return buffer.String()
}
// buildCertificateTask is responsible for building a certificate task
func (t *ProtokubeBuilder) buildCertificateTask(c *fi.ModelBuilderContext, name, filename string) error {
cert, err := t.KeyStore.Cert(name)
if err != nil {
return err
}
serialized, err := cert.AsString()
if err != nil {
return err
}
c.AddTask(&nodetasks.File{
Path: filepath.Join(t.PathSrvKubernetes(), filename),
Contents: fi.NewStringResource(serialized),
Type: nodetasks.FileType_File,
Mode: s("0400"),
})
return nil
}
// buildPrivateTask is responsible for building a private key task
func (t *ProtokubeBuilder) buildPrivateTask(c *fi.ModelBuilderContext, name, filename string) error {
key, err := t.KeyStore.PrivateKey(name)
if err != nil {
return err
}
serialized, err := key.AsString()
if err != nil {
return err
}
c.AddTask(&nodetasks.File{
Path: filepath.Join(t.PathSrvKubernetes(), filename),
Contents: fi.NewStringResource(serialized),
Type: nodetasks.FileType_File,
Mode: s("0400"),
})
return nil
}

View File

@ -151,15 +151,6 @@ func (b *SecretBuilder) Build(c *fi.ModelBuilderContext) error {
}
csv := strings.Join(lines, "\n")
// TODO: If we want to use tokens with RBAC, we need to add the roles
// cluster/gce/gci/configure-helper.sh has this:
//replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters"
//replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
//replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
//replace_prefixed_line "${known_tokens_csv}" "${KUBELET_TOKEN}," "kubelet,uid:kubelet,system:nodes"
//replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
//replace_prefixed_line "${known_tokens_csv}" "${NODE_PROBLEM_DETECTOR_TOKEN}," "system:node-problem-detector,uid:node-problem-detector"
t := &nodetasks.File{
Path: filepath.Join(b.PathSrvKubernetes(), "known_tokens.csv"),
Contents: fi.NewStringResource(csv),

View File

@ -209,54 +209,79 @@ type LoadBalancerAccessSpec struct {
IdleTimeoutSeconds *int64 `json:"idleTimeoutSeconds,omitempty"`
}
// KubeDNSConfig defines the kube dns configuration
type KubeDNSConfig struct {
// Image is the name of the docker image to run
Image string `json:"image,omitempty"`
Replicas int `json:"replicas,omitempty"`
Domain string `json:"domain,omitempty"`
// Replicas is the number of pod replicas
Replicas int `json:"replicas,omitempty"`
// Domain is the dns domain
Domain string `json:"domain,omitempty"`
// ServerIP is the server IP address
ServerIP string `json:"serverIP,omitempty"`
}
// EtcdStorageType defines the etcd storage backend
type EtcdStorageType string
const (
// EtcdStorageTypeV2 is the old v2 storage
EtcdStorageTypeV2 EtcdStorageType = "etcd2"
// EtcdStorageTypeV3 is the new v3 storage
EtcdStorageTypeV3 EtcdStorageType = "etcd3"
)
// EtcdClusterSpec is the etcd cluster specification
type EtcdClusterSpec struct {
// Name is the name of the etcd cluster (main, events etc)
Name string `json:"name,omitempty"`
// EtcdMember stores the configurations for each member of the cluster (including the data volume)
// EnableEtcdTLS indicates the etcd service should use TLS between peers and clients
EnableEtcdTLS bool `json:"enableEtcdTLS,omitempty"`
// Members stores the configurations for each member of the cluster (including the data volume)
Members []*EtcdMemberSpec `json:"etcdMembers,omitempty"`
}
// EtcdMemberSpec is a specification for an etcd member
type EtcdMemberSpec struct {
// Name is the name of the member within the etcd cluster
Name string `json:"name,omitempty"`
Name string `json:"name,omitempty"`
// InstanceGroup is the instanceGroup this volume is associated with
InstanceGroup *string `json:"instanceGroup,omitempty"`
VolumeType *string `json:"volumeType,omitempty"`
VolumeSize *int32 `json:"volumeSize,omitempty"`
KmsKeyId *string `json:"kmsKeyId,omitempty"`
EncryptedVolume *bool `json:"encryptedVolume,omitempty"`
// VolumeType is the underlying cloud storage class
VolumeType *string `json:"volumeType,omitempty"`
// VolumeSize is the underlying cloud volume size
VolumeSize *int32 `json:"volumeSize,omitempty"`
// KmsKeyId is an AWS KMS key ID used to encrypt the volume
KmsKeyId *string `json:"kmsKeyId,omitempty"`
// EncryptedVolume indicates you want to encrypt the volume
EncryptedVolume *bool `json:"encryptedVolume,omitempty"`
}
// SubnetType describes the subnet types (public, private, utility)
type SubnetType string
const (
SubnetTypePublic SubnetType = "Public"
// SubnetTypePublic means the subnet is public
SubnetTypePublic SubnetType = "Public"
// SubnetTypePrivate means the subnet has no public address or is NATed
SubnetTypePrivate SubnetType = "Private"
// SubnetTypeUtility means the subnet is used for utility services, such as the bastion
SubnetTypeUtility SubnetType = "Utility"
)
// ClusterSubnetSpec defines a subnet
type ClusterSubnetSpec struct {
// Name is the name of the subnet
Name string `json:"name,omitempty"`
Zone string `json:"zone,omitempty"`
// CIDR is the network cidr of the subnet
CIDR string `json:"cidr,omitempty"`
// Zone is the zone the subnet resides in
Zone string `json:"zone,omitempty"`
// ProviderID is the cloud provider id for the objects associated with the zone (the subnet on AWS)
ProviderID string `json:"id,omitempty"`
// Egress defines the method of traffic egress for this subnet
Egress string `json:"egress,omitempty"`
// Type defines which of the internal subnet types (public, utility, private) this is
Type SubnetType `json:"type,omitempty"`
}

View File

@ -183,6 +183,12 @@ type KubeAPIServerConfig struct {
EtcdServers []string `json:"etcdServers,omitempty" flag:"etcd-servers"`
// EtcdServersOverrides is per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are http://ip:port, semicolon separated
EtcdServersOverrides []string `json:"etcdServersOverrides,omitempty" flag:"etcd-servers-overrides"`
// EtcdCAFile is the path to a CA certificate
EtcdCAFile string `json:"etcdCaFile,omitempty" flag:"etcd-cafile"`
// EtcdCertFile is the path to a certificate
EtcdCertFile string `json:"etcdCertFile,omitempty" flag:"etcd-certfile"`
// EtcdKeyFile is the path to a private key
EtcdKeyFile string `json:"etcdKeyFile,omitempty" flag:"etcd-keyfile"`
// TODO: Remove unused BasicAuthFile
BasicAuthFile string `json:"basicAuthFile,omitempty" flag:"basic-auth-file"`
// TODO: Remove unused ClientCAFile

View File

@ -18,8 +18,9 @@ package kops
import (
"fmt"
"k8s.io/kops/upup/pkg/fi/utils"
"strings"
"k8s.io/kops/upup/pkg/fi/utils"
)
// ParseInstanceGroupRole converts a string to an InstanceGroupRole

View File

@ -335,7 +335,8 @@ type KubeDNSConfig struct {
type EtcdClusterSpec struct {
// Name is the name of the etcd cluster (main, events etc)
Name string `json:"name,omitempty"`
// EnableEtcdTLS indicates the etcd service should use TLS between peers and clients
EnableEtcdTLS bool `json:"enableEtcdTLS,omitempty"`
// Members stores the configurations for each member of the cluster (including the data volume)
Members []*EtcdMemberSpec `json:"etcdMembers,omitempty"`
}

View File

@ -183,6 +183,12 @@ type KubeAPIServerConfig struct {
EtcdServers []string `json:"etcdServers,omitempty" flag:"etcd-servers"`
// EtcdServersOverrides is per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are http://ip:port, semicolon separated
EtcdServersOverrides []string `json:"etcdServersOverrides,omitempty" flag:"etcd-servers-overrides"`
// EtcdCAFile is the path to a CA certificate
EtcdCAFile string `json:"etcdCaFile,omitempty" flag:"etcd-cafile"`
// EtcdCertFile is the path to a certificate
EtcdCertFile string `json:"etcdCertFile,omitempty" flag:"etcd-certfile"`
// EtcdKeyFile is the path to a private key
EtcdKeyFile string `json:"etcdKeyFile,omitempty" flag:"etcd-keyfile"`
// TODO: Remove unused BasicAuthFile
BasicAuthFile string `json:"basicAuthFile,omitempty" flag:"basic-auth-file"`
// TODO: Remove unused ClientCAFile

View File

@ -948,6 +948,7 @@ func Convert_kops_DockerConfig_To_v1alpha1_DockerConfig(in *kops.DockerConfig, o
func autoConvert_v1alpha1_EtcdClusterSpec_To_kops_EtcdClusterSpec(in *EtcdClusterSpec, out *kops.EtcdClusterSpec, s conversion.Scope) error {
out.Name = in.Name
out.EnableEtcdTLS = in.EnableEtcdTLS
if in.Members != nil {
in, out := &in.Members, &out.Members
*out = make([]*kops.EtcdMemberSpec, len(*in))
@ -970,6 +971,7 @@ func Convert_v1alpha1_EtcdClusterSpec_To_kops_EtcdClusterSpec(in *EtcdClusterSpe
func autoConvert_kops_EtcdClusterSpec_To_v1alpha1_EtcdClusterSpec(in *kops.EtcdClusterSpec, out *EtcdClusterSpec, s conversion.Scope) error {
out.Name = in.Name
out.EnableEtcdTLS = in.EnableEtcdTLS
if in.Members != nil {
in, out := &in.Members, &out.Members
*out = make([]*EtcdMemberSpec, len(*in))
@ -1371,6 +1373,9 @@ func autoConvert_v1alpha1_KubeAPIServerConfig_To_kops_KubeAPIServerConfig(in *Ku
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.EtcdServers = in.EtcdServers
out.EtcdServersOverrides = in.EtcdServersOverrides
out.EtcdCAFile = in.EtcdCAFile
out.EtcdCertFile = in.EtcdCertFile
out.EtcdKeyFile = in.EtcdKeyFile
out.BasicAuthFile = in.BasicAuthFile
out.ClientCAFile = in.ClientCAFile
out.TLSCertFile = in.TLSCertFile
@ -1416,6 +1421,9 @@ func autoConvert_kops_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in *ko
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.EtcdServers = in.EtcdServers
out.EtcdServersOverrides = in.EtcdServersOverrides
out.EtcdCAFile = in.EtcdCAFile
out.EtcdCertFile = in.EtcdCertFile
out.EtcdKeyFile = in.EtcdKeyFile
out.BasicAuthFile = in.BasicAuthFile
out.ClientCAFile = in.ClientCAFile
out.TLSCertFile = in.TLSCertFile

View File

@ -253,7 +253,8 @@ type KubeDNSConfig struct {
type EtcdClusterSpec struct {
// Name is the name of the etcd cluster (main, events etc)
Name string `json:"name,omitempty"`
// EnableEtcdTLS indicates the etcd service should use TLS between peers and clients
EnableEtcdTLS bool `json:"enableEtcdTLS,omitempty"`
// Members stores the configurations for each member of the cluster (including the data volume)
Members []*EtcdMemberSpec `json:"etcdMembers,omitempty"`
}

View File

@ -183,6 +183,12 @@ type KubeAPIServerConfig struct {
EtcdServers []string `json:"etcdServers,omitempty" flag:"etcd-servers"`
// EtcdServersOverrides is per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are http://ip:port, semicolon separated
EtcdServersOverrides []string `json:"etcdServersOverrides,omitempty" flag:"etcd-servers-overrides"`
// EtcdCAFile is the path to a CA certificate
EtcdCAFile string `json:"etcdCaFile,omitempty" flag:"etcd-cafile"`
// EtcdCertFile is the path to a certificate
EtcdCertFile string `json:"etcdCertFile,omitempty" flag:"etcd-certfile"`
// EtcdKeyFile is the path to a private key
EtcdKeyFile string `json:"etcdKeyFile,omitempty" flag:"etcd-keyfile"`
// TODO: Remove unused BasicAuthFile
BasicAuthFile string `json:"basicAuthFile,omitempty" flag:"basic-auth-file"`
// TODO: Remove unused ClientCAFile
@ -272,6 +278,7 @@ type KubeControllerManagerConfig struct {
// before the terminated pod garbage collector starts deleting terminated pods.
// If <= 0, the terminated pod garbage collector is disabled.
TerminatedPodGCThreshold *int32 `json:"terminatedPodGCThreshold,omitempty" flag:"terminated-pod-gc-threshold"`
// UseServiceAccountCredentials controls whether we use individual service account credentials for each controller.
UseServiceAccountCredentials *bool `json:"useServiceAccountCredentials,omitempty" flag:"use-service-account-credentials"`
}

View File

@ -937,8 +937,8 @@ func Convert_v1alpha2_ClusterSubnetSpec_To_kops_ClusterSubnetSpec(in *ClusterSub
func autoConvert_kops_ClusterSubnetSpec_To_v1alpha2_ClusterSubnetSpec(in *kops.ClusterSubnetSpec, out *ClusterSubnetSpec, s conversion.Scope) error {
out.Name = in.Name
out.Zone = in.Zone
out.CIDR = in.CIDR
out.Zone = in.Zone
out.ProviderID = in.ProviderID
out.Egress = in.Egress
out.Type = SubnetType(in.Type)
@ -1036,6 +1036,7 @@ func Convert_kops_DockerConfig_To_v1alpha2_DockerConfig(in *kops.DockerConfig, o
func autoConvert_v1alpha2_EtcdClusterSpec_To_kops_EtcdClusterSpec(in *EtcdClusterSpec, out *kops.EtcdClusterSpec, s conversion.Scope) error {
out.Name = in.Name
out.EnableEtcdTLS = in.EnableEtcdTLS
if in.Members != nil {
in, out := &in.Members, &out.Members
*out = make([]*kops.EtcdMemberSpec, len(*in))
@ -1058,6 +1059,7 @@ func Convert_v1alpha2_EtcdClusterSpec_To_kops_EtcdClusterSpec(in *EtcdClusterSpe
func autoConvert_kops_EtcdClusterSpec_To_v1alpha2_EtcdClusterSpec(in *kops.EtcdClusterSpec, out *EtcdClusterSpec, s conversion.Scope) error {
out.Name = in.Name
out.EnableEtcdTLS = in.EnableEtcdTLS
if in.Members != nil {
in, out := &in.Members, &out.Members
*out = make([]*EtcdMemberSpec, len(*in))
@ -1479,6 +1481,9 @@ func autoConvert_v1alpha2_KubeAPIServerConfig_To_kops_KubeAPIServerConfig(in *Ku
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.EtcdServers = in.EtcdServers
out.EtcdServersOverrides = in.EtcdServersOverrides
out.EtcdCAFile = in.EtcdCAFile
out.EtcdCertFile = in.EtcdCertFile
out.EtcdKeyFile = in.EtcdKeyFile
out.BasicAuthFile = in.BasicAuthFile
out.ClientCAFile = in.ClientCAFile
out.TLSCertFile = in.TLSCertFile
@ -1524,6 +1529,9 @@ func autoConvert_kops_KubeAPIServerConfig_To_v1alpha2_KubeAPIServerConfig(in *ko
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.EtcdServers = in.EtcdServers
out.EtcdServersOverrides = in.EtcdServersOverrides
out.EtcdCAFile = in.EtcdCAFile
out.EtcdCertFile = in.EtcdCertFile
out.EtcdKeyFile = in.EtcdKeyFile
out.BasicAuthFile = in.BasicAuthFile
out.ClientCAFile = in.ClientCAFile
out.TLSCertFile = in.TLSCertFile

View File

@ -18,18 +18,20 @@ package validation
import (
"fmt"
"net"
"strings"
"github.com/blang/semver"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/upup/pkg/fi"
"net"
"strings"
)
// legacy contains validation functions that don't match the apimachinery style
// ValidateCluster is responsible for checking the validity of the Cluster spec
func ValidateCluster(c *kops.Cluster, strict bool) error {
specField := field.NewPath("Spec")
@ -371,6 +373,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) error {
if len(c.Spec.EtcdClusters) == 0 {
return field.Required(specField.Child("EtcdClusters"), "")
}
var usingTLS int
for _, etcd := range c.Spec.EtcdClusters {
if etcd.Name == "" {
return fmt.Errorf("EtcdCluster did not have name")
@ -382,6 +385,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) error {
// Not technically a requirement, but doesn't really make sense to allow
return fmt.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
}
if etcd.EnableEtcdTLS {
usingTLS++
}
for _, m := range etcd.Members {
if m.Name == "" {
return fmt.Errorf("EtcdMember did not have Name in cluster %q", etcd.Name)
@ -391,6 +397,10 @@ func ValidateCluster(c *kops.Cluster, strict bool) error {
}
}
}
// check that all etcd clusters are using TLS if any one has it enabled
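// e.g. with the usual {main, events} pair, usingTLS must be 0 or 2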
if usingTLS > 0 && usingTLS != len(c.Spec.EtcdClusters) {
return fmt.Errorf("Both etcd clusters must have TLS enabled or none at all")
}
}
if kubernetesRelease.GTE(semver.MustParse("1.4.0")) {
@ -399,8 +409,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) error {
}
}
errs := newValidateCluster(c)
if len(errs) != 0 {
if errs := newValidateCluster(c); len(errs) != 0 {
return errs[0]
}
@ -408,8 +417,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) error {
}
func DeepValidate(c *kops.Cluster, groups []*kops.InstanceGroup, strict bool) error {
err := ValidateCluster(c, strict)
if err != nil {
if err := ValidateCluster(c, strict); err != nil {
return err
}

View File

@ -18,12 +18,13 @@ package validation
import (
"fmt"
"net"
"strings"
"k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops"
"net"
"strings"
)
var validDockerConfigStorageValues = []string{"aufs", "btrfs", "devicemapper", "overlay", "overlay2", "zfs"}

View File

@ -271,6 +271,17 @@ func (m *KopsModelContext) UsePrivateDNS() bool {
return false
}
// UseEtcdTLS checks to see if etcd TLS is enabled
func (c *KopsModelContext) UseEtcdTLS() bool {
for _, x := range c.Cluster.Spec.EtcdClusters {
if x.EnableEtcdTLS {
return true
}
}
return false
}
// KubernetesVersion parses the semver version of kubernetes, from the cluster spec
func (c *KopsModelContext) KubernetesVersion() (semver.Version, error) {
// TODO: Remove copy-pasting c.f. https://github.com/kubernetes/kops/blob/master/pkg/model/components/context.go#L32

View File

@ -16,7 +16,9 @@ limitations under the License.
package model
import "k8s.io/kops/upup/pkg/fi"
import (
"k8s.io/kops/upup/pkg/fi"
)
// s is a helper that builds a *string from a string value
func s(v string) *string {

View File

@ -20,10 +20,11 @@ import (
"fmt"
"strconv"
"github.com/golang/glog"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"github.com/golang/glog"
)
type Protocol int
@ -134,9 +135,9 @@ func (b *FirewallModelBuilder) applyNodeToMasterAllowSpecificPorts(c *fi.ModelBu
if b.Cluster.Spec.Networking.Calico != nil {
// Calico needs to access etcd
// TODO: Remove, replace with etcd in calico manifest
// https://coreos.com/etcd/docs/latest/v2/configuration.html
glog.Warningf("Opening etcd port on masters for access from the nodes, for calico. This is unsafe in untrusted environments.")
tcpPorts = append(tcpPorts, 4001)
tcpPorts = append(tcpPorts, 179)
protocols = append(protocols, ProtocolIPIP)
}

View File

@ -18,14 +18,15 @@ package model
import (
"fmt"
"sort"
"strings"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
"sort"
"strings"
)
const (

View File

@ -17,6 +17,8 @@ limitations under the License.
package model
import (
"fmt"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/fitasks"
@ -30,65 +32,76 @@ type PKIModelBuilder struct {
var _ fi.ModelBuilder = &PKIModelBuilder{}
// Build is responsible for generating the pki assets for the cluster
func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
{
// Keypair used by the kubelet
t := &fitasks.Keypair{
Name: fi.String("kubelet"),
Lifecycle: b.Lifecycle,
Subject: "o=" + user.NodesGroup + ",cn=kubelet",
Type: "client",
Subject: "o=" + user.NodesGroup + ",cn=kubelet",
Type: "client",
}
c.AddTask(t)
}
{
// Secret used by the kubelet
// TODO: Can this be removed... at least from 1.6 on?
t := &fitasks.Secret{
Name: fi.String("kubelet"),
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
}
{
// Keypair used by the kube-scheduler
t := &fitasks.Keypair{
Name: fi.String("kube-scheduler"),
Lifecycle: b.Lifecycle,
Subject: "cn=" + user.KubeScheduler,
Type: "client",
Subject: "cn=" + user.KubeScheduler,
Type: "client",
}
c.AddTask(t)
}
{
// Secret used by the kube-scheduler
// TODO: Can this be removed... at least from 1.6 on?
t := &fitasks.Secret{
Name: fi.String("system:scheduler"),
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
}
{
// Keypair used by the kube-proxy
t := &fitasks.Keypair{
Name: fi.String("kube-proxy"),
Lifecycle: b.Lifecycle,
Subject: "cn=" + user.KubeProxy,
Type: "client",
Subject: "cn=" + user.KubeProxy,
Type: "client",
}
c.AddTask(t)
}
{
t := &fitasks.Keypair{
Name: fi.String("kube-controller-manager"),
Lifecycle: b.Lifecycle,
Subject: "cn=" + user.KubeControllerManager,
Type: "client",
}
c.AddTask(t)
}
// check if we need to generate certificates for the etcd peers from a different CA
// @question: i think we should use another KeyStore for this, perhaps registering an EtcdKeyStore, given
// that mutual TLS is used to verify between the peers; we don't want certificates for kubernetes able to act as a peer.
// For clients, assuming we are using etcd v3, we can switch on user authentication and map the common names for auth.
if b.UseEtcdTLS() {
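// the server certificate carries a wildcard SAN (*.internal.<clustername>) plus localhost and 127.0.0.1, so one keypair can be shared by every master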
alternativeNames := []string{fmt.Sprintf("*.internal.%s", b.ClusterName()), "localhost", "127.0.0.1"}
{
// @question: should wildcards be used here instead of generating per node? If we ever provide the
// ability to resize the master, this will become a blocker
c.AddTask(&fitasks.Keypair{
AlternateNames: alternativeNames,
Lifecycle: b.Lifecycle,
Name: fi.String("etcd"),
Subject: "cn=etcd",
Type: "server",
})
}
{
c.AddTask(&fitasks.Keypair{
Name: fi.String("etcd-client"),
Lifecycle: b.Lifecycle,
Subject: "cn=etcd-client",
Type: "client",
})
}
}
if b.KopsModelContext.Cluster.Spec.Networking.Kuberouter != nil {
// Keypair used by the kube-router
t := &fitasks.Keypair{
Name: fi.String("kube-router"),
Subject: "cn=" + "system:kube-router",
@ -98,64 +111,26 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
}
{
// Secret used by the kube-proxy
// TODO: Can this be removed... at least from 1.6 on?
t := &fitasks.Secret{
Name: fi.String("kube-proxy"),
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
}
{
// Keypair used by the kube-controller-manager
t := &fitasks.Keypair{
Name: fi.String("kube-controller-manager"),
Lifecycle: b.Lifecycle,
Subject: "cn=" + user.KubeControllerManager,
Type: "client",
}
c.AddTask(t)
}
{
// Secret used by the kube-controller-manager
// TODO: Can this be removed... at least from 1.6 on?
t := &fitasks.Secret{
Name: fi.String("system:controller_manager"),
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
}
{
// Keypair used for admin kubecfg
t := &fitasks.Keypair{
Name: fi.String("kubecfg"),
Lifecycle: b.Lifecycle,
Subject: "o=" + user.SystemPrivilegedGroup + ",cn=kubecfg",
Type: "client",
Subject: "o=" + user.SystemPrivilegedGroup + ",cn=kubecfg",
Type: "client",
}
c.AddTask(t)
}
{
// Keypair used by kops / protokube
t := &fitasks.Keypair{
Name: fi.String("kops"),
Lifecycle: b.Lifecycle,
Subject: "o=" + user.SystemPrivilegedGroup + ",cn=kops",
Type: "client",
Subject: "o=" + user.SystemPrivilegedGroup + ",cn=kops",
Type: "client",
}
c.AddTask(t)
}
{
// TLS certificate used for apiserver
// A few names used from inside the cluster, which all resolve the same based on our default suffixes
alternateNames := []string{
"kubernetes",
@ -181,9 +156,8 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
alternateNames = append(alternateNames, "127.0.0.1")
t := &fitasks.Keypair{
Name: fi.String("master"),
Lifecycle: b.Lifecycle,
Name: fi.String("master"),
Lifecycle: b.Lifecycle,
Subject: "cn=kubernetes-master",
Type: "server",
AlternateNames: alternateNames,
@ -191,53 +165,13 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(t)
}
{
// Secret used by logging (?)
// TODO: Can this be removed... at least from 1.6 on?
t := &fitasks.Secret{
Name: fi.String("system:logging"),
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
}
// @@ The following are deprecated for > 1.6 and should be dropped at the appropriate time
deprecated := []string{
"kubelet", "kube-proxy", "system:scheduler", "system:controller_manager",
"system:logging", "system:monitoring", "system:dns", "kube", "admin"}
{
// Secret used by monitoring (?)
// TODO: Can this be removed... at least from 1.6 on?
t := &fitasks.Secret{
Name: fi.String("system:monitoring"),
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
}
{
// Secret used by dns (?)
// TODO: Can this be removed... at least from 1.6 on?
t := &fitasks.Secret{
Name: fi.String("system:dns"),
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
}
{
// Secret used by kube (?)
// TODO: Can this be removed... at least from 1.6 on? Although one of kube/admin is the primary token auth
t := &fitasks.Secret{
Name: fi.String("kube"),
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
}
{
// Secret used by admin (?)
// TODO: Can this be removed... at least from 1.6 on? Although one of kube/admin is the primary token auth
t := &fitasks.Secret{
Name: fi.String("admin"),
Lifecycle: b.Lifecycle,
}
for _, x := range deprecated {
t := &fitasks.Secret{Name: fi.String(x), Lifecycle: b.Lifecycle}
c.AddTask(t)
}

View File

@ -20,95 +20,76 @@ import (
"bytes"
"flag"
"fmt"
"github.com/golang/glog"
"github.com/spf13/pflag"
"io"
"net"
"os"
"path"
"strings"
"k8s.io/kops/dns-controller/pkg/dns"
"k8s.io/kops/protokube/pkg/gossip"
gossipdns "k8s.io/kops/protokube/pkg/gossip/dns"
"k8s.io/kops/protokube/pkg/gossip/mesh"
"k8s.io/kops/protokube/pkg/protokube"
"k8s.io/kubernetes/federation/pkg/dnsprovider"
"net"
"os"
"path"
"strings"
// Load DNS plugins
_ "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/aws/route53"
k8scoredns "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
_ "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns"
"github.com/golang/glog"
"github.com/spf13/pflag"
)
var (
flags = pflag.NewFlagSet("", pflag.ExitOnError)
// value overwritten during build. This can be used to resolve issues.
// BuildVersion is overwritten during build. This can be used to resolve issues.
BuildVersion = "0.1"
)
func main() {
fmt.Printf("protokube version %s\n", BuildVersion)
err := run()
if err != nil {
if err := run(); err != nil {
glog.Errorf("Error: %v", err)
os.Exit(1)
}
os.Exit(0)
}
// run is responsible for running the protokube service controller
func run() error {
dnsProviderId := "aws-route53"
flags.StringVar(&dnsProviderId, "dns", dnsProviderId, "DNS provider we should use (aws-route53, google-clouddns, coredns)")
var zones []string
flags.StringSliceVarP(&zones, "zone", "z", []string{}, "Configure permitted zones and their mappings")
var applyTaints, initializeRBAC, containerized, master bool
var cloud, clusterID, dnsServer, dnsProviderID, dnsInternalSuffix, gossipSecret, gossipListen string
var flagChannels, tlsCert, tlsKey, tlsCA, peerCert, peerKey, peerCA, etcdImageSource string
master := false
flag.BoolVar(&master, "master", master, "Act as master")
applyTaints := false
flag.BoolVar(&applyTaints, "apply-taints", applyTaints, "Apply taints to nodes based on the role")
initializeRBAC := false
flag.BoolVar(&initializeRBAC, "initialize-rbac", initializeRBAC, "Set if we should initialize RBAC")
containerized := false
flag.BoolVar(&containerized, "containerized", containerized, "Set if we are running containerized.")
cloud := "aws"
flag.StringVar(&cloud, "cloud", cloud, "CloudProvider we are using (aws,gce)")
dnsInternalSuffix := ""
flag.StringVar(&dnsInternalSuffix, "dns-internal-suffix", dnsInternalSuffix, "DNS suffix for internal domain names")
clusterID := ""
flag.BoolVar(&initializeRBAC, "initialize-rbac", initializeRBAC, "Set if we should initialize RBAC")
flag.BoolVar(&master, "master", master, "Whether or not this node is a master")
flag.StringVar(&cloud, "cloud", "aws", "CloudProvider we are using (aws,gce)")
flag.StringVar(&clusterID, "cluster-id", clusterID, "Cluster ID")
dnsServer := ""
flag.StringVar(&dnsInternalSuffix, "dns-internal-suffix", dnsInternalSuffix, "DNS suffix for internal domain names")
flag.StringVar(&dnsServer, "dns-server", dnsServer, "DNS Server")
flagChannels := ""
flag.StringVar(&flagChannels, "channels", flagChannels, "channels to install")
gossipListen := "0.0.0.0:3999"
flag.StringVar(&gossipListen, "gossip-listen", gossipListen, "address:port on which to bind for gossip")
var gossipSecret string
flag.StringVar(&gossipListen, "gossip-listen", "0.0.0.0:3999", "address:port on which to bind for gossip")
flag.StringVar(&peerCA, "peer-ca", peerCA, "Path to a file containing the peer ca in PEM format")
flag.StringVar(&peerCert, "peer-cert", peerCert, "Path to a file containing the peer certificate")
flag.StringVar(&peerKey, "peer-key", peerKey, "Path to a file containing the private key for the peers")
flag.StringVar(&tlsCA, "tls-ca", tlsCA, "Path to a file containing the ca for client certificates")
flag.StringVar(&tlsCert, "tls-cert", tlsCert, "Path to a file containing the certificate for etcd server")
flag.StringVar(&tlsKey, "tls-key", tlsKey, "Path to a file containing the private key for etcd server")
flags.StringSliceVarP(&zones, "zone", "z", []string{}, "Configure permitted zones and their mappings")
flags.StringVar(&dnsProviderID, "dns", "aws-route53", "DNS provider we should use (aws-route53, google-clouddns, coredns)")
flags.StringVar(&etcdImageSource, "etcd-image-source", etcdImageSource, "Etcd Source Container Registry")
flags.StringVar(&gossipSecret, "gossip-secret", gossipSecret, "Secret to use to secure gossip")
// Trick to avoid 'logging before flag.Parse' warning
flag.CommandLine.Parse([]string{})
// optional flag to override the location of etcd. Utilized with cluster asset container registry.
var etcdImageSource string
flags.StringVar(&etcdImageSource, "etcd-image-source", etcdImageSource, "Etcd Source Container Registry")
flag.Set("logtostderr", "true")
flags.AddGoFlagSet(flag.CommandLine)
flags.Parse(os.Args)
var volumes protokube.Volumes
@ -137,8 +118,6 @@ func run() error {
volumes = gceVolumes
//gceProject = gceVolumes.Project()
if clusterID == "" {
clusterID = gceVolumes.ClusterID()
}
@ -166,9 +145,8 @@ func run() error {
if clusterID == "" {
if clusterID == "" {
return fmt.Errorf("cluster-id is required (cannot be determined from cloud)")
} else {
glog.Infof("Setting cluster-id from cloud: %s", clusterID)
}
glog.Infof("Setting cluster-id from cloud: %s", clusterID)
}
if internalIP == nil {
@ -187,23 +165,17 @@ func run() error {
dnsInternalSuffix = "." + dnsInternalSuffix
}
// Get internal IP from cloud, to avoid problems if we're in a container
// TODO: Just run with --net=host ??
//internalIP, err := findInternalIP()
//if err != nil {
// glog.Errorf("Error finding internal IP: %q", err)
// os.Exit(1)
//}
rootfs := "/"
if containerized {
rootfs = "/rootfs/"
}
protokube.RootFS = rootfs
protokube.Containerized = containerized
var dnsProvider protokube.DNSProvider
if dnsProviderId == "gossip" {
if dnsProviderID == "gossip" {
dnsTarget := &gossipdns.HostsFile{
Path: path.Join(rootfs, "etc/hosts"),
}
@ -263,7 +235,7 @@ func run() error {
var dnsController *dns.DNSController
{
var file io.Reader
if dnsProviderId == k8scoredns.ProviderName {
if dnsProviderID == k8scoredns.ProviderName {
var lines []string
lines = append(lines, "etcd-endpoints = "+dnsServer)
lines = append(lines, "zones = "+zones[0])
@ -271,12 +243,12 @@ func run() error {
file = bytes.NewReader([]byte(config))
}
dnsProvider, err := dnsprovider.GetDnsProvider(dnsProviderId, file)
dnsProvider, err := dnsprovider.GetDnsProvider(dnsProviderID, file)
if err != nil {
return fmt.Errorf("Error initializing DNS provider %q: %v", dnsProviderId, err)
return fmt.Errorf("Error initializing DNS provider %q: %v", dnsProviderID, err)
}
if dnsProvider == nil {
return fmt.Errorf("DNS provider %q could not be initialized", dnsProviderId)
return fmt.Errorf("DNS provider %q could not be initialized", dnsProviderID)
}
zoneRules, err := dns.ParseZoneRules(zones)
@ -303,7 +275,6 @@ func run() error {
DNSController: dnsController,
}
}
modelDir := "model/etcd"
var channels []string
@ -312,23 +283,24 @@ func run() error {
}
k := &protokube.KubeBoot{
Master: master,
ApplyTaints: applyTaints,
Channels: channels,
DNS: dnsProvider,
EtcdImageSource: etcdImageSource,
InitializeRBAC: initializeRBAC,
InternalDNSSuffix: dnsInternalSuffix,
InternalIP: internalIP,
//MasterID : fromVolume
//EtcdClusters : fromVolume
InitializeRBAC: initializeRBAC,
ModelDir: modelDir,
DNS: dnsProvider,
Channels: channels,
Kubernetes: protokube.NewKubernetesContext(),
EtcdImageSource: etcdImageSource,
Kubernetes: protokube.NewKubernetesContext(),
Master: master,
ModelDir: modelDir,
PeerCA: peerCA,
PeerCert: peerCert,
PeerKey: peerKey,
TLSCA: tlsCA,
TLSCert: tlsCert,
TLSKey: tlsKey,
}
k.Init(volumes)
if dnsProvider != nil {

View File

@ -18,42 +18,41 @@ package protokube
import (
"fmt"
"net"
"strings"
"sync"
"time"
"k8s.io/kops/protokube/pkg/gossip"
gossipaws "k8s.io/kops/protokube/pkg/gossip/aws"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/glog"
"k8s.io/kops/protokube/pkg/gossip"
gossipaws "k8s.io/kops/protokube/pkg/gossip/aws"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"net"
"strings"
"sync"
"time"
)
//const TagNameMasterId = "k8s.io/master/id"
//const DefaultAttachDevice = "/dev/xvdb"
var devices = []string{"/dev/xvdu", "/dev/xvdv", "/dev/xvdw", "/dev/xvdx", "/dev/xvdy", "/dev/xvdz"}
// AWSVolumes defines the aws volume implementation
type AWSVolumes struct {
ec2 *ec2.EC2
metadata *ec2metadata.EC2Metadata
mutex sync.Mutex
zone string
clusterTag string
deviceMap map[string]string
ec2 *ec2.EC2
instanceId string
internalIP net.IP
mutex sync.Mutex
deviceMap map[string]string
metadata *ec2metadata.EC2Metadata
zone string
}
var _ Volumes = &AWSVolumes{}
// NewAWSVolumes returns a new aws volume provider
func NewAWSVolumes() (*AWSVolumes, error) {
a := &AWSVolumes{
deviceMap: make(map[string]string),

View File

@ -18,12 +18,23 @@ package protokube
import (
"fmt"
"github.com/golang/glog"
"os"
"os/exec"
"strings"
"github.com/golang/glog"
)
// applyChannel is responsible for applying the channel manifests
func applyChannel(channel string) error {
// We don't embed the channels code because we expect this will eventually be part of kubectl
glog.Infof("checking channel: %q", channel)
out, err := execChannels("apply", "channel", channel, "--v=4", "--yes")
glog.V(4).Infof("apply channel output was: %v", out)
return err
}
func execChannels(args ...string) (string, error) {
kubectlPath := "channels" // Assume in PATH
cmd := exec.Command(kubectlPath, args...)
@ -41,12 +52,3 @@ func execChannels(args ...string) (string, error) {
return string(output), err
}
func ApplyChannel(channel string) error {
// We don't embed the channels code because we expect this will eventually be part of kubectl
glog.Infof("checking channel: %q", channel)
out, err := execChannels("apply", "channel", channel, "--v=4", "--yes")
glog.V(4).Infof("apply channel output was: %v", out)
return err
}

View File

@ -19,81 +19,109 @@ package protokube
import (
"bytes"
"fmt"
"github.com/golang/glog"
"io/ioutil"
"k8s.io/apimachinery/pkg/api/resource"
"os"
"path"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/golang/glog"
)
// EtcdClusterSpec is configuration for the etcd cluster
type EtcdClusterSpec struct {
// ClusterKey is the initial cluster key
ClusterKey string `json:"clusterKey,omitempty"`
NodeName string `json:"nodeName,omitempty"`
// NodeName is my nodename in the cluster
NodeName string `json:"nodeName,omitempty"`
// NodeNames is a collection of node members in the cluster
NodeNames []string `json:"nodeNames,omitempty"`
}
func (e *EtcdClusterSpec) String() string {
return DebugString(e)
}
// EtcdCluster is the configuration for the etcd cluster
type EtcdCluster struct {
PeerPort int
ClientPort int
LogFile string
DataDirName string
ClusterName string
// ClientPort is the incoming port for clients
ClientPort int
// ClusterName is the cluster name
ClusterName string
// ClusterToken is the cluster token
ClusterToken string
Me *EtcdNode
Nodes []*EtcdNode
PodName string
CPURequest resource.Quantity
// CPURequest is the pod CPU request
CPURequest resource.Quantity
// DataDirName is the path to the data directory
DataDirName string
// ImageSource is the docker image to use
ImageSource string
// LogFile is the location of the logfile
LogFile string
// Me represents myself
Me *EtcdNode
// Nodes is a list of nodes in the cluster
Nodes []*EtcdNode
// PeerPort is the port for peers to connect
PeerPort int
// PodName is the name given to the pod
PodName string
// ProxyMode indicates we are running in proxy mode
ProxyMode bool
// Spec is the specification found from the volumes
Spec *EtcdClusterSpec
// VolumeMountPath is the mount path
VolumeMountPath string
ImageSource string
}
func (e *EtcdCluster) String() string {
return DebugString(e)
// TLSCA is the path to a client ca for etcd clients
TLSCA string
// TLSCert is the path to a client certificate for etcd
TLSCert string
// TLSKey is the path to a client private key for etcd
TLSKey string
// PeerCA is the path to a peer ca for etcd
PeerCA string
// PeerCert is the path to a peer ca for etcd
PeerCert string
// PeerKey is the path to a peer ca for etcd
PeerKey string
}
// EtcdNode is a definition for the etcd node
type EtcdNode struct {
Name string
InternalName string
}
// EtcdController defines the etcd controller
type EtcdController struct {
	kubeBoot   *KubeBoot
	volume     *Volume
	volumeSpec *EtcdClusterSpec
	cluster    *EtcdCluster
}
// newEtcdController creates and returns a new etcd controller
func newEtcdController(kubeBoot *KubeBoot, v *Volume, spec *EtcdClusterSpec) (*EtcdController, error) {
k := &EtcdController{
kubeBoot: kubeBoot,
}
cluster := &EtcdCluster{}
cluster.Spec = spec
cluster.VolumeMountPath = v.Mountpoint
cluster.ClusterName = "etcd-" + spec.ClusterKey
cluster.DataDirName = "data-" + spec.ClusterKey
cluster.PodName = "etcd-server-" + spec.ClusterKey
cluster.CPURequest = resource.MustParse("100m")
cluster.ClientPort = 4001
cluster.PeerPort = 2380
cluster.ImageSource = kubeBoot.EtcdImageSource
cluster := &EtcdCluster{
// @TODO we need to deprecate this port and use 2379, but that would be a breaking change
ClientPort: 4001,
ClusterName: "etcd-" + spec.ClusterKey,
CPURequest: resource.MustParse("200m"),
DataDirName: "data-" + spec.ClusterKey,
ImageSource: kubeBoot.EtcdImageSource,
TLSCA: kubeBoot.TLSCA,
TLSCert: kubeBoot.TLSCert,
TLSKey: kubeBoot.TLSKey,
PeerCA: kubeBoot.PeerCA,
PeerCert: kubeBoot.PeerCert,
PeerKey: kubeBoot.PeerKey,
PeerPort: 2380,
PodName: "etcd-server-" + spec.ClusterKey,
Spec: spec,
VolumeMountPath: v.Mountpoint,
}
// We used to build this through text files ... it turns out to just be more complicated than code!
switch spec.ClusterKey {
@ -102,14 +130,11 @@ func newEtcdController(kubeBoot *KubeBoot, v *Volume, spec *EtcdClusterSpec) (*E
cluster.DataDirName = "data"
cluster.PodName = "etcd-server"
cluster.CPURequest = resource.MustParse("200m")
case "events":
cluster.ClientPort = 4002
cluster.PeerPort = 2381
default:
return nil, fmt.Errorf("unknown Etcd ClusterKey %q", spec.ClusterKey)
return nil, fmt.Errorf("unknown etcd cluster key %q", spec.ClusterKey)
}
k.cluster = cluster
@ -117,10 +142,10 @@ func newEtcdController(kubeBoot *KubeBoot, v *Volume, spec *EtcdClusterSpec) (*E
return k, nil
}
// RunSyncLoop is responsible for managing the etcd sync loop
func (k *EtcdController) RunSyncLoop() {
for {
if err := k.syncOnce(); err != nil {
glog.Warningf("error during attempt to bootstrap (will sleep and retry): %v", err)
}
@ -146,7 +171,7 @@ func (c *EtcdCluster) configure(k *KubeBoot) error {
c.PodName = c.ClusterName
}
err := touchFile(pathFor(c.LogFile))
if err != nil {
return fmt.Errorf("error touching log-file %q: %v", c.LogFile, err)
}
@ -165,12 +190,9 @@ func (c *EtcdCluster) configure(k *KubeBoot) error {
InternalName: fqdn,
}
nodes = append(nodes, node)
if nodeName == c.Spec.NodeName {
c.Me = node
if err = k.CreateInternalDNSNameRecord(fqdn); err != nil {
return fmt.Errorf("error mapping internal dns name for %q: %v", name, err)
}
}
@ -200,7 +222,7 @@ func (c *EtcdCluster) configure(k *KubeBoot) error {
writeManifest := true
{
// See if the manifest has changed
existingManifest, err := ioutil.ReadFile(pathFor(manifestTarget))
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("error reading manifest file %q: %v", manifestTarget, err)
@ -215,14 +237,14 @@ func (c *EtcdCluster) configure(k *KubeBoot) error {
createSymlink := true
{
// See if the symlink is correct
stat, err := os.Lstat(pathFor(manifestSource))
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("error reading manifest symlink %q: %v", manifestSource, err)
}
} else if (stat.Mode() & os.ModeSymlink) != 0 {
// It's a symlink, make sure the target matches
target, err := os.Readlink(pathFor(manifestSource))
if err != nil {
return fmt.Errorf("error reading manifest symlink %q: %v", manifestSource, err)
}
@ -238,23 +260,23 @@ func (c *EtcdCluster) configure(k *KubeBoot) error {
}
if createSymlink || writeManifest {
err = os.Remove(pathFor(manifestSource))
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("error removing etcd manifest symlink (for strict creation) %q: %v", manifestSource, err)
}
err = os.MkdirAll(pathFor(manifestTargetDir), 0755)
if err != nil {
return fmt.Errorf("error creating directories for etcd manifest %q: %v", manifestTargetDir, err)
}
err = ioutil.WriteFile(pathFor(manifestTarget), manifest, 0644)
if err != nil {
return fmt.Errorf("error writing etcd manifest %q: %v", manifestTarget, err)
}
// Note: no pathFor on the target, because it's a symlink and we want it to evaluate on the host
err = os.Symlink(manifestTarget, pathFor(manifestSource))
if err != nil {
return fmt.Errorf("error creating etcd manifest symlink %q -> %q: %v", manifestSource, manifestTarget, err)
}
@ -265,23 +287,20 @@ func (c *EtcdCluster) configure(k *KubeBoot) error {
return nil
}
func (e *EtcdClusterSpec) String() string {
return DebugString(e)
}
// isTLS indicates the etcd cluster should be configured to use tls
func (c *EtcdCluster) isTLS() bool {
return notEmpty(c.TLSCert) && notEmpty(c.TLSKey)
}
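Note that isTLS keys off the client certificate pair only; the peer material is checked field by field when the environment variables are built. A small illustrative sketch, using the same paths as the tls integration fixture further down:

// a cluster with a client certificate pair is treated as TLS-enabled: the manifest
// builder switches the liveness probe to a TCP socket and the etcd URLs to https
c := &EtcdCluster{TLSCert: "/srv/kubernetes/etcd.pem", TLSKey: "/srv/kubernetes/etcd-key.pem"}
fmt.Println(c.isTLS()) // true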
// String returns the debug string
func (c *EtcdCluster) String() string {
return DebugString(c)
}
func (e *EtcdNode) String() string {
return DebugString(e)
}

View File

@ -18,9 +18,11 @@ package protokube
import (
	"fmt"
	"path/filepath"
	"strings"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/api/v1"
)
// BuildEtcdManifest creates the pod spec, based on the etcd cluster
@ -30,70 +32,51 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
pod.Kind = "Pod"
pod.Name = c.PodName
pod.Namespace = "kube-system"
pod.Labels = map[string]string{"k8s-app": c.PodName}
pod.Labels = map[string]string{
"k8s-app": c.PodName,
}
etcdImage := "/etcd:2.2.1"
etcdRegistry := "gcr.io/google_containers"
// TODO another hardcoded version
image := "/etcd:2.2.1"
imageRegistry := "gcr.io/google_containers"
// Test to determine if the container registry has been passed in as a flag.
// If so use the provider registry location.
// @check if the container is being overloaded via flags
if c.ImageSource == "" {
image = imageRegistry + image
etcdImage = etcdRegistry + etcdImage
} else {
image = strings.TrimSuffix(c.ImageSource, "/") + image
etcdImage = strings.TrimSuffix(c.ImageSource, "/") + etcdImage
}
pod.Spec.HostNetwork = true
{
container := v1.Container{
Name: "etcd-container",
Image: etcdImage,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: c.CPURequest,
},
},
Command: []string{"/bin/sh", "-c", "/usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log"},
}
// build the environment variables for the etcd service
container.Env = buildEtcdEnvironmentOptions(c)
container.LivenessProbe = &v1.Probe{
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
}
container.LivenessProbe.HTTPGet = &v1.HTTPGetAction{
Host: "127.0.0.1",
Port: intstr.FromInt(c.ClientPort),
Path: "/health",
// ensure we use the correct probe scheme; with tls enabled a plain http check will not work, so fall back to a tcp socket probe
if c.isTLS() {
container.LivenessProbe.TCPSocket = &v1.TCPSocketAction{
Host: "127.0.0.1",
Port: intstr.FromInt(c.ClientPort),
}
} else {
container.LivenessProbe.HTTPGet = &v1.HTTPGetAction{
Host: "127.0.0.1",
Port: intstr.FromInt(c.ClientPort),
Path: "/health",
Scheme: v1.URISchemeHTTP,
}
}
container.Ports = append(container.Ports, v1.ContainerPort{
Name: "serverport",
ContainerPort: int32(c.PeerPort),
@ -104,12 +87,17 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
ContainerPort: int32(c.ClientPort),
HostPort: int32(c.ClientPort),
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: "varetcdata",
MountPath: "/var/etcd/" + c.DataDirName,
ReadOnly: false,
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: "varlogetcd",
MountPath: "/var/log/etcd.log",
ReadOnly: false,
})
// add the host path volumes to the pod spec
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "varetcdata",
VolumeSource: v1.VolumeSource{
@ -118,12 +106,6 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
},
},
})
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "varlogetcd",
VolumeSource: v1.VolumeSource{
@ -133,8 +115,108 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
},
})
// @check if tls is enabled and mount the directory. It might be worth considering
// whether we should use our own directory in /srv, i.e. /srv/etcd, rather than the default /srv/kubernetes
if c.isTLS() {
for _, dirname := range buildCertificateDirectories(c) {
normalized := strings.Replace(dirname, "/", "", -1)
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: normalized,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: dirname,
},
},
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: normalized,
MountPath: dirname,
ReadOnly: true,
})
}
}
pod.Spec.Containers = append(pod.Spec.Containers, container)
}
return pod
}
// buildEtcdEnvironmentOptions is responsible for building the environment variables for etcd
// @question should we perhaps make this version specific in prep for v3 support?
func buildEtcdEnvironmentOptions(c *EtcdCluster) []v1.EnvVar {
var options []v1.EnvVar
// @check if we are using TLS
scheme := "http"
if c.isTLS() {
scheme = "https"
}
// add the default setting for masters - http or https
options = append(options, []v1.EnvVar{
{Name: "ETCD_NAME", Value: c.Me.Name},
{Name: "ETCD_DATA_DIR", Value: "/var/etcd/" + c.DataDirName},
{Name: "ETCD_LISTEN_PEER_URLS", Value: fmt.Sprintf("%s://0.0.0.0:%d", scheme, c.PeerPort)},
{Name: "ETCD_LISTEN_CLIENT_URLS", Value: fmt.Sprintf("%s://0.0.0.0:%d", scheme, c.ClientPort)},
{Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: fmt.Sprintf("%s://%s:%d", scheme, c.Me.InternalName, c.ClientPort)},
{Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: fmt.Sprintf("%s://%s:%d", scheme, c.Me.InternalName, c.PeerPort)},
{Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"},
{Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: c.ClusterToken}}...)
// @check if we are using peer certificates
if notEmpty(c.PeerCA) {
options = append(options, []v1.EnvVar{
{Name: "ETCD_PEER_TRUSTED_CA_FILE", Value: c.PeerCA}}...)
}
if notEmpty(c.PeerCert) {
options = append(options, v1.EnvVar{Name: "ETCD_PEER_CERT_FILE", Value: c.PeerCert})
}
if notEmpty(c.PeerKey) {
options = append(options, v1.EnvVar{Name: "ETCD_PEER_KEY_FILE", Value: c.PeerKey})
}
if notEmpty(c.TLSCA) {
options = append(options, v1.EnvVar{Name: "ETCD_TRUSTED_CA_FILE", Value: c.TLSCA})
}
if notEmpty(c.TLSCert) {
options = append(options, v1.EnvVar{Name: "ETCD_CERT_FILE", Value: c.TLSCert})
}
if notEmpty(c.TLSKey) {
options = append(options, v1.EnvVar{Name: "ETCD_KEY_FILE", Value: c.TLSKey})
}
// @step: generate the initial cluster
var hosts []string
for _, node := range c.Nodes {
hosts = append(hosts, node.Name+"="+fmt.Sprintf("%s://%s:%d", scheme, node.InternalName, c.PeerPort))
}
options = append(options, v1.EnvVar{Name: "ETCD_INITIAL_CLUSTER", Value: strings.Join(hosts, ",")})
return options
}
// buildCertificateDirectories generates a list of the base directories in which the certificates are located,
// so we can map them in as volumes. They will probably all be placed in /srv/kubernetes, but we keep it
// generic.
func buildCertificateDirectories(c *EtcdCluster) []string {
	tracked := make(map[string]bool)

	for _, x := range []string{c.TLSCA, c.TLSCert, c.TLSKey, c.PeerCA, c.PeerCert, c.PeerKey} {
		if x == "" || tracked[filepath.Dir(x)] {
			continue
		}
		tracked[filepath.Dir(x)] = true
	}
var list []string
for k := range tracked {
list = append(list, k)
}
return list
}
// notEmpty is a convenience helper for checking that a string is not empty
func notEmpty(v string) bool {
return v != ""
}
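As a quick sanity check of the dedup logic above, a hedged example using the same paths as the tls integration fixture below; because every file lives under /srv/kubernetes, a single directory, and hence a single host-path volume, is produced:

// all certificate material under one directory collapses to one volume
c := &EtcdCluster{
	TLSCA:   "/srv/kubernetes/ca.crt",
	TLSCert: "/srv/kubernetes/etcd.pem",
	TLSKey:  "/srv/kubernetes/etcd-key.pem",
}
fmt.Println(buildCertificateDirectories(c)) // [/srv/kubernetes]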

View File

@ -0,0 +1,45 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protokube
import (
"fmt"
"os"
)
// touchFile does what it says on the tin, it touches a file
func touchFile(p string) error {
_, err := os.Lstat(p)
if err == nil {
return nil
}
if !os.IsNotExist(err) {
return fmt.Errorf("error getting state of file %q: %v", p, err)
}
f, err := os.Create(p)
if err != nil {
return fmt.Errorf("error touching file %q: %v", p, err)
}
if err = f.Close(); err != nil {
return fmt.Errorf("error closing touched file %q: %v", p, err)
}
return nil
}
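touchFile is deliberately idempotent: an existing path is left untouched, which matters because (*EtcdCluster).configure, earlier in this diff, calls it on every sync pass before the etcd log file is bind-mounted into the container:

// safe to call repeatedly; mirrors the call in (*EtcdCluster).configure
if err := touchFile(pathFor("/var/log/etcd.log")); err != nil {
	return fmt.Errorf("error touching log-file: %v", err)
}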

View File

@ -25,59 +25,61 @@ import (
"github.com/golang/glog"
)
var (
	// Containerized indicates we are running in a container
	Containerized = false
	// RootFS is the root fs path
	RootFS = "/"
)

// KubeBoot is the options for the protokube service
type KubeBoot struct {
	// Channels is a list of channels to apply
	Channels []string
	// InitializeRBAC should be set to true if we should create the core RBAC roles
	InitializeRBAC bool
	// InternalDNSSuffix is the dns zone we are living in
	InternalDNSSuffix string
	// InternalIP is the internal ip address of the node
	InternalIP net.IP
	// ApplyTaints controls whether we set taints based on the master label;
	// this should not be needed in k8s 1.6, because kubelet has the --taint flag
	ApplyTaints bool
	// DNS is the dns provider
	DNS DNSProvider
	// ModelDir is the model directory
	ModelDir string
	// EtcdImageSource is the etcd container registry location
	EtcdImageSource string
	// TLSCA is the path to a client ca for etcd
	TLSCA string
	// TLSCert is the path to a tls certificate for etcd
	TLSCert string
	// TLSKey is the path to a tls private key for etcd
	TLSKey string
	// PeerCA is the path to a peer ca for etcd
	PeerCA string
	// PeerCert is the path to a peer certificate for etcd
	PeerCert string
	// PeerKey is the path to a peer private key for etcd
	PeerKey string
	// Kubernetes holds the context methods for kubernetes
	Kubernetes *KubernetesContext
	// Master indicates we are a master node
	Master bool

	volumeMounter   *VolumeMountController
	etcdControllers map[string]*EtcdController
}
// Init is responsible for initializing the controllers
func (k *KubeBoot) Init(volumesProvider Volumes) {
k.volumeMounter = newVolumeMountController(volumesProvider)
k.etcdControllers = make(map[string]*EtcdController)
}
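Init must run before the sync loop so that the volume mounter and the controller map exist. A hedged sketch of how the protokube entrypoint is expected to wire this together; the flag plumbing is elided and the volume provider shown is just one possibility:

// minimal boot sequence, assuming flags have already been parsed into these values
boot := &KubeBoot{
	Master:     true,
	Channels:   channels, // channel locations parsed from flags
	Kubernetes: NewKubernetesContext(),
}
boot.Init(volumes) // volumes implements Volumes, e.g. *AWSVolumes
boot.RunSyncLoop()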
// RunSyncLoop is responsible for provisioning the cluster
func (k *KubeBoot) RunSyncLoop() {
for {
if err := k.syncOnce(); err != nil {
glog.Warningf("error during attempt to bootstrap (will sleep and retry): %v", err)
}
@ -87,19 +89,19 @@ func (k *KubeBoot) RunSyncLoop() {
func (k *KubeBoot) syncOnce() error {
if k.Master {
// attempt to mount the volumes
volumes, err := k.volumeMounter.mountMasterVolumes()
if err != nil {
return err
}
for _, v := range volumes {
for _, etcdSpec := range v.Info.EtcdClusters {
key := etcdSpec.ClusterKey + "::" + etcdSpec.NodeName
etcdController := k.etcdControllers[key]
if etcdController == nil {
glog.Infof("Found etcd cluster spec on volume %q: %v", v.ID, etcdClusterSpec)
etcdController, err := newEtcdController(k, v, etcdClusterSpec)
glog.Infof("Found etcd cluster spec on volume %q: %v", v.ID, etcdSpec)
etcdController, err := newEtcdController(k, v, etcdSpec)
if err != nil {
glog.Warningf("error building etcd controller: %v", err)
} else {
@ -109,30 +111,20 @@ func (k *KubeBoot) syncOnce() error {
}
}
}
// TODO: Should we set up symlinks here?
} else {
glog.V(4).Infof("Not in role master; won't scan for volumes")
}
if k.Master && k.ApplyTaints {
if err := applyMasterTaints(k.Kubernetes); err != nil {
glog.Warningf("error updating master taints: %v", err)
}
}
if k.InitializeRBAC {
// @TODO: Idempotency: good question; not sure this should ever be done on the node though
if err := applyRBAC(k.Kubernetes); err != nil {
glog.Warningf("error initializing rbac: %v", err)
}
}
@ -140,12 +132,12 @@ func (k *KubeBoot) syncOnce() error {
// that when kubelet comes up the first time, all volume mounts
// and DNS are available, avoiding the scenario where
// etcd/apiserver retry too many times and go into backoff.
if err := startKubeletService(); err != nil {
glog.Warningf("error ensuring kubelet started: %v", err)
}
for _, channel := range k.Channels {
if err := applyChannel(channel); err != nil {
glog.Warningf("error applying channel %q: %v", channel, err)
}
}
@ -153,11 +145,12 @@ func (k *KubeBoot) syncOnce() error {
return nil
}
// startKubeletService checks whether the kubelet service is running and, if not, starts it
func startKubeletService() error {
// TODO: Check/log status of kubelet
// (in particular, we want to avoid kubernetes/kubernetes#40123 )
glog.V(2).Infof("ensuring that kubelet systemd service is running")
cmd := exec.Command("systemctl", "status", "--no-block", "kubelet")
output, err := cmd.CombinedOutput()
glog.V(2).Infof("'systemctl status kubelet' output:\n%s", string(output))
@ -165,6 +158,7 @@ func enableKubelet() error {
glog.V(2).Infof("kubelet systemd service already running")
return nil
}
glog.Infof("kubelet systemd service not running. Starting")
cmd = exec.Command("systemctl", "start", "--no-block", "kubelet")
output, err = cmd.CombinedOutput()
@ -172,5 +166,17 @@ func enableKubelet() error {
return fmt.Errorf("error starting kubelet: %v\nOutput: %s", err, output)
}
glog.V(2).Infof("'systemctl start kubelet' output:\n%s", string(output))
return nil
}
func pathFor(hostPath string) string {
if hostPath[0] != '/' {
glog.Fatalf("path was not absolute: %q", hostPath)
}
return RootFS + hostPath[1:]
}
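When running containerized, protokube points RootFS at the mounted host filesystem so that pathFor re-roots absolute host paths; a small illustration (the /rootfs/ mount point is an assumption, matching the chroot convention mentioned in the removed comments above):

RootFS = "/rootfs/"
fmt.Println(pathFor("/var/log/etcd.log")) // /rootfs/var/log/etcd.log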
func (k *KubeBoot) String() string {
return DebugString(k)
}

View File

@ -44,7 +44,7 @@ package protokube
//// RunKubelet runs the bootstrap tasks, and watches them until they exit
//// Currently only one task is supported / will work properly
//func (k *KubeBoot) RunBootstrapTasks() error {
// bootstrapDir := PathFor(BootstrapDir)
// bootstrapDir := pathFor(BootstrapDir)
//
// var dirs []os.FileInfo
// var err error

View File

@ -18,20 +18,24 @@ package protokube
import (
	"fmt"
	"sync"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)
// KubernetesContext is the kubernetes context
type KubernetesContext struct {
mutex sync.Mutex
k8sClient kubernetes.Interface
}
// NewKubernetesContext returns a new KubernetesContext
func NewKubernetesContext() *KubernetesContext {
return &KubernetesContext{}
}
// KubernetesClient returns a new kubernetes api client
func (c *KubernetesContext) KubernetesClient() (kubernetes.Interface, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
@ -56,5 +60,6 @@ func (c *KubernetesContext) KubernetesClient() (kubernetes.Interface, error) {
}
c.k8sClient = k8sClient
}
return c.k8sClient, nil
}

View File

@ -18,15 +18,18 @@ package protokube
import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api/v1"
	rbac "k8s.io/client-go/pkg/apis/rbac/v1beta1"

	"github.com/golang/glog"
)
// applyRBAC is responsible for initializing RBAC
func applyRBAC(kubeContext *KubernetesContext) error {
k8sClient, err := kubeContext.KubernetesClient()
if err != nil {
return fmt.Errorf("error connecting to kubernetes: %v", err)
@ -73,7 +76,7 @@ const (
KubeProxyServiceAccountName = "kube-proxy"
)
// createServiceAccounts creates the necessary serviceaccounts that kubeadm uses/might use, if they don't already exist.
func createServiceAccounts(clientset kubernetes.Interface) error {
serviceAccounts := []v1.ServiceAccount{
{

View File

@ -19,11 +19,13 @@ package protokube
import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/pkg/api/v1"

	"github.com/golang/glog"
)
type nodePatch struct {
@ -44,10 +46,10 @@ type nodePatchSpec struct {
// Note that this is for k8s <= 1.5 only
const TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
// applyMasterTaints finds masters that have not yet been tainted, and applies the master taint.
// Once all supported kubelet versions accept the --register-with-taints flag introduced in 1.6.0, this can probably
// go away entirely. It also sets the unschedulable flag to false, so pods (with a toleration) can target the node
func applyMasterTaints(kubeContext *KubernetesContext) error {
client, err := kubeContext.KubernetesClient()
if err != nil {
return err

View File

@ -18,13 +18,15 @@ package protokube
import (
	"fmt"
	"os"
	"sort"
	"time"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/util/mount"

	"github.com/golang/glog"
)
type VolumeMountController struct {
@ -60,7 +62,7 @@ func (k *VolumeMountController) mountMasterVolumes() ([]*Volume, error) {
mountpoint := "/mnt/master-" + v.ID
// On ContainerOS, we mount to /mnt/disks instead (/mnt is readonly)
_, err := os.Stat(PathFor("/mnt/disks"))
_, err := os.Stat(pathFor("/mnt/disks"))
if err != nil {
if !os.IsNotExist(err) {
return nil, fmt.Errorf("error checking for /mnt/disks: %v", err)
@ -92,9 +94,8 @@ func (k *VolumeMountController) mountMasterVolumes() ([]*Volume, error) {
func (k *VolumeMountController) safeFormatAndMount(device string, mountpoint string, fstype string) error {
// Wait for the device to show up
for {
_, err := os.Stat(pathFor(device))
if err == nil {
break
}
@ -106,16 +107,6 @@ func (k *VolumeMountController) safeFormatAndMount(device string, mountpoint str
}
glog.Infof("Found device %q", device)
// If we are containerized, we still first SafeFormatAndMount in our namespace
// This is because SafeFormatAndMount doesn't seem to work in a container
safeFormatAndMount := &mount.SafeFormatAndMount{Interface: mount.New(""), Runner: exec.New()}
@ -128,7 +119,7 @@ func (k *VolumeMountController) safeFormatAndMount(device string, mountpoint str
// Note: IsLikelyNotMountPoint is not containerized
findMountpoint := pathFor(mountpoint)
var existing []*mount.MountPoint
for i := range mounts {
m := &mounts[i]
@ -143,17 +134,17 @@ func (k *VolumeMountController) safeFormatAndMount(device string, mountpoint str
// options = append(options, "ro")
//}
if len(existing) == 0 {
glog.Infof("Creating mount directory %q", PathFor(mountpoint))
if err := os.MkdirAll(PathFor(mountpoint), 0750); err != nil {
glog.Infof("Creating mount directory %q", pathFor(mountpoint))
if err := os.MkdirAll(pathFor(mountpoint), 0750); err != nil {
return err
}
glog.Infof("Mounting device %q on %q", PathFor(device), PathFor(mountpoint))
glog.Infof("Mounting device %q on %q", pathFor(device), pathFor(mountpoint))
err = safeFormatAndMount.FormatAndMount(pathFor(device), pathFor(mountpoint), fstype, options)
if err != nil {
//os.Remove(mountpoint)
return fmt.Errorf("error formatting and mounting disk %q on %q: %v", PathFor(device), PathFor(mountpoint), err)
return fmt.Errorf("error formatting and mounting disk %q on %q: %v", pathFor(device), pathFor(mountpoint), err)
}
// If we are containerized, we then also mount it into the host

View File

@ -105,7 +105,7 @@ func getDevice(mountPoint string) (string, error) {
}
if Containerized {
mountPoint = pathFor(mountPoint)
}
lines := strings.Split(string(out), "\n")
for _, line := range lines {
@ -122,7 +122,7 @@ func getDevice(mountPoint string) (string, error) {
}
func getVolMetadata() ([]vsphere.VolumeMetadata, error) {
rawData, err := ioutil.ReadFile(pathFor(VolumeMetaDataFile))
if err != nil {
return nil, err

View File

@ -19,7 +19,6 @@ package main
import (
"io/ioutil"
"path"
"strconv"
"strings"
"testing"
@ -31,54 +30,47 @@ import (
)
func TestBuildEtcdManifest(t *testing.T) {
	cs := []struct {
		TestFile string
	}{
		{TestFile: "non_tls.yaml"},
		{TestFile: "tls.yaml"},
	}
	for i, x := range cs {
		cluster, expected := loadTestIntegration(t, path.Join("main", x.TestFile))
		definition := protokube.BuildEtcdManifest(cluster)
		generated, err := protokube.ToVersionedYaml(definition)
		if err != nil {
			t.Errorf("case %d, unable to convert to yaml, error: %v", i, err)
			continue
		}
		rendered := strings.TrimSpace(string(generated))
		expected = strings.TrimSpace(expected)

		if rendered != expected {
			diffString := diff.FormatDiff(expected, rendered)
			t.Logf("diff:\n%s\n", diffString)
			t.Errorf("case %d, failed, manifest differed from expected", i)
		}
	}
}
// loadTestIntegration is responsible for loading the integration files
func loadTestIntegration(t *testing.T, path string) (*protokube.EtcdCluster, string) {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatalf("unable to read the integration file: %s, error: %v", path, err)
	}
	documents := strings.Split(string(content), "---")
	if len(documents) != 2 {
		t.Fatalf("expected two yaml documents in the integration file: %s", path)
	}
	// read the specification into an etcd spec
	cluster := &protokube.EtcdCluster{}
	err = kops.ParseRawYaml([]byte(documents[0]), cluster)
	if err != nil {
		t.Fatalf("error parsing etcd specification in file: %s, error: %v", path, err)
	}

	return cluster, documents[1]
}

View File

@ -1,9 +0,0 @@
volumeMountPath: /mnt/main
clusterName: etcd-main
dataDirName: data-main
podName: etcd-server-main
cpuRequest: "200m"
clientPort: 4001
peerPort: 2380
clusterToken: token-main
logFile: /var/log/main.log

View File

@ -1,3 +1,24 @@
clientPort: 4001
clusterName: etcd-main
clusterToken: token-main
cpuRequest: "200m"
dataDirName: data-main
logFile: /var/log/etcd.log
peerPort: 2380
podName: etcd-server-main
volumeMountPath: /mnt/main
me:
name: node0
internalName: node0.internal
nodes:
- name: node0
internalName: node0.internal
- name: node1
internalName: node1.internal
- name: node2
internalName: node2.internal
spec: {}
---
apiVersion: v1
kind: Pod
metadata:
@ -11,7 +32,7 @@ spec:
- command:
- /bin/sh
- -c
- /usr/local/bin/etcd 1>>/var/log/etcd.log 2>&1
- /usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log
env:
- name: ETCD_NAME
value: node0
@ -37,6 +58,7 @@ spec:
host: 127.0.0.1
path: /health
port: 4001
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: etcd-container
@ -61,6 +83,6 @@ spec:
path: /mnt/main/var/etcd/data-main
name: varetcdata
- hostPath:
path: /var/log/etcd.log
name: varlogetcd
status: {}

View File

@ -0,0 +1,110 @@
tlsCA: /srv/kubernetes/ca.crt
tlsCert: /srv/kubernetes/etcd.pem
tlsKey: /srv/kubernetes/etcd-key.pem
clientPort: 4001
clusterName: etcd-main
clusterToken: token-main
cpuRequest: "200m"
dataDirName: data-main
logFile: /var/log/etcd.log
peerCA: /srv/kubernetes/ca.crt
peerCert: /srv/kubernetes/etcd.pem
peerKey: /srv/kubernetes/etcd-key.pem
peerPort: 2380
podName: etcd-server-main
volumeMountPath: /mnt/main
me:
name: node0
internalName: node0.internal
nodes:
- name: node0
internalName: node0.internal
- name: node1
internalName: node1.internal
- name: node2
internalName: node2.internal
spec: {}
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-server-main
name: etcd-server-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- /usr/local/bin/etcd 2>&1 | /bin/tee /var/log/etcd.log
env:
- name: ETCD_NAME
value: node0
- name: ETCD_DATA_DIR
value: /var/etcd/data-main
- name: ETCD_LISTEN_PEER_URLS
value: https://0.0.0.0:2380
- name: ETCD_LISTEN_CLIENT_URLS
value: https://0.0.0.0:4001
- name: ETCD_ADVERTISE_CLIENT_URLS
value: https://node0.internal:4001
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
value: https://node0.internal:2380
- name: ETCD_INITIAL_CLUSTER_STATE
value: new
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: token-main
- name: ETCD_PEER_TRUSTED_CA_FILE
value: /srv/kubernetes/ca.crt
- name: ETCD_PEER_CERT_FILE
value: /srv/kubernetes/etcd.pem
- name: ETCD_PEER_KEY_FILE
value: /srv/kubernetes/etcd-key.pem
- name: ETCD_TRUSTED_CA_FILE
value: /srv/kubernetes/ca.crt
- name: ETCD_CERT_FILE
value: /srv/kubernetes/etcd.pem
- name: ETCD_KEY_FILE
value: /srv/kubernetes/etcd-key.pem
- name: ETCD_INITIAL_CLUSTER
value: node0=https://node0.internal:2380,node1=https://node1.internal:2380,node2=https://node2.internal:2380
image: gcr.io/google_containers/etcd:2.2.1
livenessProbe:
initialDelaySeconds: 15
tcpSocket:
host: 127.0.0.1
port: 4001
timeoutSeconds: 15
name: etcd-container
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
- containerPort: 4001
hostPort: 4001
name: clientport
resources:
requests:
cpu: 200m
volumeMounts:
- mountPath: /var/etcd/data-main
name: varetcdata
- mountPath: /var/log/etcd.log
name: varlogetcd
- mountPath: /srv/kubernetes
name: srvkubernetes
readOnly: true
hostNetwork: true
volumes:
- hostPath:
path: /mnt/main/var/etcd/data-main
name: varetcdata
- hostPath:
path: /var/log/etcd.log
name: varlogetcd
- hostPath:
path: /srv/kubernetes
name: srvkubernetes
status: {}

View File

@ -27,11 +27,13 @@ import (
"encoding/json"
"encoding/pem"
"fmt"
"github.com/golang/glog"
"io"
"k8s.io/kops/util/pkg/vfs"
"math/big"
"time"
"k8s.io/kops/util/pkg/vfs"
"github.com/golang/glog"
)
const CertificateId_CA = "ca"

View File

@ -290,7 +290,6 @@ func (c *populateClusterSpec) run() error {
case "config":
// Note: DefaultOptionsBuilder comes first
codeModels = append(codeModels, &components.DefaultsOptionsBuilder{Context: optionsContext})
codeModels = append(codeModels, &components.KubeAPIServerOptionsBuilder{OptionsContext: optionsContext})
codeModels = append(codeModels, &components.DockerOptionsBuilder{Context: optionsContext})
codeModels = append(codeModels, &components.NetworkingOptionsBuilder{Context: optionsContext})

View File

@ -19,11 +19,12 @@ package fitasks
import (
	"crypto/x509"
	"fmt"
	"net"
	"sort"
	"strings"

	"github.com/golang/glog"
	"k8s.io/kops/upup/pkg/fi"
)
var wellKnownCertificateTypes = map[string]string{
@ -33,9 +34,8 @@ var wellKnownCertificateTypes = map[string]string{
//go:generate fitask -type=Keypair
type Keypair struct {
Name *string
Lifecycle *fi.Lifecycle
Subject string `json:"subject"`
Type string `json:"type"`
AlternateNames []string `json:"alternateNames"`

View File

@ -18,17 +18,19 @@ package nodetasks
import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"

	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/cloudinit"
	"k8s.io/kops/upup/pkg/fi/nodeup/local"
	"k8s.io/kops/upup/pkg/fi/utils"

	"github.com/golang/glog"
)
const FileType_Symlink = "symlink"

View File

@ -18,10 +18,10 @@ package kops
import "strings"
// Version should be replaced by the makefile
var Version = "1.5.0"
// GitVersion should be replaced by the makefile
var GitVersion = ""
// DefaultProtokubeImageName is the name of the protokube image, as we would pass to "docker run"