Support etcd-manager v3, suitable for backporting

Add etcd-manager v3 in a way that we can safely backport.
Justin SB 2019-01-28 14:07:19 -05:00
parent 2c20424656
commit 65e2fb2db4
7 changed files with 171 additions and 20 deletions


@@ -41,7 +41,7 @@ it won't change the configuration:
```bash
# Enable etcd-manager
kops set cluster cluster.spec.etcdClusters[*].manager.image=kopeio/etcd-manager:latest
kops set cluster cluster.spec.etcdClusters[*].provider=Manager
kops update cluster --yes
kops rolling-update cluster --yes


@@ -10,6 +10,7 @@ go_library(
"directories.go",
"docker.go",
"etcd.go",
"etcd_manager_tls.go",
"etcd_tls.go",
"file_assets.go",
"firewall.go",


@@ -371,8 +371,13 @@ func (c *NodeupModelContext) BuildCertificateTask(ctx *fi.ModelBuilderContext, n
return err
}
p := filename
if !filepath.IsAbs(p) {
p = filepath.Join(c.PathSrvKubernetes(), filename)
}
ctx.AddTask(&nodetasks.File{
Path: filepath.Join(c.PathSrvKubernetes(), filename),
Path: p,
Contents: fi.NewStringResource(serialized),
Type: nodetasks.FileType_File,
Mode: s("0600"),
@@ -397,8 +402,13 @@ func (c *NodeupModelContext) BuildPrivateKeyTask(ctx *fi.ModelBuilderContext, na
return err
}
p := filename
if !filepath.IsAbs(p) {
p = filepath.Join(c.PathSrvKubernetes(), filename)
}
ctx.AddTask(&nodetasks.File{
Path: filepath.Join(c.PathSrvKubernetes(), filename),
Path: p,
Contents: fi.NewStringResource(serialized),
Type: nodetasks.FileType_File,
Mode: s("0600"),
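
The two hunks above make BuildCertificateTask and BuildPrivateKeyTask honor absolute destination paths: a bare filename still lands under PathSrvKubernetes(), while an absolute path is now written verbatim. A minimal sketch of a caller, assuming the three-argument (ctx, name, filename) signatures shown in the hunk headers; the helper method exampleCertTasks is hypothetical and not part of this commit:

```go
// Hypothetical caller, for illustration only; not part of this commit.
func (b *EtcdManagerTLSBuilder) exampleCertTasks(ctx *fi.ModelBuilderContext) error {
	// Relative filename: unchanged behavior, written under /srv/kubernetes.
	if err := b.BuildCertificateTask(ctx, "ca", "ca.crt"); err != nil {
		return err
	}
	// Absolute path: now honored as-is, which is what the new EtcdManagerTLSBuilder
	// below relies on to place the CA under /etc/kubernetes/pki/etcd-manager-main.
	return b.BuildCertificateTask(ctx, "etcd-manager-ca-main",
		"/etc/kubernetes/pki/etcd-manager-main/etcd-manager-ca.crt")
}
```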


@@ -0,0 +1,63 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"github.com/golang/glog"
"k8s.io/kops/upup/pkg/fi"
)
// EtcdManagerTLSBuilder configures TLS support for etcd-manager
type EtcdManagerTLSBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &EtcdManagerTLSBuilder{}
// Build is responsible for TLS configuration for etcd-manager
func (b *EtcdManagerTLSBuilder) Build(ctx *fi.ModelBuilderContext) error {
if !b.IsMaster {
return nil
}
for _, k := range []string{"main", "events"} {
d := "/etc/kubernetes/pki/etcd-manager-" + k
keys := make(map[string]string)
keys["etcd-manager-ca"] = "etcd-manager-ca-" + k
for fileName, keystoreName := range keys {
cert, err := b.KeyStore.FindCert(keystoreName)
if err != nil {
return err
}
if cert == nil {
glog.Warningf("keypair %q not found, won't configure", keystoreName)
continue
}
if err := b.BuildCertificateTask(ctx, keystoreName, d+"/"+fileName+".crt"); err != nil {
return err
}
if err := b.BuildPrivateKeyTask(ctx, keystoreName, d+"/"+fileName+".key"); err != nil {
return err
}
}
}
return nil
}


@@ -100,6 +100,8 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error {
return err
}
format := string(fi.KeysetFormatV1Alpha2)
c.AddTask(&fitasks.ManagedFile{
Contents: fi.WrapResource(fi.NewBytesResource(d)),
Lifecycle: b.Lifecycle,
@@ -107,14 +109,22 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error {
Location: fi.String("backups/etcd/" + etcdCluster.Name + "/control/etcd-cluster-spec"),
Name: fi.String("etcd-cluster-spec-" + name),
})
// We create a CA keypair to enable secure communication
c.AddTask(&fitasks.Keypair{
Name: fi.String("etcd-manager-ca-" + etcdCluster.Name),
Subject: "cn=etcd-manager-ca-" + etcdCluster.Name,
Type: "ca",
Format: format,
})
}
return nil
}
type etcdClusterSpec struct {
MemberCount int32 `json:"member_count,omitempty"`
EtcdVersion string `json:"etcd_version,omitempty"`
MemberCount int32 `json:"memberCount,omitempty"`
EtcdVersion string `json:"etcdVersion,omitempty"`
}
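
The tag change above switches the serialized cluster spec from snake_case to camelCase keys, which is why the expected etcd-cluster-spec files later in this diff change from member_count to memberCount. A minimal standalone sketch of the resulting JSON, assuming plain encoding/json marshalling with two-space indentation (the exact marshalling call is not shown in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of the struct above, for illustration only.
type etcdClusterSpec struct {
	MemberCount int32  `json:"memberCount,omitempty"`
	EtcdVersion string `json:"etcdVersion,omitempty"`
}

func main() {
	spec := etcdClusterSpec{MemberCount: 1}
	d, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(d))
	// Output:
	// {
	//   "memberCount": 1
	// }
}
```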
func (b *EtcdManagerBuilder) buildManifest(etcdCluster *kops.EtcdClusterSpec) (*v1.Pod, error) {
@@ -159,7 +169,7 @@ metadata:
namespace: kube-system
spec:
containers:
- image: kopeio/etcd-manager:1.0.20181001
- image: kopeio/etcd-manager:3.0.20190125
name: etcd-manager
resources:
requests:
@@ -174,6 +184,8 @@ spec:
# We write artificial hostnames into etc hosts for the etcd nodes, so they have stable names
- mountPath: /etc/hosts
name: hosts
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
hostNetwork: true
hostPID: true # helps with mounting volumes from inside a container
volumes:
@@ -185,6 +197,10 @@ spec:
path: /etc/hosts
type: File
name: hosts
- hostPath:
path: /etc/kubernetes/pki/etcd-manager
type: DirectoryOrCreate
name: pki
`
// buildPod creates the pod spec, based on the EtcdClusterSpec
@@ -300,6 +316,7 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster *kops.EtcdClusterSpec) (*v1.Po
BackupStore: backupStore,
GrpcPort: grpcPort,
DNSSuffix: dnsInternalSuffix,
EtcdInsecure: !isTLS,
}
config.LogVerbosity = 8
@@ -389,10 +406,28 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster *kops.EtcdClusterSpec) (*v1.Po
},
},
})
}
if isTLS {
return nil, fmt.Errorf("TLS not supported for etcd-manager")
{
foundPKI := false
for i := range pod.Spec.Volumes {
v := &pod.Spec.Volumes[i]
if v.Name == "pki" {
if v.HostPath == nil {
return nil, fmt.Errorf("found PKI volume, but HostPath was nil")
}
dirname := "etcd-manager-" + etcdCluster.Name
v.HostPath.Path = "/etc/kubernetes/pki/" + dirname
foundPKI = true
}
}
if !foundPKI {
return nil, fmt.Errorf("did not find PKI volume")
}
}
if isTLS {
return nil, fmt.Errorf("TLS not supported for etcd-manager")
}
kubemanifest.MarkPodAsCritical(pod)
@@ -408,6 +443,15 @@ type config struct {
// Containerized is set if etcd-manager is running in a container
Containerized bool `flag:"containerized"`
// PKIDir is set to the directory for PKI keys, used to secure communications between etcd-manager peers
PKIDir string `flag:"pki-dir"`
// Insecure can be used to turn off TLS for etcd-manager (compare with EtcdInsecure)
Insecure bool `flag:"insecure"`
// EtcdInsecure can be used to turn off TLS for etcd itself (compare with Insecure)
EtcdInsecure bool `flag:"etcd-insecure"`
Address string `flag:"address"`
PeerUrls string `flag:"peer-urls"`
GrpcPort int `flag:"grpc-port"`
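
The flag tags on this struct are what turn the new fields into the --insecure and --etcd-insecure arguments seen in the expected manifests below. A rough standalone sketch, assuming kops' pkg/flagbuilder (whose BuildFlags renders flag-tagged struct fields into a sorted command line); the exact package and output are an assumption here, not shown in this diff:

```go
package main

import (
	"fmt"

	"k8s.io/kops/pkg/flagbuilder"
)

// Subset of the config struct above, for illustration only.
type config struct {
	Insecure     bool `flag:"insecure"`
	EtcdInsecure bool `flag:"etcd-insecure"`
	GrpcPort     int  `flag:"grpc-port"`
}

func main() {
	c := &config{Insecure: false, EtcdInsecure: true, GrpcPort: 3997}
	flags, err := flagbuilder.BuildFlags(c)
	if err != nil {
		panic(err)
	}
	// Flags are emitted in sorted order, e.g.:
	//   --etcd-insecure=true --grpc-port=3997 --insecure=false
	fmt.Println(flags)
}
```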


@@ -1,8 +1,26 @@
Lifecycle: null
Name: etcd-manager-ca-events
Signer: null
alternateNameTasks: null
alternateNames: null
format: v1alpha2
subject: cn=etcd-manager-ca-events
type: ca
---
Lifecycle: null
Name: etcd-manager-ca-main
Signer: null
alternateNameTasks: null
alternateNames: null
format: v1alpha2
subject: cn=etcd-manager-ca-main
type: ca
---
Contents:
Name: ""
Resource: |-
{
"member_count": 1
"memberCount": 1
}
Lifecycle: null
Location: backups/etcd/events/control/etcd-cluster-spec
@@ -12,7 +30,7 @@ Contents:
Name: ""
Resource: |-
{
"member_count": 1
"memberCount": 1
}
Lifecycle: null
Location: backups/etcd/main/control/etcd-cluster-spec
@@ -39,11 +57,12 @@ Contents:
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events
--client-urls=http://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --grpc-port=3997 --peer-urls=http://__name__:2381
--quarantine-client-urls=http://__name__:3995 --v=8 --volume-name-tag=k8s.io/etcd/events
--volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
image: kopeio/etcd-manager:1.0.20181001
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997
--insecure=false --peer-urls=http://__name__:2381 --quarantine-client-urls=http://__name__:3995
--v=8 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20190125
name: etcd-manager
resources:
requests:
@@ -55,6 +74,8 @@ Contents:
name: rootfs
- mountPath: /etc/hosts
name: hosts
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
@@ -71,6 +92,10 @@ Contents:
path: /etc/hosts
type: File
name: hosts
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
@@ -101,11 +126,12 @@ Contents:
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main
--client-urls=http://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --grpc-port=3996 --peer-urls=http://__name__:2380
--quarantine-client-urls=http://__name__:3994 --v=8 --volume-name-tag=k8s.io/etcd/main
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
image: kopeio/etcd-manager:1.0.20181001
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996
--insecure=false --peer-urls=http://__name__:2380 --quarantine-client-urls=http://__name__:3994
--v=8 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20190125
name: etcd-manager
resources:
requests:
@@ -117,6 +143,8 @@ Contents:
name: rootfs
- mountPath: /etc/hosts
name: hosts
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
@@ -133,6 +161,10 @@ Contents:
path: /etc/hosts
type: File
name: hosts
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate


@@ -243,6 +243,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
loader.Builders = append(loader.Builders, &model.KubeAPIServerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeControllerManagerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeSchedulerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.EtcdManagerTLSBuilder{NodeupModelContext: modelContext})
if c.cluster.Spec.Networking.Kuberouter == nil {
loader.Builders = append(loader.Builders, &model.KubeProxyBuilder{NodeupModelContext: modelContext})
} else {