mirror of https://github.com/kubernetes/kops.git
upup: improved delete; create upup export and upup upgrade
This commit is contained in:
parent 607e002e08
commit 90d7fb87ad
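As an illustration of the commands this commit touches, a minimal usage sketch (the upup binary name is taken from the commit message, the cluster subcommand names are inferred from the DeleteClusterCmd, ExportClusterCmd and UpgradeClusterCmd types, and the flags from the registrations in the diff below; cluster names and the region are placeholder values):

    upup delete cluster --region us-east-1
    upup export cluster --name mycluster.example.com --region us-east-1
    upup upgrade cluster --newname mycluster2.example.com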
@@ -3,10 +3,15 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"bytes"
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
|
||||
"k8s.io/kube-deploy/upup/pkg/kutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
type DeleteClusterCmd struct {
|
||||
|
@@ -38,6 +43,8 @@ func init() {
|
|||
cmd.Flags().StringVar(&deleteCluster.Region, "region", "", "region")
|
||||
}
|
||||
|
||||
type getter func(o interface{}) interface{}
|
||||
|
||||
func (c *DeleteClusterCmd) Run() error {
|
||||
if c.Region == "" {
|
||||
return fmt.Errorf("--region is required")
|
||||
|
@@ -65,8 +72,65 @@ func (c *DeleteClusterCmd) Run() error {
|
|||
return err
|
||||
}
|
||||
|
||||
for k := range resources {
|
||||
fmt.Printf("%v\n", k)
|
||||
columns := []string{"TYPE", "ID", "NAME"}
|
||||
fields := []string{"Type", "ID", "Name"}
|
||||
|
||||
var b bytes.Buffer
|
||||
w := new(tabwriter.Writer)
|
||||
|
||||
// Format in tab-separated columns with a tab stop of 8.
|
||||
w.Init(os.Stdout, 0, 8, 0, '\t', tabwriter.StripEscape)
|
||||
|
||||
writeHeader := true
|
||||
if writeHeader {
|
||||
for i, c := range columns {
|
||||
if i != 0 {
|
||||
b.WriteByte('\t')
|
||||
}
|
||||
b.WriteByte(tabwriter.Escape)
|
||||
b.WriteString(c)
|
||||
b.WriteByte(tabwriter.Escape)
|
||||
}
|
||||
b.WriteByte('\n')
|
||||
|
||||
_, err := w.Write(b.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing to output: %v", err)
|
||||
}
|
||||
b.Reset()
|
||||
}
|
||||
|
||||
for _, t := range resources {
|
||||
for i := range columns {
|
||||
if i != 0 {
|
||||
b.WriteByte('\t')
|
||||
}
|
||||
|
||||
v := reflect.ValueOf(t)
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
fv := v.FieldByName(fields[i])
|
||||
|
||||
s := fi.ValueAsString(fv)
|
||||
|
||||
b.WriteByte(tabwriter.Escape)
|
||||
b.WriteString(s)
|
||||
b.WriteByte(tabwriter.Escape)
|
||||
}
|
||||
b.WriteByte('\n')
|
||||
|
||||
_, err := w.Write(b.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing to output: %v", err)
|
||||
}
|
||||
b.Reset()
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
if len(resources) == 0 {
|
||||
fmt.Printf("Nothing to delete\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
if !c.Yes {
|
||||
|
|
|
@@ -0,0 +1,21 @@
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// exportCmd represents the export command
var exportCmd = &cobra.Command{
	Use:   "export",
	Short: "export clusters",
	Long:  `export clusters`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("Usage: cluster")
	},
}

func init() {
	rootCommand.AddCommand(exportCmd)
}
@@ -0,0 +1,68 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
|
||||
"k8s.io/kube-deploy/upup/pkg/kutil"
|
||||
)
|
||||
|
||||
type ExportClusterCmd struct {
|
||||
ClusterName string
|
||||
Region string
|
||||
}
|
||||
|
||||
var exportCluster ExportClusterCmd
|
||||
|
||||
func init() {
|
||||
cmd := &cobra.Command{
|
||||
Use: "cluster",
|
||||
Short: "Export cluster",
|
||||
Long: `Exports a k8s cluster.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := exportCluster.Run()
|
||||
if err != nil {
|
||||
glog.Exitf("%v", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
exportCmd.AddCommand(cmd)
|
||||
|
||||
cmd.Flags().StringVar(&exportCluster.ClusterName, "name", "", "cluster name")
|
||||
cmd.Flags().StringVar(&exportCluster.Region, "region", "", "region")
|
||||
}
|
||||
|
||||
func (c *ExportClusterCmd) Run() error {
|
||||
if c.Region == "" {
|
||||
return fmt.Errorf("--region is required")
|
||||
}
|
||||
if c.ClusterName == "" {
|
||||
return fmt.Errorf("--name is required")
|
||||
}
|
||||
|
||||
tags := map[string]string{"KubernetesCluster": c.ClusterName}
|
||||
cloud, err := awsup.NewAWSCloud(c.Region, tags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initializing AWS client: %v", err)
|
||||
}
|
||||
|
||||
stateStore, err := rootCommand.StateStore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error state store: %v", err)
|
||||
}
|
||||
|
||||
d := &kutil.ExportCluster{}
|
||||
d.ClusterName = c.ClusterName
|
||||
d.Cloud = cloud
|
||||
d.StateStore = stateStore
|
||||
|
||||
err = d.ReverseAWS()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -3,22 +3,17 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/cobra"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kube-deploy/upup/pkg/kubecfg"
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
type KubecfgGenerateCommand struct {
|
||||
ClusterName string
|
||||
CloudProvider string
|
||||
Project string
|
||||
Master string
|
||||
|
||||
tmpdir string
|
||||
caStore fi.CAStore
|
||||
}
|
||||
|
@@ -40,28 +35,35 @@ func init() {
|
|||
}
|
||||
|
||||
kubecfgCmd.AddCommand(cmd)
|
||||
|
||||
// TODO: We need to store this in the persistent state dir
|
||||
cmd.Flags().StringVarP(&kubecfgGenerateCommand.ClusterName, "name", "", kubecfgGenerateCommand.ClusterName, "Name for cluster")
|
||||
cmd.Flags().StringVarP(&kubecfgGenerateCommand.CloudProvider, "cloud", "", kubecfgGenerateCommand.CloudProvider, "Cloud provider to use - gce, aws")
|
||||
cmd.Flags().StringVarP(&kubecfgGenerateCommand.Project, "project", "", kubecfgGenerateCommand.Project, "Project to use (must be set on GCE)")
|
||||
|
||||
cmd.Flags().StringVarP(&kubecfgGenerateCommand.Master, "master", "", kubecfgGenerateCommand.Master, "IP adddress or host of API server")
|
||||
}
|
||||
|
||||
func (c *KubecfgGenerateCommand) Run() error {
|
||||
if c.ClusterName == "" {
|
||||
return fmt.Errorf("name must be specified")
|
||||
}
|
||||
if c.Master == "" {
|
||||
c.Master = "api." + c.ClusterName
|
||||
glog.Infof("Connecting to master at %s", c.Master)
|
||||
}
|
||||
if c.CloudProvider == "" {
|
||||
return fmt.Errorf("cloud must be specified")
|
||||
stateStore, err := rootCommand.StateStore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error state store: %v", err)
|
||||
}
|
||||
|
||||
var err error
|
||||
config := &cloudup.CloudConfig{}
|
||||
err = stateStore.ReadConfig(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading configuration: %v", err)
|
||||
}
|
||||
|
||||
clusterName := config.ClusterName
|
||||
if clusterName == "" {
|
||||
return fmt.Errorf("ClusterName must be set in config")
|
||||
}
|
||||
|
||||
master := config.MasterPublicName
|
||||
if master == "" {
|
||||
master = "api." + clusterName
|
||||
}
|
||||
|
||||
//cloudProvider := config.CloudProvider
|
||||
//if cloudProvider == "" {
|
||||
// return fmt.Errorf("cloud must be specified")
|
||||
//}
|
||||
|
||||
c.tmpdir, err = ioutil.TempDir("", "k8s")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating temporary directory: %v", err)
|
||||
|
@@ -71,19 +73,20 @@ func (c *KubecfgGenerateCommand) Run() error {
|
|||
b := &kubecfg.KubeconfigBuilder{}
|
||||
b.Init()
|
||||
|
||||
switch c.CloudProvider {
|
||||
case "aws":
|
||||
b.Context = "aws_" + c.ClusterName
|
||||
|
||||
case "gce":
|
||||
if c.Project == "" {
|
||||
return fmt.Errorf("--project must be specified (for GCE)")
|
||||
}
|
||||
b.Context = c.Project + "_" + c.ClusterName
|
||||
|
||||
default:
|
||||
return fmt.Errorf("Unknown cloud provider %q", c.CloudProvider)
|
||||
}
|
||||
b.Context = clusterName
|
||||
//switch cloudProvider {
|
||||
//case "aws":
|
||||
// b.Context = "aws_" + clusterName
|
||||
//
|
||||
//case "gce":
|
||||
// if config.Project == "" {
|
||||
// return fmt.Errorf("Project must be configured (for GCE)")
|
||||
// }
|
||||
// b.Context = config.Project + "_" + clusterName
|
||||
//
|
||||
//default:
|
||||
// return fmt.Errorf("Unknown cloud provider %q", cloudProvider)
|
||||
//}
|
||||
|
||||
c.caStore, err = rootCommand.CA()
|
||||
if err != nil {
|
||||
|
@@ -102,7 +105,7 @@ func (c *KubecfgGenerateCommand) Run() error {
|
|||
return err
|
||||
}
|
||||
|
||||
b.KubeMasterIP = c.Master
|
||||
b.KubeMasterIP = master
|
||||
|
||||
err = b.CreateKubeconfig()
|
||||
if err != nil {
|
||||
|
|
|
@@ -0,0 +1,21 @@
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// upgradeCmd represents the upgrade command
var upgradeCmd = &cobra.Command{
	Use:   "upgrade",
	Short: "upgrade clusters",
	Long:  `upgrade clusters`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("Usage: cluster")
	},
}

func init() {
	rootCommand.AddCommand(upgradeCmd)
}
@@ -0,0 +1,95 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
|
||||
"k8s.io/kube-deploy/upup/pkg/kutil"
|
||||
)
|
||||
|
||||
type UpgradeClusterCmd struct {
|
||||
NewClusterName string
|
||||
}
|
||||
|
||||
var upgradeCluster UpgradeClusterCmd
|
||||
|
||||
func init() {
|
||||
cmd := &cobra.Command{
|
||||
Use: "cluster",
|
||||
Short: "Upgrade cluster",
|
||||
Long: `Upgrades a k8s cluster.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := upgradeCluster.Run()
|
||||
if err != nil {
|
||||
glog.Exitf("%v", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
upgradeCmd.AddCommand(cmd)
|
||||
|
||||
cmd.Flags().StringVar(&upgradeCluster.NewClusterName, "newname", "", "new cluster name")
|
||||
}
|
||||
|
||||
func (c *UpgradeClusterCmd) Run() error {
|
||||
if c.NewClusterName == "" {
|
||||
return fmt.Errorf("--newname is required")
|
||||
}
|
||||
|
||||
stateStore, err := rootCommand.StateStore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error state store: %v", err)
|
||||
}
|
||||
|
||||
config := &cloudup.CloudConfig{}
|
||||
err = stateStore.ReadConfig(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading configuration: %v", err)
|
||||
}
|
||||
|
||||
oldClusterName := config.ClusterName
|
||||
if oldClusterName == "" {
|
||||
return fmt.Errorf("(Old) ClusterName must be set in configuration")
|
||||
}
|
||||
|
||||
if len(config.NodeZones) == 0 {
|
||||
return fmt.Errorf("Configuration must include NodeZones")
|
||||
}
|
||||
|
||||
region := ""
|
||||
for _, zone := range config.NodeZones {
|
||||
if len(zone.Name) <= 2 {
|
||||
return fmt.Errorf("Invalid AWS zone: %q", zone.Name)
|
||||
}
|
||||
|
||||
zoneRegion := zone.Name[:len(zone.Name)-1]
|
||||
if region != "" && zoneRegion != region {
|
||||
return fmt.Errorf("Clusters cannot span multiple regions")
|
||||
}
|
||||
|
||||
region = zoneRegion
|
||||
}
|
||||
|
||||
tags := map[string]string{"KubernetesCluster": oldClusterName}
|
||||
cloud, err := awsup.NewAWSCloud(region, tags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initializing AWS client: %v", err)
|
||||
}
|
||||
|
||||
d := &kutil.UpgradeCluster{}
|
||||
d.NewClusterName = c.NewClusterName
|
||||
d.OldClusterName = oldClusterName
|
||||
d.Cloud = cloud
|
||||
d.Config = config
|
||||
d.StateStore = stateStore
|
||||
|
||||
err = d.Upgrade()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -26,30 +26,7 @@ func (t *AWSAPITarget) Finish(taskMap map[string]fi.Task) error {
|
|||
}
|
||||
|
||||
func (t *AWSAPITarget) AddAWSTags(id string, expected map[string]string) error {
|
||||
actual, err := t.Cloud.GetTags(id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unexpected error fetching tags for resource: %v", err)
|
||||
}
|
||||
|
||||
missing := map[string]string{}
|
||||
for k, v := range expected {
|
||||
actualValue, found := actual[k]
|
||||
if found && actualValue == v {
|
||||
continue
|
||||
}
|
||||
missing[k] = v
|
||||
}
|
||||
|
||||
if len(missing) != 0 {
|
||||
glog.V(4).Infof("adding tags to %q: %v", id, missing)
|
||||
|
||||
err := t.Cloud.CreateTags(id, missing)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error adding tags to resource %q: %v", id, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return t.Cloud.AddAWSTags(id, expected)
|
||||
}
|
||||
|
||||
func (t *AWSAPITarget) AddELBTags(loadBalancerName string, expected map[string]string) error {
|
||||
|
|
|
@@ -166,6 +166,72 @@ func (c *AWSCloud) CreateTags(resourceId string, tags map[string]string) error {
|
|||
}
|
||||
}
|
||||
|
||||
// DeleteTags will remove tags from the specified resource, retrying up to MaxCreateTagsAttempts times if it hits an eventual-consistency type error
|
||||
func (c *AWSCloud) DeleteTags(resourceId string, tags map[string]string) error {
|
||||
if len(tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ec2Tags := []*ec2.Tag{}
|
||||
for k, v := range tags {
|
||||
ec2Tags = append(ec2Tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})
|
||||
}
|
||||
|
||||
attempt := 0
|
||||
for {
|
||||
attempt++
|
||||
|
||||
request := &ec2.DeleteTagsInput{
|
||||
Tags: ec2Tags,
|
||||
Resources: []*string{&resourceId},
|
||||
}
|
||||
|
||||
_, err := c.EC2.DeleteTags(request)
|
||||
if err != nil {
|
||||
if isTagsEventualConsistencyError(err) {
|
||||
if attempt > MaxCreateTagsAttempts {
|
||||
return fmt.Errorf("Got retryable error while deleting tags on %q, but retried too many times without success: %v", resourceId, err)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("will retry after encountering error deleting tags on %q: %v", resourceId, err)
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
return fmt.Errorf("error deleting tags on %v: %v", resourceId, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *AWSCloud) AddAWSTags(id string, expected map[string]string) error {
|
||||
actual, err := c.GetTags(id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unexpected error fetching tags for resource: %v", err)
|
||||
}
|
||||
|
||||
missing := map[string]string{}
|
||||
for k, v := range expected {
|
||||
actualValue, found := actual[k]
|
||||
if found && actualValue == v {
|
||||
continue
|
||||
}
|
||||
missing[k] = v
|
||||
}
|
||||
|
||||
if len(missing) != 0 {
|
||||
glog.V(4).Infof("adding tags to %q: %v", id, missing)
|
||||
|
||||
err := c.CreateTags(id, missing)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error adding tags to resource %q: %v", id, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *AWSCloud) GetELBTags(loadBalancerName string) (map[string]string, error) {
|
||||
tags := map[string]string{}
|
||||
|
||||
|
|
|
@@ -4,7 +4,9 @@ import (
|
|||
"fmt"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/autoscaling"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/elb"
|
||||
"github.com/golang/glog"
|
||||
"os"
|
||||
)
|
||||
|
@@ -36,3 +38,33 @@ func ValidateRegion(region string) error {
|
|||
|
||||
return fmt.Errorf("Region is not a recognized EC2 region: %q (check you have specified valid zones?)", region)
|
||||
}
|
||||
|
||||
// FindEC2Tag finds the value of the tag with the specified key
|
||||
func FindEC2Tag(tags []*ec2.Tag, key string) (string, bool) {
|
||||
for _, tag := range tags {
|
||||
if key == aws.StringValue(tag.Key) {
|
||||
return aws.StringValue(tag.Value), true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// FindASGTag finds the value of the tag with the specified key
|
||||
func FindASGTag(tags []*autoscaling.TagDescription, key string) (string, bool) {
|
||||
for _, tag := range tags {
|
||||
if key == aws.StringValue(tag.Key) {
|
||||
return aws.StringValue(tag.Value), true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// FindELBTag finds the value of the tag with the specified key
|
||||
func FindELBTag(tags []*elb.Tag, key string) (string, bool) {
|
||||
for _, tag := range tags {
|
||||
if key == aws.StringValue(tag.Key) {
|
||||
return aws.StringValue(tag.Value), true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
|
|
@@ -123,7 +123,7 @@ func (t *DryRunTarget) PrintReport(taskMap map[string]Task, out io.Writer) error
|
|||
//case SimpleUnit:
|
||||
// ignored = true
|
||||
default:
|
||||
description = fmt.Sprintf(" %v -> %v", asString(fieldValA), asString(fieldValE))
|
||||
description = fmt.Sprintf(" %v -> %v", ValueAsString(fieldValA), ValueAsString(fieldValE))
|
||||
}
|
||||
}
|
||||
if ignored {
|
||||
|
@@ -152,7 +152,7 @@ func (t *DryRunTarget) PrintReport(taskMap map[string]Task, out io.Writer) error
|
|||
}
|
||||
|
||||
// ValueAsString returns a human-readable string representation of the passed value
|
||||
func asString(value reflect.Value) string {
|
||||
func ValueAsString(value reflect.Value) string {
|
||||
b := &bytes.Buffer{}
|
||||
|
||||
walker := func(path string, field *reflect.StructField, v reflect.Value) error {
|
||||
|
@@ -182,7 +182,7 @@ func asString(value reflect.Value) string {
|
|||
if i != 0 {
|
||||
fmt.Fprintf(b, ", ")
|
||||
}
|
||||
fmt.Fprintf(b, "%s", asString(av))
|
||||
fmt.Fprintf(b, "%s", ValueAsString(av))
|
||||
}
|
||||
fmt.Fprintf(b, "]")
|
||||
return utils.SkipReflection
|
||||
|
@@ -196,7 +196,7 @@ func asString(value reflect.Value) string {
|
|||
if i != 0 {
|
||||
fmt.Fprintf(b, ", ")
|
||||
}
|
||||
fmt.Fprintf(b, "%s: %s", asString(key), asString(mv))
|
||||
fmt.Fprintf(b, "%s: %s", ValueAsString(key), ValueAsString(mv))
|
||||
}
|
||||
fmt.Fprintf(b, "}")
|
||||
return utils.SkipReflection
|
||||
|
|
|
@@ -55,7 +55,9 @@ func NewPackage(name string, contents string, meta string) (fi.Task, error) {
|
|||
|
||||
func (e *Package) Find(c *fi.Context) (*Package, error) {
|
||||
args := []string{"dpkg-query", "-f", "${db:Status-Abbrev}${Version}\\n", "-W", e.Name}
|
||||
glog.V(2).Infof("Listing installed packages: %q", args)
|
||||
human := strings.Join(args, " ")
|
||||
|
||||
glog.V(2).Infof("Listing installed packages: %s", human)
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
|
|
File diff suppressed because it is too large
@@ -0,0 +1,616 @@
|
|||
package kutil
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ExportCluster tries to reverse engineer an existing k8s cluster
|
||||
type ExportCluster struct {
|
||||
ClusterName string
|
||||
Cloud fi.Cloud
|
||||
|
||||
StateStore fi.StateStore
|
||||
}
|
||||
|
||||
func (x *ExportCluster) ReverseAWS() error {
|
||||
awsCloud := x.Cloud.(*awsup.AWSCloud)
|
||||
clusterName := x.ClusterName
|
||||
|
||||
if clusterName == "" {
|
||||
return fmt.Errorf("ClusterName must be specified")
|
||||
}
|
||||
|
||||
k8s := &cloudup.CloudConfig{}
|
||||
k8s.CloudProvider = "aws"
|
||||
k8s.ClusterName = clusterName
|
||||
|
||||
instances, err := findInstances(awsCloud)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error finding instances: %v", err)
|
||||
}
|
||||
|
||||
var master *ec2.Instance
|
||||
for _, instance := range instances {
|
||||
role, _ := awsup.FindEC2Tag(instance.Tags, "Role")
|
||||
if role == clusterName+"-master" {
|
||||
if master != nil {
|
||||
return fmt.Errorf("found multiple masters")
|
||||
}
|
||||
master = instance
|
||||
}
|
||||
}
|
||||
if master == nil {
|
||||
return fmt.Errorf("could not find master node")
|
||||
}
|
||||
masterInstanceID := aws.StringValue(master.InstanceId)
|
||||
glog.Infof("Found master: %q", masterInstanceID)
|
||||
|
||||
k8s.MasterMachineType = aws.StringValue(master.InstanceType)
|
||||
|
||||
masterSubnetID := aws.StringValue(master.SubnetId)
|
||||
|
||||
subnets, err := DescribeSubnets(x.Cloud)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error finding subnets: %v", err)
|
||||
}
|
||||
var masterSubnet *ec2.Subnet
|
||||
for _, s := range subnets {
|
||||
if masterSubnetID == aws.StringValue(s.SubnetId) {
|
||||
if masterSubnet != nil {
|
||||
return fmt.Errorf("found duplicate subnet")
|
||||
}
|
||||
masterSubnet = s
|
||||
}
|
||||
}
|
||||
if masterSubnet == nil {
|
||||
return fmt.Errorf("cannot find subnet %q", masterSubnetID)
|
||||
}
|
||||
|
||||
vpcID := aws.StringValue(master.VpcId)
|
||||
k8s.NetworkID = vpcID
|
||||
|
||||
az := aws.StringValue(masterSubnet.AvailabilityZone)
|
||||
k8s.MasterZones = []string{az}
|
||||
k8s.NodeZones = append(k8s.NodeZones, &cloudup.ZoneConfig{
|
||||
Name: az,
|
||||
//CIDR: aws.StringValue(masterSubnet.CidrBlock),
|
||||
})
|
||||
|
||||
userData, err := GetInstanceUserData(awsCloud, aws.StringValue(master.InstanceId))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting master user-data: %v", err)
|
||||
}
|
||||
|
||||
conf, err := ParseUserDataConfiguration(userData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing master user-data: %v", err)
|
||||
}
|
||||
|
||||
//master := &NodeSSH{
|
||||
// Hostname: c.Master,
|
||||
//}
|
||||
//err := master.AddSSHIdentity(c.SSHIdentity)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//
|
||||
//
|
||||
//fmt.Printf("Connecting to node on %s\n", c.Node)
|
||||
//
|
||||
//node := &NodeSSH{
|
||||
// Hostname: c.Node,
|
||||
//}
|
||||
//err = node.AddSSHIdentity(c.SSHIdentity)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
instancePrefix := conf.Settings["INSTANCE_PREFIX"]
|
||||
if instancePrefix == "" {
|
||||
return fmt.Errorf("cannot determine INSTANCE_PREFIX")
|
||||
}
|
||||
if instancePrefix != clusterName {
|
||||
return fmt.Errorf("INSTANCE_PREFIX %q did not match cluster name %q", instancePrefix, clusterName)
|
||||
}
|
||||
|
||||
k8s.NodeMachineType = k8s.MasterMachineType
|
||||
|
||||
//k8s.NodeMachineType, err = InstanceType(node)
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("cannot determine node instance type: %v", err)
|
||||
//}
|
||||
|
||||
// We want to upgrade!
|
||||
// k8s.ImageId = ""
|
||||
|
||||
k8s.ClusterIPRange = conf.Settings["CLUSTER_IP_RANGE"]
|
||||
k8s.AllocateNodeCIDRs = conf.ParseBool("ALLOCATE_NODE_CIDRS")
|
||||
k8s.KubeUser = conf.Settings["KUBE_USER"]
|
||||
k8s.ServiceClusterIPRange = conf.Settings["SERVICE_CLUSTER_IP_RANGE"]
|
||||
k8s.EnableClusterMonitoring = conf.Settings["ENABLE_CLUSTER_MONITORING"]
|
||||
k8s.EnableClusterLogging = conf.ParseBool("ENABLE_CLUSTER_LOGGING")
|
||||
k8s.EnableNodeLogging = conf.ParseBool("ENABLE_NODE_LOGGING")
|
||||
k8s.LoggingDestination = conf.Settings["LOGGING_DESTINATION"]
|
||||
k8s.ElasticsearchLoggingReplicas, err = parseInt(conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"])
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse ELASTICSEARCH_LOGGING_REPLICAS=%q: %v", conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"], err)
|
||||
}
|
||||
k8s.EnableClusterDNS = conf.ParseBool("ENABLE_CLUSTER_DNS")
|
||||
k8s.EnableClusterUI = conf.ParseBool("ENABLE_CLUSTER_UI")
|
||||
k8s.DNSReplicas, err = parseInt(conf.Settings["DNS_REPLICAS"])
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse DNS_REPLICAS=%q: %v", conf.Settings["DNS_REPLICAS"], err)
|
||||
}
|
||||
k8s.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
|
||||
k8s.DNSDomain = conf.Settings["DNS_DOMAIN"]
|
||||
k8s.AdmissionControl = conf.Settings["ADMISSION_CONTROL"]
|
||||
k8s.MasterIPRange = conf.Settings["MASTER_IP_RANGE"]
|
||||
k8s.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
|
||||
k8s.DockerStorage = conf.Settings["DOCKER_STORAGE"]
|
||||
//k8s.MasterExtraSans = conf.Settings["MASTER_EXTRA_SANS"] // Not user set
|
||||
k8s.NodeCount, err = parseInt(conf.Settings["NUM_MINIONS"])
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse NUM_MINIONS=%q: %v", conf.Settings["NUM_MINIONS"], err)
|
||||
}
|
||||
|
||||
if conf.Version == "1.1" {
|
||||
// If users went with defaults on some things, clear them out so they get the new defaults
|
||||
if k8s.AdmissionControl == "NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" {
|
||||
// More admission controllers in 1.2
|
||||
k8s.AdmissionControl = ""
|
||||
}
|
||||
if k8s.MasterMachineType == "t2.micro" {
|
||||
// Different defaults in 1.2
|
||||
k8s.MasterMachineType = ""
|
||||
}
|
||||
if k8s.NodeMachineType == "t2.micro" {
|
||||
// Encourage users to pick something better...
|
||||
k8s.NodeMachineType = ""
|
||||
}
|
||||
}
|
||||
if conf.Version == "1.2" {
|
||||
// If users went with defaults on some things, clear them out so they get the new defaults
|
||||
if k8s.AdmissionControl == "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,ResourceQuota" {
|
||||
// More admission controllers in 1.2
|
||||
k8s.AdmissionControl = ""
|
||||
}
|
||||
}
|
||||
|
||||
//if masterInstance.PublicIpAddress != nil {
|
||||
// eip, err := findElasticIP(cloud, *masterInstance.PublicIpAddress)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if eip != nil {
|
||||
// k8s.MasterElasticIP = masterInstance.PublicIpAddress
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//vpc, err := cloud.DescribeVPC(*k8s.VPCID)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//k8s.DHCPOptionsID = vpc.DhcpOptionsId
|
||||
//
|
||||
//igw, err := findInternetGateway(cloud, *k8s.VPCID)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//if igw == nil {
|
||||
// return fmt.Errorf("unable to find internet gateway for VPC %q", k8s.VPCID)
|
||||
//}
|
||||
//k8s.InternetGatewayID = igw.InternetGatewayId
|
||||
//
|
||||
//rt, err := findRouteTable(cloud, *k8s.SubnetID)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//if rt == nil {
|
||||
// return fmt.Errorf("unable to find route table for Subnet %q", k8s.SubnetID)
|
||||
//}
|
||||
//k8s.RouteTableID = rt.RouteTableId
|
||||
|
||||
//b.Context = "aws_" + instancePrefix
|
||||
|
||||
keyStore := x.StateStore.CA()
|
||||
|
||||
//caCert, err := masterSSH.Join("ca.crt").ReadFile()
|
||||
caCert, err := conf.ParseCert("CA_CERT")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = keyStore.AddCert(fi.CertificateId_CA, caCert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
////masterKey, err := masterSSH.Join("server.key").ReadFile()
|
||||
//masterKey, err := conf.ParseKey("MASTER_KEY")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
////masterCert, err := masterSSH.Join("server.cert").ReadFile()
|
||||
//masterCert, err := conf.ParseCert("MASTER_CERT")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//err = keyStore.ImportKeypair("master", masterKey, masterCert)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//
|
||||
////kubeletKey, err := kubeletSSH.Join("kubelet.key").ReadFile()
|
||||
//kubeletKey, err := conf.ParseKey("KUBELET_KEY")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
////kubeletCert, err := kubeletSSH.Join("kubelet.cert").ReadFile()
|
||||
//kubeletCert, err := conf.ParseCert("KUBELET_CERT")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//err = keyStore.ImportKeypair("kubelet", kubeletKey, kubeletCert)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
// We don't store the kubecfg key
|
||||
//kubecfgKey, err := masterSSH.Join("kubecfg.key").ReadFile()
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//kubecfgCert, err := masterSSH.Join("kubecfg.cert").ReadFile()
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//err = keyStore.ImportKeypair("kubecfg", kubecfgKey, kubecfgCert)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
// We will generate new tokens...
|
||||
//secretStore := x.StateStore.Secrets()
|
||||
//kubePassword := conf.Settings["KUBE_PASSWORD"]
|
||||
//kubeletToken = conf.Settings["KUBELET_TOKEN"]
|
||||
//kubeProxyToken = conf.Settings["KUBE_PROXY_TOKEN"]
|
||||
|
||||
err = x.StateStore.WriteConfig(k8s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseInt(s string) (int, error) {
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
n, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int(n), nil
|
||||
}
|
||||
|
||||
//func writeConf(p string, k8s *cloudup.CloudConfig) error {
|
||||
// jsonBytes, err := json.Marshal(k8s)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error serializing configuration (json write phase): %v", err)
|
||||
// }
|
||||
//
|
||||
// var confObj interface{}
|
||||
// err = yaml.Unmarshal(jsonBytes, &confObj)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error serializing configuration (yaml read phase): %v", err)
|
||||
// }
|
||||
//
|
||||
// m := confObj.(map[interface{}]interface{})
|
||||
//
|
||||
// for k, v := range m {
|
||||
// if v == nil {
|
||||
// delete(m, k)
|
||||
// }
|
||||
// s, ok := v.(string)
|
||||
// if ok && s == "" {
|
||||
// delete(m, k)
|
||||
// }
|
||||
// //glog.Infof("%v=%v", k, v)
|
||||
// }
|
||||
//
|
||||
// yaml, err := yaml.Marshal(confObj)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error serializing configuration (yaml write phase): %v", err)
|
||||
// }
|
||||
//
|
||||
// err = ioutil.WriteFile(p, yaml, 0600)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error writing configuration to file %q: %v", p, err)
|
||||
// }
|
||||
//
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func findInternetGateway(cloud *awsup.AWSCloud, vpcID string) (*ec2.InternetGateway, error) {
|
||||
// request := &ec2.DescribeInternetGatewaysInput{
|
||||
// Filters: []*ec2.Filter{fi.NewEC2Filter("attachment.vpc-id", vpcID)},
|
||||
// }
|
||||
//
|
||||
// response, err := cloud.EC2.DescribeInternetGateways(request)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("error listing InternetGateways: %v", err)
|
||||
// }
|
||||
// if response == nil || len(response.InternetGateways) == 0 {
|
||||
// return nil, nil
|
||||
// }
|
||||
//
|
||||
// if len(response.InternetGateways) != 1 {
|
||||
// return nil, fmt.Errorf("found multiple InternetGatewayAttachments to VPC")
|
||||
// }
|
||||
// igw := response.InternetGateways[0]
|
||||
// return igw, nil
|
||||
//}
|
||||
|
||||
//func findRouteTable(cloud *awsup.AWSCloud, subnetID string) (*ec2.RouteTable, error) {
|
||||
// request := &ec2.DescribeRouteTablesInput{
|
||||
// Filters: []*ec2.Filter{fi.NewEC2Filter("association.subnet-id", subnetID)},
|
||||
// }
|
||||
//
|
||||
// response, err := cloud.EC2.DescribeRouteTables(request)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("error listing RouteTables: %v", err)
|
||||
// }
|
||||
// if response == nil || len(response.RouteTables) == 0 {
|
||||
// return nil, nil
|
||||
// }
|
||||
//
|
||||
// if len(response.RouteTables) != 1 {
|
||||
// return nil, fmt.Errorf("found multiple RouteTables matching tags")
|
||||
// }
|
||||
// rt := response.RouteTables[0]
|
||||
// return rt, nil
|
||||
//}
|
||||
//
|
||||
//func findElasticIP(cloud *awsup.AWSCloud, publicIP string) (*ec2.Address, error) {
|
||||
// request := &ec2.DescribeAddressesInput{
|
||||
// PublicIps: []*string{&publicIP},
|
||||
// }
|
||||
//
|
||||
// response, err := cloud.EC2.DescribeAddresses(request)
|
||||
// if err != nil {
|
||||
// if awsErr, ok := err.(awserr.Error); ok {
|
||||
// if awsErr.Code() == "InvalidAddress.NotFound" {
|
||||
// return nil, nil
|
||||
// }
|
||||
// }
|
||||
// return nil, fmt.Errorf("error listing Addresses: %v", err)
|
||||
// }
|
||||
// if response == nil || len(response.Addresses) == 0 {
|
||||
// return nil, nil
|
||||
// }
|
||||
//
|
||||
// if len(response.Addresses) != 1 {
|
||||
// return nil, fmt.Errorf("found multiple Addresses matching IP %q", publicIP)
|
||||
// }
|
||||
// return response.Addresses[0], nil
|
||||
//}
|
||||
|
||||
func findInstances(c *awsup.AWSCloud) ([]*ec2.Instance, error) {
|
||||
filters := c.BuildFilters(nil)
|
||||
|
||||
request := &ec2.DescribeInstancesInput{
|
||||
Filters: filters,
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Querying EC2 instances")
|
||||
|
||||
var instances []*ec2.Instance
|
||||
|
||||
err := c.EC2.DescribeInstancesPages(request, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool {
|
||||
for _, reservation := range p.Reservations {
|
||||
for _, instance := range reservation.Instances {
|
||||
instances = append(instances, instance)
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error describing instances: %v", err)
|
||||
}
|
||||
|
||||
return instances, nil
|
||||
}
|
||||
|
||||
//func GetMetadata(t *NodeSSH, key string) (string, error) {
|
||||
// b, err := t.exec("curl -s http://169.254.169.254/latest/meta-data/" + key)
|
||||
// if err != nil {
|
||||
// return "", fmt.Errorf("error querying for metadata %q: %v", key, err)
|
||||
// }
|
||||
// return string(b), nil
|
||||
//}
|
||||
//
|
||||
//func InstanceType(t *NodeSSH) (string, error) {
|
||||
// return GetMetadata(t, "instance-type")
|
||||
//}
|
||||
//
|
||||
//func GetMetadataList(t *NodeSSH, key string) ([]string, error) {
|
||||
// d, err := GetMetadata(t, key)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// var macs []string
|
||||
// for _, line := range strings.Split(d, "\n") {
|
||||
// mac := line
|
||||
// mac = strings.Trim(mac, "/")
|
||||
// mac = strings.TrimSpace(mac)
|
||||
// if mac == "" {
|
||||
// continue
|
||||
// }
|
||||
// macs = append(macs, mac)
|
||||
// }
|
||||
//
|
||||
// return macs, nil
|
||||
//}
|
||||
|
||||
// GetInstanceUserData fetches the UserData attribute for the specified instance
|
||||
func GetInstanceUserData(cloud *awsup.AWSCloud, instanceID string) ([]byte, error) {
|
||||
request := &ec2.DescribeInstanceAttributeInput{}
|
||||
request.InstanceId = aws.String(instanceID)
|
||||
request.Attribute = aws.String("userData")
|
||||
response, err := cloud.EC2.DescribeInstanceAttribute(request)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error querying EC2 for user metadata for instance %q: %v", instanceID)
|
||||
}
|
||||
if response.UserData != nil {
|
||||
b, err := base64.StdEncoding.DecodeString(aws.StringValue(response.UserData.Value))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding EC2 UserData: %v", err)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type UserDataConfiguration struct {
|
||||
Version string
|
||||
Settings map[string]string
|
||||
}
|
||||
|
||||
func (u *UserDataConfiguration) ParseBool(key string) *bool {
|
||||
s := u.Settings[key]
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
s = strings.ToLower(s)
|
||||
if s == "true" || s == "1" || s == "y" || s == "yes" || s == "t" {
|
||||
return fi.Bool(true)
|
||||
}
|
||||
return fi.Bool(false)
|
||||
}
|
||||
|
||||
func (u *UserDataConfiguration) ParseCert(key string) (*fi.Certificate, error) {
|
||||
s := u.Settings[key]
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
data, err := base64.StdEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding base64 certificate %q: %v", key, err)
|
||||
}
|
||||
cert, err := fi.LoadPEMCertificate(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing certificate %q: %v", key, err)
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
func (u *UserDataConfiguration) ParseKey(key string) (*fi.PrivateKey, error) {
|
||||
s := u.Settings[key]
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
data, err := base64.StdEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding base64 private key %q: %v", key, err)
|
||||
}
|
||||
k, err := fi.ParsePEMPrivateKey(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing private key %q: %v", key, err)
|
||||
}
|
||||
|
||||
return k, nil
|
||||
}
|
||||
|
||||
func ParseUserDataConfiguration(raw []byte) (*UserDataConfiguration, error) {
|
||||
userData, err := UserDataToString(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
settings := make(map[string]string)
|
||||
|
||||
version := ""
|
||||
if strings.Contains(userData, "install-salt master") || strings.Contains(userData, "dpkg -s salt-minion") {
|
||||
version = "1.1"
|
||||
} else {
|
||||
version = "1.2"
|
||||
}
|
||||
if version == "1.1" {
|
||||
for _, line := range strings.Split(userData, "\n") {
|
||||
if !strings.HasPrefix(line, "readonly ") {
|
||||
continue
|
||||
}
|
||||
line = line[9:]
|
||||
sep := strings.Index(line, "=")
|
||||
k := ""
|
||||
v := ""
|
||||
if sep != -1 {
|
||||
k = line[0:sep]
|
||||
v = line[sep+1:]
|
||||
}
|
||||
|
||||
if k == "" {
|
||||
glog.V(4).Infof("Unknown line: %s", line)
|
||||
}
|
||||
|
||||
if len(v) >= 2 && v[0] == '\'' && v[len(v)-1] == '\'' {
|
||||
v = v[1 : len(v)-1]
|
||||
}
|
||||
settings[k] = v
|
||||
}
|
||||
} else {
|
||||
for _, line := range strings.Split(userData, "\n") {
|
||||
sep := strings.Index(line, ": ")
|
||||
k := ""
|
||||
v := ""
|
||||
if sep != -1 {
|
||||
k = line[0:sep]
|
||||
v = line[sep+2:]
|
||||
}
|
||||
|
||||
if k == "" {
|
||||
glog.V(4).Infof("Unknown line: %s", line)
|
||||
}
|
||||
|
||||
if len(v) >= 2 && v[0] == '"' && v[len(v)-1] == '"' {
|
||||
v = v[1 : len(v)-1]
|
||||
} else if len(v) >= 2 && v[0] == '\'' && v[len(v)-1] == '\'' {
|
||||
v = v[1 : len(v)-1]
|
||||
}
|
||||
settings[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
c := &UserDataConfiguration{
|
||||
Version: version,
|
||||
Settings: settings,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func UserDataToString(userData []byte) (string, error) {
|
||||
var err error
|
||||
if len(userData) > 2 && userData[0] == 31 && userData[1] == 139 {
|
||||
// GZIP
|
||||
glog.V(2).Infof("gzip data detected; will decompress")
|
||||
|
||||
userData, err = gunzipBytes(userData)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error decompressing user data: %v", err)
|
||||
}
|
||||
}
|
||||
return string(userData), nil
|
||||
}
|
|
@@ -0,0 +1,173 @@
|
|||
package kutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
|
||||
)
|
||||
|
||||
// UpgradeCluster performs an upgrade of a k8s cluster
|
||||
type UpgradeCluster struct {
|
||||
OldClusterName string
|
||||
NewClusterName string
|
||||
Cloud fi.Cloud
|
||||
|
||||
StateStore fi.StateStore
|
||||
|
||||
Config *cloudup.CloudConfig
|
||||
}
|
||||
|
||||
func (x *UpgradeCluster) Upgrade() error {
|
||||
awsCloud := x.Cloud.(*awsup.AWSCloud)
|
||||
|
||||
config := x.Config
|
||||
|
||||
newClusterName := x.NewClusterName
|
||||
if newClusterName == "" {
|
||||
return fmt.Errorf("NewClusterName must be specified")
|
||||
}
|
||||
oldClusterName := x.OldClusterName
|
||||
if oldClusterName == "" {
|
||||
return fmt.Errorf("OldClusterName must be specified")
|
||||
}
|
||||
|
||||
newTags := awsCloud.Tags()
|
||||
newTags["KubernetesCluster"] = newClusterName
|
||||
|
||||
// Try to pre-query as much as possible before doing anything destructive
|
||||
instances, err := findInstances(awsCloud)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error finding instances: %v", err)
|
||||
}
|
||||
|
||||
volumes, err := DescribeVolumes(x.Cloud)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dhcpOptions, err := DescribeDhcpOptions(x.Cloud)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find masters
|
||||
var masters []*ec2.Instance
|
||||
for _, instance := range instances {
|
||||
role, _ := awsup.FindEC2Tag(instance.Tags, "Role")
|
||||
if role == oldClusterName+"-master" {
|
||||
masters = append(masters, instance)
|
||||
}
|
||||
}
|
||||
if len(masters) == 0 {
|
||||
return fmt.Errorf("could not find masters")
|
||||
}
|
||||
|
||||
// Stop masters
|
||||
for _, master := range masters {
|
||||
masterInstanceID := aws.StringValue(master.InstanceId)
|
||||
glog.Infof("Stopping master: %q", masterInstanceID)
|
||||
|
||||
request := &ec2.StopInstancesInput{
|
||||
InstanceIds: []*string{master.InstanceId},
|
||||
}
|
||||
|
||||
_, err := awsCloud.EC2.StopInstances(request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error stopping master instance: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Wait for master to stop & detach volumes?
|
||||
|
||||
//subnets, err := DescribeSubnets(x.Cloud)
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("error finding subnets: %v", err)
|
||||
//}
|
||||
//for _, s := range subnets {
|
||||
// id := aws.StringValue(s.SubnetId)
|
||||
// err := awsCloud.AddAWSTags(id, newTags)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error re-tagging subnet %q: %v", id, err)
|
||||
// }
|
||||
//}
|
||||
|
||||
// Retag VPC
|
||||
// We have to be careful because VPCs can be shared
|
||||
{
|
||||
vpcID := config.NetworkID
|
||||
if vpcID != "" {
|
||||
tags, err := awsCloud.GetTags(vpcID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting VPC tags: %v", err)
|
||||
}
|
||||
|
||||
clusterTag := tags[awsup.TagClusterName]
|
||||
if clusterTag != "" {
|
||||
if clusterTag != oldClusterName {
|
||||
return fmt.Errorf("VPC is tagged with a different cluster: %v", clusterTag)
|
||||
}
|
||||
replaceTags := make(map[string]string)
|
||||
replaceTags[awsup.TagClusterName] = newClusterName
|
||||
err := awsCloud.CreateTags(vpcID, replaceTags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error re-tagging VPC: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Retag DHCP options
|
||||
// We have to be careful because DHCP options can be shared
|
||||
for _, dhcpOption := range dhcpOptions {
|
||||
clusterTag := dhcpOption.Tags[awsup.TagClusterName]
|
||||
if clusterTag != "" {
|
||||
if clusterTag != oldClusterName {
|
||||
return fmt.Errorf("DHCP options are tagged with a different cluster: %v", clusterTag)
|
||||
}
|
||||
replaceTags := make(map[string]string)
|
||||
replaceTags[awsup.TagClusterName] = newClusterName
|
||||
err := awsCloud.CreateTags(*dhcpOption.DhcpOptionsId, replaceTags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error re-tagging DHCP options: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TODO: Retag internet gateway (may not be tagged at all though...)
|
||||
// TODO: Share more code with cluster deletion?
|
||||
|
||||
// TODO: Adopt LoadBalancers & LoadBalancer Security Groups
|
||||
|
||||
// Adopt Volumes
|
||||
for _, volume := range volumes {
|
||||
id := aws.StringValue(volume.VolumeId)
|
||||
|
||||
// TODO: Batch re-tag?
|
||||
replaceTags := make(map[string]string)
|
||||
replaceTags[awsup.TagClusterName] = newClusterName
|
||||
|
||||
name, _ := awsup.FindEC2Tag(volume.Tags, "Name")
|
||||
if name == oldClusterName+"-master-pd" {
|
||||
glog.Infof("Found master volume %q: %s", id, name)
|
||||
replaceTags["Name"] = "kubernetes.master." + aws.StringValue(volume.AvailabilityZone) + "." + newClusterName
|
||||
}
|
||||
err := awsCloud.CreateTags(id, replaceTags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error re-tagging volume %q: %v", id, err)
|
||||
}
|
||||
}
|
||||
|
||||
config.ClusterName = newClusterName
|
||||
err = x.StateStore.WriteConfig(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing updated configuration: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -25,38 +25,43 @@ func findAutoscalingGroups(cloud *awsup.AWSCloud, tags map[string]string) ([]*au
|
|||
request := &autoscaling.DescribeTagsInput{
|
||||
Filters: asFilters,
|
||||
}
|
||||
response, err := cloud.Autoscaling.DescribeTags(request)
|
||||
|
||||
err := cloud.Autoscaling.DescribeTagsPages(request, func(p *autoscaling.DescribeTagsOutput, lastPage bool) bool {
|
||||
for _, t := range p.Tags {
|
||||
switch *t.ResourceType {
|
||||
case "auto-scaling-group":
|
||||
asgNames = append(asgNames, t.ResourceId)
|
||||
default:
|
||||
glog.Warningf("Unknown resource type: %v", *t.ResourceType)
|
||||
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing autoscaling cluster tags: %v", err)
|
||||
}
|
||||
|
||||
for _, t := range response.Tags {
|
||||
switch *t.ResourceType {
|
||||
case "auto-scaling-group":
|
||||
asgNames = append(asgNames, t.ResourceId)
|
||||
default:
|
||||
glog.Warningf("Unknown resource type: %v", *t.ResourceType)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(asgNames) != 0 {
|
||||
request := &autoscaling.DescribeAutoScalingGroupsInput{
|
||||
AutoScalingGroupNames: asgNames,
|
||||
}
|
||||
response, err := cloud.Autoscaling.DescribeAutoScalingGroups(request)
|
||||
err := cloud.Autoscaling.DescribeAutoScalingGroupsPages(request, func(p *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool {
|
||||
for _, asg := range p.AutoScalingGroups {
|
||||
if !matchesAsgTags(tags, asg.Tags) {
|
||||
// We used an inexact filter above
|
||||
continue
|
||||
}
|
||||
asgs = append(asgs, asg)
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing autoscaling groups: %v", err)
|
||||
}
|
||||
|
||||
for _, asg := range response.AutoScalingGroups {
|
||||
if !matchesAsgTags(tags, asg.Tags) {
|
||||
// We used an inexact filter above
|
||||
continue
|
||||
}
|
||||
asgs = append(asgs, asg)
|
||||
}
|
||||
}
|
||||
|
||||
return asgs, nil