mirror of https://github.com/kubernetes/kops.git
support multiple zones in `kops import`
This also seems to be less special-cased. Fix #216
parent e3d6231fbe
commit e3e58f605e
@@ -14,9 +14,10 @@ import (
)

type DeleteClusterCmd struct {
Yes bool
Region string
External bool
Yes bool
Region string
External bool
Unregister bool
}

var deleteCluster DeleteClusterCmd
@@ -37,7 +38,7 @@ func init() {
deleteCmd.AddCommand(cmd)

cmd.Flags().BoolVar(&deleteCluster.Yes, "yes", false, "Delete without confirmation")

cmd.Flags().BoolVar(&deleteCluster.Unregister, "unregister", false, "Don't delete cloud resources, just unregister the cluster")
cmd.Flags().BoolVar(&deleteCluster.External, "external", false, "Delete an external cluster")

cmd.Flags().StringVar(&deleteCluster.Region, "region", "", "region")
@@ -53,33 +54,32 @@ func (c *DeleteClusterCmd) Run(args []string) error {
return err
}

clusterName := rootCommand.clusterName
if clusterName == "" {
return fmt.Errorf("--name is required (when --external)")
}

var cloud fi.Cloud
clusterName := ""
region := ""
var cluster *api.Cluster

if c.External {
region = c.Region
region := c.Region
if region == "" {
return fmt.Errorf("--region is required (when --external)")
}
clusterName = rootCommand.clusterName
if clusterName == "" {
return fmt.Errorf("--name is required (when --external)")
}

tags := map[string]string{"KubernetesCluster": clusterName}
cloud, err = awsup.NewAWSCloud(c.Region, tags)
cloud, err = awsup.NewAWSCloud(region, tags)
if err != nil {
return fmt.Errorf("error initializing AWS client: %v", err)
}
} else {
clusterName = rootCommand.clusterName

clusterRegistry, err = rootCommand.ClusterRegistry()
if err != nil {
return err
}

cluster, err := clusterRegistry.Find(clusterName)
cluster, err = clusterRegistry.Find(clusterName)
if err != nil {
return err
}
@@ -91,57 +91,60 @@ func (c *DeleteClusterCmd) Run(args []string) error {
if clusterName != cluster.Name {
return fmt.Errorf("sanity check failed: cluster name mismatch")
}

cloud, err = cloudup.BuildCloud(cluster)
if err != nil {
return err
}
}

d := &kutil.DeleteCluster{}
d.ClusterName = clusterName
d.Region = region
d.Cloud = cloud

resources, err := d.ListResources()
if err != nil {
return err
}

if len(resources) == 0 {
fmt.Printf("No resources to delete\n")
} else {
t := &Table{}
t.AddColumn("TYPE", func(r *kutil.ResourceTracker) string {
return r.Type
})
t.AddColumn("ID", func(r *kutil.ResourceTracker) string {
return r.ID
})
t.AddColumn("NAME", func(r *kutil.ResourceTracker) string {
return r.Name
})
var l []*kutil.ResourceTracker
for _, v := range resources {
l = append(l, v)
if !c.Unregister {
if cloud == nil {
cloud, err = cloudup.BuildCloud(cluster)
if err != nil {
return err
}
}

err := t.Render(l, os.Stdout, "TYPE", "NAME", "ID")
d := &kutil.DeleteCluster{}
d.ClusterName = clusterName
d.Cloud = cloud

resources, err := d.ListResources()
if err != nil {
return err
}

if !c.Yes {
return fmt.Errorf("Must specify --yes to delete")
}
if len(resources) == 0 {
fmt.Printf("No resources to delete\n")
} else {
t := &Table{}
t.AddColumn("TYPE", func(r *kutil.ResourceTracker) string {
return r.Type
})
t.AddColumn("ID", func(r *kutil.ResourceTracker) string {
return r.ID
})
t.AddColumn("NAME", func(r *kutil.ResourceTracker) string {
return r.Name
})
var l []*kutil.ResourceTracker
for _, v := range resources {
l = append(l, v)
}

err = d.DeleteResources(resources)
if err != nil {
return err
err := t.Render(l, os.Stdout, "TYPE", "NAME", "ID")
if err != nil {
return err
}

if !c.Yes {
return fmt.Errorf("Must specify --yes to delete")
}

err = d.DeleteResources(resources)
if err != nil {
return err
}
}
}

if clusterRegistry != nil {
if !c.External {
if !c.Yes {
fmt.Printf("\nMust specify --yes to delete\n")
return nil
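The reworked Run above only walks and deletes cloud resources when --unregister is not set, and it refuses to call DeleteResources until --yes is given. The following is a minimal sketch of that control flow, not the actual kops code: deleteOptions and resourceLister are hypothetical stand-ins for DeleteClusterCmd and kutil.DeleteCluster.

```go
package sketch

import (
	"errors"
	"fmt"
)

// deleteOptions mirrors the flags wired up in init(); the type itself is
// hypothetical and only illustrates the flow of the hunk above.
type deleteOptions struct {
	Yes        bool
	External   bool
	Unregister bool
}

// resourceLister stands in for kutil.DeleteCluster in this sketch.
type resourceLister interface {
	ListResources() (map[string]string, error)
	DeleteResources(resources map[string]string) error
}

func runDelete(opts deleteOptions, d resourceLister) error {
	if !opts.Unregister {
		resources, err := d.ListResources()
		if err != nil {
			return err
		}
		if len(resources) == 0 {
			fmt.Println("No resources to delete")
		} else {
			// Print the resources here, then require explicit confirmation
			// before anything is actually removed.
			if !opts.Yes {
				return errors.New("Must specify --yes to delete")
			}
			if err := d.DeleteResources(resources); err != nil {
				return err
			}
		}
	}
	// With --unregister (or after deletion), the registry entry is removed
	// separately, unless the cluster is external.
	return nil
}
```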
@@ -53,6 +53,14 @@ func (c *ImportClusterCmd) Run() error {
return err
}

cluster, err := clusterRegistry.Find(clusterName)
if err != nil {
return err
}
if cluster != nil {
return fmt.Errorf("cluster already exists: %q", clusterName)
}

d := &kutil.ImportCluster{}
d.ClusterName = clusterName
d.Cloud = cloud
@@ -1,4 +1,7 @@
# Upgrading from k8s 1.2
# Upgrading from kubernetes 1.2 to kubernetes 1.3

Kops lets you upgrade an existing 1.2 cluster, installed using kube-up, to a cluster managed by
kops running kubernetes version 1.3.

** This is an experimental and slightly risky procedure, so we recommend backing up important data before proceeding.
Take a snapshot of your EBS volumes; export all your data from kubectl etc. **
@@ -11,7 +14,7 @@ Limitations:

## Overview

There are a few steps:
There are a few steps to upgrade a kubernetes cluster from 1.2 to 1.3:

* First you import the existing cluster state, so you can see and edit the configuration
* You verify the cluster configuration
@@ -143,7 +146,3 @@ instance kubernetes-master i-67af2ec8
And once you've confirmed it looks right, run with `--yes`

You will also need to release the old ElasticIP manually.

Note that there is an issue in EC2/ELB: it seems that the NetworkInterfaces for the ELB aren't immediately deleted,
and this prevents full teardown of the old resources (the subnet in particular). A workaround is to delete
the "Network Interfaces" for the old ELB subnet in the AWS console.
@@ -33,7 +33,6 @@ const (
// (e.g. ELB dependencies).
type DeleteCluster struct {
ClusterName string
Region string
Cloud fi.Cloud
}

@@ -50,18 +50,35 @@ func (x *ImportCluster) ImportAWSCluster() error {
}

var masterInstance *ec2.Instance
zones := make(map[string]*api.ClusterZoneSpec)

for _, instance := range instances {
instanceState := aws.StringValue(instance.State.Name)

if instanceState != "terminated" && instance.Placement != nil {
zoneName := aws.StringValue(instance.Placement.AvailabilityZone)
zone := zones[zoneName]
if zone == nil {
zone = &api.ClusterZoneSpec{Name: zoneName}
zones[zoneName] = zone
}

subnet := aws.StringValue(instance.SubnetId)
if subnet != "" {
zone.ProviderID = subnet
}
}

role, _ := awsup.FindEC2Tag(instance.Tags, "Role")
if role == clusterName+"-master" {
if masterInstance != nil {
masterState := aws.StringValue(masterInstance.State.Name)
thisState := aws.StringValue(instance.State.Name)

glog.Infof("Found multiple masters: %s and %s", masterState, thisState)
glog.Infof("Found multiple masters: %s and %s", masterState, instanceState)

if masterState == "terminated" && thisState != "terminated" {
if masterState == "terminated" && instanceState != "terminated" {
// OK
} else if thisState == "terminated" && masterState != "terminated" {
} else if instanceState == "terminated" && masterState != "terminated" {
// Ignore this one
continue
} else {
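This hunk is the heart of the multi-zone import: instead of deriving a single zone from the master's subnet, every non-terminated instance contributes its availability zone and subnet to a map of zone specs. Below is a self-contained sketch of that grouping under simplified assumptions; zoneSpec and instanceInfo are hypothetical stand-ins for api.ClusterZoneSpec and *ec2.Instance.

```go
package sketch

// zoneSpec is a simplified stand-in for api.ClusterZoneSpec.
type zoneSpec struct {
	Name       string // availability zone, e.g. "us-east-1a"
	CIDR       string // filled in later from the subnet's CIDR block
	ProviderID string // subnet ID observed on instances in this zone
}

// instanceInfo is a hypothetical flattened view of *ec2.Instance.
type instanceInfo struct {
	State    string
	Zone     string
	SubnetID string
}

// groupZones builds one zoneSpec per availability zone that still has a
// live (non-terminated) instance, recording the subnet seen in that zone.
func groupZones(instances []instanceInfo) map[string]*zoneSpec {
	zones := make(map[string]*zoneSpec)
	for _, instance := range instances {
		if instance.State == "terminated" || instance.Zone == "" {
			continue
		}
		zone := zones[instance.Zone]
		if zone == nil {
			zone = &zoneSpec{Name: instance.Zone}
			zones[instance.Zone] = zone
		}
		if instance.SubnetID != "" {
			zone.ProviderID = instance.SubnetID
		}
	}
	return zones
}
```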
@@ -79,23 +96,33 @@ func (x *ImportCluster) ImportAWSCluster() error {

masterGroup.Spec.MachineType = aws.StringValue(masterInstance.InstanceType)

masterSubnetID := aws.StringValue(masterInstance.SubnetId)

subnets, err := DescribeSubnets(x.Cloud)
if err != nil {
return fmt.Errorf("error finding subnets: %v", err)
}
var masterSubnet *ec2.Subnet

for _, s := range subnets {
if masterSubnetID == aws.StringValue(s.SubnetId) {
if masterSubnet != nil {
return fmt.Errorf("found duplicate subnet")
subnetID := aws.StringValue(s.SubnetId)

found := false
for _, zone := range zones {
if zone.ProviderID == subnetID {
zone.CIDR = aws.StringValue(s.CidrBlock)
found = true
}
masterSubnet = s
}

if !found {
glog.Warningf("Ignoring subnet %q in which no instances were found", subnetID)
}
}
if masterSubnet == nil {
return fmt.Errorf("cannot find subnet %q. If you used an existing subnet, please tag it with %s=%s and retry the import", masterSubnetID, awsup.TagClusterName, clusterName)
for k, zone := range zones {
if zone.ProviderID == "" {
return fmt.Errorf("cannot find subnet %q. Please report this issue", k)
}
if zone.CIDR == "" {
return fmt.Errorf("cannot find subnet %q. If you used an existing subnet, please tag it with %s=%s and retry the import", zone.ProviderID, awsup.TagClusterName, clusterName)
}
}

vpcID := aws.StringValue(masterInstance.VpcId)
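With the zone map in hand, the subnet scan above no longer looks for one master subnet; it copies each subnet's CIDR onto the zone that recorded it, warns about subnets no instance uses, and finally checks that every zone got a subnet and a CIDR. A sketch of that matching step under the same assumptions, with zoneEntry and subnetInfo as hypothetical stand-ins for api.ClusterZoneSpec and *ec2.Subnet:

```go
package sketch

import (
	"fmt"
	"log"
)

// zoneEntry is a simplified stand-in for api.ClusterZoneSpec.
type zoneEntry struct {
	Name       string
	CIDR       string
	ProviderID string // subnet ID recorded while scanning instances
}

// subnetInfo is a hypothetical flattened view of *ec2.Subnet.
type subnetInfo struct {
	ID   string
	CIDR string
}

// matchSubnets copies each subnet's CIDR onto the zone that recorded it,
// then checks that every zone has both a subnet and a CIDR.
func matchSubnets(zones map[string]*zoneEntry, subnets []subnetInfo) error {
	for _, s := range subnets {
		found := false
		for _, zone := range zones {
			if zone.ProviderID == s.ID {
				zone.CIDR = s.CIDR
				found = true
			}
		}
		if !found {
			log.Printf("ignoring subnet %q in which no instances were found", s.ID)
		}
	}

	for name, zone := range zones {
		if zone.ProviderID == "" {
			return fmt.Errorf("cannot find subnet for zone %q", name)
		}
		if zone.CIDR == "" {
			return fmt.Errorf("cannot find CIDR for subnet %q", zone.ProviderID)
		}
	}
	return nil
}
```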
@@ -110,18 +137,18 @@ func (x *ImportCluster) ImportAWSCluster() error {
}
}

az := aws.StringValue(masterSubnet.AvailabilityZone)

cluster.Spec.NetworkID = vpcID
cluster.Spec.NetworkCIDR = aws.StringValue(vpc.CidrBlock)
cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
Name: az,
CIDR: aws.StringValue(masterSubnet.CidrBlock),
ProviderID: aws.StringValue(masterSubnet.SubnetId),
})
for _, zone := range zones {
cluster.Spec.Zones = append(cluster.Spec.Zones, zone)
}

masterGroup.Spec.Zones = []string{az}
masterGroup.Name = "master-" + az
masterZone := zones[aws.StringValue(masterInstance.Placement.AvailabilityZone)]
if masterZone == nil {
return fmt.Errorf("cannot find zone %q for master. Please report this issue", aws.StringValue(masterInstance.Placement.AvailabilityZone))
}
masterGroup.Spec.Zones = []string{masterZone.Name}
masterGroup.Name = "master-" + masterZone.Name

userData, err := GetInstanceUserData(awsCloud, aws.StringValue(masterInstance.InstanceId))
if err != nil {
@@ -198,7 +225,9 @@ func (x *ImportCluster) ImportAWSCluster() error {
nodeGroup := &api.InstanceGroup{}
nodeGroup.Spec.Role = api.InstanceGroupRoleNode
nodeGroup.Name = "nodes"
nodeGroup.Spec.Zones = []string{az}
for _, zone := range zones {
nodeGroup.Spec.Zones = append(nodeGroup.Spec.Zones, zone.Name)
}
instanceGroups = append(instanceGroups, nodeGroup)

//primaryNodeSet.Spec.MinSize, err = conf.ParseInt("NUM_MINIONS")
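The last two hunks replace the single az value with the collected map: all discovered zones go into the cluster spec, the master group is pinned to the master instance's zone, and the nodes group spans every zone. A sketch of that assembly, again with a hypothetical zoneRecord type rather than the real api.ClusterZoneSpec:

```go
package sketch

import (
	"fmt"
	"sort"
)

// zoneRecord is a simplified stand-in for api.ClusterZoneSpec.
type zoneRecord struct {
	Name string
	CIDR string
}

// assembleZones turns the discovered zone map into the zone lists used by
// the cluster spec, the master instance group, and the nodes group.
func assembleZones(zones map[string]*zoneRecord, masterZoneName string) (clusterZones []*zoneRecord, masterZones []string, nodeZones []string, err error) {
	masterZone := zones[masterZoneName]
	if masterZone == nil {
		return nil, nil, nil, fmt.Errorf("cannot find zone %q for master", masterZoneName)
	}
	masterZones = []string{masterZone.Name}

	// Sort for deterministic output; the hunk above iterates the map directly.
	names := make([]string, 0, len(zones))
	for name := range zones {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		clusterZones = append(clusterZones, zones[name])
		nodeZones = append(nodeZones, name)
	}
	return clusterZones, masterZones, nodeZones, nil
}
```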