mirror of https://github.com/kubernetes/kops.git
Merge pull request #100 from justinsb/upup_zones
upup: separate node & master zone configuration; validate
This commit is contained in:
commit f9ac7af7df
@@ -0,0 +1,110 @@
+package main
+
+import (
+    "fmt"
+    "github.com/golang/glog"
+    "k8s.io/kube-deploy/upup/pkg/fi"
+    "k8s.io/kube-deploy/upup/pkg/fi/cloudup"
+    "k8s.io/kube-deploy/upup/pkg/fi/vfs"
+    "testing"
+)
+
+// TODO: Refactor CreateClusterCmd into pkg/fi/cloudup
+
+func buildDefaultCreateCluster() *CreateClusterCmd {
+    var err error
+
+    c := &CreateClusterCmd{}
+
+    c.Config = &cloudup.CloudConfig{}
+    c.Config.ClusterName = "testcluster.mydomain.com"
+    c.Config.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1c"}
+    c.Config.MasterZones = c.Config.NodeZones
+    c.SSHPublicKey = "~/.ssh/id_rsa.pub"
+
+    c.Config.CloudProvider = "aws"
+
+    c.StateStore, err = fi.NewVFSStateStore(vfs.NewFSPath("test-state"))
+    if err != nil {
+        glog.Fatalf("error building state store: %v", err)
+    }
+
+    return c
+}
+
+func expectErrorFromRun(t *testing.T, c *CreateClusterCmd, message string) {
+    err := c.Run()
+    if err == nil {
+        t.Fatalf("Expected error from run")
+    }
+    actualMessage := fmt.Sprintf("%v", err)
+    if actualMessage != message {
+        t.Fatalf("Expected error %q, got %q", message, actualMessage)
+    }
+}
+
+func TestCreateCluster_DuplicateZones(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1b"}
+    c.Config.MasterZones = []string{"us-east-1a"}
+    expectErrorFromRun(t, c, "NodeZones contained a duplicate value: us-east-1b")
+}
+
+func TestCreateCluster_NoClusterName(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.ClusterName = ""
+    expectErrorFromRun(t, c, "-name is required (e.g. mycluster.myzone.com)")
+}
+
+func TestCreateCluster_NoCloud(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.CloudProvider = ""
+    expectErrorFromRun(t, c, "-cloud is required (e.g. aws, gce)")
+}
+
+func TestCreateCluster_ExtraMasterZone(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.NodeZones = []string{"us-east-1a", "us-east-1c"}
+    c.Config.MasterZones = []string{"us-east-1a", "us-east-1b", "us-east-1c"}
+    expectErrorFromRun(t, c, "All MasterZones must (currently) also be NodeZones")
+}
+
+func TestCreateCluster_NoMasterZones(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.MasterZones = []string{}
+    expectErrorFromRun(t, c, "must specify at least one MasterZone")
+}
+
+func TestCreateCluster_NoNodeZones(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.NodeZones = []string{}
+    expectErrorFromRun(t, c, "must specify at least one NodeZone")
+}
+
+func TestCreateCluster_RegionAsZone(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.NodeZones = []string{"us-east-1"}
+    c.Config.MasterZones = c.Config.NodeZones
+    expectErrorFromRun(t, c, "Region is not a recognized EC2 region: \"us-east-\" (check you have specified valid zones?)")
+}
+
+func TestCreateCluster_BadZone(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.NodeZones = []string{"us-east-1z"}
+    c.Config.MasterZones = c.Config.NodeZones
+    expectErrorFromRun(t, c, "Zone is not a recognized AZ: \"us-east-1z\" (check you have specified a valid zone?)")
+}
+
+func TestCreateCluster_MixedRegion(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.NodeZones = []string{"us-west-1a", "us-west-2b", "us-west-2c"}
+    c.Config.MasterZones = c.Config.NodeZones
+    expectErrorFromRun(t, c, "Clusters cannot span multiple regions")
+}
+
+func TestCreateCluster_EvenEtcdClusterSize(t *testing.T) {
+    c := buildDefaultCreateCluster()
+    c.Config.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"}
+    c.Config.MasterZones = c.Config.NodeZones
+    expectErrorFromRun(t, c, "There should be an odd number of master-zones, for etcd's quorum. Hint: Use -zones and -master-zones to declare node zones and master zones separately.")
+}
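These tests drive CreateClusterCmd.Run directly and assert on exact error strings. As a sketch, they can be run with the standard Go tooling (the package pattern is an assumption based on the import paths above); note that some cases (e.g. RegionAsZone, BadZone) reach the EC2 API through ValidateRegion/ValidateZones, so they assume AWS credentials are available:

    go test k8s.io/kube-deploy/upup/... -run TestCreateCluster -v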
@@ -39,10 +39,13 @@ func main() {
     // (we have plenty of reflection helpers if one isn't already available!)
     config := &cloudup.CloudConfig{}

-    zones := strings.Join(config.Zones, ",")
-
     flag.StringVar(&config.CloudProvider, "cloud", config.CloudProvider, "Cloud provider to use - gce, aws")
-    flag.StringVar(&zones, "zone", zones, "Cloud zone to target (warning - will be replaced by region)")
+
+    zones := ""
+    flag.StringVar(&zones, "zones", "", "Zones in which to run nodes")
+    masterZones := ""
+    flag.StringVar(&masterZones, "master-zones", masterZones, "Zones in which to run masters (must be an odd number)")
+
     flag.StringVar(&config.Project, "project", config.Project, "Project to use (must be set on GCE)")
     flag.StringVar(&config.ClusterName, "name", config.ClusterName, "Name for cluster")
     flag.StringVar(&config.KubernetesVersion, "kubernetes-version", config.KubernetesVersion, "Version of kubernetes to run (defaults to latest)")
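With the flags above, node and master zones can now be declared separately on the command line. A hypothetical invocation (binary name and domain are placeholders, not taken from the commit):

    cloudup -cloud aws -name mycluster.mydomain.com \
        -zones us-east-1a,us-east-1b,us-east-1c \
        -master-zones us-east-1a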
@@ -59,10 +62,12 @@ func main() {
     flag.Parse()

-    config.Zones = strings.Split(zones, ",")
-
-    config.MasterZones = config.Zones
-    config.NodeZones = config.Zones
+    config.NodeZones = parseZoneList(zones)
+    if masterZones == "" {
+        config.MasterZones = config.NodeZones
+    } else {
+        config.MasterZones = parseZoneList(masterZones)
+    }

     if nodeSize != "" {
         config.NodeMachineType = nodeSize
@@ -112,6 +117,19 @@ func main() {
     glog.Infof("Completed successfully")
 }

+func parseZoneList(s string) []string {
+    var filtered []string
+    for _, v := range strings.Split(s, ",") {
+        v = strings.TrimSpace(v)
+        if v == "" {
+            continue
+        }
+        v = strings.ToLower(v)
+        filtered = append(filtered, v)
+    }
+    return filtered
+}
+
 type CreateClusterCmd struct {
     // Config is the cluster configuration
     Config *cloudup.CloudConfig
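For illustration, parseZoneList tolerates stray whitespace, empty entries, and mixed case (input invented here):

    parseZoneList(" US-East-1a, ,us-east-1b,")
    // -> []string{"us-east-1a", "us-east-1b"}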
@@ -149,6 +167,10 @@ func (c *CreateClusterCmd) Run() error {
     // We (currently) have to use protokube with ASGs
     useProtokube := useMasterASG

+    if c.Config.ClusterName == "" {
+        return fmt.Errorf("-name is required (e.g. mycluster.myzone.com)")
+    }
+
     if c.Config.MasterPublicName == "" {
         c.Config.MasterPublicName = "api." + c.Config.ClusterName
     }
@@ -157,12 +179,47 @@ func (c *CreateClusterCmd) Run() error {
         c.Config.DNSZone = strings.Join(tokens[len(tokens)-2:], ".")
     }

+    if len(c.Config.NodeZones) == 0 {
+        return fmt.Errorf("must specify at least one NodeZone")
+    }
+
+    if len(c.Config.MasterZones) == 0 {
+        return fmt.Errorf("must specify at least one MasterZone")
+    }
+
+    // Check for master zone duplicates
+    {
+        masterZones := make(map[string]bool)
+        for _, z := range c.Config.MasterZones {
+            if masterZones[z] {
+                return fmt.Errorf("MasterZones contained a duplicate value: %v", z)
+            }
+            masterZones[z] = true
+        }
+    }
+
+    // Check for node zone duplicates
+    {
+        nodeZones := make(map[string]bool)
+        for _, z := range c.Config.NodeZones {
+            if nodeZones[z] {
+                return fmt.Errorf("NodeZones contained a duplicate value: %v", z)
+            }
+            nodeZones[z] = true
+        }
+    }
+
+    if (len(c.Config.MasterZones) % 2) == 0 {
+        // Not technically a requirement, but doesn't really make sense to allow
+        return fmt.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use -zones and -master-zones to declare node zones and master zones separately.")
+    }
+
     if c.StateStore == nil {
         return fmt.Errorf("StateStore is required")
     }

     if c.Config.CloudProvider == "" {
-        return fmt.Errorf("must specify CloudProvider. Specify with -cloud")
+        return fmt.Errorf("-cloud is required (e.g. aws, gce)")
     }

     tags := make(map[string]struct{})
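The even-count check exists because etcd requires a strict majority: a cluster of n members tolerates n - (n/2 + 1) failures, so going from 3 to 4 master zones adds a member without adding any fault tolerance. A minimal sketch of the arithmetic (not part of the commit):

    // members -> tolerated failures: 1->0, 2->0, 3->1, 4->1, 5->2
    func toleratedFailures(members int) int {
        quorum := members/2 + 1 // integer division
        return members - quorum
    }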
@@ -272,7 +329,7 @@ func (c *CreateClusterCmd) Run() error {

     // For now a zone has to be specified...
     // This will be replaced with a region when we go full HA
-    zone := c.Config.Zones[0]
+    zone := c.Config.NodeZones[0]
     if zone == "" {
         return fmt.Errorf("Must specify a zone (use -zones)")
     }
@@ -340,15 +397,22 @@ func (c *CreateClusterCmd) Run() error {
         "dnsZone": &awstasks.DNSZone{},
     })

-    if len(c.Config.Zones) == 0 {
+    if len(c.Config.NodeZones) == 0 {
         // TODO: Auto choose zones from region?
         return fmt.Errorf("Must specify a zone (use -zones)")
     }
-    for _, zone := range c.Config.Zones {
+    if len(c.Config.MasterZones) == 0 {
+        return fmt.Errorf("Must specify a master zone (use -master-zones)")
+    }
+
+    nodeZones := make(map[string]bool)
+    for _, zone := range c.Config.NodeZones {
         if len(zone) <= 2 {
             return fmt.Errorf("Invalid AWS zone: %q", zone)
         }

+        nodeZones[zone] = true
+
         region = zone[:len(zone)-1]
         if c.Config.Region != "" && c.Config.Region != region {
             return fmt.Errorf("Clusters cannot span multiple regions")
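The region is derived by dropping the zone's trailing letter, which is also why the RegionAsZone test above expects region "us-east-" for the input zone "us-east-1":

    zone := "us-east-1a"
    region := zone[:len(zone)-1] // "us-east-1"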
@@ -357,12 +421,20 @@ func (c *CreateClusterCmd) Run() error {
         c.Config.Region = region
     }

-    if c.SSHPublicKey == "" {
-        return fmt.Errorf("SSH public key must be specified when running with AWS")
+    for _, zone := range c.Config.MasterZones {
+        if !nodeZones[zone] {
+            // We could relax this, but this seems like a reasonable constraint
+            return fmt.Errorf("All MasterZones must (currently) also be NodeZones")
+        }
     }

-    if c.Config.ClusterName == "" {
-        return fmt.Errorf("ClusterName is required for AWS")
+    err := awsup.ValidateRegion(region)
+    if err != nil {
+        return err
     }

+    if c.SSHPublicKey == "" {
+        return fmt.Errorf("SSH public key must be specified when running with AWS")
+    }
+
     cloudTags := map[string]string{"KubernetesCluster": c.Config.ClusterName}
@@ -371,6 +443,11 @@ func (c *CreateClusterCmd) Run() error {
     if err != nil {
         return err
     }
+
+    err = awsCloud.ValidateZones(c.Config.NodeZones)
+    if err != nil {
+        return err
+    }
     cloud = awsCloud

     l.TemplateFunctions["MachineTypeInfo"] = awsup.GetMachineTypeInfo
@@ -422,14 +499,6 @@ func (c *CreateClusterCmd) Run() error {
         glog.Exitf("error building: %v", err)
     }

-    if c.Config.ClusterName == "" {
-        return fmt.Errorf("ClusterName is required")
-    }
-
-    if len(c.Config.Zones) == 0 {
-        return fmt.Errorf("Zone is required")
-    }
-
     var target fi.Target

     switch c.Target {
@@ -25,7 +25,7 @@ routeTable/kubernetes.{{ .ClusterName }}:
   vpc: vpc/kubernetes.{{ .ClusterName }}

-{{ range $zone := .Zones }}
+{{ range $zone := .NodeZones }}

 subnet/kubernetes.{{ $zone }}.{{ $.ClusterName }}:
   vpc: vpc/kubernetes.{{ $.ClusterName }}
@@ -358,3 +358,38 @@ func (c *AWSCloud) ResolveImage(name string) (*ec2.Image, error) {
     image := response.Images[0]
     return image, nil
 }
+
+// ValidateZones checks that every zone in the slice passed is recognized
+func (c *AWSCloud) ValidateZones(zones []string) error {
+    glog.V(2).Infof("Querying EC2 for all valid zones in region")
+
+    request := &ec2.DescribeAvailabilityZonesInput{}
+    response, err := c.EC2.DescribeAvailabilityZones(request)
+
+    if err != nil {
+        return fmt.Errorf("Got an error while querying for valid AZs (verify your AWS credentials?)")
+    }
+
+    zoneMap := make(map[string]*ec2.AvailabilityZone)
+    for _, z := range response.AvailabilityZones {
+        name := aws.StringValue(z.ZoneName)
+        zoneMap[name] = z
+    }
+
+    for _, zone := range zones {
+        z := zoneMap[zone]
+        if z == nil {
+            return fmt.Errorf("Zone is not a recognized AZ: %q (check you have specified a valid zone?)", zone)
+        }
+
+        for _, message := range z.Messages {
+            glog.Warningf("Zone %q has message: %q", zone, aws.StringValue(message.Message))
+        }
+
+        if aws.StringValue(z.State) != "available" {
+            glog.Warningf("Zone %q has state %q", zone, aws.StringValue(z.State))
+        }
+    }
+
+    return nil
+}
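A sketch of calling the new method directly, assuming an initialized *AWSCloud named awsCloud as in Run above:

    if err := awsCloud.ValidateZones([]string{"us-east-1a", "us-east-1b"}); err != nil {
        return err
    }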
@@ -0,0 +1,38 @@
+package awsup
+
+import (
+    "fmt"
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/aws/aws-sdk-go/service/ec2"
+    "github.com/golang/glog"
+    "os"
+)
+
+// ValidateRegion checks that an AWS region name is valid
+func ValidateRegion(region string) error {
+    glog.V(2).Infof("Querying EC2 for all valid regions")
+
+    request := &ec2.DescribeRegionsInput{}
+    config := aws.NewConfig().WithRegion("us-east-1")
+    client := ec2.New(session.New(), config)
+
+    response, err := client.DescribeRegions(request)
+
+    if err != nil {
+        return fmt.Errorf("Got an error while querying for valid regions (verify your AWS credentials?)")
+    }
+    for _, r := range response.Regions {
+        name := aws.StringValue(r.RegionName)
+        if name == region {
+            return nil
+        }
+    }
+
+    if os.Getenv("SKIP_REGION_CHECK") != "" {
+        glog.Infof("AWS region does not appear to be valid, but skipping because SKIP_REGION_CHECK is set")
+        return nil
+    }
+
+    return fmt.Errorf("Region is not a recognized EC2 region: %q (check you have specified valid zones?)", region)
+}
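A usage sketch (the region value is invented for illustration); SKIP_REGION_CHECK is the escape hatch for regions the check does not yet know about:

    if err := awsup.ValidateRegion("eu-west-1"); err != nil {
        return err // or set SKIP_REGION_CHECK=1 to bypass the lookup
    }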
@@ -19,7 +19,6 @@ type CloudConfig struct {
     NodeInit string `json:",omitempty"`

     // Configuration of zones we are targeting
-    Zones []string `json:",omitempty"`
     MasterZones []string `json:",omitempty"`
     NodeZones []string `json:",omitempty"`
     Region string `json:",omitempty"`
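With the single Zones field replaced by MasterZones and NodeZones, a config can declare an odd master set inside a larger node set; an illustrative literal (values invented):

    config := &cloudup.CloudConfig{
        MasterZones: []string{"us-east-1a", "us-east-1b", "us-east-1c"},
        NodeZones:   []string{"us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"},
        Region:      "us-east-1",
    }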
@@ -184,12 +183,11 @@ func (c *CloudConfig) WellKnownServiceIP(id int) (net.IP, error) {
     }

     return nil, fmt.Errorf("Unexpected IP address type for ServiceClusterIPRange: %s", c.ServiceClusterIPRange)
-
 }

 func (c *CloudConfig) SubnetCIDR(zone string) (string, error) {
     index := -1
-    for i, z := range c.Zones {
+    for i, z := range c.NodeZones {
         if z == zone {
             index = i
             break