mirror of https://github.com/kubernetes/kops.git
Merge pull request #5 from justinsb/rename_cloudup_to_kops_create
Merge cloudup tool into `kops create cluster`
This commit is contained in:
commit 153320078f
Makefile
@@ -23,7 +23,6 @@ gofmt:
 kops-tar: gocode
 	rm -rf .build/kops/tar
 	mkdir -p .build/kops/tar/kops/
-	cp ${GOPATH}/bin/cloudup .build/kops/tar/kops/cloudup
 	cp ${GOPATH}/bin/kops .build/kops/tar/kops/kops
 	cp -r models/ .build/kops/tar/kops/models/
 	tar czvf .build/kops.tar.gz -C .build/kops/tar/ .
@@ -42,7 +42,7 @@ you should use Go 1.6 or later)
 ```
 export MYZONE=<kubernetes.myzone.com>
 export KOPS_STATE_STORE=s3://<somes3bucket>
-${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE}
+${GOPATH}/bin/kops create cluster --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE}
 ```

 If you have problems, please set `--v=8 --logtostderr` and open an issue, and ping justinsb on slack!
@@ -96,7 +96,7 @@ the desired state of the world.
 Each file in the tree describes a Task.

 On the nodeup side, Tasks can manage files, systemd services, packages etc.
-On the cloudup side, Tasks manage cloud resources: instances, networks, disks etc.
+On the `kops create cluster` side, Tasks manage cloud resources: instances, networks, disks etc.

 ## Workaround for terraform bug

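The task model described in this hunk maps naturally onto a small interface. The sketch below is illustrative only, assuming a Run-style contract rather than quoting the actual kops task definition: each file in the model tree declares a task, and nodeup or `kops create cluster` runs it to converge on the desired state.

```go
package model

// Context carries whatever a task needs while it runs (cloud clients,
// an output directory, a dryrun flag, and so on). The field shown here
// is purely illustrative.
type Context struct {
	Target string
}

// Task is the unit of work described by a file in the model tree.
// nodeup tasks manage files, systemd services and packages on a host;
// `kops create cluster` tasks manage cloud resources such as instances,
// networks and disks.
type Task interface {
	Run(c *Context) error
}
```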
@@ -104,24 +104,24 @@ Terraform currently has a bug where it can't create AWS tags containing a dot.
 you can't use terraform to build EC2 resources that are tagged with `k8s.io/...` tags. Thankfully this is only
 the volumes, and it isn't the worst idea to build these separately anyway.

-We divide the 'cloudup' model into three parts:
+We divide the 'kops create cluster' model into three parts:
 * models/config which contains all the options
 * models/proto which sets up the volumes and other data which would be hard to recover (e.g. likely keys & secrets in the near future)
-* models/cloudup which is the main cloudup model for configuration everything else
+* models/cloudup which is the main cloud model for configuring everything else

 So you don't use terraform for the 'proto' phase (you can't anyway, because of the bug!):

 ```
 export KOPS_STATE_STORE=s3://<somes3bucket>
 export CLUSTER_NAME=<kubernetes.myzone.com>
-${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${CLUSTER_NAME} --model=config,proto
+${GOPATH}/bin/kops create cluster --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${CLUSTER_NAME} --model=config,proto
 ```

 And then you can use terraform to do the remainder of the installation:

 ```
 export CLUSTER_NAME=<kubernetes.myzone.com>
-${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${CLUSTER_NAME} --model=config,cloudup --target=terraform
+${GOPATH}/bin/kops create cluster --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${CLUSTER_NAME} --model=config,cloudup --target=terraform
 ```

 Then, to apply using terraform:

@@ -1,395 +0,0 @@
package main

import (
	goflag "flag"
	"github.com/golang/glog"
	"github.com/spf13/pflag"
	"k8s.io/kops/upup/pkg/api"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup"
	"k8s.io/kops/upup/pkg/fi/utils"
	"k8s.io/kops/upup/pkg/fi/vfs"
	"k8s.io/kubernetes/pkg/util/sets"
	"os"
	"os/exec"
	"path"
	"strings"
)

var EtcdClusters = []string{"main", "events"}

// zonesToCloud allows us to infer from certain well-known zones to a cloud
// Note it is safe to "overmap" zones that don't exist: we'll check later if the zones actually exist
var zonesToCloud = map[string]fi.CloudProviderID{
	"us-east-1a": fi.CloudProviderAWS,
	"us-east-1b": fi.CloudProviderAWS,
	"us-east-1c": fi.CloudProviderAWS,
	"us-east-1d": fi.CloudProviderAWS,
	"us-east-1e": fi.CloudProviderAWS,

	"us-west-1a": fi.CloudProviderAWS,
	"us-west-1b": fi.CloudProviderAWS,
	"us-west-1c": fi.CloudProviderAWS,
	"us-west-1d": fi.CloudProviderAWS,
	"us-west-1e": fi.CloudProviderAWS,

	"us-west-2a": fi.CloudProviderAWS,
	"us-west-2b": fi.CloudProviderAWS,
	"us-west-2c": fi.CloudProviderAWS,
	"us-west-2d": fi.CloudProviderAWS,
	"us-west-2e": fi.CloudProviderAWS,

	"eu-west-1a": fi.CloudProviderAWS,
	"eu-west-1b": fi.CloudProviderAWS,
	"eu-west-1c": fi.CloudProviderAWS,
	"eu-west-1d": fi.CloudProviderAWS,
	"eu-west-1e": fi.CloudProviderAWS,

	"eu-central-1a": fi.CloudProviderAWS,
	"eu-central-1b": fi.CloudProviderAWS,
	"eu-central-1c": fi.CloudProviderAWS,
	"eu-central-1d": fi.CloudProviderAWS,
	"eu-central-1e": fi.CloudProviderAWS,

	"ap-southeast-1a": fi.CloudProviderAWS,
	"ap-southeast-1b": fi.CloudProviderAWS,
	"ap-southeast-1c": fi.CloudProviderAWS,
	"ap-southeast-1d": fi.CloudProviderAWS,
	"ap-southeast-1e": fi.CloudProviderAWS,

	"ap-southeast-2a": fi.CloudProviderAWS,
	"ap-southeast-2b": fi.CloudProviderAWS,
	"ap-southeast-2c": fi.CloudProviderAWS,
	"ap-southeast-2d": fi.CloudProviderAWS,
	"ap-southeast-2e": fi.CloudProviderAWS,

	"ap-northeast-1a": fi.CloudProviderAWS,
	"ap-northeast-1b": fi.CloudProviderAWS,
	"ap-northeast-1c": fi.CloudProviderAWS,
	"ap-northeast-1d": fi.CloudProviderAWS,
	"ap-northeast-1e": fi.CloudProviderAWS,

	"ap-northeast-2a": fi.CloudProviderAWS,
	"ap-northeast-2b": fi.CloudProviderAWS,
	"ap-northeast-2c": fi.CloudProviderAWS,
	"ap-northeast-2d": fi.CloudProviderAWS,
	"ap-northeast-2e": fi.CloudProviderAWS,

	"sa-east-1a": fi.CloudProviderAWS,
	"sa-east-1b": fi.CloudProviderAWS,
	"sa-east-1c": fi.CloudProviderAWS,
	"sa-east-1d": fi.CloudProviderAWS,
	"sa-east-1e": fi.CloudProviderAWS,
}

func main() {
	executableLocation, err := exec.LookPath(os.Args[0])
	if err != nil {
		glog.Fatalf("Cannot determine location of cloudup tool: %q. Please report this problem!", os.Args[0])
	}

	modelsBaseDirDefault := path.Join(path.Dir(executableLocation), "models")

	dryrun := pflag.Bool("dryrun", false, "Don't create cloud resources; just show what would be done")
	target := pflag.String("target", "direct", "Target - direct, terraform")
	//configFile := pflag.String("conf", "", "Configuration file to load")
	modelsBaseDir := pflag.String("modelstore", modelsBaseDirDefault, "Source directory where models are stored")
	models := pflag.String("model", "config,proto,cloudup", "Models to apply (separate multiple models with commas)")
	nodeModel := pflag.String("nodemodel", "nodeup", "Model to use for node configuration")

	defaultStateStore := os.Getenv("KOPS_STATE_STORE")
	stateLocation := pflag.String("state", defaultStateStore, "Location to use to store configuration state")

	cloudProvider := pflag.String("cloud", "", "Cloud provider to use - gce, aws")

	zones := pflag.String("zones", "", "Zones in which to run the cluster")
	masterZones := pflag.String("master-zones", "", "Zones in which to run masters (must be an odd number)")

	project := pflag.String("project", "", "Project to use (must be set on GCE)")
	clusterName := pflag.String("name", "", "Name for cluster")
	kubernetesVersion := pflag.String("kubernetes-version", "", "Version of kubernetes to run (defaults to latest)")

	sshPublicKey := pflag.String("ssh-public-key", "~/.ssh/id_rsa.pub", "SSH public key to use")

	nodeSize := pflag.String("node-size", "", "Set instance size for nodes")

	masterSize := pflag.String("master-size", "", "Set instance size for masters")

	vpcID := pflag.String("vpc", "", "Set to use a shared VPC")
	networkCIDR := pflag.String("network-cidr", "", "Set to override the default network CIDR")

	nodeCount := pflag.Int("node-count", 0, "Set the number of nodes")

	image := pflag.String("image", "", "Image to use")

	dnsZone := pflag.String("dns-zone", "", "DNS hosted zone to use (defaults to last two components of cluster name)")
	outDir := pflag.String("out", "", "Path to write any local output")

	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	pflag.Parse()
	goflag.CommandLine.Parse([]string{})

	isDryrun := false
	if *dryrun {
		isDryrun = true
		*target = "dryrun"
	}

	if *stateLocation == "" {
		glog.Errorf("--state is required")
		os.Exit(1)
	}

	if *clusterName == "" {
		glog.Errorf("--name is required")
		os.Exit(1)
	}

	statePath, err := vfs.Context.BuildVfsPath(*stateLocation)
	if err != nil {
		glog.Errorf("error building state location: %v", err)
		os.Exit(1)
	}

	if *outDir == "" {
		*outDir = "out"
	}

	stateStore, err := fi.NewVFSStateStore(statePath, *clusterName, isDryrun)
	if err != nil {
		glog.Errorf("error building state store: %v", err)
		os.Exit(1)
	}

	cluster, instanceGroups, err := api.ReadConfig(stateStore)
	if err != nil {
		glog.Errorf("error loading configuration: %v", err)
		os.Exit(1)
	}

	if *zones != "" {
		existingZones := make(map[string]*api.ClusterZoneSpec)
		for _, zone := range cluster.Spec.Zones {
			existingZones[zone.Name] = zone
		}

		for _, zone := range parseZoneList(*zones) {
			if existingZones[zone] == nil {
				cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
					Name: zone,
				})
			}
		}
	}

	var masters []*api.InstanceGroup
	var nodes []*api.InstanceGroup

	for _, group := range instanceGroups {
		if group.IsMaster() {
			masters = append(masters, group)
		} else {
			nodes = append(nodes, group)
		}
	}

	if *masterZones == "" {
		if len(masters) == 0 {
			// Default to putting into every zone
			// TODO: just the first 1 or 3 zones; or should we force users to declare?
			for _, zone := range cluster.Spec.Zones {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone.Name}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone.Name // Subsequent masters (if we support that) could be <zone>-1, <zone>-2
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		}
	} else {
		if len(masters) == 0 {
			for _, zone := range parseZoneList(*masterZones) {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		} else {
			// This is hard, because of the etcd cluster
			glog.Errorf("Cannot change master-zones from the CLI")
			os.Exit(1)
		}
	}

	if len(cluster.Spec.EtcdClusters) == 0 {
		zones := sets.NewString()
		for _, group := range instanceGroups {
			for _, zone := range group.Spec.Zones {
				zones.Insert(zone)
			}
		}
		etcdZones := zones.List()
		if (len(etcdZones) % 2) == 0 {
			// Not technically a requirement, but doesn't really make sense to allow
			glog.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
			os.Exit(1)
		}

		for _, etcdCluster := range EtcdClusters {
			etcd := &api.EtcdClusterSpec{}
			etcd.Name = etcdCluster
			for _, zone := range etcdZones {
				m := &api.EtcdMemberSpec{}
				m.Name = zone
				m.Zone = zone
				etcd.Members = append(etcd.Members, m)
			}
			cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcd)
		}
	}

	if len(nodes) == 0 {
		g := &api.InstanceGroup{}
		g.Spec.Role = api.InstanceGroupRoleNode
		g.Name = "nodes"
		instanceGroups = append(instanceGroups, g)
		nodes = append(nodes, g)
	}

	if *nodeSize != "" {
		for _, group := range nodes {
			group.Spec.MachineType = *nodeSize
		}
	}

	if *image != "" {
		for _, group := range instanceGroups {
			group.Spec.Image = *image
		}
	}

	if *nodeCount != 0 {
		for _, group := range nodes {
			group.Spec.MinSize = nodeCount
			group.Spec.MaxSize = nodeCount
		}
	}

	if *masterSize != "" {
		for _, group := range masters {
			group.Spec.MachineType = *masterSize
		}
	}

	if *dnsZone != "" {
		cluster.Spec.DNSZone = *dnsZone
	}

	if *cloudProvider != "" {
		cluster.Spec.CloudProvider = *cloudProvider
	}

	if *project != "" {
		cluster.Spec.Project = *project
	}

	if *clusterName != "" {
		cluster.Name = *clusterName
	}

	if *kubernetesVersion != "" {
		cluster.Spec.KubernetesVersion = *kubernetesVersion
	}

	if *vpcID != "" {
		cluster.Spec.NetworkID = *vpcID
	}

	if *networkCIDR != "" {
		cluster.Spec.NetworkCIDR = *networkCIDR
	}

	if cluster.SharedVPC() && cluster.Spec.NetworkCIDR == "" {
		glog.Errorf("Must specify NetworkCIDR when VPC is set")
		os.Exit(1)
	}

	if cluster.Spec.CloudProvider == "" {
		for _, zone := range cluster.Spec.Zones {
			cloud := zonesToCloud[zone.Name]
			if cloud != "" {
				glog.Infof("Inferred --cloud=%s from zone %q", cloud, zone.Name)
				cluster.Spec.CloudProvider = string(cloud)
				break
			}
		}
	}

	if *sshPublicKey != "" {
		*sshPublicKey = utils.ExpandPath(*sshPublicKey)
	}

	err = cluster.PerformAssignments()
	if err != nil {
		glog.Errorf("error populating configuration: %v", err)
		os.Exit(1)
	}
	err = api.PerformAssignmentsInstanceGroups(instanceGroups)
	if err != nil {
		glog.Errorf("error populating configuration: %v", err)
		os.Exit(1)
	}

	err = api.WriteConfig(stateStore, cluster, instanceGroups)
	if err != nil {
		glog.Errorf("error writing updated configuration: %v", err)
		os.Exit(1)
	}

	cmd := &cloudup.CreateClusterCmd{
		Cluster:        cluster,
		InstanceGroups: instanceGroups,
		ModelStore:     *modelsBaseDir,
		Models:         strings.Split(*models, ","),
		StateStore:     stateStore,
		Target:         *target,
		NodeModel:      *nodeModel,
		SSHPublicKey:   *sshPublicKey,
		OutDir:         *outDir,
	}
	//if *configFile != "" {
	//	//confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
	//	err := cmd.LoadConfig(configFile)
	//	if err != nil {
	//		glog.Errorf("error loading config: %v", err)
	//		os.Exit(1)
	//	}
	//}

	err = cmd.Run()
	if err != nil {
		glog.Errorf("error running command: %v", err)
		os.Exit(1)
	}

	glog.Infof("Completed successfully")
}

func parseZoneList(s string) []string {
	var filtered []string
	for _, v := range strings.Split(s, ",") {
		v = strings.TrimSpace(v)
		if v == "" {
			continue
		}
		v = strings.ToLower(v)
		filtered = append(filtered, v)
	}
	return filtered
}
@@ -0,0 +1,16 @@
package main

import (
	"github.com/spf13/cobra"
)

// createCmd represents the create command
var createCmd = &cobra.Command{
	Use:   "create",
	Short: "create clusters",
	Long:  `Create clusters`,
}

func init() {
	rootCommand.AddCommand(createCmd)
}
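`rootCommand` is defined elsewhere in cmd/kops and is not part of this diff. The self-contained sketch below, with illustrative names, shows the same cobra nesting pattern (a root command, a `create` group, and a `cluster` sub-command), which is what makes the `kops create cluster` invocation resolve:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Root command stands in for the real rootCommand in cmd/kops.
	root := &cobra.Command{Use: "kops"}

	// "create" is a grouping command; sub-commands hang off it.
	create := &cobra.Command{Use: "create", Short: "create clusters"}
	root.AddCommand(create)

	// "cluster" carries the actual Run logic, as in create_cluster.go.
	cluster := &cobra.Command{
		Use:   "cluster",
		Short: "Create cluster",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("would create a cluster here")
		},
	}
	create.AddCommand(cluster)

	// Simulate `kops create cluster` for the purposes of this sketch.
	root.SetArgs([]string{"create", "cluster"})
	if err := root.Execute(); err != nil {
		fmt.Println(err)
	}
}
```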
@@ -0,0 +1,363 @@
package main

import (
	"github.com/golang/glog"
	"github.com/spf13/cobra"
	"k8s.io/kops/upup/pkg/api"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup"
	"os"
	"os/exec"
	"path"
	"strings"
	"k8s.io/kops/upup/pkg/fi/vfs"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kops/upup/pkg/fi/utils"
	"fmt"
)

type CreateClusterCmd struct {
	DryRun            bool
	Target            string
	ModelsBaseDir     string
	Models            string
	NodeModel         string
	//StateStore      string
	Cloud             string
	//Region          string
	Zones             string
	MasterZones       string
	NodeSize          string
	MasterSize        string
	NodeCount         int
	Project           string
	KubernetesVersion string
	OutDir            string
	Image             string
	SSHPublicKey      string
	VPCID             string
	NetworkCIDR       string
	DNSZone           string
}

var createCluster CreateClusterCmd

func init() {
	cmd := &cobra.Command{
		Use:   "cluster",
		Short: "Create cluster",
		Long:  `Creates a k8s cluster.`,
		Run: func(cmd *cobra.Command, args []string) {
			err := createCluster.Run()
			if err != nil {
				glog.Exitf("%v", err)
			}
		},
	}

	createCmd.AddCommand(cmd)

	executableLocation, err := exec.LookPath(os.Args[0])
	if err != nil {
		glog.Fatalf("Cannot determine location of kops tool: %q. Please report this problem!", os.Args[0])
	}

	modelsBaseDirDefault := path.Join(path.Dir(executableLocation), "models")

	cmd.Flags().BoolVar(&createCluster.DryRun, "dryrun", false, "Don't create cloud resources; just show what would be done")
	cmd.Flags().StringVar(&createCluster.Target, "target", "direct", "Target - direct, terraform")
	//configFile := cmd.Flags().StringVar(&createCluster., "conf", "", "Configuration file to load")
	cmd.Flags().StringVar(&createCluster.ModelsBaseDir, "modeldir", modelsBaseDirDefault, "Source directory where models are stored")
	cmd.Flags().StringVar(&createCluster.Models, "model", "config,proto,cloudup", "Models to apply (separate multiple models with commas)")
	cmd.Flags().StringVar(&createCluster.NodeModel, "nodemodel", "nodeup", "Model to use for node configuration")

	//defaultStateStore := os.Getenv("KOPS_STATE_STORE")
	//cmd.Flags().StringVar(&createCluster.StateStore, "state", defaultStateStore, "Location to use to store configuration state")

	cmd.Flags().StringVar(&createCluster.Cloud, "cloud", "", "Cloud provider to use - gce, aws")

	cmd.Flags().StringVar(&createCluster.Zones, "zones", "", "Zones in which to run the cluster")
	cmd.Flags().StringVar(&createCluster.MasterZones, "master-zones", "", "Zones in which to run masters (must be an odd number)")

	cmd.Flags().StringVar(&createCluster.Project, "project", "", "Project to use (must be set on GCE)")
	//cmd.Flags().StringVar(&createCluster.Name, "name", "", "Name for cluster")
	cmd.Flags().StringVar(&createCluster.KubernetesVersion, "kubernetes-version", "", "Version of kubernetes to run (defaults to latest)")

	cmd.Flags().StringVar(&createCluster.SSHPublicKey, "ssh-public-key", "~/.ssh/id_rsa.pub", "SSH public key to use")

	cmd.Flags().StringVar(&createCluster.NodeSize, "node-size", "", "Set instance size for nodes")

	cmd.Flags().StringVar(&createCluster.MasterSize, "master-size", "", "Set instance size for masters")

	cmd.Flags().StringVar(&createCluster.VPCID, "vpc", "", "Set to use a shared VPC")
	cmd.Flags().StringVar(&createCluster.NetworkCIDR, "network-cidr", "", "Set to override the default network CIDR")

	cmd.Flags().IntVar(&createCluster.NodeCount, "node-count", 0, "Set the number of nodes")

	cmd.Flags().StringVar(&createCluster.Image, "image", "", "Image to use")

	cmd.Flags().StringVar(&createCluster.DNSZone, "dns-zone", "", "DNS hosted zone to use (defaults to last two components of cluster name)")
	cmd.Flags().StringVar(&createCluster.OutDir, "out", "", "Path to write any local output")
}

var EtcdClusters = []string{"main", "events"}

func (c *CreateClusterCmd) Run() error {
	isDryrun := false
	if c.DryRun {
		isDryrun = true
		c.Target = "dryrun"
	}

	stateStoreLocation := rootCommand.stateLocation
	if stateStoreLocation == "" {
		return fmt.Errorf("--state is required")
	}

	clusterName := rootCommand.clusterName
	if clusterName == "" {
		return fmt.Errorf("--name is required")
	}

	// TODO: Reuse rootCommand stateStore logic?

	statePath, err := vfs.Context.BuildVfsPath(stateStoreLocation)
	if err != nil {
		return fmt.Errorf("error building state location: %v", err)
	}

	if c.OutDir == "" {
		c.OutDir = "out"
	}

	stateStore, err := fi.NewVFSStateStore(statePath, clusterName, isDryrun)
	if err != nil {
		return fmt.Errorf("error building state store: %v", err)
	}

	cluster, instanceGroups, err := api.ReadConfig(stateStore)
	if err != nil {
		return fmt.Errorf("error loading configuration: %v", err)
	}

	if c.Zones != "" {
		existingZones := make(map[string]*api.ClusterZoneSpec)
		for _, zone := range cluster.Spec.Zones {
			existingZones[zone.Name] = zone
		}

		for _, zone := range parseZoneList(c.Zones) {
			if existingZones[zone] == nil {
				cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
					Name: zone,
				})
			}
		}
	}

	if len(cluster.Spec.Zones) == 0 {
		return fmt.Errorf("must specify at least one zone for the cluster (use --zones)")
	}

	var masters []*api.InstanceGroup
	var nodes []*api.InstanceGroup

	for _, group := range instanceGroups {
		if group.IsMaster() {
			masters = append(masters, group)
		} else {
			nodes = append(nodes, group)
		}
	}

	if c.MasterZones == "" {
		if len(masters) == 0 {
			// Default to putting into every zone
			// TODO: just the first 1 or 3 zones; or should we force users to declare?
			for _, zone := range cluster.Spec.Zones {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone.Name}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone.Name // Subsequent masters (if we support that) could be <zone>-1, <zone>-2
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		}
	} else {
		if len(masters) == 0 {
			for _, zone := range parseZoneList(c.MasterZones) {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		} else {
			// This is hard, because of the etcd cluster
			glog.Errorf("Cannot change master-zones from the CLI")
			os.Exit(1)
		}
	}

	if len(cluster.Spec.EtcdClusters) == 0 {
		zones := sets.NewString()
		for _, group := range instanceGroups {
			for _, zone := range group.Spec.Zones {
				zones.Insert(zone)
			}
		}
		etcdZones := zones.List()
		if (len(etcdZones) % 2) == 0 {
			// Not technically a requirement, but doesn't really make sense to allow
			glog.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
			os.Exit(1)
		}

		for _, etcdCluster := range EtcdClusters {
			etcd := &api.EtcdClusterSpec{}
			etcd.Name = etcdCluster
			for _, zone := range etcdZones {
				m := &api.EtcdMemberSpec{}
				m.Name = zone
				m.Zone = zone
				etcd.Members = append(etcd.Members, m)
			}
			cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcd)
		}
	}

	if len(nodes) == 0 {
		g := &api.InstanceGroup{}
		g.Spec.Role = api.InstanceGroupRoleNode
		g.Name = "nodes"
		instanceGroups = append(instanceGroups, g)
		nodes = append(nodes, g)
	}

	if c.NodeSize != "" {
		for _, group := range nodes {
			group.Spec.MachineType = c.NodeSize
		}
	}

	if c.Image != "" {
		for _, group := range instanceGroups {
			group.Spec.Image = c.Image
		}
	}

	if c.NodeCount != 0 {
		for _, group := range nodes {
			group.Spec.MinSize = fi.Int(c.NodeCount)
			group.Spec.MaxSize = fi.Int(c.NodeCount)
		}
	}

	if c.MasterSize != "" {
		for _, group := range masters {
			group.Spec.MachineType = c.MasterSize
		}
	}

	if c.DNSZone != "" {
		cluster.Spec.DNSZone = c.DNSZone
	}

	if c.Cloud != "" {
		cluster.Spec.CloudProvider = c.Cloud
	}

	if c.Project != "" {
		cluster.Spec.Project = c.Project
	}

	if clusterName != "" {
		cluster.Name = clusterName
	}

	if c.KubernetesVersion != "" {
		cluster.Spec.KubernetesVersion = c.KubernetesVersion
	}

	if c.VPCID != "" {
		cluster.Spec.NetworkID = c.VPCID
	}

	if c.NetworkCIDR != "" {
		cluster.Spec.NetworkCIDR = c.NetworkCIDR
	}

	if cluster.SharedVPC() && cluster.Spec.NetworkCIDR == "" {
		glog.Errorf("Must specify NetworkCIDR when VPC is set")
		os.Exit(1)
	}

	if cluster.Spec.CloudProvider == "" {
		for _, zone := range cluster.Spec.Zones {
			cloud, known := fi.GuessCloudForZone(zone.Name)
			if known {
				glog.Infof("Inferred --cloud=%s from zone %q", cloud, zone.Name)
				cluster.Spec.CloudProvider = string(cloud)
				break
			}
		}
	}

	if c.SSHPublicKey != "" {
		c.SSHPublicKey = utils.ExpandPath(c.SSHPublicKey)
	}

	err = cluster.PerformAssignments()
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}
	err = api.PerformAssignmentsInstanceGroups(instanceGroups)
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}

	err = api.WriteConfig(stateStore, cluster, instanceGroups)
	if err != nil {
		return fmt.Errorf("error writing updated configuration: %v", err)
	}

	cmd := &cloudup.CreateClusterCmd{
		Cluster:        cluster,
		InstanceGroups: instanceGroups,
		ModelStore:     c.ModelsBaseDir,
		Models:         strings.Split(c.Models, ","),
		StateStore:     stateStore,
		Target:         c.Target,
		NodeModel:      c.NodeModel,
		SSHPublicKey:   c.SSHPublicKey,
		OutDir:         c.OutDir,
	}
	//if *configFile != "" {
	//	//confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
	//	err := cmd.LoadConfig(configFile)
	//	if err != nil {
	//		glog.Errorf("error loading config: %v", err)
	//		os.Exit(1)
	//	}
	//}

	return cmd.Run()
}

func parseZoneList(s string) []string {
	var filtered []string
	for _, v := range strings.Split(s, ",") {
		v = strings.TrimSpace(v)
		if v == "" {
			continue
		}
		v = strings.ToLower(v)
		filtered = append(filtered, v)
	}
	return filtered
}
@@ -3,7 +3,7 @@
 When launching into a shared VPC, the VPC & the Internet Gateway will be reused, but we create a new subnet per zone,
 and a new route table.

-Use cloudup with the `--vpc` and `--network-cidr` arguments for your existing VPC, with --dryrun so we can see the
+Use kops create cluster with the `--vpc` and `--network-cidr` arguments for your existing VPC, with --dryrun so we can see the
 config before we apply it.

@@ -11,7 +11,7 @@ config before we apply it.
 export KOPS_STATE_STORE=s3://<somes3bucket>
 export CLUSTER_NAME=<sharedvpc.mydomain.com>

-cloudup --zones=us-east-1b --name=${CLUSTER_NAME} \
+kops create cluster --zones=us-east-1b --name=${CLUSTER_NAME} \
   --vpc=vpc-a80734c1 --network-cidr=10.100.0.0/16 --dryrun
 ```

@@ -36,27 +36,27 @@ Verify that networkCIDR & networkID match your VPC CIDR & ID. You likely need t
 because subnets in a VPC cannot overlap.


-You can then run cloudup again in dryrun mode (you don't need any arguments, because they're all in the config file):
+You can then run `kops create cluster` again in dryrun mode (you don't need any arguments, because they're all in the config file):

 ```
-cloudup --dryrun --name=${CLUSTER_NAME}
+kops create cluster --dryrun --name=${CLUSTER_NAME}
 ```

 Review the changes to make sure they are OK - the Kubernetes settings might not be ones you want on a shared VPC (in which case,
 open an issue!)

-Note also the Kubernetes VPCs (currently) require `EnableDNSHostnames=true`. Cloudup will detect the required change,
+Note also the Kubernetes VPCs (currently) require `EnableDNSHostnames=true`. kops will detect the required change,
 but refuse to make it automatically because it is a shared VPC. Please review the implications and make the change
 to the VPC manually.

 Once you're happy, you can create the cluster using:

 ```
-cloudup --name=${CLUSTER_NAME}
+kops create cluster --name=${CLUSTER_NAME}
 ```


-Finally, if your shared VPC has a KubernetesCluster tag (because it was created with cloudup), you should
+Finally, if your shared VPC has a KubernetesCluster tag (because it was created with kops), you should
 probably remove that tag to indicate that the resources are not owned by that cluster, and so
 deleting the cluster won't try to delete the VPC. (Deleting the VPC won't succeed anyway, because it's in use,
 but it's better to avoid the later confusion!)
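The `EnableDNSHostnames=true` change mentioned in this section has to be made outside of kops on a shared VPC. One way is the AWS SDK for Go, sketched below with the placeholder VPC ID from the docs; the console or the AWS CLI work just as well, and none of this is part of the commit itself:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := ec2.New(sess)

	// Enable DNS hostnames on the shared VPC; kops detects this requirement
	// but will not change a shared VPC on its own.
	_, err := svc.ModifyVpcAttribute(&ec2.ModifyVpcAttributeInput{
		VpcId:              aws.String("vpc-a80734c1"), // placeholder from the docs
		EnableDnsHostnames: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
	})
	if err != nil {
		log.Fatalf("error enabling DNS hostnames: %v", err)
	}
	log.Println("EnableDnsHostnames set to true")
}
```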
@@ -18,14 +18,14 @@ control system).
 One of the most important files in the state store is the top-level config file. This file stores the main
 configuration for your cluster (instance types, zones, etc).

-When you run cloudup, we create a config file for you based on the command line options you specify.
-For example, when you run with `--node-size=m4.large`, we actually set a line in the config file
+When you run `kops create cluster`, we create a state store entry for you based on the command line options you specify.
+For example, when you run with `--node-size=m4.large`, we actually set a line in the configuration
 that looks like `NodeMachineType: m4.large`.

 The configuration you specify on the command line is actually just a convenient short-cut to
-manually editing the config file. Options you specify on the command line are merged into the existing
+manually editing the configuration. Options you specify on the command line are merged into the existing
 configuration. If you want to configure advanced options, or prefer a text-based configuration, you
-may prefer to just edit the config file.
+may prefer to just edit the config file with `kops edit cluster`.

 Because the configuration is merged, this is how you can just specify the changed arguments when
-reconfiguring your cluster - for example just `cloudup` after a dry-run.
+reconfiguring your cluster - for example just `kops create cluster` after a dry-run.
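This merge behaviour is exactly what the new create_cluster.go above implements: each command-line option is applied to the stored configuration only when it was actually set, so everything else is preserved. A reduced, self-contained sketch of that pattern, with illustrative stand-in names rather than the real kops types:

```go
package main

import "fmt"

// ClusterSpec is a cut-down stand-in for the stored cluster configuration.
type ClusterSpec struct {
	NodeMachineType string
	DNSZone         string
}

// applyOverrides merges non-empty CLI options into the existing spec,
// leaving every other field untouched, the same shape as the
// `if c.NodeSize != "" { ... }` blocks in create_cluster.go.
func applyOverrides(spec *ClusterSpec, nodeSize, dnsZone string) {
	if nodeSize != "" {
		spec.NodeMachineType = nodeSize
	}
	if dnsZone != "" {
		spec.DNSZone = dnsZone
	}
}

func main() {
	spec := ClusterSpec{NodeMachineType: "m3.medium", DNSZone: "example.com"}
	applyOverrides(&spec, "m4.large", "") // only --node-size was given
	fmt.Printf("%+v\n", spec)             // DNSZone is preserved
}
```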
@@ -72,7 +72,7 @@ If you now list the clusters, you should see both the old cluster & the new clus
 Use the normal tool to bring up the new cluster:

 ```
-cloudup --name ${NEW_NAME} --dryrun
+kops create cluster --name ${NEW_NAME} --dryrun
 ```

 Things to check are that it is reusing the existing volume for the _main_ etcd cluster (but not the events clusters).
@@ -80,7 +80,7 @@ Things to check are that it is reusing the existing volume for the _main_ etcd c
 And then when you are happy:

 ```
-cloudup --name ${NEW_NAME}
+kops create cluster --name ${NEW_NAME}
 ```

@@ -149,7 +149,7 @@ Due to a limitation in ELBs (you can't replace all the subnets), if you have ELB
 * `kops edit cluster --name ${NEW_NAME}`
 * Add a zone to the `zones` section and save the file (it normally suffices to just add `- name: us-west-2b` or whatever
 zone you are adding); kops will auto-populate the CIDR.
-* cloudup --name ${NEW_NAME}
+* kops create cluster --name ${NEW_NAME}


 In the AWS control panel open the "Load Balancers" section, and for each ELB:
@@ -8,3 +8,75 @@ const CloudProviderGCE CloudProviderID = "gce"
 type Cloud interface {
 	ProviderID() CloudProviderID
 }
+
+// zonesToCloud allows us to infer from certain well-known zones to a cloud
+// Note it is safe to "overmap" zones that don't exist: we'll check later if the zones actually exist
+var zonesToCloud = map[string]CloudProviderID{
+	"us-east-1a": CloudProviderAWS,
+	"us-east-1b": CloudProviderAWS,
+	"us-east-1c": CloudProviderAWS,
+	"us-east-1d": CloudProviderAWS,
+	"us-east-1e": CloudProviderAWS,
+
+	"us-west-1a": CloudProviderAWS,
+	"us-west-1b": CloudProviderAWS,
+	"us-west-1c": CloudProviderAWS,
+	"us-west-1d": CloudProviderAWS,
+	"us-west-1e": CloudProviderAWS,
+
+	"us-west-2a": CloudProviderAWS,
+	"us-west-2b": CloudProviderAWS,
+	"us-west-2c": CloudProviderAWS,
+	"us-west-2d": CloudProviderAWS,
+	"us-west-2e": CloudProviderAWS,
+
+	"eu-west-1a": CloudProviderAWS,
+	"eu-west-1b": CloudProviderAWS,
+	"eu-west-1c": CloudProviderAWS,
+	"eu-west-1d": CloudProviderAWS,
+	"eu-west-1e": CloudProviderAWS,
+
+	"eu-central-1a": CloudProviderAWS,
+	"eu-central-1b": CloudProviderAWS,
+	"eu-central-1c": CloudProviderAWS,
+	"eu-central-1d": CloudProviderAWS,
+	"eu-central-1e": CloudProviderAWS,
+
+	"ap-southeast-1a": CloudProviderAWS,
+	"ap-southeast-1b": CloudProviderAWS,
+	"ap-southeast-1c": CloudProviderAWS,
+	"ap-southeast-1d": CloudProviderAWS,
+	"ap-southeast-1e": CloudProviderAWS,
+
+	"ap-southeast-2a": CloudProviderAWS,
+	"ap-southeast-2b": CloudProviderAWS,
+	"ap-southeast-2c": CloudProviderAWS,
+	"ap-southeast-2d": CloudProviderAWS,
+	"ap-southeast-2e": CloudProviderAWS,
+
+	"ap-northeast-1a": CloudProviderAWS,
+	"ap-northeast-1b": CloudProviderAWS,
+	"ap-northeast-1c": CloudProviderAWS,
+	"ap-northeast-1d": CloudProviderAWS,
+	"ap-northeast-1e": CloudProviderAWS,
+
+	"ap-northeast-2a": CloudProviderAWS,
+	"ap-northeast-2b": CloudProviderAWS,
+	"ap-northeast-2c": CloudProviderAWS,
+	"ap-northeast-2d": CloudProviderAWS,
+	"ap-northeast-2e": CloudProviderAWS,
+
+	"sa-east-1a": CloudProviderAWS,
+	"sa-east-1b": CloudProviderAWS,
+	"sa-east-1c": CloudProviderAWS,
+	"sa-east-1d": CloudProviderAWS,
+	"sa-east-1e": CloudProviderAWS,
+}
+
+// GuessCloudForZone tries to infer the cloudprovider from the zone name
+func GuessCloudForZone(zone string) (CloudProviderID, bool) {
+	c, found := zonesToCloud[zone]
+	return c, found
+}
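The zone table and helper above are exported from the `fi` package, and the new `kops create cluster` command calls `fi.GuessCloudForZone(zone.Name)` instead of keeping its own copy of the map. A minimal usage sketch:

```go
package main

import (
	"fmt"

	"k8s.io/kops/upup/pkg/fi"
)

func main() {
	// Infer the cloud provider from a well-known zone name.
	if cloud, known := fi.GuessCloudForZone("us-east-1c"); known {
		fmt.Printf("zone us-east-1c => cloud %s\n", cloud)
	} else {
		fmt.Println("zone not recognised; pass --cloud explicitly")
	}
}
```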
@@ -78,7 +78,7 @@ func (c *AWSCloud) Tags() map[string]string {
 func isTagsEventualConsistencyError(err error) bool {
 	if awsErr, ok := err.(awserr.Error); ok {
 		switch awsErr.Code() {
-		case "InvalidInstanceID.NotFound", "InvalidRouteTableID.NotFound", "InvalidVpcID.NotFound", "InvalidGroup.NotFound":
+		case "InvalidInstanceID.NotFound", "InvalidRouteTableID.NotFound", "InvalidVpcID.NotFound", "InvalidGroup.NotFound", "InvalidSubnetID.NotFound":
 			return true

 		default:
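The extra `InvalidSubnetID.NotFound` code feeds a predicate that presumably gates retries while freshly created AWS resources become visible to the tagging API (the retry loop itself is not in this hunk). A generic, self-contained sketch of that retry-on-eventual-consistency pattern, using an illustrative stand-in predicate rather than the real `isTagsEventualConsistencyError`:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// errRetryable stands in for an AWS eventual-consistency error code.
var errRetryable = errors.New("eventual consistency")

// isRetryable is an illustrative stand-in for isTagsEventualConsistencyError.
func isRetryable(err error) bool { return errors.Is(err, errRetryable) }

// retry keeps calling fn while it fails with a retryable error.
func retry(attempts int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = fn()
		if err == nil || !isRetryable(err) {
			return err
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	calls := 0
	err := retry(5, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("tag not visible yet: %w", errRetryable)
		}
		return nil
	})
	fmt.Println("calls:", calls, "err:", err)
}
```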
@@ -383,6 +383,9 @@ func (c *CreateClusterCmd) Run() error {

 	case "aws":
 		{
 			awsCloud := cloud.(*awsup.AWSCloud)
 			region = awsCloud.Region

+			tags["_aws"] = struct{}{}
+			c.NodeUpTags = append(c.NodeUpTags, "_aws")
+