mirror of https://github.com/kubernetes/kops.git
commit 26a7d76992
@@ -37,10 +37,13 @@ you should use Go 1.6 or later)
 
 * Set `AWS_PROFILE` (if you need to select a profile for the AWS CLI to work)
 
+* Pick an S3 bucket that you'll use to store your cluster configuration - this is called your state store.
+
 * Execute:
 
 ```
 export MYZONE=<kubernetes.myzone.com>
-${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE} --state s3://<mybucket>/${MYZONE}
+export KOPS_STATE_STORE=s3://<somes3bucket>
+${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE}
 ```
 
 If you have problems, please set `--v=8 --logtostderr` and open an issue, and ping justinsb on slack!
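A note on the state-store change above: the commit makes `--state` default to the `KOPS_STATE_STORE` environment variable, so the docs can drop `--state` from every command. The minimal sketch below (not part of the commit; a standalone illustration using the same pflag pattern the diff introduces) shows how that defaulting works.

```go
// Sketch: default a --state flag from KOPS_STATE_STORE, as this commit does.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	defaultStateStore := os.Getenv("KOPS_STATE_STORE")
	stateLocation := pflag.String("state", defaultStateStore, "Location to use to store configuration state")
	pflag.Parse()

	if *stateLocation == "" {
		fmt.Fprintln(os.Stderr, "--state is required (or set KOPS_STATE_STORE)")
		os.Exit(1)
	}
	fmt.Println("using state store:", *stateLocation)
}
```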
@@ -52,7 +55,8 @@ for use with kubectl:
 
 ```
 export MYZONE=<kubernetes.myzone.com>
-${GOPATH}/bin/upup export kubecfg --state s3://<mybucket>/${MYZONE}
+export KOPS_STATE_STORE=s3://<somes3bucket>
+${GOPATH}/bin/upup export kubecfg --name=${MYZONE}
 ```
 
 ## Delete the cluster
@@ -69,9 +73,9 @@ You must pass --yes to actually delete resources (without the `#` comment!)
 
 ## Other interesting modes:
 
-* See changes that would be applied: `${GOPATH}/bin/cloudup --dryrun`
+* See changes that would be applied: `--dryrun`
 
-* Build a terraform model: `${GOPATH}/bin/cloudup $NORMAL_ARGS --target=terraform` The terraform model will be built in `state/terraform`
+* Build a terraform model: `--target=terraform` The terraform model will be built in `out/terraform`
 
 * Specify the k8s build to run: `--kubernetes-version=1.2.2`
 
@@ -101,22 +105,24 @@ Terraform currently has a bug where it can't create AWS tags containing a dot.
 you can't use terraform to build EC2 resources that are tagged with `k8s.io/...` tags. Thankfully this is only
 the volumes, and it isn't the worst idea to build these separately anyway.
 
-We divide the 'cloudup' model into two parts:
+We divide the 'cloudup' model into three parts:
+* models/config which contains all the options
 * models/proto which sets up the volumes and other data which would be hard to recover (e.g. likely keys & secrets in the near future)
 * models/cloudup which is the main cloudup model for configuration everything else
 
 So you don't use terraform for the 'proto' phase (you can't anyway, because of the bug!):
 
 ```
-export MYZONE=<kubernetes.myzone.com>
-${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE} --model=proto
+export KOPS_STATE_STORE=s3://<somes3bucket>
+export CLUSTER_NAME=<kubernetes.myzone.com>
+${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${CLUSTER_NAME} --model=config,proto
 ```
 
 And then you can use terraform to do the full installation:
 
 ```
-export MYZONE=<kubernetes.myzone.com>
-${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE} --model=cloudup --target=terraform
+export CLUSTER_NAME=<kubernetes.myzone.com>
+${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${CLUSTER_NAME} --model=config,cloudup --target=terraform
 ```
 
 Then, to apply using terraform:
 
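The `--model` flag above takes a comma-separated list of models, and the commit changes its default to `config,proto,cloudup`. The following short sketch (illustrative only, not the upup source) shows how such a flag is typically split into the list of models to apply.

```go
// Sketch: split a comma-separated --model value like "config,proto,cloudup".
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

func main() {
	models := pflag.String("model", "config,proto,cloudup", "Models to apply (separate multiple models with commas)")
	pflag.Parse()

	for _, m := range strings.Split(*models, ",") {
		fmt.Println("applying model:", m)
	}
}
```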
@@ -107,5 +107,5 @@ func TestCreateCluster_EvenEtcdClusterSize(t *testing.T) {
 c := buildDefaultCreateCluster()
 c.ClusterConfig.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"}
 c.ClusterConfig.MasterZones = c.ClusterConfig.NodeZones
-expectErrorFromRun(t, c, "There should be an odd number of master-zones, for etcd's quorum. Hint: Use -zone and -master-zone to declare node zones and master zones separately.")
+expectErrorFromRun(t, c, "There should be an odd number of master-zones, for etcd's quorum. Hint: Use -zones and -master-zones to declare node zones and master zones separately.")
 }
@@ -18,6 +18,70 @@ import (
 
 var EtcdClusters = []string{"main", "events"}
 
+// zonesToCloud allows us to infer from certain well-known zones to a cloud
+// Note it is safe to "overmap" zones that don't exist: we'll check later if the zones actually exist
+var zonesToCloud = map[string]fi.CloudProviderID{
+"us-east-1a": fi.CloudProviderAWS,
+"us-east-1b": fi.CloudProviderAWS,
+"us-east-1c": fi.CloudProviderAWS,
+"us-east-1d": fi.CloudProviderAWS,
+"us-east-1e": fi.CloudProviderAWS,
+
+"us-west-1a": fi.CloudProviderAWS,
+"us-west-1b": fi.CloudProviderAWS,
+"us-west-1c": fi.CloudProviderAWS,
+"us-west-1d": fi.CloudProviderAWS,
+"us-west-1e": fi.CloudProviderAWS,
+
+"us-west-2a": fi.CloudProviderAWS,
+"us-west-2b": fi.CloudProviderAWS,
+"us-west-2c": fi.CloudProviderAWS,
+"us-west-2d": fi.CloudProviderAWS,
+"us-west-2e": fi.CloudProviderAWS,
+
+"eu-west-1a": fi.CloudProviderAWS,
+"eu-west-1b": fi.CloudProviderAWS,
+"eu-west-1c": fi.CloudProviderAWS,
+"eu-west-1d": fi.CloudProviderAWS,
+"eu-west-1e": fi.CloudProviderAWS,
+
+"eu-central-1a": fi.CloudProviderAWS,
+"eu-central-1b": fi.CloudProviderAWS,
+"eu-central-1c": fi.CloudProviderAWS,
+"eu-central-1d": fi.CloudProviderAWS,
+"eu-central-1e": fi.CloudProviderAWS,
+
+"ap-southeast-1a": fi.CloudProviderAWS,
+"ap-southeast-1b": fi.CloudProviderAWS,
+"ap-southeast-1c": fi.CloudProviderAWS,
+"ap-southeast-1d": fi.CloudProviderAWS,
+"ap-southeast-1e": fi.CloudProviderAWS,
+
+"ap-southeast-2a": fi.CloudProviderAWS,
+"ap-southeast-2b": fi.CloudProviderAWS,
+"ap-southeast-2c": fi.CloudProviderAWS,
+"ap-southeast-2d": fi.CloudProviderAWS,
+"ap-southeast-2e": fi.CloudProviderAWS,
+
+"ap-northeast-1a": fi.CloudProviderAWS,
+"ap-northeast-1b": fi.CloudProviderAWS,
+"ap-northeast-1c": fi.CloudProviderAWS,
+"ap-northeast-1d": fi.CloudProviderAWS,
+"ap-northeast-1e": fi.CloudProviderAWS,
+
+"ap-northeast-2a": fi.CloudProviderAWS,
+"ap-northeast-2b": fi.CloudProviderAWS,
+"ap-northeast-2c": fi.CloudProviderAWS,
+"ap-northeast-2d": fi.CloudProviderAWS,
+"ap-northeast-2e": fi.CloudProviderAWS,
+
+"sa-east-1a": fi.CloudProviderAWS,
+"sa-east-1b": fi.CloudProviderAWS,
+"sa-east-1c": fi.CloudProviderAWS,
+"sa-east-1d": fi.CloudProviderAWS,
+"sa-east-1e": fi.CloudProviderAWS,
+}
+
 func main() {
 executableLocation, err := exec.LookPath(os.Args[0])
 if err != nil {
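The table above lets cloudup infer `--cloud` from the zone names when the user does not pass it (the lookup itself happens in a later hunk of this file). A trimmed-down, self-contained sketch of that inference — with the provider ID represented by a plain string rather than `fi.CloudProviderID` — looks like this:

```go
// Sketch (illustrative, not the upup source): infer the cloud from zone names.
package main

import "fmt"

var zonesToCloud = map[string]string{
	"us-east-1a": "aws",
	"us-east-1b": "aws",
	"eu-west-1a": "aws",
	// ... the real table lists zones a..e for each supported AWS region
}

func inferCloud(zones []string) string {
	for _, zone := range zones {
		if cloud, ok := zonesToCloud[zone]; ok {
			fmt.Printf("Inferred --cloud=%s from zone %q\n", cloud, zone)
			return cloud
		}
	}
	return ""
}

func main() {
	fmt.Println(inferCloud([]string{"us-east-1a", "us-east-1b"}))
}
```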
@@ -30,9 +94,11 @@ func main() {
 target := pflag.String("target", "direct", "Target - direct, terraform")
 //configFile := pflag.String("conf", "", "Configuration file to load")
 modelsBaseDir := pflag.String("modelstore", modelsBaseDirDefault, "Source directory where models are stored")
-models := pflag.String("model", "proto,cloudup", "Models to apply (separate multiple models with commas)")
+models := pflag.String("model", "config,proto,cloudup", "Models to apply (separate multiple models with commas)")
 nodeModel := pflag.String("nodemodel", "nodeup", "Model to use for node configuration")
-stateLocation := pflag.String("state", "", "Location to use to store configuration state")
+
+defaultStateStore := os.Getenv("KOPS_STATE_STORE")
+stateLocation := pflag.String("state", defaultStateStore, "Location to use to store configuration state")
 
 cloudProvider := pflag.String("cloud", "", "Cloud provider to use - gce, aws")
 
@@ -49,6 +115,9 @@ func main() {
 
 masterSize := pflag.String("master-size", "", "Set instance size for masters")
 
+vpcID := pflag.String("vpc", "", "Set to use a shared VPC")
+networkCIDR := pflag.String("network-cidr", "", "Set to override the default network CIDR")
+
 nodeCount := pflag.Int("node-count", 0, "Set the number of nodes")
 
 image := pflag.String("image", "", "Image to use")
@@ -71,6 +140,11 @@ func main() {
 os.Exit(1)
 }
 
+if *clusterName == "" {
+glog.Errorf("--name is required")
+os.Exit(1)
+}
+
 statePath, err := vfs.Context.BuildVfsPath(*stateLocation)
 if err != nil {
 glog.Errorf("error building state location: %v", err)
@@ -81,7 +155,7 @@ func main() {
 *outDir = "out"
 }
 
-stateStore, err := fi.NewVFSStateStore(statePath, isDryrun)
+stateStore, err := fi.NewVFSStateStore(statePath, *clusterName, isDryrun)
 if err != nil {
 glog.Errorf("error building state store: %v", err)
 os.Exit(1)
@@ -118,7 +192,7 @@ func main() {
 nodes = append(nodes, group)
 }
 }
-createEtcdCluster := false
+
 if *masterZones == "" {
 if len(masters) == 0 {
 // Default to putting into every zone
@@ -133,7 +207,6 @@ func main() {
 instanceGroups = append(instanceGroups, g)
 masters = append(masters, g)
 }
-createEtcdCluster = true
 }
 } else {
 if len(masters) == 0 {
@@ -147,7 +220,6 @@ func main() {
 instanceGroups = append(instanceGroups, g)
 masters = append(masters, g)
 }
-createEtcdCluster = true
 } else {
 // This is hard, because of the etcd cluster
 glog.Errorf("Cannot change master-zones from the CLI")
@@ -155,7 +227,7 @@ func main() {
 }
 }
 
-if createEtcdCluster {
+if len(cluster.Spec.EtcdClusters) == 0 {
 zones := sets.NewString()
 for _, group := range instanceGroups {
 for _, zone := range group.Spec.Zones {
@@ -165,7 +237,7 @@ func main() {
 etcdZones := zones.List()
 if (len(etcdZones) % 2) == 0 {
 // Not technically a requirement, but doesn't really make sense to allow
-glog.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use --zone and --master-zone to declare node zones and master zones separately.")
+glog.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
 os.Exit(1)
 }
 
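The even-count guard above exists because etcd keeps quorum only when a strict majority of members is reachable, so an odd number of master zones is required. A minimal sketch of the same check (illustrative only):

```go
// Sketch: reject an even number of etcd/master zones, as the hunk above does.
package main

import "fmt"

func checkEtcdZones(etcdZones []string) error {
	if len(etcdZones)%2 == 0 {
		return fmt.Errorf("there should be an odd number of master-zones, for etcd's quorum")
	}
	return nil
}

func main() {
	fmt.Println(checkEtcdZones([]string{"us-east-1a", "us-east-1b"}))                 // error: 2 zones
	fmt.Println(checkEtcdZones([]string{"us-east-1a", "us-east-1b", "us-east-1c"}))   // nil: 3 zones
}
```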
@@ -235,6 +307,34 @@ func main() {
 cluster.Spec.KubernetesVersion = *kubernetesVersion
 }
 
+if *vpcID != "" {
+cluster.Spec.NetworkID = *vpcID
+}
+
+if *networkCIDR != "" {
+cluster.Spec.NetworkCIDR = *networkCIDR
+}
+
+if cluster.SharedVPC() && cluster.Spec.NetworkCIDR == "" {
+glog.Errorf("Must specify NetworkCIDR when VPC is set")
+os.Exit(1)
+}
+
+if cluster.Spec.CloudProvider == "" {
+for _, zone := range cluster.Spec.Zones {
+cloud := zonesToCloud[zone.Name]
+if cloud != "" {
+glog.Infof("Inferred --cloud=%s from zone %q", cloud, zone.Name)
+cluster.Spec.CloudProvider = string(cloud)
+break
+}
+}
+}
+
+if *sshPublicKey != "" {
+*sshPublicKey = utils.ExpandPath(*sshPublicKey)
+}
+
 err = cluster.PerformAssignments()
 if err != nil {
 glog.Errorf("error populating configuration: %v", err)
@@ -252,10 +352,6 @@ func main() {
 os.Exit(1)
 }
 
-if *sshPublicKey != "" {
-*sshPublicKey = utils.ExpandPath(*sshPublicKey)
-}
-
 cmd := &cloudup.CreateClusterCmd{
 Cluster: cluster,
 InstanceGroups: instanceGroups,
@@ -267,7 +363,6 @@ func main() {
 SSHPublicKey: *sshPublicKey,
 OutDir: *outDir,
 }
-
 //if *configFile != "" {
 // //confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
 // err := cmd.LoadConfig(configFile)
@@ -12,8 +12,6 @@ import (
 )
 
 type AddonsGetCmd struct {
-ClusterName string
-
 cobraCommand *cobra.Command
 }
 
@@ -29,8 +27,6 @@ func init() {
 cmd := addonsGetCmd.cobraCommand
 addonsCmd.cobraCommand.AddCommand(cmd)
 
-cmd.Flags().StringVar(&addonsGetCmd.ClusterName, "name", "", "cluster name")
-
 cmd.Run = func(cmd *cobra.Command, args []string) {
 err := addonsGetCmd.Run()
 if err != nil {
@@ -15,9 +15,8 @@ import (
 )
 
 type DeleteClusterCmd struct {
-ClusterName string
 Yes bool
 Region string
 }
 
 var deleteCluster DeleteClusterCmd
@@ -39,7 +38,6 @@ func init() {
 
 cmd.Flags().BoolVar(&deleteCluster.Yes, "yes", false, "Delete without confirmation")
 
-cmd.Flags().StringVar(&deleteCluster.ClusterName, "name", "", "cluster name")
 cmd.Flags().StringVar(&deleteCluster.Region, "region", "", "region")
 }
 
@@ -49,11 +47,12 @@ func (c *DeleteClusterCmd) Run() error {
 if c.Region == "" {
 return fmt.Errorf("--region is required")
 }
-if c.ClusterName == "" {
+clusterName := rootCommand.clusterName
+if clusterName == "" {
 return fmt.Errorf("--name is required")
 }
 
-tags := map[string]string{"KubernetesCluster": c.ClusterName}
+tags := map[string]string{"KubernetesCluster": clusterName}
 cloud, err := awsup.NewAWSCloud(c.Region, tags)
 if err != nil {
 return fmt.Errorf("error initializing AWS client: %v", err)
@@ -61,7 +60,7 @@ func (c *DeleteClusterCmd) Run() error {
 
 d := &kutil.DeleteCluster{}
 
-d.ClusterName = c.ClusterName
+d.ClusterName = clusterName
 d.Region = c.Region
 d.Cloud = cloud
 
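The command-file changes above (and the matching ones for `export` and `rolling-update` below) all follow one refactor: the `--name` flag moves off each subcommand and onto the root command as a persistent flag, and subcommands read `rootCommand.clusterName`. A self-contained cobra sketch of that pattern (illustrative; not the real upup command tree):

```go
// Sketch: a persistent --name flag on the root command shared by subcommands.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

type RootCmd struct {
	clusterName  string
	cobraCommand *cobra.Command
}

var rootCommand = RootCmd{cobraCommand: &cobra.Command{Use: "upup"}}

func main() {
	root := rootCommand.cobraCommand
	root.PersistentFlags().StringVarP(&rootCommand.clusterName, "name", "", "", "Name of cluster")

	deleteCluster := &cobra.Command{
		Use: "cluster",
		RunE: func(cmd *cobra.Command, args []string) error {
			if rootCommand.clusterName == "" {
				return fmt.Errorf("--name is required")
			}
			fmt.Println("would operate on cluster", rootCommand.clusterName)
			return nil
		},
	}
	root.AddCommand(deleteCluster)
	root.Execute()
}
```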
@@ -0,0 +1,16 @@
+package main
+
+import (
+"github.com/spf13/cobra"
+)
+
+// editCmd represents the edit command
+var editCmd = &cobra.Command{
+Use: "edit",
+Short: "edit clusters",
+Long: `edit clusters`,
+}
+
+func init() {
+rootCommand.AddCommand(editCmd)
+}
@@ -0,0 +1,81 @@
+package main
+
+import (
+"fmt"
+
+"bytes"
+"github.com/golang/glog"
+"github.com/spf13/cobra"
+"k8s.io/kubernetes/pkg/kubectl/cmd/util/editor"
+"os"
+"path/filepath"
+)
+
+var editorEnvs = []string{"KUBE_EDITOR", "EDITOR"}
+
+type EditClusterCmd struct {
+}
+
+var editClusterCmd EditClusterCmd
+
+func init() {
+cmd := &cobra.Command{
+Use: "cluster",
+Short: "Edit cluster",
+Long: `Edit a cluster configuration.`,
+Run: func(cmd *cobra.Command, args []string) {
+err := editClusterCmd.Run()
+if err != nil {
+glog.Exitf("%v", err)
+}
+},
+}
+
+editCmd.AddCommand(cmd)
+}
+
+func (c *EditClusterCmd) Run() error {
+stateStore, err := rootCommand.StateStore()
+if err != nil {
+return err
+}
+
+//cluster, _, err := api.ReadConfig(stateStore)
+//if err != nil {
+// return fmt.Errorf("error reading configuration: %v", err)
+//}
+
+var (
+edit = editor.NewDefaultEditor(editorEnvs)
+)
+
+ext := "yaml"
+
+raw, err := stateStore.VFSPath().Join("config").ReadFile()
+if err != nil {
+return fmt.Errorf("error reading config file: %v", err)
+}
+
+// launch the editor
+edited, file, err := edit.LaunchTempFile(fmt.Sprintf("%s-edit-", filepath.Base(os.Args[0])), ext, bytes.NewReader(raw))
+defer func() {
+if file != "" {
+os.Remove(file)
+}
+}()
+if err != nil {
+return fmt.Errorf("error launching editor: %v", err)
+}
+
+if bytes.Equal(edited, raw) {
+fmt.Fprintln(os.Stderr, "Edit cancelled, no changes made.")
+return nil
+}
+
+err = stateStore.VFSPath().Join("config").WriteFile(edited)
+if err != nil {
+return fmt.Errorf("error writing config file: %v", err)
+}
+
+return nil
+}
@@ -10,8 +10,7 @@ import (
 )
 
 type ExportClusterCmd struct {
-ClusterName string
 Region string
 }
 
 var exportCluster ExportClusterCmd
@@ -31,7 +30,6 @@ func init() {
 
 exportCmd.AddCommand(cmd)
 
-cmd.Flags().StringVar(&exportCluster.ClusterName, "name", "", "cluster name")
 cmd.Flags().StringVar(&exportCluster.Region, "region", "", "region")
 }
 
@@ -39,11 +37,12 @@ func (c *ExportClusterCmd) Run() error {
 if c.Region == "" {
 return fmt.Errorf("--region is required")
 }
-if c.ClusterName == "" {
+clusterName := rootCommand.clusterName
+if clusterName == "" {
 return fmt.Errorf("--name is required")
 }
 
-tags := map[string]string{"KubernetesCluster": c.ClusterName}
+tags := map[string]string{"KubernetesCluster": clusterName}
 cloud, err := awsup.NewAWSCloud(c.Region, tags)
 if err != nil {
 return fmt.Errorf("error initializing AWS client: %v", err)
@@ -55,7 +54,7 @@ func (c *ExportClusterCmd) Run() error {
 }
 
 d := &kutil.ExportCluster{}
-d.ClusterName = c.ClusterName
+d.ClusterName = clusterName
 d.Cloud = cloud
 d.StateStore = stateStore
 
@@ -13,9 +13,8 @@ import (
 )
 
 type RollingUpdateClusterCmd struct {
-ClusterName string
 Yes bool
 Region string
 
 cobraCommand *cobra.Command
 }
@@ -34,7 +33,6 @@ func init() {
 
 cmd.Flags().BoolVar(&rollingupdateCluster.Yes, "yes", false, "Rollingupdate without confirmation")
 
-cmd.Flags().StringVar(&rollingupdateCluster.ClusterName, "name", "", "cluster name")
 cmd.Flags().StringVar(&rollingupdateCluster.Region, "region", "", "region")
 
 cmd.Run = func(cmd *cobra.Command, args []string) {
@@ -49,11 +47,12 @@ func (c *RollingUpdateClusterCmd) Run() error {
 if c.Region == "" {
 return fmt.Errorf("--region is required")
 }
-if c.ClusterName == "" {
+clusterName := rootCommand.clusterName
+if clusterName == "" {
 return fmt.Errorf("--name is required")
 }
 
-tags := map[string]string{"KubernetesCluster": c.ClusterName}
+tags := map[string]string{"KubernetesCluster": clusterName}
 cloud, err := awsup.NewAWSCloud(c.Region, tags)
 if err != nil {
 return fmt.Errorf("error initializing AWS client: %v", err)
@@ -61,7 +60,7 @@ func (c *RollingUpdateClusterCmd) Run() error {
 
 d := &kutil.RollingUpdateCluster{}
 
-d.ClusterName = c.ClusterName
+d.ClusterName = clusterName
 d.Region = c.Region
 d.Cloud = cloud
 
@@ -16,6 +16,7 @@ type RootCmd struct {
 
 stateStore fi.StateStore
 stateLocation string
+clusterName string
 
 cobraCommand *cobra.Command
 }
@@ -45,7 +46,11 @@ func init() {
 cmd.PersistentFlags().AddGoFlagSet(goflag.CommandLine)
 
 cmd.PersistentFlags().StringVar(&rootCommand.configFile, "config", "", "config file (default is $HOME/.upup.yaml)")
-cmd.PersistentFlags().StringVarP(&rootCommand.stateLocation, "state", "", "", "Location of state storage")
+
+defaultStateStore := os.Getenv("KOPS_STATE_STORE")
+cmd.PersistentFlags().StringVarP(&rootCommand.stateLocation, "state", "", defaultStateStore, "Location of state storage")
+
+cmd.PersistentFlags().StringVarP(&rootCommand.clusterName, "name", "", "", "Name of cluster")
 }
 
 // initConfig reads in config file and ENV variables if set.
@@ -76,6 +81,9 @@ func (c *RootCmd) StateStore() (fi.StateStore, error) {
 if c.stateLocation == "" {
 return nil, fmt.Errorf("--state is required")
 }
+if c.clusterName == "" {
+return nil, fmt.Errorf("--name is required")
+}
 
 statePath, err := vfs.Context.BuildVfsPath(c.stateLocation)
 if err != nil {
@@ -83,7 +91,7 @@ func (c *RootCmd) StateStore() (fi.StateStore, error) {
 }
 
 isDryrun := false
-stateStore, err := fi.NewVFSStateStore(statePath, isDryrun)
+stateStore, err := fi.NewVFSStateStore(statePath, c.clusterName, isDryrun)
 if err != nil {
 return nil, fmt.Errorf("error building state store: %v", err)
 }
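With `NewVFSStateStore(statePath, c.clusterName, isDryrun)`, each cluster's configuration now lives under its own name inside the shared state store, which is why a single `KOPS_STATE_STORE` bucket can hold many clusters. A rough sketch of the resulting layout (plain string paths, for illustration only):

```go
// Sketch: the per-cluster prefix produced by joining the state store and name.
package main

import "fmt"

func main() {
	base := "s3://<somes3bucket>"          // KOPS_STATE_STORE / --state
	clusterName := "kubernetes.myzone.com" // --name
	location := base + "/" + clusterName   // mirrors base.Join(clusterName)
	fmt.Println(location + "/config")      // the object that `upup edit cluster` edits
}
```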
@@ -1,71 +1,60 @@
 ## Running in a shared VPC
 
-CloudUp is actually driven by a configuration file, stored in your state directory (`./state/config`) by default.
-
-To build a cluster in an existing VPC, you'll need to configure the config file with the extra information
-(the CLI flags just act as shortcuts to configuring the config file manually, editing the config file is "expert mode").
-
 When launching into a shared VPC, the VPC & the Internet Gateway will be reused, but we create a new subnet per zone,
 and a new route table.
 
-Use cloudup in `--dryrun` mode to create a base configuration file:
+Use cloudup with the `--vpc` and `--network-cidr` arguments for your existing VPC, with --dryrun so we can see the
+config before we apply it.
 
 ```
-cloudup --cloud=aws --zones=us-east-1b --name=<mycluster.mydomain.com> --node-size=t2.medium --master-size=t2.medium --node-count=2 --dryrun
+export CLUSTER_NAME=<sharedvpc.mydomain.com>
+cloudup --zones=us-east-1b --state s3://clusters.awsdata.com/${CLUSTER_NAME} --name=${CLUSTER_NAME} \
+  --vpc=vpc-a80734c1 --network-cidr=10.100.0.0/16 --dryrun
 ```
 
-Now edit your `./state/config' file. It will probably look like this:
+Then `upup edit cluster --state s3://clusters.awsdata.com/${CLUSTER_NAME}` should show you something like:
 
 ```
-CloudProvider: aws
-ClusterName: <mycluster.mydomain.com>
-MasterMachineType: t2.medium
-MasterZones:
-- us-east-1b
-NetworkCIDR: 172.22.0.0/16
-NodeCount: 2
-NodeMachineType: t2.medium
-NodeZones:
-- cidr: 172.22.0.0/19
-  name: us-east-1b
+metadata:
+  creationTimestamp: "2016-06-27T14:23:34Z"
+  name: ${CLUSTER_NAME}
+spec:
+  cloudProvider: aws
+  networkCIDR: 10.100.0.0/16
+  networkID: vpc-a80734c1
+  nonMasqueradeCIDR: 100.64.0.0/10
+  zones:
+  - cidr: 10.100.32.0/19
+    name: eu-central-1a
 ```
 
-You need to specify your VPC id, which is called NetworkID. You likely also need to update NetworkCIDR to match whatever value your existing VPC is using,
-and you likely need to set the CIDR on each of the NodeZones, because subnets in a VPC cannot overlap. For example:
+Verify that networkCIDR & networkID match your VPC CIDR & ID. You likely need to set the CIDR on each of the Zones,
+because subnets in a VPC cannot overlap.
 
+You can then run cloudup again in dryrun mode (you don't need any arguments, because they're all in the config file):
+
 ```
-CloudProvider: aws
-ClusterName: cluster2.awsdata.com
-MasterMachineType: t2.medium
-MasterZones:
-- us-east-1b
-NetworkID: vpc-10f95a77
-NetworkCIDR: 172.22.0.0/16
-NodeCount: 2
-NodeMachineType: t2.medium
-NodeZones:
-- cidr: 172.22.224.0/19
-  name: us-east-1b
+cloudup --dryrun --state s3://clusters.awsdata.com/${CLUSTER_NAME}
 ```
 
-You can then run cloudup in dryrun mode (you don't need any arguments, because they're all in the config file):
-
-```
-cloudup --dryrun
-```
-
-You should see that your VPC changes from `Shared <nil> -> true`, and you should review them to make sure
-that the changes are OK - the Kubernetes settings might not be ones you want on a shared VPC (in which case,
+Review the changes to make sure they are OK - the Kubernetes settings might not be ones you want on a shared VPC (in which case,
 open an issue!)
 
+Note also the Kubernetes VPCs (currently) require `EnableDNSHostnames=true`. Cloudup will detect the required change,
+but refuse to make it automatically because it is a shared VPC. Please review the implications and make the change
+to the VPC manually.
+
 Once you're happy, you can create the cluster using:
 
 ```
-cloudup
+cloudup --state s3://clusters.awsdata.com/${CLUSTER_NAME}
 ```
 
 Finally, if your shared VPC has a KubernetesCluster tag (because it was created with cloudup), you should
 probably remove that tag to indicate to indicate that the resources are not owned by that cluster, and so
 deleting the cluster won't try to delete the VPC. (Deleting the VPC won't succeed anyway, because it's in use,
 but it's better to avoid the later confusion!)
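The shared-VPC doc above requires that each zone CIDR fall inside the VPC's networkCIDR and not collide with existing subnets. A quick way to sanity-check a planned zone CIDR against the VPC CIDR with the standard library (the values come from the example above; this sketch is not part of the commit):

```go
// Sketch: check that a zone CIDR is contained in the VPC's NetworkCIDR.
package main

import (
	"fmt"
	"net"
)

func contains(parent, child *net.IPNet) bool {
	parentOnes, _ := parent.Mask.Size()
	childOnes, _ := child.Mask.Size()
	return parentOnes <= childOnes && parent.Contains(child.IP)
}

func main() {
	_, vpc, _ := net.ParseCIDR("10.100.0.0/16")   // networkCIDR from the example
	_, zone, _ := net.ParseCIDR("10.100.32.0/19") // zone cidr from the example
	fmt.Println(contains(vpc, zone))              // true: a valid subnet of the VPC
}
```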
@@ -1,4 +0,0 @@
-KubeDNS:
-  Replicas: 1
-  ServerIP: 10.0.0.10
-  Domain: cluster.local
@@ -1,28 +0,0 @@
-#InstancePrefix: kubernetes
-Multizone: true
-
-ServiceClusterIPRange: 10.0.0.0/16
-ClusterIPRange: 10.244.0.0/16
-MasterIPRange: 10.246.0.0/24
-NetworkProvider: none
-
-AdmissionControl: NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,PersistentVolumeLabel
-
-#EnableClusterMonitoring: none
-#EnableL7LoadBalancing: none
-#EnableClusterUI: true
-
-#EnableClusterDNS: true
-#DNSReplicas: 1
-#DNSServerIP: 100.64.0.10
-DNSDomain: cluster.local
-
-#EnableClusterLogging: true
-#EnableNodeLogging: true
-#LoggingDestination: elasticsearch
-#ElasticsearchLoggingReplicas: 1
-
-#MasterVolumeSize: 20
-
-
-KubeUser: admin
@@ -5,7 +5,7 @@ keypair/master:
 - kubernetes
 - kubernetes.default
 - kubernetes.default.svc
-- kubernetes.default.svc.{{ .DNSDomain }}
+- kubernetes.default.svc.{{ .ClusterDNSDomain }}
 - "{{ .MasterPublicName }}"
 - "{{ .MasterInternalName }}"
 - "{{ WellKnownServiceIP 1 }}"
@@ -6,7 +6,7 @@ KubeAPIServer:
   EtcdServers: http://127.0.0.1:4001
   EtcdServersOverrides: /events#http://127.0.0.1:4002
   AdmissionControl: NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,PersistentVolumeLabel
-  ServiceClusterIPRange: 10.0.0.0/16
+  ServiceClusterIPRange: {{ .ServiceClusterIPRange }}
   ClientCAFile: /srv/kubernetes/ca.crt
   BasicAuthFile: /srv/kubernetes/basic_auth.csv
   TLSCertFile: /srv/kubernetes/server.cert
@@ -1,7 +1,6 @@
 KubeControllerManager:
   PathSrvKubernetes: /srv/kubernetes
   Master: 127.0.0.1:8080
-  ClusterCIDR: 10.244.0.0/16
   AllocateNodeCIDRs: true
   ServiceAccountPrivateKeyFile: /srv/kubernetes/server.key
   LogLevel: 2
@@ -0,0 +1,4 @@
+KubeDNS:
+  Replicas: 1
+  ServerIP: {{ WellKnownServiceIP 10 }}
+  Domain: {{ .ClusterDNSDomain }}
@@ -1,4 +1,3 @@
 Kubelet:
   CloudProvider: aws
   CgroupRoot: docker
-  NonMasqueradeCidr: 10.0.0.0/8
@@ -3,11 +3,12 @@ Kubelet:
   Config: /etc/kubernetes/manifests
   AllowPrivileged: true
   LogLevel: 2
-  ClusterDNS: 10.0.0.10
-  ClusterDomain: cluster.local
+  ClusterDNS: {{ WellKnownServiceIP 10 }}
+  ClusterDomain: {{ .ClusterDNSDomain }}
   ConfigureCBR0: true
   BabysitDaemons: true
   APIServers: https://{{ .MasterInternalName }}
+  NonMasqueradeCIDR: {{ .NonMasqueradeCIDR }}
 
 MasterKubelet:
   RegisterSchedulable: false
@@ -0,0 +1,6 @@
+Multizone: true
+
+ClusterDNSDomain: cluster.local
+
+KubeUser: admin
+
@@ -8,10 +8,11 @@ spec:
   hostNetwork: true
   containers:
   - name: kope-aws
-    image: kope/aws-controller
+    image: kope/aws-controller:1.3
     command:
     - /usr/bin/aws-controller
-    - --healthz-port=10245
-    - --zone-name={{ .DNSZone }}
+    - -healthz-port=10245
+    - -zone-name={{ .DNSZone }}
+    - -v=4
     securityContext:
       privileged: true
@@ -2,7 +2,7 @@
 {{ range $m := $etcd.Members }}
 
 # EBS volume for each member of the each etcd cluster
-ebsVolume/{{$m.Name}}.{{$etcd.Name}}.{{ ClusterName }}:
+ebsVolume/{{$m.Name}}.etcd-{{$etcd.Name}}.{{ ClusterName }}:
   availabilityZone: {{ $m.Zone }}
   sizeGB: {{ or $m.VolumeSize 20 }}
   volumeType: {{ or $m.VolumeType "gp2" }}
@@ -8,6 +8,7 @@ import (
 "k8s.io/kubernetes/pkg/api/unversioned"
 "net"
 "strconv"
+"strings"
 )
 
 type Cluster struct {
@@ -60,8 +61,14 @@ type ClusterSpec struct {
 ConfigStore string `json:"configStore,omitempty"`
 
 // DNSZone is the DNS zone we should use when configuring DNS
+// This is because some clouds let us define a managed zone foo.bar, and then have
+// kubernetes.dev.foo.bar, without needing to define dev.foo.bar as a hosted zone.
+// DNSZone will probably be a suffix of the MasterPublicName and MasterInternalName
 DNSZone string `json:"dnsZone,omitempty"`
 
+// ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local)
+ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"`
+
 //InstancePrefix string `json:",omitempty"`
 
 // ClusterName is a unique identifier for the cluster, and currently must be a DNS name
@@ -69,15 +76,18 @@ type ClusterSpec struct {
 
 //AllocateNodeCIDRs *bool `json:"allocateNodeCIDRs,omitempty"`
 
-Multizone *bool `json:"mutlizone,omitempty"`
+Multizone *bool `json:"multizone,omitempty"`
 
 //ClusterIPRange string `json:",omitempty"`
 
 // ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
 ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
 //MasterIPRange string `json:",omitempty"`
-//NonMasqueradeCidr string `json:",omitempty"`
-//
+
+// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
+// It cannot overlap ServiceClusterIPRange
+NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
+
 //NetworkProvider string `json:",omitempty"`
 //
 //HairpinMode string `json:",omitempty"`
@@ -94,9 +104,6 @@ type ClusterSpec struct {
 //DNSReplicas int `json:",omitempty"`
 //DNSServerIP string `json:",omitempty"`
 
-// DNSDomain is the suffix we use for internal DNS names (normally cluster.local)
-DNSDomain string `json:"dnsDomain,omitempty"`
-
 //EnableClusterLogging *bool `json:",omitempty"`
 //EnableNodeLogging *bool `json:",omitempty"`
 //LoggingDestination string `json:",omitempty"`
@@ -241,11 +248,15 @@ type ClusterZoneSpec struct {
 // For example, it assigns stable Keys to NodeSets & Masters, and
 // it assigns CIDRs to subnets
 func (c *Cluster) PerformAssignments() error {
-if c.Spec.NetworkCIDR == "" {
+if c.Spec.NetworkCIDR == "" && !c.SharedVPC() {
 // TODO: Choose non-overlapping networking CIDRs for VPCs?
 c.Spec.NetworkCIDR = "172.20.0.0/16"
 }
 
+if c.Spec.NonMasqueradeCIDR == "" {
+c.Spec.NonMasqueradeCIDR = "100.64.0.0/10"
+}
+
 for _, zone := range c.Spec.Zones {
 err := zone.performAssignments(c)
 if err != nil {
@@ -333,18 +344,38 @@ func (c *Cluster) SharedVPC() bool {
 
 // CloudPermissions holds IAM-style permissions
 type CloudPermissions struct {
-S3Buckets []string `json:"s3Buckets,omitempty"`
+Permissions []*CloudPermission `json:"permissions,omitempty"`
+}
+
+// CloudPermission holds a single IAM-style permission
+type CloudPermission struct {
+Resource string `json:"resource,omitempty"`
 }
 
 // AddS3Bucket adds a bucket if it does not already exist
 func (p *CloudPermissions) AddS3Bucket(bucket string) {
-for _, b := range p.S3Buckets {
-if b == bucket {
+for _, p := range p.Permissions {
+if p.Resource == "s3://"+bucket {
 return
 }
 }
 
-p.S3Buckets = append(p.S3Buckets, bucket)
+p.Permissions = append(p.Permissions, &CloudPermission{
+Resource: "s3://" + bucket,
+})
+}
+
+// S3Buckets returns each of the S3 buckets in the permission
+// TODO: Replace with something generic (probably we should just generate the permission)
+func (p *CloudPermissions) S3Buckets() []string {
+var buckets []string
+for _, p := range p.Permissions {
+if strings.HasPrefix(p.Resource, "s3://") {
+buckets = append(buckets, strings.TrimPrefix(p.Resource, "s3://"))
+}
+}
+
+return buckets
 }
 
 //
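The hunk above generalizes `CloudPermissions` from a plain list of S3 buckets to a list of resource permissions with an `s3://` prefix, while keeping `AddS3Bucket`/`S3Buckets()` compatible. A self-contained, re-implemented sketch of how the new shape is used (illustrative only):

```go
// Sketch: CloudPermissions storing s3:// resources, with bucket helpers.
package main

import (
	"fmt"
	"strings"
)

type CloudPermission struct {
	Resource string
}

type CloudPermissions struct {
	Permissions []*CloudPermission
}

// AddS3Bucket adds a bucket if it does not already exist
func (p *CloudPermissions) AddS3Bucket(bucket string) {
	for _, perm := range p.Permissions {
		if perm.Resource == "s3://"+bucket {
			return
		}
	}
	p.Permissions = append(p.Permissions, &CloudPermission{Resource: "s3://" + bucket})
}

// S3Buckets returns the plain bucket names back out of the permissions
func (p *CloudPermissions) S3Buckets() []string {
	var buckets []string
	for _, perm := range p.Permissions {
		if strings.HasPrefix(perm.Resource, "s3://") {
			buckets = append(buckets, strings.TrimPrefix(perm.Resource, "s3://"))
		}
	}
	return buckets
}

func main() {
	p := &CloudPermissions{}
	p.AddS3Bucket("clusters.awsdata.com")
	p.AddS3Bucket("clusters.awsdata.com") // de-duplicated
	fmt.Println(p.S3Buckets())            // [clusters.awsdata.com]
}
```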
@@ -0,0 +1,189 @@
+package api
+
+import (
+"fmt"
+"net"
+)
+
+func (c *Cluster) Validate() error {
+var err error
+
+if c.Spec.Kubelet == nil {
+return fmt.Errorf("Kubelet not configured")
+}
+if c.Spec.MasterKubelet == nil {
+return fmt.Errorf("MasterKubelet not configured")
+}
+if c.Spec.KubeControllerManager == nil {
+return fmt.Errorf("KubeControllerManager not configured")
+}
+if c.Spec.KubeDNS == nil {
+return fmt.Errorf("KubeDNS not configured")
+}
+if c.Spec.Kubelet == nil {
+return fmt.Errorf("Kubelet not configured")
+}
+if c.Spec.KubeAPIServer == nil {
+return fmt.Errorf("KubeAPIServer not configured")
+}
+if c.Spec.KubeProxy == nil {
+return fmt.Errorf("KubeProxy not configured")
+}
+if c.Spec.Docker == nil {
+return fmt.Errorf("Docker not configured")
+}
+
+// Check NetworkCIDR
+var networkCIDR *net.IPNet
+{
+if c.Spec.NetworkCIDR == "" {
+return fmt.Errorf("Cluster did not have NetworkCIDR set")
+}
+_, networkCIDR, err = net.ParseCIDR(c.Spec.NetworkCIDR)
+if err != nil {
+return fmt.Errorf("Cluster had an invalid NetworkCIDR: %q", c.Spec.NetworkCIDR)
+}
+}
+
+// Check NonMasqueradeCIDR
+var nonMasqueradeCIDR *net.IPNet
+{
+if c.Spec.NonMasqueradeCIDR == "" {
+return fmt.Errorf("Cluster did not have NonMasqueradeCIDR set")
+}
+_, nonMasqueradeCIDR, err = net.ParseCIDR(c.Spec.NonMasqueradeCIDR)
+if err != nil {
+return fmt.Errorf("Cluster had an invalid NonMasqueradeCIDR: %q", c.Spec.NonMasqueradeCIDR)
+}
+
+if subnetsOverlap(nonMasqueradeCIDR, networkCIDR) {
+return fmt.Errorf("NonMasqueradeCIDR %q cannot overlap with NetworkCIDR %q", c.Spec.NonMasqueradeCIDR, c.Spec.NetworkCIDR)
+}
+
+if c.Spec.Kubelet.NonMasqueradeCIDR != c.Spec.NonMasqueradeCIDR {
+return fmt.Errorf("Kubelet NonMasqueradeCIDR did not match cluster NonMasqueradeCIDR")
+}
+if c.Spec.MasterKubelet.NonMasqueradeCIDR != c.Spec.NonMasqueradeCIDR {
+return fmt.Errorf("MasterKubelet NonMasqueradeCIDR did not match cluster NonMasqueradeCIDR")
+}
+}
+
+// Check ServiceClusterIPRange
+var serviceClusterIPRange *net.IPNet
+{
+if c.Spec.ServiceClusterIPRange == "" {
+return fmt.Errorf("Cluster did not have ServiceClusterIPRange set")
+}
+_, serviceClusterIPRange, err = net.ParseCIDR(c.Spec.ServiceClusterIPRange)
+if err != nil {
+return fmt.Errorf("Cluster had an invalid ServiceClusterIPRange: %q", c.Spec.ServiceClusterIPRange)
+}
+
+if !isSubnet(nonMasqueradeCIDR, serviceClusterIPRange) {
+return fmt.Errorf("ServiceClusterIPRange %q must be a subnet of NonMasqueradeCIDR %q", c.Spec.ServiceClusterIPRange, c.Spec.NonMasqueradeCIDR)
+}
+
+if c.Spec.KubeAPIServer.ServiceClusterIPRange != c.Spec.ServiceClusterIPRange {
+return fmt.Errorf("KubeAPIServer ServiceClusterIPRange did not match cluster ServiceClusterIPRange")
+}
+
+}
+
+// Check ClusterCIDR
+var clusterCIDR *net.IPNet
+{
+if c.Spec.KubeControllerManager.ClusterCIDR == "" {
+return fmt.Errorf("Cluster did not have KubeControllerManager.ClusterCIDR set")
+}
+_, clusterCIDR, err = net.ParseCIDR(c.Spec.KubeControllerManager.ClusterCIDR)
+if err != nil {
+return fmt.Errorf("Cluster had an invalid KubeControllerManager.ClusterCIDR: %q", c.Spec.KubeControllerManager.ClusterCIDR)
+}
+
+if !isSubnet(nonMasqueradeCIDR, clusterCIDR) {
+return fmt.Errorf("KubeControllerManager.ClusterCIDR %q must be a subnet of NonMasqueradeCIDR %q", c.Spec.KubeControllerManager.ClusterCIDR, c.Spec.NonMasqueradeCIDR)
+}
+}
+
+// Check KubeDNS.ServerIP
+{
+if c.Spec.KubeDNS.ServerIP == "" {
+return fmt.Errorf("Cluster did not have KubeDNS.ServerIP set")
+}
+
+dnsServiceIP := net.ParseIP(c.Spec.KubeDNS.ServerIP)
+if dnsServiceIP == nil {
+return fmt.Errorf("Cluster had an invalid KubeDNS.ServerIP: %q", c.Spec.KubeDNS.ServerIP)
+}
+
+if !serviceClusterIPRange.Contains(dnsServiceIP) {
+return fmt.Errorf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, c.Spec.KubeDNS.ServerIP)
+}
+
+if c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
+return fmt.Errorf("Kubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
+}
+if c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
+return fmt.Errorf("MasterKubelet ClusterDNS did not match cluster KubeDNS.ServerIP")
+}
+}
+
+// Check CloudProvider
+{
+if c.Spec.CloudProvider != "" {
+if c.Spec.Kubelet.CloudProvider != c.Spec.CloudProvider {
+return fmt.Errorf("Kubelet CloudProvider did not match cluster CloudProvider")
+}
+if c.Spec.MasterKubelet.CloudProvider != c.Spec.CloudProvider {
+return fmt.Errorf("MasterKubelet CloudProvider did not match cluster CloudProvider")
+}
+if c.Spec.KubeAPIServer.CloudProvider != c.Spec.CloudProvider {
+return fmt.Errorf("Errorf CloudProvider did not match cluster CloudProvider")
+}
+if c.Spec.KubeControllerManager.CloudProvider != c.Spec.CloudProvider {
+return fmt.Errorf("KubeControllerManager CloudProvider did not match cluster CloudProvider")
+}
+}
+}
+
+// Check that the zone CIDRs are all consistent
+{
+
+for _, z := range c.Spec.Zones {
+if z.CIDR == "" {
+return fmt.Errorf("Zone %q did not have a CIDR set", z.Name)
+}
+
+_, zoneCIDR, err := net.ParseCIDR(z.CIDR)
+if err != nil {
+return fmt.Errorf("Zone %q had an invalid CIDR: %q", z.Name, z.CIDR)
+}
+
+if !isSubnet(networkCIDR, zoneCIDR) {
+return fmt.Errorf("Zone %q had a CIDR %q that was not a subnet of the NetworkCIDR %q", z.Name, z.CIDR, c.Spec.NetworkCIDR)
+}
+}
+}
+
+return nil
+}
+
+// isSubnet checks if child is a subnet of parent
+func isSubnet(parent *net.IPNet, child *net.IPNet) bool {
+parentOnes, parentBits := parent.Mask.Size()
+childOnes, childBits := child.Mask.Size()
+if childBits != parentBits {
+return false
+}
+if parentOnes > childOnes {
+return false
+}
+childMasked := child.IP.Mask(parent.Mask)
+parentMasked := parent.IP.Mask(parent.Mask)
+return childMasked.Equal(parentMasked)
+}
+
+// subnetsOverlap checks if two subnets overlap
+func subnetsOverlap(l *net.IPNet, r *net.IPNet) bool {
+return l.Contains(r.IP) || r.Contains(l.IP)
+}
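The two helpers at the bottom of the new validation file, `isSubnet` and `subnetsOverlap`, carry most of the CIDR logic. A tiny standalone check (the helpers are re-implemented verbatim so the example runs on its own; the CIDR values are the defaults that appear elsewhere in this commit):

```go
// Sketch: exercising isSubnet / subnetsOverlap on the commit's default ranges.
package main

import (
	"fmt"
	"net"
)

func isSubnet(parent, child *net.IPNet) bool {
	parentOnes, parentBits := parent.Mask.Size()
	childOnes, childBits := child.Mask.Size()
	if childBits != parentBits || parentOnes > childOnes {
		return false
	}
	return child.IP.Mask(parent.Mask).Equal(parent.IP.Mask(parent.Mask))
}

func subnetsOverlap(l, r *net.IPNet) bool {
	return l.Contains(r.IP) || r.Contains(l.IP)
}

func main() {
	_, network, _ := net.ParseCIDR("172.20.0.0/16")  // default NetworkCIDR
	_, nonMasq, _ := net.ParseCIDR("100.64.0.0/10")  // default NonMasqueradeCIDR
	_, services, _ := net.ParseCIDR("100.64.0.0/13") // derived ServiceClusterIPRange
	fmt.Println(subnetsOverlap(nonMasq, network))    // false, as Validate requires
	fmt.Println(isSubnet(nonMasq, services))         // true, as Validate requires
}
```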
@@ -96,7 +96,7 @@ func (_ *InternetGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Intern
 if shared {
 // Verify the InternetGateway was found and matches our required settings
 if a == nil {
-return fmt.Errorf("InternetGateway with id %q not found", fi.StringValue(e.ID))
+return fmt.Errorf("InternetGateway for shared VPC was not found")
 }
 
 return nil
@@ -2,6 +2,7 @@ package cloudup
 
 import (
 "encoding/base64"
+"encoding/binary"
 "fmt"
 "github.com/golang/glog"
 "io/ioutil"
@@ -16,6 +17,7 @@ import (
 "k8s.io/kube-deploy/upup/pkg/fi/loader"
 "k8s.io/kube-deploy/upup/pkg/fi/utils"
 "k8s.io/kube-deploy/upup/pkg/fi/vfs"
+"net"
 "os"
 "path"
 "strings"
@@ -144,6 +146,11 @@ func (c *CreateClusterCmd) Run() error {
 return fmt.Errorf("must configure at least one Node InstanceGroup")
 }
 
+err = c.assignSubnets()
+if err != nil {
+return err
+}
+
 // Check that instance groups are defined in valid zones
 {
 clusterZones := make(map[string]*api.ClusterZoneSpec)
@@ -598,6 +605,11 @@ func (c *CreateClusterCmd) Run() error {
 l.cluster.Spec = *completed
 tf.cluster = l.cluster
 
+err = l.cluster.Validate()
+if err != nil {
+return fmt.Errorf("Completed cluster failed validation: %v", err)
+}
+
 taskMap, err := l.BuildTasks(c.ModelStore, c.Models)
 if err != nil {
 return fmt.Errorf("error building tasks: %v", err)
@@ -741,3 +753,49 @@ func (c *CreateClusterCmd) defaultImage() string {
 return ""
 }
 }
+
+func (c *CreateClusterCmd) assignSubnets() error {
+cluster := c.Cluster
+if cluster.Spec.NonMasqueradeCIDR == "" {
+glog.Warningf("NonMasqueradeCIDR not set; can't auto-assign dependent subnets")
+return nil
+}
+
+_, nonMasqueradeCIDR, err := net.ParseCIDR(cluster.Spec.NonMasqueradeCIDR)
+if err != nil {
+return fmt.Errorf("error parsing NonMasqueradeCIDR %q: %v", cluster.Spec.NonMasqueradeCIDR, err)
+}
+nmOnes, nmBits := nonMasqueradeCIDR.Mask.Size()
+
+if cluster.Spec.KubeControllerManager == nil {
+cluster.Spec.KubeControllerManager = &api.KubeControllerManagerConfig{}
+}
+
+if cluster.Spec.KubeControllerManager.ClusterCIDR == "" {
+// Allocate as big a range as possible: the NonMasqueradeCIDR mask + 1, with a '1' in the extra bit
+ip := nonMasqueradeCIDR.IP.Mask(nonMasqueradeCIDR.Mask)
+
+ip4 := ip.To4()
+if ip4 != nil {
+n := binary.BigEndian.Uint32(ip4)
+n += uint32(1 << uint(nmBits-nmOnes-1))
+ip = make(net.IP, len(ip4))
+binary.BigEndian.PutUint32(ip, n)
+} else {
+return fmt.Errorf("IPV6 subnet computations not yet implements")
+}
+
+cidr := net.IPNet{IP: ip, Mask: net.CIDRMask(nmOnes+1, nmBits)}
+cluster.Spec.KubeControllerManager.ClusterCIDR = cidr.String()
+glog.V(2).Infof("Defaulted KubeControllerManager.ClusterCIDR to %v", cluster.Spec.KubeControllerManager.ClusterCIDR)
+}
+
+if cluster.Spec.ServiceClusterIPRange == "" {
+// Allocate from the '0' subnet; but only carve off 1/4 of that (i.e. add 1 + 2 bits to the netmask)
+cidr := net.IPNet{IP: nonMasqueradeCIDR.IP.Mask(nonMasqueradeCIDR.Mask), Mask: net.CIDRMask(nmOnes+3, nmBits)}
+cluster.Spec.ServiceClusterIPRange = cidr.String()
+glog.V(2).Infof("Defaulted ServiceClusterIPRange to %v", cluster.Spec.ServiceClusterIPRange)
+}
+
+return nil
+}
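A worked example of the arithmetic in `assignSubnets` above: starting from the default NonMasqueradeCIDR `100.64.0.0/10`, the pod range (ClusterCIDR) is the upper half of that range (`/11` with the extra bit set) and the service range is a `/13` carved from the bottom. The sketch below reproduces the same computation standalone and prints the resulting defaults.

```go
// Sketch: reproduce assignSubnets' derivation from NonMasqueradeCIDR.
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	_, nonMasqueradeCIDR, _ := net.ParseCIDR("100.64.0.0/10")
	nmOnes, nmBits := nonMasqueradeCIDR.Mask.Size()

	// ClusterCIDR: NonMasqueradeCIDR mask + 1, with a '1' in the extra bit
	ip := nonMasqueradeCIDR.IP.Mask(nonMasqueradeCIDR.Mask).To4()
	n := binary.BigEndian.Uint32(ip)
	n += uint32(1 << uint(nmBits-nmOnes-1))
	ip = make(net.IP, 4)
	binary.BigEndian.PutUint32(ip, n)
	clusterCIDR := net.IPNet{IP: ip, Mask: net.CIDRMask(nmOnes+1, nmBits)}

	// ServiceClusterIPRange: the '0' subnet with 1 + 2 extra mask bits
	serviceRange := net.IPNet{
		IP:   nonMasqueradeCIDR.IP.Mask(nonMasqueradeCIDR.Mask),
		Mask: net.CIDRMask(nmOnes+3, nmBits),
	}

	fmt.Println(clusterCIDR.String())  // 100.96.0.0/11
	fmt.Println(serviceRange.String()) // 100.64.0.0/13
}
```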
@@ -11,10 +11,14 @@ import (
 "text/template"
 )
 
+type TemplateFunctions struct {
+cluster *api.Cluster
+}
+
 func (tf *TemplateFunctions) WellKnownServiceIP(id int) (net.IP, error) {
 _, cidr, err := net.ParseCIDR(tf.cluster.Spec.ServiceClusterIPRange)
 if err != nil {
-return nil, fmt.Errorf("error parsing ServiceClusterIPRange: %v", err)
+return nil, fmt.Errorf("error parsing ServiceClusterIPRange %q: %v", tf.cluster.Spec.ServiceClusterIPRange, err)
 }
 
 ip4 := cidr.IP.To4()
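For context on the `WellKnownServiceIP` template function used by the manifests in this commit: for IPv4 it yields the base address of ServiceClusterIPRange plus a small id, so with the derived range `100.64.0.0/13`, id 1 is the cert alt-name used for the API service and id 10 is the KubeDNS ServerIP/ClusterDNS value in the templates above. The following is an illustrative re-implementation of the IPv4 case only, not the upup source.

```go
// Sketch: base-of-range + id, the IPv4 behaviour of WellKnownServiceIP.
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func wellKnownServiceIP(serviceClusterIPRange string, id int) (net.IP, error) {
	_, cidr, err := net.ParseCIDR(serviceClusterIPRange)
	if err != nil {
		return nil, fmt.Errorf("error parsing ServiceClusterIPRange %q: %v", serviceClusterIPRange, err)
	}
	ip4 := cidr.IP.To4()
	if ip4 == nil {
		return nil, fmt.Errorf("only IPv4 is shown in this sketch")
	}
	n := binary.BigEndian.Uint32(ip4) + uint32(id)
	ip := make(net.IP, 4)
	binary.BigEndian.PutUint32(ip, n)
	return ip, nil
}

func main() {
	ip1, _ := wellKnownServiceIP("100.64.0.0/13", 1)
	ip10, _ := wellKnownServiceIP("100.64.0.0/13", 10)
	fmt.Println(ip1, ip10) // 100.64.0.1 100.64.0.10
}
```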
@@ -43,10 +47,6 @@ func (tf *TemplateFunctions) WellKnownServiceIP(id int) (net.IP, error) {
 return nil, fmt.Errorf("Unexpected IP address type for ServiceClusterIPRange: %s", tf.cluster.Spec.ServiceClusterIPRange)
 }
 
-type TemplateFunctions struct {
-cluster *api.Cluster
-}
-
 func (tf *TemplateFunctions) AddTo(dest template.FuncMap) {
 dest["EtcdClusterMemberTags"] = tf.EtcdClusterMemberTags
 dest["SharedVPC"] = tf.SharedVPC
@@ -101,6 +101,10 @@ func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error {
 providerGoogle["project"] = t.Project
 providerGoogle["region"] = t.Region
 providersByName["google"] = providerGoogle
+} else if t.Cloud.ProviderID() == fi.CloudProviderAWS {
+providerAWS := make(map[string]interface{})
+providerAWS["region"] = t.Region
+providersByName["aws"] = providerAWS
 }
 
 data := make(map[string]interface{})
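The hunk above teaches the terraform target to emit an AWS provider block alongside the existing GCE one. A rough illustration of the shape of that providers map, serialized to JSON purely to visualize it (this is an assumption about presentation, not the target's actual writer):

```go
// Sketch: the providers-by-name map the target builds, rendered as JSON.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	providersByName := make(map[string]interface{})

	providerAWS := make(map[string]interface{})
	providerAWS["region"] = "us-east-1"
	providersByName["aws"] = providerAWS

	out, _ := json.MarshalIndent(map[string]interface{}{"provider": providersByName}, "", "  ")
	fmt.Println(string(out)) // {"provider": {"aws": {"region": "us-east-1"}}}
}
```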
@@ -62,7 +62,7 @@ func (t *DryRunTarget) PrintReport(taskMap map[string]Task, out io.Writer) error
 b := &bytes.Buffer{}
 
 if len(t.changes) != 0 {
-fmt.Fprintf(b, "Created resources:\n")
+fmt.Fprintf(b, "Will create resources:\n")
 for _, r := range t.changes {
 if !r.aIsNil {
 continue
@@ -71,7 +71,7 @@ func (t *DryRunTarget) PrintReport(taskMap map[string]Task, out io.Writer) error
 fmt.Fprintf(b, " %T\t%s\n", r.changes, IdForTask(taskMap, r.e))
 }
 
-fmt.Fprintf(b, "Changed resources:\n")
+fmt.Fprintf(b, "Will modify resources:\n")
 // We can't use our reflection helpers here - we want corresponding values from a,e,c
 for _, r := range t.changes {
 if r.aIsNil {
@@ -31,7 +31,8 @@ type VFSStateStore struct {
 
 var _ StateStore = &VFSStateStore{}
 
-func NewVFSStateStore(location vfs.Path, dryrun bool) (*VFSStateStore, error) {
+func NewVFSStateStore(base vfs.Path, clusterName string, dryrun bool) (*VFSStateStore, error) {
+location := base.Join(clusterName)
 s := &VFSStateStore{
 location: location,
 }
@@ -157,9 +157,9 @@ func (c *DeleteCluster) DeleteResources(resources map[string]*ResourceTracker) e
 }
 }
 
-glog.Infof("Dependencies")
+glog.V(2).Infof("Dependencies")
 for k, v := range depMap {
-glog.Infof("\t%s\t%v", k, v)
+glog.V(2).Infof("\t%s\t%v", k, v)
 }
 
 iterationsWithNoProgress := 0
@@ -160,7 +160,7 @@ func (x *ExportCluster) ReverseAWS() error {
 // return fmt.Errorf("cannot parse DNS_REPLICAS=%q: %v", conf.Settings["DNS_REPLICAS"], err)
 //}
 //clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
-cluster.Spec.DNSDomain = conf.Settings["DNS_DOMAIN"]
+cluster.Spec.ClusterDNSDomain = conf.Settings["DNS_DOMAIN"]
 //clusterConfig.AdmissionControl = conf.Settings["ADMISSION_CONTROL"]
 //clusterConfig.MasterIPRange = conf.Settings["MASTER_IP_RANGE"]
 //clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]