Merge pull request #78 from justinsb/upup_multimodel

upup: split model into two parts
This commit is contained in:
Justin Santa Barbara 2016-06-09 23:16:05 -04:00 committed by GitHub
commit 40d85dbd98
6 changed files with 74 additions and 37 deletions

View File

@ -65,7 +65,7 @@ You must pass --yes to actually delete resources (without the `#` comment!)
* See changes that would be applied: `${GOPATH}/bin/cloudup --dryrun` * See changes that would be applied: `${GOPATH}/bin/cloudup --dryrun`
Build a terrform model: `${GOPATH}/bin/cloudup $NORMAL_ARGS --target=terraform > tf/k8s.tf.json` * Build a terraform model: `${GOPATH}/bin/cloudup $NORMAL_ARGS --target=terraform` The terraform model will be built in `state/terraform`
* Specify the k8s build to run: `-kubernetes-version=1.2.2` * Specify the k8s build to run: `-kubernetes-version=1.2.2`
@ -80,3 +80,36 @@ Each file in the tree describes a Task.
On the nodeup side, Tasks can manage files, systemd services, packages etc. On the nodeup side, Tasks can manage files, systemd services, packages etc.
On the cloudup side, Tasks manage cloud resources: instances, networks, disks etc. On the cloudup side, Tasks manage cloud resources: instances, networks, disks etc.
## Workaround for terraform bug
Terraform currently has a bug where it can't create AWS tags containing a dot. Until this is fixed,
you can't use terraform to build EC2 resources that are tagged with `k8s.io/...` tags. Thankfully this is only
the volumes, and it isn't the worst idea to build these separately anyway.
We divide the 'cloudup' model into two parts:
* models/proto which sets up the volumes and other data which would be hard to recover (e.g. likely keys & secrets in the near future)
* models/cloudup which is the main cloudup model for configuring everything else
So you don't use terraform for the 'proto' phase (you can't anyway, because of the bug!):
```
export MYZONE=<kubernetes.myzone.com>
${GOPATH}/bin/cloudup --v=0 --logtostderr -cloud=aws -zone=us-east-1c -name=${MYZONE} -kubernetes-version=1.2.2 --model=models/proto
```
And then you can use terraform to do the full installation:
```
export MYZONE=<kubernetes.myzone.com>
${GOPATH}/bin/cloudup --v=0 --logtostderr -cloud=aws -zone=us-east-1c -name=${MYZONE} -kubernetes-version=1.2.2 --model=models/cloudup --target=terraform
```
Then, to apply using terraform:
```
cd state/terraform
terraform plan
terraform apply
```

View File

@ -28,8 +28,8 @@ func main() {
flag.StringVar(&target, "target", target, "Target - direct, terraform") flag.StringVar(&target, "target", target, "Target - direct, terraform")
configFile := "" configFile := ""
flag.StringVar(&configFile, "conf", configFile, "Configuration file to load") flag.StringVar(&configFile, "conf", configFile, "Configuration file to load")
modelDir := "models/cloudup" modelDirs := "models/proto,models/cloudup"
flag.StringVar(&modelDir, "model", modelDir, "Source directory to use as model") flag.StringVar(&modelDirs, "model", modelDirs, "Source directory to use as model (separate multiple models with commas)")
stateLocation := "./state" stateLocation := "./state"
flag.StringVar(&stateLocation, "state", stateLocation, "Location to use to store configuration state") flag.StringVar(&stateLocation, "state", stateLocation, "Location to use to store configuration state")
nodeModelDir := "models/nodeup" nodeModelDir := "models/nodeup"
@ -73,7 +73,7 @@ func main() {
cmd := &CreateClusterCmd{ cmd := &CreateClusterCmd{
Config: config, Config: config,
ModelDir: modelDir, ModelDirs: strings.Split(modelDirs, ","),
StateStore: stateStore, StateStore: stateStore,
Target: target, Target: target,
NodeModelDir: nodeModelDir, NodeModelDir: nodeModelDir,
@ -102,8 +102,8 @@ func main() {
type CreateClusterCmd struct { type CreateClusterCmd struct {
// Config is the cluster configuration // Config is the cluster configuration
Config *cloudup.CloudConfig Config *cloudup.CloudConfig
// ModelDir is the directory in which the cloudup model is found // ModelDirs is a list of directories in which the cloudup models are found
ModelDir string ModelDirs []string
// StateStore is a StateStore in which we store state (such as the PKI tree) // StateStore is a StateStore in which we store state (such as the PKI tree)
StateStore fi.StateStore StateStore fi.StateStore
// Target specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform // Target specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
@ -403,7 +403,7 @@ func (c *CreateClusterCmd) Run() error {
l.Resources["ssh-public-key"] = fi.NewStringResource(string(authorized)) l.Resources["ssh-public-key"] = fi.NewStringResource(string(authorized))
} }
taskMap, err := l.Build(c.ModelDir) taskMap, err := l.Build(c.ModelDirs)
if err != nil { if err != nil {
glog.Exitf("error building: %v", err) glog.Exitf("error building: %v", err)
} }

View File

@ -19,7 +19,14 @@ type RootCmd struct {
cobraCommand *cobra.Command cobraCommand *cobra.Command
} }
var rootCommand RootCmd var rootCommand = RootCmd{
cobraCommand: &cobra.Command{
Use: "upup",
Short: "upup manages kubernetes clusters",
Long: `upup manages kubernetes clusters.
It allows you to create, destroy, upgrade and maintain them.`,
},
}
func Execute() { func Execute() {
if err := rootCommand.cobraCommand.Execute(); err != nil { if err := rootCommand.cobraCommand.Execute(); err != nil {
@ -31,14 +38,7 @@ func Execute() {
func init() { func init() {
cobra.OnInitialize(initConfig) cobra.OnInitialize(initConfig)
cmd := &cobra.Command{ cmd := rootCommand.cobraCommand
Use: "upup",
Short: "upup manages kubernetes clusters",
Long: `upup manages kubernetes clusters.
It allows you to create, destroy, upgrade and maintain them.`,
}
rootCommand.cobraCommand = cmd
cmd.PersistentFlags().StringVar(&rootCommand.configFile, "config", "", "config file (default is $HOME/.upup.yaml)") cmd.PersistentFlags().StringVar(&rootCommand.configFile, "config", "", "config file (default is $HOME/.upup.yaml)")
cmd.PersistentFlags().StringVarP(&rootCommand.stateLocation, "state", "", "", "Location of state storage") cmd.PersistentFlags().StringVarP(&rootCommand.stateLocation, "state", "", "", "Location of state storage")

View File

@ -41,17 +41,3 @@ securityGroupRule/all-master-to-master:
securityGroupRule/all-master-to-node: securityGroupRule/all-master-to-node:
securityGroup: securityGroup/kubernetes.node.{{ .ClusterName }} securityGroup: securityGroup/kubernetes.node.{{ .ClusterName }}
sourceGroup: securityGroup/kubernetes.master.{{ .ClusterName }} sourceGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
{{ range $zone := .MasterZones }}
# EBS volume
ebsVolume/kubernetes.master.{{$zone}}.{{ $.ClusterName }}:
availabilityZone: {{ $zone }}
sizeGB: {{ or $.MasterVolumeSize 20 }}
volumeType: {{ or $.MasterVolumeType "gp2" }}
tags:
k8s.io/role/master: "1"
k8s.io/etcd/main: "{{ $zone }}/{{ join $.MasterZones "," }}"
k8s.io/etcd/events: "{{ $zone }}/{{ join $.MasterZones "," }}"
{{ end }}

View File

@ -0,0 +1,13 @@
{{ range $zone := .MasterZones }}
# EBS volume per zone, for etcd
ebsVolume/kubernetes.master.{{$zone}}.{{ $.ClusterName }}:
availabilityZone: {{ $zone }}
sizeGB: {{ or $.MasterVolumeSize 20 }}
volumeType: {{ or $.MasterVolumeType "gp2" }}
tags:
k8s.io/role/master: "1"
k8s.io/etcd/main: "{{ $zone }}/{{ join $.MasterZones "," }}"
k8s.io/etcd/events: "{{ $zone }}/{{ join $.MasterZones "," }}"
{{ end }}

View File

@ -141,7 +141,7 @@ func ignoreHandler(i *loader.TreeWalkItem) error {
return nil return nil
} }
func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) { func (l *Loader) Build(modelDirs []string) (map[string]fi.Task, error) {
// First pass: load options // First pass: load options
tw := &loader.TreeWalker{ tw := &loader.TreeWalker{
DefaultHandler: ignoreHandler, DefaultHandler: ignoreHandler,
@ -153,11 +153,14 @@ func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) {
}, },
Tags: l.Tags, Tags: l.Tags,
} }
err := tw.Walk(baseDir) for _, modelDir := range modelDirs {
err := tw.Walk(modelDir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
}
var err error
l.config, err = l.OptionsLoader.Build() l.config, err = l.OptionsLoader.Build()
if err != nil { if err != nil {
return nil, err return nil, err
@ -176,10 +179,12 @@ func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) {
Tags: l.Tags, Tags: l.Tags,
} }
err = tw.Walk(baseDir) for _, modelDir := range modelDirs {
err = tw.Walk(modelDir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
}
err = l.processDeferrals() err = l.processDeferrals()
if err != nil { if err != nil {