mirror of https://github.com/kubernetes/kops.git

Merge pull request #78 from justinsb/upup_multimodel

upup: split model into two parts

commit 40d85dbd98
@@ -65,7 +65,7 @@ You must pass --yes to actually delete resources (without the `#` comment!)

 * See changes that would be applied: `${GOPATH}/bin/cloudup --dryrun`

-* Build a terraform model: `${GOPATH}/bin/cloudup $NORMAL_ARGS --target=terraform > tf/k8s.tf.json`
+* Build a terraform model: `${GOPATH}/bin/cloudup $NORMAL_ARGS --target=terraform`  The terraform model will be built in `state/terraform`

 * Specify the k8s build to run: `-kubernetes-version=1.2.2`
@@ -80,3 +80,36 @@ Each file in the tree describes a Task.

 On the nodeup side, Tasks can manage files, systemd services, packages etc.
 On the cloudup side, Tasks manage cloud resources: instances, networks, disks etc.
+
+## Workaround for terraform bug
+
+Terraform currently has a bug where it can't create AWS tags containing a dot. Until this is fixed,
+you can't use terraform to build EC2 resources that are tagged with `k8s.io/...` tags. Thankfully this is only
+the volumes, and it isn't the worst idea to build these separately anyway.
+
+We divide the 'cloudup' model into two parts:
+* models/proto, which sets up the volumes and other data that would be hard to recover (e.g. likely keys & secrets in the near future)
+* models/cloudup, which is the main cloudup model for configuring everything else
+
+So you don't use terraform for the 'proto' phase (you can't anyway, because of the bug!):
+
+```
+export MYZONE=<kubernetes.myzone.com>
+${GOPATH}/bin/cloudup --v=0 --logtostderr -cloud=aws -zone=us-east-1c -name=${MYZONE} -kubernetes-version=1.2.2 --model=models/proto
+```
+
+And then you can use terraform to do the full installation:
+
+```
+export MYZONE=<kubernetes.myzone.com>
+${GOPATH}/bin/cloudup --v=0 --logtostderr -cloud=aws -zone=us-east-1c -name=${MYZONE} -kubernetes-version=1.2.2 --model=models/cloudup --target=terraform
+```
+
+Then, to apply using terraform:
+
+```
+cd state/terraform
+
+terraform plan
+terraform apply
+```
@@ -28,8 +28,8 @@ func main() {
 	flag.StringVar(&target, "target", target, "Target - direct, terraform")
 	configFile := ""
 	flag.StringVar(&configFile, "conf", configFile, "Configuration file to load")
-	modelDir := "models/cloudup"
-	flag.StringVar(&modelDir, "model", modelDir, "Source directory to use as model")
+	modelDirs := "models/proto,models/cloudup"
+	flag.StringVar(&modelDirs, "model", modelDirs, "Source directory to use as model (separate multiple models with commas)")
 	stateLocation := "./state"
 	flag.StringVar(&stateLocation, "state", stateLocation, "Location to use to store configuration state")
 	nodeModelDir := "models/nodeup"
@@ -73,7 +73,7 @@ func main() {
 	cmd := &CreateClusterCmd{
 		Config:       config,
-		ModelDir:     modelDir,
+		ModelDirs:    strings.Split(modelDirs, ","),
 		StateStore:   stateStore,
 		Target:       target,
 		NodeModelDir: nodeModelDir,
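The `-model` flag and the `strings.Split` call above are the entire plumbing between the CLI and the loader: one comma-separated string becomes an ordered list of model directories. A minimal standalone sketch of that path (the flag name and default are taken from the diff; the rest is illustrative):

```
package main

import (
	"flag"
	"fmt"
	"strings"
)

func main() {
	// Default matches the new two-phase layout: proto first, then cloudup.
	modelDirs := "models/proto,models/cloudup"
	flag.StringVar(&modelDirs, "model", modelDirs, "Source directory to use as model (separate multiple models with commas)")
	flag.Parse()

	// The comma-separated value becomes the ordered list of model
	// directories handed to CreateClusterCmd.ModelDirs.
	dirs := strings.Split(modelDirs, ",")
	fmt.Printf("%q\n", dirs) // ["models/proto" "models/cloudup"]
}
```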
@@ -102,8 +102,8 @@ func main() {
 type CreateClusterCmd struct {
 	// Config is the cluster configuration
 	Config *cloudup.CloudConfig
-	// ModelDir is the directory in which the cloudup model is found
-	ModelDir string
+	// ModelDirs is the list of directories in which the cloudup models are found
+	ModelDirs []string
 	// StateStore is a StateStore in which we store state (such as the PKI tree)
 	StateStore fi.StateStore
 	// Target specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
@@ -403,7 +403,7 @@ func (c *CreateClusterCmd) Run() error {
 		l.Resources["ssh-public-key"] = fi.NewStringResource(string(authorized))
 	}

-	taskMap, err := l.Build(c.ModelDir)
+	taskMap, err := l.Build(c.ModelDirs)
 	if err != nil {
 		glog.Exitf("error building: %v", err)
 	}
@@ -19,7 +19,14 @@ type RootCmd struct {
 	cobraCommand *cobra.Command
 }

-var rootCommand RootCmd
+var rootCommand = RootCmd{
+	cobraCommand: &cobra.Command{
+		Use:   "upup",
+		Short: "upup manages kubernetes clusters",
+		Long: `upup manages kubernetes clusters.
+It allows you to create, destroy, upgrade and maintain them.`,
+	},
+}

 func Execute() {
 	if err := rootCommand.cobraCommand.Execute(); err != nil {
@@ -31,14 +38,7 @@ func Execute() {
 func init() {
 	cobra.OnInitialize(initConfig)

-	cmd := &cobra.Command{
-		Use:   "upup",
-		Short: "upup manages kubernetes clusters",
-		Long: `upup manages kubernetes clusters.
-It allows you to create, destroy, upgrade and maintain them.`,
-	}
-
-	rootCommand.cobraCommand = cmd
+	cmd := rootCommand.cobraCommand

 	cmd.PersistentFlags().StringVar(&rootCommand.configFile, "config", "", "config file (default is $HOME/.upup.yaml)")
 	cmd.PersistentFlags().StringVarP(&rootCommand.stateLocation, "state", "", "", "Location of state storage")
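Net effect of the two hunks above: the cobra command is built once, in the package-level var declaration, and init() merely attaches flags to it, so cobraCommand can never be nil when init() runs. A minimal standalone sketch of the same pattern (this drops the RootCmd wrapper struct, so it is illustrative rather than the actual upup wiring):

```
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// Building the command in the var declaration (rather than in init())
// guarantees it exists as soon as the package is initialized.
var rootCommand = &cobra.Command{
	Use:   "upup",
	Short: "upup manages kubernetes clusters",
	Long: `upup manages kubernetes clusters.
It allows you to create, destroy, upgrade and maintain them.`,
}

func init() {
	// Flags can still be attached in init(), after the command exists.
	rootCommand.PersistentFlags().String("state", "", "Location of state storage")
}

func main() {
	if err := rootCommand.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```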
@@ -41,17 +41,3 @@ securityGroupRule/all-master-to-master:
 securityGroupRule/all-master-to-node:
   securityGroup: securityGroup/kubernetes.node.{{ .ClusterName }}
   sourceGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
-
-{{ range $zone := .MasterZones }}
-
-# EBS volume
-ebsVolume/kubernetes.master.{{$zone}}.{{ $.ClusterName }}:
-  availabilityZone: {{ $zone }}
-  sizeGB: {{ or $.MasterVolumeSize 20 }}
-  volumeType: {{ or $.MasterVolumeType "gp2" }}
-  tags:
-    k8s.io/role/master: "1"
-    k8s.io/etcd/main: "{{ $zone }}/{{ join $.MasterZones "," }}"
-    k8s.io/etcd/events: "{{ $zone }}/{{ join $.MasterZones "," }}"
-
-{{ end }}
@@ -0,0 +1,13 @@
+{{ range $zone := .MasterZones }}
+
+# EBS volume per zone, for etcd
+ebsVolume/kubernetes.master.{{$zone}}.{{ $.ClusterName }}:
+  availabilityZone: {{ $zone }}
+  sizeGB: {{ or $.MasterVolumeSize 20 }}
+  volumeType: {{ or $.MasterVolumeType "gp2" }}
+  tags:
+    k8s.io/role/master: "1"
+    k8s.io/etcd/main: "{{ $zone }}/{{ join $.MasterZones "," }}"
+    k8s.io/etcd/events: "{{ $zone }}/{{ join $.MasterZones "," }}"
+
+{{ end }}
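These model files are Go text/template files: `or` substitutes the default when an option is unset, and `join` is not a template builtin, so the loader has to register it. A small sketch that renders an equivalent per-zone volume template (the data struct and the Funcs registration here are assumptions for illustration, not the loader's actual code):

```
package main

import (
	"os"
	"strings"
	"text/template"
)

// Hypothetical input mirroring the fields the template references.
type modelData struct {
	ClusterName      string
	MasterZones      []string
	MasterVolumeSize int    // zero means "unset", so `or` yields 20
	MasterVolumeType string // empty means "unset", so `or` yields "gp2"
}

const volumes = `{{ range $zone := .MasterZones }}
ebsVolume/kubernetes.master.{{ $zone }}.{{ $.ClusterName }}:
  availabilityZone: {{ $zone }}
  sizeGB: {{ or $.MasterVolumeSize 20 }}
  volumeType: {{ or $.MasterVolumeType "gp2" }}
  tags:
    k8s.io/etcd/main: "{{ $zone }}/{{ join $.MasterZones "," }}"
{{ end }}`

func main() {
	t := template.Must(template.New("volumes").
		// "join" must be registered before Parse; strings.Join has the
		// signature the template expects.
		Funcs(template.FuncMap{"join": strings.Join}).
		Parse(volumes))
	_ = t.Execute(os.Stdout, modelData{
		ClusterName: "kubernetes.myzone.com",
		MasterZones: []string{"us-east-1b", "us-east-1c"},
	})
}
```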
@@ -141,7 +141,7 @@ func ignoreHandler(i *loader.TreeWalkItem) error
 	return nil
 }

-func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) {
+func (l *Loader) Build(modelDirs []string) (map[string]fi.Task, error) {
 	// First pass: load options
 	tw := &loader.TreeWalker{
 		DefaultHandler: ignoreHandler,
@@ -153,11 +153,14 @@ func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) {
 		},
 		Tags: l.Tags,
 	}
-	err := tw.Walk(baseDir)
-	if err != nil {
-		return nil, err
+	for _, modelDir := range modelDirs {
+		err := tw.Walk(modelDir)
+		if err != nil {
+			return nil, err
+		}
 	}

+	var err error
 	l.config, err = l.OptionsLoader.Build()
 	if err != nil {
 		return nil, err
@@ -176,9 +179,11 @@ func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) {
 		Tags: l.Tags,
 	}

-	err = tw.Walk(baseDir)
-	if err != nil {
-		return nil, err
+	for _, modelDir := range modelDirs {
+		err = tw.Walk(modelDir)
+		if err != nil {
+			return nil, err
+		}
 	}

 	err = l.processDeferrals()
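With this change, Build walks each model directory in turn, once for the options pass and once for the task pass, so tasks from models/proto and models/cloudup land in a single merged map. A simplified standalone analogue of that loop (filepath.WalkDir stands in for the real TreeWalker):

```
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// walkModels visits every file in each model directory in order, the
// same shape as the loop Build now runs over modelDirs.
func walkModels(modelDirs []string, visit func(path string) error) error {
	for _, dir := range modelDirs {
		err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}
			if d.IsDir() {
				return nil // only files describe Tasks
			}
			return visit(path)
		})
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := walkModels([]string{"models/proto", "models/cloudup"}, func(p string) error {
		fmt.Println("loading", p)
		return nil
	})
	if err != nil {
		fmt.Println("error:", err)
	}
}
```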