upup: Support for shared VPCs

A lot of work had to happen here:

* Better reuse of config
* Ability to mark VPC & InternetGateway as shared
* Find models relative to the executable, to run from a dir-per-cluster
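
A minimal sketch of that executable-relative lookup (standard library only; main() below does the equivalent with exec.LookPath):
```
package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"path"
)

func main() {
	// Resolve our own binary, then look for ./models next to it, so that
	// cloudup can be copied into a per-cluster directory with its models.
	executable, err := exec.LookPath(os.Args[0])
	if err != nil {
		log.Fatalf("cannot determine location of cloudup tool: %q", os.Args[0])
	}
	fmt.Println(path.Join(path.Dir(executable), "models"))
}
```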

Fixes #95
This commit is contained in:
Justin Santa Barbara 2016-06-13 11:19:15 -04:00
parent 0d617c7ffb
commit 0559ec1210
24 changed files with 582 additions and 351 deletions

View File

@ -40,7 +40,7 @@ you should use Go 1.6 or later)
* Execute:
```
export MYZONE=<kubernetes.myzone.com>
${GOPATH}/bin/cloudup --v=0 --logtostderr -cloud=aws -zones=us-east-1c -name=${MYZONE}
${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE}
```
If you have problems, please set `--v=8 --logtostderr` and open an issue, and ping justinsb on slack!
@ -72,17 +72,17 @@ You must pass --yes to actually delete resources (without the `#` comment!)
* Build a terraform model: `${GOPATH}/bin/cloudup $NORMAL_ARGS --target=terraform`. The terraform model will be built in `state/terraform`
* Specify the k8s build to run: `-kubernetes-version=1.2.2`
* Specify the k8s build to run: `--kubernetes-version=1.2.2`
* Try HA mode: `-zones=us-east-1b,us-east-1c,us-east-1d`
* Try HA mode: `--zones=us-east-1b,us-east-1c,us-east-1d`
* Specify the number of nodes: `-node-count=4`
* Specify the number of nodes: `--node-count=4`
* Specify the node size: `-node-size=m4.large`
* Specify the node size: `--node-size=m4.large`
* Specify the master size: `-master-size=m4.large`
* Specify the master size: `--master-size=m4.large`
* Override the default DNS zone: `-dns-zone=<my.hosted.zone>`
* Override the default DNS zone: `--dns-zone=<my.hosted.zone>`
# How it works
@ -108,14 +108,14 @@ So you don't use terraform for the 'proto' phase (you can't anyway, because of t
```
export MYZONE=<kubernetes.myzone.com>
${GOPATH}/bin/cloudup --v=0 --logtostderr -cloud=aws -zones=us-east-1c -name=${MYZONE} --model=models/proto
${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE} --model=proto
```
And then you can use terraform to do the full installation:
```
export MYZONE=<kubernetes.myzone.com>
${GOPATH}/bin/cloudup --v=0 --logtostderr -cloud=aws -zones=us-east-1c -name=${MYZONE} --model=models/cloudup --target=terraform
${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE} --model=cloudup --target=terraform
```
Then, to apply using terraform:

View File

@ -1,9 +1,10 @@
package main
import (
"flag"
goflag "flag"
"fmt"
"github.com/golang/glog"
"github.com/spf13/pflag"
"io/ioutil"
"k8s.io/kube-deploy/upup/pkg/fi"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
@ -17,111 +18,167 @@ import (
"k8s.io/kube-deploy/upup/pkg/fi/utils"
"k8s.io/kube-deploy/upup/pkg/fi/vfs"
"os"
"os/exec"
"path"
"strings"
)
func main() {
dryrun := false
flag.BoolVar(&dryrun, "dryrun", false, "Don't create cloud resources; just show what would be done")
target := "direct"
flag.StringVar(&target, "target", target, "Target - direct, terraform")
configFile := ""
flag.StringVar(&configFile, "conf", configFile, "Configuration file to load")
modelDirs := "models/proto,models/cloudup"
flag.StringVar(&modelDirs, "model", modelDirs, "Source directory to use as model (separate multiple models with commas)")
stateLocation := "./state"
flag.StringVar(&stateLocation, "state", stateLocation, "Location to use to store configuration state")
nodeModelDir := "models/nodeup"
flag.StringVar(&nodeModelDir, "nodemodel", nodeModelDir, "Source directory to use as model for node configuration")
// TODO: Replace all these with a direct binding to the CloudConfig
// (we have plenty of reflection helpers if one isn't already available!)
config := &cloudup.CloudConfig{}
flag.StringVar(&config.CloudProvider, "cloud", config.CloudProvider, "Cloud provider to use - gce, aws")
zones := ""
flag.StringVar(&zones, "zones", "", "Zones in which to run nodes")
masterZones := ""
flag.StringVar(&zones, "master-zones", masterZones, "Zones in which to run masters (must be an odd number)")
flag.StringVar(&config.Project, "project", config.Project, "Project to use (must be set on GCE)")
flag.StringVar(&config.ClusterName, "name", config.ClusterName, "Name for cluster")
flag.StringVar(&config.KubernetesVersion, "kubernetes-version", config.KubernetesVersion, "Version of kubernetes to run (defaults to latest)")
//flag.StringVar(&config.Region, "region", config.Region, "Cloud region to target")
sshPublicKey := path.Join(os.Getenv("HOME"), ".ssh", "id_rsa.pub")
flag.StringVar(&sshPublicKey, "ssh-public-key", sshPublicKey, "SSH public key to use")
nodeSize := ""
flag.StringVar(&nodeSize, "node-size", nodeSize, "Set instance size for nodes")
masterSize := ""
flag.StringVar(&masterSize, "master-size", masterSize, "Set instance size for masters")
nodeCount := 0
flag.IntVar(&nodeCount, "node-count", nodeCount, "Set the number of nodes")
dnsZone := ""
flag.StringVar(&dnsZone, "dns-zone", dnsZone, "DNS hosted zone to use (defaults to last two components of cluster name)")
flag.Parse()
config.NodeZones = parseZoneList(zones)
if masterZones == "" {
config.MasterZones = config.NodeZones
} else {
config.MasterZones = parseZoneList(masterZones)
executableLocation, err := exec.LookPath(os.Args[0])
if err != nil {
glog.Fatalf("Cannot determine location of cloudup tool: %q. Please report this problem!", os.Args[0])
}
if nodeSize != "" {
config.NodeMachineType = nodeSize
}
if nodeCount != 0 {
config.NodeCount = nodeCount
modelsBaseDirDefault := path.Join(path.Dir(executableLocation), "models")
dryrun := pflag.Bool("dryrun", false, "Don't create cloud resources; just show what would be done")
target := pflag.String("target", "direct", "Target - direct, terraform")
//configFile := pflag.String("conf", "", "Configuration file to load")
modelsBaseDir := pflag.String("modelstore", modelsBaseDirDefault, "Source directory where models are stored")
models := pflag.String("model", "proto,cloudup", "Models to apply (separate multiple models with commas)")
nodeModel := pflag.String("nodemodel", "nodeup", "Model to use for node configuration")
stateLocation := pflag.String("state", "./state", "Location to use to store configuration state")
cloudProvider := pflag.String("cloud", "", "Cloud provider to use - gce, aws")
zones := pflag.String("zones", "", "Zones in which to run nodes")
masterZones := pflag.String("master-zones", "", "Zones in which to run masters (must be an odd number)")
project := pflag.String("project", "", "Project to use (must be set on GCE)")
clusterName := pflag.String("name", "", "Name for cluster")
kubernetesVersion := pflag.String("kubernetes-version", "", "Version of kubernetes to run (defaults to latest)")
sshPublicKey := pflag.String("ssh-public-key", "~/.ssh/id_rsa.pub", "SSH public key to use")
nodeSize := pflag.String("node-size", "", "Set instance size for nodes")
masterSize := pflag.String("master-size", "", "Set instance size for masters")
nodeCount := pflag.Int("node-count", 0, "Set the number of nodes")
dnsZone := pflag.String("dns-zone", "", "DNS hosted zone to use (defaults to last two components of cluster name)")
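// Bridge glog's flags (registered on the standard library "flag" package)
// into pflag; the empty goflag parse below marks the standard flag set as
// parsed so glog does not complain at startup.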
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
pflag.Parse()
goflag.CommandLine.Parse([]string{})
isDryrun := false
if *dryrun {
isDryrun = true
*target = "dryrun"
}
if masterSize != "" {
config.MasterMachineType = masterSize
}
if dnsZone != "" {
config.DNSZone = dnsZone
}
if dryrun {
target = "dryrun"
}
statePath := vfs.NewFSPath(stateLocation)
statePath := vfs.NewFSPath(*stateLocation)
workDir := stateLocation
stateStore, err := fi.NewVFSStateStore(statePath)
stateStore, err := fi.NewVFSStateStore(statePath, isDryrun)
if err != nil {
glog.Errorf("error building state store: %v", err)
os.Exit(1)
}
cmd := &CreateClusterCmd{
Config: config,
ModelDirs: strings.Split(modelDirs, ","),
StateStore: stateStore,
Target: target,
NodeModelDir: nodeModelDir,
SSHPublicKey: sshPublicKey,
WorkDir: workDir,
// TODO: Replace all these with a direct binding to the CloudConfig
// (we have plenty of reflection helpers if one isn't already available!)
config := &cloudup.CloudConfig{}
err = stateStore.ReadConfig(config)
if err != nil {
glog.Errorf("error loading configuration: %v", err)
os.Exit(1)
}
if configFile != "" {
//confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
err := cmd.LoadConfig(configFile)
if err != nil {
glog.Errorf("error loading config: %v", err)
os.Exit(1)
if *zones != "" {
existingZones := make(map[string]*cloudup.ZoneConfig)
for _, zone := range config.NodeZones {
existingZones[zone.Name] = zone
}
for _, zone := range parseZoneList(*zones) {
if existingZones[zone] == nil {
config.NodeZones = append(config.NodeZones, &cloudup.ZoneConfig{
Name: zone,
})
}
}
}
if *masterZones == "" {
if len(config.MasterZones) == 0 {
for _, nodeZone := range config.NodeZones {
config.MasterZones = append(config.MasterZones, nodeZone.Name)
}
}
} else {
config.MasterZones = parseZoneList(*masterZones)
}
if *nodeSize != "" {
config.NodeMachineType = *nodeSize
}
if *nodeCount != 0 {
config.NodeCount = *nodeCount
}
if *masterSize != "" {
config.MasterMachineType = *masterSize
}
if *dnsZone != "" {
config.DNSZone = *dnsZone
}
if *cloudProvider != "" {
config.CloudProvider = *cloudProvider
}
if *project != "" {
config.Project = *project
}
if *clusterName != "" {
config.ClusterName = *clusterName
}
if *kubernetesVersion != "" {
config.KubernetesVersion = *kubernetesVersion
}
err = config.PerformAssignments()
if err != nil {
glog.Errorf("error populating configuration: %v", err)
os.Exit(1)
}
err = stateStore.WriteConfig(config)
if err != nil {
glog.Errorf("error writing updated configuration: %v", err)
os.Exit(1)
}
if *sshPublicKey != "" {
*sshPublicKey = utils.ExpandPath(*sshPublicKey)
}
cmd := &CreateClusterCmd{
Config: config,
ModelStore: *modelsBaseDir,
Models: strings.Split(*models, ","),
StateStore: stateStore,
Target: *target,
NodeModel: *nodeModel,
SSHPublicKey: *sshPublicKey,
WorkDir: *workDir,
}
//if *configFile != "" {
// //confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
// err := cmd.LoadConfig(configFile)
// if err != nil {
// glog.Errorf("error loading config: %v", err)
// os.Exit(1)
// }
//}
err = cmd.Run()
if err != nil {
glog.Errorf("error running command: %v", err)
@ -147,14 +204,16 @@ func parseZoneList(s string) []string {
type CreateClusterCmd struct {
// Config is the cluster configuration
Config *cloudup.CloudConfig
// ModelDir is a list of directories in which the cloudup model are found
ModelDirs []string
// ModelStore is the location where models are found
ModelStore string
// Models is a list of cloudup models to apply
Models []string
// StateStore is a StateStore in which we store state (such as the PKI tree)
StateStore fi.StateStore
// Target specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
Target string
// The directory in which the node model is found
NodeModelDir string
// The node model to use
NodeModel string
// The SSH public key (file) to use
SSHPublicKey string
// WorkDir is a local directory in which we place output, can cache files etc
@ -181,8 +240,12 @@ func (c *CreateClusterCmd) Run() error {
// We (currently) have to use protokube with ASGs
useProtokube := useMasterASG
if c.Config.NodeUp == nil {
c.Config.NodeUp = &cloudup.NodeUpConfig{}
}
if c.Config.ClusterName == "" {
return fmt.Errorf("-name is required (e.g. mycluster.myzone.com)")
return fmt.Errorf("--name is required (e.g. mycluster.myzone.com)")
}
if c.Config.MasterPublicName == "" {
@ -217,10 +280,10 @@ func (c *CreateClusterCmd) Run() error {
{
nodeZones := make(map[string]bool)
for _, z := range c.Config.NodeZones {
if nodeZones[z] {
if nodeZones[z.Name] {
return fmt.Errorf("NodeZones contained a duplicate value: %v", z)
}
nodeZones[z] = true
nodeZones[z.Name] = true
}
}
@ -234,7 +297,7 @@ func (c *CreateClusterCmd) Run() error {
}
if c.Config.CloudProvider == "" {
return fmt.Errorf("-cloud is required (e.g. aws, gce)")
return fmt.Errorf("--cloud is required (e.g. aws, gce)")
}
tags := make(map[string]struct{})
@ -249,7 +312,7 @@ func (c *CreateClusterCmd) Run() error {
stableURL := "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
b, err := utils.ReadLocation(stableURL)
if err != nil {
return fmt.Errorf("-kubernetes-version not specified, and unable to download latest version from %q: %v", stableURL, err)
return fmt.Errorf("--kubernetes-version not specified, and unable to download latest version from %q: %v", stableURL, err)
}
latestVersion := strings.TrimSpace(string(b))
glog.Infof("Using kubernetes latest stable version: %s", latestVersion)
@ -292,7 +355,6 @@ func (c *CreateClusterCmd) Run() error {
var cloud fi.Cloud
var project string
var region string
checkExisting := true
@ -345,14 +407,19 @@ func (c *CreateClusterCmd) Run() error {
// For now a zone to be specified...
// This will be replace with a region when we go full HA
zone := c.Config.NodeZones[0]
if zone == "" {
if zone.Name == "" {
return fmt.Errorf("Must specify a zone (use -zone)")
}
tokens := strings.Split(zone, "-")
tokens := strings.Split(zone.Name, "-")
if len(tokens) <= 2 {
return fmt.Errorf("Invalid Zone: %v", zone)
return fmt.Errorf("Invalid Zone: %v", zone.Name)
}
region = tokens[0] + "-" + tokens[1]
region := tokens[0] + "-" + tokens[1]
if c.Config.Region != "" && region != c.Config.Region {
return fmt.Errorf("zone %q is not in region %q", zone, c.Config.Region)
}
c.Config.Region = region
project = c.Config.Project
if project == "" {
@ -386,16 +453,15 @@ func (c *CreateClusterCmd) Run() error {
"iamRolePolicy": &awstasks.IAMRolePolicy{},
// VPC / Networking
"dhcpOptions": &awstasks.DHCPOptions{},
"internetGateway": &awstasks.InternetGateway{},
"internetGatewayAttachment": &awstasks.InternetGatewayAttachment{},
"route": &awstasks.Route{},
"routeTable": &awstasks.RouteTable{},
"routeTableAssociation": &awstasks.RouteTableAssociation{},
"securityGroup": &awstasks.SecurityGroup{},
"securityGroupRule": &awstasks.SecurityGroupRule{},
"subnet": &awstasks.Subnet{},
"vpc": &awstasks.VPC{},
"dhcpOptions": &awstasks.DHCPOptions{},
"internetGateway": &awstasks.InternetGateway{},
"route": &awstasks.Route{},
"routeTable": &awstasks.RouteTable{},
"routeTableAssociation": &awstasks.RouteTableAssociation{},
"securityGroup": &awstasks.SecurityGroup{},
"securityGroupRule": &awstasks.SecurityGroupRule{},
"subnet": &awstasks.Subnet{},
"vpc": &awstasks.VPC{},
"vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{},
// ELB
@ -422,18 +488,18 @@ func (c *CreateClusterCmd) Run() error {
nodeZones := make(map[string]bool)
for _, zone := range c.Config.NodeZones {
if len(zone) <= 2 {
return fmt.Errorf("Invalid AWS zone: %q", zone)
if len(zone.Name) <= 2 {
return fmt.Errorf("Invalid AWS zone: %q", zone.Name)
}
nodeZones[zone] = true
nodeZones[zone.Name] = true
region = zone[:len(zone)-1]
if c.Config.Region != "" && c.Config.Region != region {
zoneRegion := zone.Name[:len(zone.Name)-1]
if c.Config.Region != "" && zoneRegion != c.Config.Region {
return fmt.Errorf("Clusters cannot span multiple regions")
}
c.Config.Region = region
c.Config.Region = zoneRegion
}
for _, zone := range c.Config.MasterZones {
@ -443,7 +509,7 @@ func (c *CreateClusterCmd) Run() error {
}
}
err := awsup.ValidateRegion(region)
err := awsup.ValidateRegion(c.Config.Region)
if err != nil {
return err
}
@ -454,12 +520,16 @@ func (c *CreateClusterCmd) Run() error {
cloudTags := map[string]string{"KubernetesCluster": c.Config.ClusterName}
awsCloud, err := awsup.NewAWSCloud(region, cloudTags)
awsCloud, err := awsup.NewAWSCloud(c.Config.Region, cloudTags)
if err != nil {
return err
}
err = awsCloud.ValidateZones(c.Config.NodeZones)
var nodeZoneNames []string
for _, z := range c.Config.NodeZones {
nodeZoneNames = append(nodeZoneNames, z.Name)
}
err = awsCloud.ValidateZones(nodeZoneNames)
if err != nil {
return err
}
@ -474,7 +544,8 @@ func (c *CreateClusterCmd) Run() error {
l.Tags = tags
l.WorkDir = c.WorkDir
l.NodeModelDir = c.NodeModelDir
l.ModelStore = c.ModelStore
l.NodeModel = c.NodeModel
l.OptionsLoader = loader.NewOptionsLoader(c.Config)
l.TemplateFunctions["HasTag"] = func(tag string) bool {
@ -509,7 +580,7 @@ func (c *CreateClusterCmd) Run() error {
l.Resources["ssh-public-key"] = fi.NewStringResource(string(authorized))
}
taskMap, err := l.Build(c.ModelDirs)
taskMap, err := l.Build(c.ModelStore, c.Models)
if err != nil {
glog.Exitf("error building: %v", err)
}
@ -530,7 +601,7 @@ func (c *CreateClusterCmd) Run() error {
case "terraform":
checkExisting = false
outDir := path.Join(c.WorkDir, "terraform")
target = terraform.NewTerraformTarget(cloud, region, project, outDir)
target = terraform.NewTerraformTarget(cloud, c.Config.Region, project, outDir)
case "dryrun":
target = fi.NewDryRunTarget(os.Stdout)

View File

@ -3,18 +3,18 @@ package main
import (
"fmt"
"encoding/json"
"github.com/golang/glog"
"github.com/spf13/cobra"
"k8s.io/kube-deploy/upup/pkg/fi/utils"
"k8s.io/kube-deploy/upup/pkg/kutil"
"strings"
"github.com/golang/glog"
"encoding/json"
)
// AddonsCmd represents the addons command
type AddonsCmd struct {
//ClusterName string
cobraCommand *cobra.Command
}
@ -37,13 +37,13 @@ func init() {
}
type kubectlConfig struct {
Kind string `json:"kind`
ApiVersion string `json:"apiVersion`
Kind string `json:"kind"`
ApiVersion string `json:"apiVersion"`
Clusters []*kubectlClusterWithName `json:"clusters"`
}
type kubectlClusterWithName struct {
Name string `json:"name`
Name string `json:"name"`
Cluster kubectlCluster `json:"cluster"`
}
type kubectlCluster struct {
@ -90,5 +90,11 @@ func (c *AddonsCmd) buildClusterAddons() (*kutil.ClusterAddons, error) {
APIEndpoint: server,
}
privateKeyFile := utils.ExpandPath("~/.ssh/id_rsa")
err = kutil.AddSSHIdentity(&k.SSHConfig, privateKeyFile)
if err != nil {
return nil, fmt.Errorf("error adding SSH private key %q: %v", err)
}
return k, nil
}
}

View File

@ -5,7 +5,6 @@ import (
"github.com/golang/glog"
"github.com/spf13/cobra"
"k8s.io/kube-deploy/upup/pkg/kutil"
"k8s.io/kube-deploy/upup/pkg/fi/vfs"
)
@ -39,12 +38,6 @@ func (c *AddonsCreateCmd) Run(args []string) error {
return err
}
privateKeyFile := expandPath("~/.ssh/id_rsa")
err = kutil.AddSSHIdentity(&k.SSHConfig, privateKeyFile)
if err != nil {
return fmt.Errorf("error adding SSH private key %q: %v", err)
}
addonFiles := make(map[string][]vfs.Path)
for _, path := range args {

View File

@ -12,7 +12,7 @@ import (
)
type AddonsGetCmd struct {
ClusterName string
ClusterName string
cobraCommand *cobra.Command
}
@ -45,12 +45,6 @@ func (c *AddonsGetCmd) Run() error {
return err
}
privateKeyFile := expandPath("~/.ssh/id_rsa")
err = kutil.AddSSHIdentity(&k.SSHConfig, privateKeyFile)
if err != nil {
return fmt.Errorf("error adding SSH private key %q: %v", err)
}
addons, err := k.ListAddons()
if err != nil {
return err

View File

@ -75,7 +75,8 @@ func (c *RootCmd) StateStore() (fi.StateStore, error) {
statePath := vfs.NewFSPath(c.stateLocation)
stateStore, err := fi.NewVFSStateStore(statePath)
isDryrun := false
stateStore, err := fi.NewVFSStateStore(statePath, isDryrun)
if err != nil {
return nil, fmt.Errorf("error building state store: %v", err)
}

View File

@ -1,14 +0,0 @@
package main
import (
"strings"
"os"
)
// expandPath replaces common path aliases: ~ -> $HOME
func expandPath(p string) (string) {
if strings.HasPrefix(p, "~/") {
p = os.Getenv("HOME") + p[1:]
}
return p
}

View File

@ -0,0 +1,71 @@
## Running in a shared VPC
cloudup is driven by a configuration file, stored in your state directory (`./state/config` by default).
To build a cluster in an existing VPC, you'll need to add the extra information to that config file
(the CLI flags are just shortcuts for editing the config file; editing it directly is "expert mode").
When launching into a shared VPC, the VPC & the Internet Gateway will be reused, but we create a new subnet per zone,
and a new route table.
Use cloudup in `--dryrun` mode to create a base configuration file:
```
cloudup --cloud=aws --zones=us-east-1b --name=<mycluster.mydomain.com> --node-size=t2.medium --master-size=t2.medium --node-count=2 --dryrun
```
Now edit your `./state/config` file. It will probably look like this:
```
CloudProvider: aws
ClusterName: <mycluster.mydomain.com>
MasterMachineType: t2.medium
MasterZones:
- us-east-1b
NetworkCIDR: 172.22.0.0/16
NodeCount: 2
NodeMachineType: t2.medium
NodeZones:
- cidr: 172.22.0.0/19
name: us-east-1b
```
You need to specify your VPC ID, in the `NetworkID` field. You'll likely also need to update `NetworkCIDR` to match your existing VPC's CIDR,
and to set the `cidr` on each of the NodeZones, because subnets within a VPC cannot overlap. For example:
```
CloudProvider: aws
ClusterName: cluster2.awsdata.com
MasterMachineType: t2.medium
MasterZones:
- us-east-1b
NetworkID: vpc-10f95a77
NetworkCIDR: 172.22.0.0/16
NodeCount: 2
NodeMachineType: t2.medium
NodeZones:
- cidr: 172.22.224.0/19
name: us-east-1b
```
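If you're unsure what CIDR your existing VPC uses, one way to look it up is with the same AWS SDK this repo already vendors (a sketch; the region and VPC id are the example values from above):
```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Describe the shared VPC so NetworkCIDR can be copied into the config.
	svc := ec2.New(session.New(), aws.NewConfig().WithRegion("us-east-1"))
	out, err := svc.DescribeVpcs(&ec2.DescribeVpcsInput{
		VpcIds: []*string{aws.String("vpc-10f95a77")},
	})
	if err != nil {
		log.Fatalf("error describing VPC: %v", err)
	}
	for _, vpc := range out.Vpcs {
		fmt.Printf("NetworkCIDR: %s\n", aws.StringValue(vpc.CidrBlock))
	}
}
```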
You can then run cloudup in dryrun mode (you don't need any arguments, because they're all in the config file):
```
cloudup --dryrun
```
You should see that your VPC changes from `Shared <nil> -> true`. Review the full set of changes to make sure
they are OK - the Kubernetes settings might not be the ones you want on a shared VPC (in which case,
please open an issue!)
Once you're happy, you can create the cluster using:
```
cloudup
```
Finally, if your shared VPC has a KubernetesCluster tag (because it was created with cloudup), you should
probably remove that tag to indicate that the resources are not owned by this cluster, so that
deleting the cluster won't try to delete the VPC. (Deleting the VPC wouldn't succeed anyway, because it's in use,
but it's better to avoid the confusion!)
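
For reference, a sketch of removing that tag with the same SDK (region and VPC id are again the example values):
```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Delete the KubernetesCluster tag from the shared VPC so that no
	// cluster claims ownership of it.
	svc := ec2.New(session.New(), aws.NewConfig().WithRegion("us-east-1"))
	_, err := svc.DeleteTags(&ec2.DeleteTagsInput{
		Resources: []*string{aws.String("vpc-10f95a77")},
		Tags:      []*ec2.Tag{{Key: aws.String("KubernetesCluster")}},
	})
	if err != nil {
		log.Fatalf("error removing tag: %v", err)
	}
}
```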

View File

@ -1,8 +1,12 @@
vpc/kubernetes.{{ .ClusterName }}:
id: {{ .NetworkID }}
shared: {{ .SharedVPC }}
cidr: {{ .NetworkCIDR }}
enableDnsSupport: true
enableDnsHostnames: true
{{ if not .SharedVPC }}
# TODO: would be good to create these as shared, to verify them
dhcpOptions/kubernetes.{{ .ClusterName }}:
domainNameServers: AmazonProvidedDNS
{{ if eq .Region "us-east-1" }}
@ -14,32 +18,30 @@ dhcpOptions/kubernetes.{{ .ClusterName }}:
vpcDHDCPOptionsAssociation/kubernetes.{{ .ClusterName }}:
vpc: vpc/kubernetes.{{ .ClusterName }}
dhcpOptions: dhcpOptions/kubernetes.{{ .ClusterName }}
{{ end }}
internetGateway/kubernetes.{{ .ClusterName }}: {}
internetGatewayAttachment/kubernetes.{{ .ClusterName }}:
internetGateway: internetGateway/kubernetes.{{ .ClusterName }}
internetGateway/kubernetes.{{ .ClusterName }}:
shared: {{ .SharedVPC }}
vpc: vpc/kubernetes.{{ .ClusterName }}
routeTable/kubernetes.{{ .ClusterName }}:
vpc: vpc/kubernetes.{{ .ClusterName }}
{{ range $zone := .NodeZones }}
subnet/kubernetes.{{ $zone }}.{{ $.ClusterName }}:
vpc: vpc/kubernetes.{{ $.ClusterName }}
availabilityZone: {{ $zone}}
cidr: {{ $.SubnetCIDR $zone }} #172.20.0.0/24
routeTableAssociation/kubernetes.{{ $zone }}.{{ $.ClusterName }}:
routeTable: routeTable/kubernetes.{{ $.ClusterName }}
subnet: subnet/kubernetes.{{ $zone }}.{{ $.ClusterName }}
{{ end }}
route/0.0.0.0/0:
routeTable: routeTable/kubernetes.{{ .ClusterName }}
cidr: 0.0.0.0/0
internetGateway: internetGateway/kubernetes.{{ .ClusterName }}
vpc: vpc/kubernetes.{{ .ClusterName }}
{{ range $zone := .NodeZones }}
subnet/kubernetes.{{ $zone.Name }}.{{ $.ClusterName }}:
vpc: vpc/kubernetes.{{ $.ClusterName }}
availabilityZone: {{ $zone.Name }}
cidr: {{ $zone.CIDR }}
routeTableAssociation/kubernetes.{{ $zone.Name }}.{{ $.ClusterName }}:
routeTable: routeTable/kubernetes.{{ $.ClusterName }}
subnet: subnet/kubernetes.{{ $zone.Name }}.{{ $.ClusterName }}
{{ end }}

View File

@ -59,7 +59,7 @@ autoscalingGroup/kubernetes.nodes.{{ .ClusterName }}:
maxSize: {{ .NodeCount }}
subnets:
{{ range $zone := .NodeZones }}
- subnet/kubernetes.{{ $zone }}.{{ $.ClusterName }}
- subnet/kubernetes.{{ $zone.Name }}.{{ $.ClusterName }}
{{ end }}
tags:
k8s.io/role: node

View File

@ -2,8 +2,6 @@ InstancePrefix: kubernetes
AllocateNodeCIDRs: true
Multizone: true
NetworkCIDR: 172.20.0.0/16
ServiceClusterIPRange: 10.0.0.0/16
ClusterIPRange: 10.244.0.0/16
MasterIPRange: 10.246.0.0/24

View File

@ -149,6 +149,11 @@ func (k *PrivateKey) MarshalJSON() ([]byte, error) {
var _ io.WriterTo = &PrivateKey{}
func (k *PrivateKey) WriteTo(w io.Writer) (int64, error) {
if k.Key == nil {
// For the dry-run case
return 0, nil
}
var data bytes.Buffer
var err error
@ -247,6 +252,11 @@ func SignNewCertificate(privateKey *PrivateKey, template *x509.Certificate, sign
var _ io.WriterTo = &Certificate{}
func (c *Certificate) WriteTo(w io.Writer) (int64, error) {
// For the dry-run case
if c.Certificate == nil {
return 0, nil
}
var b bytes.Buffer
err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: c.Certificate.Raw})
if err != nil {

View File

@ -12,8 +12,10 @@ import (
//go:generate fitask -type=InternetGateway
type InternetGateway struct {
Name *string
ID *string
Name *string
ID *string
VPC *VPC
Shared *bool
}
var _ fi.CompareWithID = &InternetGateway{}
@ -26,10 +28,20 @@ func (e *InternetGateway) Find(c *fi.Context) (*InternetGateway, error) {
cloud := c.Cloud.(*awsup.AWSCloud)
request := &ec2.DescribeInternetGatewaysInput{}
if e.ID != nil {
request.InternetGatewayIds = []*string{e.ID}
shared := fi.BoolValue(e.Shared)
if shared {
if fi.StringValue(e.VPC.ID) == "" {
return nil, fmt.Errorf("VPC ID is required when InternetGateway is shared")
}
request.Filters = []*ec2.Filter{awsup.NewEC2Filter("attachment.vpc-id", *e.VPC.ID)}
} else {
request.Filters = cloud.BuildFilters(e.Name)
if e.ID != nil {
request.InternetGatewayIds = []*string{e.ID}
} else {
request.Filters = cloud.BuildFilters(e.Name)
}
}
response, err := cloud.EC2.DescribeInternetGateways(request)
@ -51,6 +63,10 @@ func (e *InternetGateway) Find(c *fi.Context) (*InternetGateway, error) {
glog.V(2).Infof("found matching InternetGateway %q", *actual.ID)
for _, attachment := range igw.Attachments {
actual.VPC = &VPC{ID: attachment.VpcId}
}
e.ID = actual.ID
return actual, nil
@ -62,11 +78,26 @@ func (e *InternetGateway) Run(c *fi.Context) error {
func (s *InternetGateway) CheckChanges(a, e, changes *InternetGateway) error {
if a != nil {
// TODO: I think we can change it; we just detach & attach
if changes.VPC != nil {
return fi.CannotChangeField("VPC")
}
}
return nil
}
func (_ *InternetGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *InternetGateway) error {
shared := fi.BoolValue(e.Shared)
if shared {
// Verify the InternetGateway was found and matches our required settings
if a == nil {
return fmt.Errorf("InternetGateway with id %q not found", fi.StringValue(e.ID))
}
return nil
}
if a == nil {
glog.V(2).Infof("Creating InternetGateway")
@ -80,14 +111,55 @@ func (_ *InternetGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Intern
e.ID = response.InternetGateway.InternetGatewayId
}
if a == nil || (changes != nil && changes.VPC != nil) {
glog.V(2).Infof("Creating InternetGatewayAttachment")
attachRequest := &ec2.AttachInternetGatewayInput{
VpcId: e.VPC.ID,
InternetGatewayId: e.ID,
}
_, err := t.Cloud.EC2.AttachInternetGateway(attachRequest)
if err != nil {
return fmt.Errorf("error attaching InternetGateway to VPC: %v", err)
}
}
return t.AddAWSTags(*e.ID, t.Cloud.BuildTags(e.Name))
}
type terraformInternetGateway struct {
VPCID *terraform.Literal `json:"vpc_id"`
Tags map[string]string `json:"tags,omitempty"`
}
func (_ *InternetGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *InternetGateway) error {
// Rendered in the InternetGatewayAttachment instead
return nil
shared := fi.BoolValue(e.Shared)
if shared {
// Not terraform owned / managed
return nil
}
cloud := t.Cloud.(*awsup.AWSCloud)
tf := &terraformInternetGateway{
VPCID: e.VPC.TerraformLink(),
Tags: cloud.BuildTags(e.Name),
}
return t.RenderResource("aws_internet_gateway", *e.Name, tf)
}
func (e *InternetGateway) TerraformLink() *terraform.Literal {
shared := fi.BoolValue(e.Shared)
if shared {
if e.ID == nil {
glog.Fatalf("ID must be set, if InternetGateway is shared: %s", e)
}
glog.V(4).Infof("reusing existing InternetGateway with id %q", *e.ID)
return terraform.LiteralFromStringValue(*e.ID)
}
return terraform.LiteralProperty("aws_internet_gateway", *e.Name, "id")
}
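
A note on the shared path above: TerraformLink inlines the existing gateway's raw id instead of referencing a terraform-managed resource. A minimal sketch of the two Literal forms (using this repo's terraform package; the names and id are illustrative):
```
package main

import "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform"

func main() {
	// Owned by cloudup: reference the managed resource's id attribute.
	link := terraform.LiteralProperty("aws_internet_gateway", "mygw", "id")

	// Shared: inline the existing gateway's raw id (hypothetical value).
	link = terraform.LiteralFromStringValue("igw-0123456789abcdef0")

	_ = link
}
```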

View File

@ -1,116 +0,0 @@
package awstasks
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/glog"
"k8s.io/kube-deploy/upup/pkg/fi"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform"
)
type InternetGatewayAttachment struct {
VPC *VPC
InternetGateway *InternetGateway
}
func (e *InternetGatewayAttachment) String() string {
return fi.TaskAsString(e)
}
func (e *InternetGatewayAttachment) Find(c *fi.Context) (*InternetGatewayAttachment, error) {
if e.InternetGateway == nil {
return nil, fi.RequiredField("InternetGateway")
}
if e.VPC == nil {
return nil, fi.RequiredField("VPC")
}
if e.VPC.ID == nil {
return nil, nil
}
if e.InternetGateway.ID == nil {
return nil, nil
}
cloud := c.Cloud.(*awsup.AWSCloud)
request := &ec2.DescribeInternetGatewaysInput{
InternetGatewayIds: []*string{e.InternetGateway.ID},
}
response, err := cloud.EC2.DescribeInternetGateways(request)
if err != nil {
return nil, fmt.Errorf("error listing InternetGateways: %v", err)
}
if response == nil || len(response.InternetGateways) == 0 {
return nil, nil
}
if len(response.InternetGateways) != 1 {
return nil, fmt.Errorf("found multiple InternetGateways matching ID")
}
igw := response.InternetGateways[0]
for _, attachment := range igw.Attachments {
if aws.StringValue(attachment.VpcId) == *e.VPC.ID {
actual := &InternetGatewayAttachment{
VPC: &VPC{ID: e.VPC.ID},
InternetGateway: &InternetGateway{ID: e.InternetGateway.ID},
}
glog.V(2).Infof("found matching InternetGateway")
return actual, nil
}
}
return nil, nil
}
func (e *InternetGatewayAttachment) Run(c *fi.Context) error {
return fi.DefaultDeltaRunMethod(e, c)
}
func (s *InternetGatewayAttachment) CheckChanges(a, e, changes *InternetGatewayAttachment) error {
if a != nil {
// TODO: I think we can change it; we just detach & attach
if changes.VPC != nil {
return fi.CannotChangeField("VPC")
}
}
return nil
}
func (_ *InternetGatewayAttachment) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *InternetGatewayAttachment) error {
if a == nil {
glog.V(2).Infof("Creating InternetGatewayAttachment")
attachRequest := &ec2.AttachInternetGatewayInput{
VpcId: e.VPC.ID,
InternetGatewayId: e.InternetGateway.ID,
}
_, err := t.Cloud.EC2.AttachInternetGateway(attachRequest)
if err != nil {
return fmt.Errorf("error attaching InternetGatewayAttachment: %v", err)
}
}
return nil // No tags
}
type terraformInternetGateway struct {
VPCID *terraform.Literal `json:"vpc_id"`
Tags map[string]string `json:"tags,omitempty"`
}
func (_ *InternetGatewayAttachment) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *InternetGatewayAttachment) error {
cloud := t.Cloud.(*awsup.AWSCloud)
tf := &terraformInternetGateway{
VPCID: e.VPC.TerraformLink(),
Tags: cloud.BuildTags(e.InternetGateway.Name),
}
return t.RenderResource("aws_internet_gateway", *e.InternetGateway.Name, tf)
}

View File

@ -18,6 +18,9 @@ type VPC struct {
CIDR *string
EnableDNSHostnames *bool
EnableDNSSupport *bool
// Shared is set if this is a shared VPC
Shared *bool
}
var _ fi.CompareWithID = &VPC{}
@ -31,7 +34,7 @@ func (e *VPC) Find(c *fi.Context) (*VPC, error) {
request := &ec2.DescribeVpcsInput{}
if e.ID != nil {
if fi.StringValue(e.ID) != "" {
request.VpcIds = []*string{e.ID}
} else {
request.Filters = cloud.BuildFilters(e.Name)
@ -46,7 +49,7 @@ func (e *VPC) Find(c *fi.Context) (*VPC, error) {
}
if len(response.Vpcs) != 1 {
glog.Fatalf("found multiple VPCs matching tags")
return nil, fmt.Errorf("found multiple VPCs matching tags")
}
vpc := response.Vpcs[0]
actual := &VPC{
@ -100,6 +103,24 @@ func (e *VPC) Run(c *fi.Context) error {
}
func (_ *VPC) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *VPC) error {
shared := fi.BoolValue(e.Shared)
if shared {
// Verify the VPC was found and matches our required settings
if a == nil {
return fmt.Errorf("VPC with id %q not found", fi.StringValue(e.ID))
}
if changes != nil && changes.EnableDNSSupport != nil {
return fmt.Errorf("VPC with id %q was set to be shared, but did not have EnableDNSSupport=true", fi.StringValue(e.ID))
}
if changes != nil && changes.EnableDNSHostnames != nil {
return fmt.Errorf("VPC with id %q was set to be shared, but did not have EnableDNSHostnames=true", fi.StringValue(e.ID))
}
return nil
}
if a == nil {
glog.V(2).Infof("Creating VPC with CIDR: %q", *e.CIDR)
@ -152,6 +173,12 @@ type terraformVPC struct {
func (_ *VPC) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *VPC) error {
cloud := t.Cloud.(*awsup.AWSCloud)
shared := fi.BoolValue(e.Shared)
if shared {
// Not terraform owned / managed
return nil
}
tf := &terraformVPC{
CIDR: e.CIDR,
Tags: cloud.BuildTags(e.Name),
@ -163,5 +190,15 @@ func (_ *VPC) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *VPC)
}
func (e *VPC) TerraformLink() *terraform.Literal {
shared := fi.BoolValue(e.Shared)
if shared {
if e.ID == nil {
glog.Fatalf("ID must be set, if VPC is shared: %s", e)
}
glog.V(4).Infof("reusing existing VPC with id %q", *e.ID)
return terraform.LiteralFromStringValue(*e.ID)
}
return terraform.LiteralProperty("aws_vpc", *e.Name, "id")
}

View File

@ -19,10 +19,10 @@ type CloudConfig struct {
NodeInit string `json:",omitempty"`
// Configuration of zones we are targeting
MasterZones []string `json:",omitempty"`
NodeZones []string `json:",omitempty"`
Region string `json:",omitempty"`
Project string `json:",omitempty"`
MasterZones []string `json:",omitempty"`
NodeZones []*ZoneConfig `json:",omitempty"`
Region string `json:",omitempty"`
Project string `json:",omitempty"`
// The internal and external names for the master nodes
MasterPublicName string `json:",omitempty"`
@ -31,6 +31,7 @@ type CloudConfig struct {
// The CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s
// This is a real CIDR, not the internal k8s overlay
NetworkCIDR string `json:",omitempty"`
NetworkID string `json:",omitempty"`
// The DNS zone we should use when configuring DNS
DNSZone string `json:",omitempty"`
@ -145,7 +146,12 @@ type CloudConfig struct {
NodeUpTags []string `json:",omitempty"`
NodeUp NodeUpConfig
NodeUp *NodeUpConfig `json:",omitempty"`
}
type ZoneConfig struct {
Name string `json:"name"`
CIDR string `json:"cidr,omitempty"`
}
type NodeUpConfig struct {
@ -185,23 +191,52 @@ func (c *CloudConfig) WellKnownServiceIP(id int) (net.IP, error) {
return nil, fmt.Errorf("Unexpected IP address type for ServiceClusterIPRange: %s", c.ServiceClusterIPRange)
}
func (c *CloudConfig) SubnetCIDR(zone string) (string, error) {
func (c *CloudConfig) PerformAssignments() error {
if c.NetworkCIDR == "" {
// TODO: Choose non-overlapping networking CIDRs for VPCs?
c.NetworkCIDR = "172.20.0.0/16"
}
for _, zone := range c.NodeZones {
err := zone.performAssignments(c)
if err != nil {
return err
}
}
return nil
}
func (z *ZoneConfig) performAssignments(c *CloudConfig) error {
if z.CIDR == "" {
cidr, err := z.assignCIDR(c)
if err != nil {
return err
}
glog.Infof("Assigned CIDR %s to zone %s", cidr, z.Name)
z.CIDR = cidr
}
return nil
}
func (z *ZoneConfig) assignCIDR(c *CloudConfig) (string, error) {
// TODO: We probably could query for the existing subnets & allocate appropriately
// for now we'll require users to set CIDRs themselves
index := -1
for i, z := range c.NodeZones {
for i, nodeZone := range c.NodeZones {
if z == zone {
if nodeZone.Name == z.Name {
index = i
break
}
}
if index == -1 {
return "", fmt.Errorf("zone not configured: %q", zone)
return "", fmt.Errorf("zone not configured: %q", z.Name)
}
_, cidr, err := net.ParseCIDR(c.NetworkCIDR)
if err != nil {
return "", fmt.Errorf("Invalid NetworkCIDR: %q", c.NetworkCIDR)
}
networkLength, _ := cidr.Mask.Size()
// We assume a maximum of 8 subnets per network
@ -215,9 +250,14 @@ func (c *CloudConfig) SubnetCIDR(zone string) (string, error) {
subnetIP := make(net.IP, len(ip4))
binary.BigEndian.PutUint32(subnetIP, n)
subnetCIDR := subnetIP.String() + "/" + strconv.Itoa(networkLength)
glog.V(2).Infof("Computed CIDR for subnet in zone %q as %q", zone, subnetCIDR)
glog.V(2).Infof("Computed CIDR for subnet in zone %q as %q", z.Name, subnetCIDR)
return subnetCIDR, nil
}
return "", fmt.Errorf("Unexpected IP address type for NetworkCIDR: %s", c.NetworkCIDR)
}
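
For a concrete feel of assignCIDR's arithmetic, a standalone sketch (standard library only; the extra 3 mask bits give the "maximum of 8 subnets per network" noted above):
```
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// subnetForIndex carves the parent CIDR into 8 equal subnets (3 extra mask
// bits) and returns the CIDR for the given index, as assignCIDR does.
func subnetForIndex(parent string, index int) (string, error) {
	_, cidr, err := net.ParseCIDR(parent)
	if err != nil {
		return "", err
	}
	maskLen, _ := cidr.Mask.Size()
	maskLen += 3 // 8 subnets per network

	ip4 := cidr.IP.To4()
	if ip4 == nil {
		return "", fmt.Errorf("not an IPv4 CIDR: %q", parent)
	}
	n := binary.BigEndian.Uint32(ip4) + uint32(index)<<uint(32-maskLen)

	subnetIP := make(net.IP, net.IPv4len)
	binary.BigEndian.PutUint32(subnetIP, n)
	return fmt.Sprintf("%s/%d", subnetIP, maskLen), nil
}

func main() {
	for i := 0; i < 3; i++ {
		s, _ := subnetForIndex("172.20.0.0/16", i)
		fmt.Println(s) // 172.20.0.0/19, 172.20.32.0/19, 172.20.64.0/19
	}
}
```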
// SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
func (c *CloudConfig) SharedVPC() bool {
return c.NetworkID != ""
}

View File

@ -27,7 +27,9 @@ type Loader struct {
WorkDir string
OptionsLoader *loader.OptionsLoader
NodeModelDir string
ModelStore string
NodeModel string
Tags map[string]struct{}
TemplateFunctions template.FuncMap
@ -141,7 +143,7 @@ func ignoreHandler(i *loader.TreeWalkItem) error {
return nil
}
func (l *Loader) Build(modelDirs []string) (map[string]fi.Task, error) {
func (l *Loader) Build(modelStore string, models []string) (map[string]fi.Task, error) {
// First pass: load options
tw := &loader.TreeWalker{
DefaultHandler: ignoreHandler,
@ -153,7 +155,8 @@ func (l *Loader) Build(modelDirs []string) (map[string]fi.Task, error) {
},
Tags: l.Tags,
}
for _, modelDir := range modelDirs {
for _, model := range models {
modelDir := path.Join(modelStore, model)
err := tw.Walk(modelDir)
if err != nil {
return nil, err
@ -179,7 +182,8 @@ func (l *Loader) Build(modelDirs []string) (map[string]fi.Task, error) {
Tags: l.Tags,
}
for _, modelDir := range modelDirs {
for _, model := range models {
modelDir := path.Join(modelStore, model)
err = tw.Walk(modelDir)
if err != nil {
return nil, err
@ -434,7 +438,7 @@ func (l *Loader) buildNodeConfig(target string, configResourceName string, args
cmd := &nodeup.NodeUpCommand{
Config: config,
ConfigLocation: "",
ModelDir: l.NodeModelDir,
ModelDir: path.Join(l.ModelStore, l.NodeModel),
Target: target,
AssetDir: assetDir,
}

View File

@ -2,12 +2,18 @@ package fi
import (
"fmt"
"k8s.io/kube-deploy/upup/pkg/fi/utils"
"k8s.io/kube-deploy/upup/pkg/fi/vfs"
"os"
"strings"
)
type StateStore interface {
CA() CAStore
Secrets() SecretStore
ReadConfig(config interface{}) error
WriteConfig(config interface{}) error
}
type VFSStateStore struct {
@ -18,12 +24,12 @@ type VFSStateStore struct {
var _ StateStore = &VFSStateStore{}
func NewVFSStateStore(location vfs.Path) (*VFSStateStore, error) {
func NewVFSStateStore(location vfs.Path, dryrun bool) (*VFSStateStore, error) {
s := &VFSStateStore{
location: location,
}
var err error
s.ca, err = NewVFSCAStore(location.Join("pki"))
s.ca, err = NewVFSCAStore(location.Join("pki"), dryrun)
if err != nil {
return nil, fmt.Errorf("error building CA store: %v", err)
}
@ -42,3 +48,42 @@ func (s *VFSStateStore) CA() CAStore {
func (s *VFSStateStore) Secrets() SecretStore {
return s.secrets
}
func (s *VFSStateStore) ReadConfig(config interface{}) error {
configPath := s.location.Join("config")
data, err := configPath.ReadFile()
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("error reading configuration file %s: %v", configPath, err)
}
// Yaml can't parse empty strings
configString := string(data)
configString = strings.TrimSpace(configString)
if configString != "" {
err = utils.YamlUnmarshal([]byte(configString), config)
if err != nil {
return fmt.Errorf("error parsing configuration: %v", err)
}
}
return nil
}
func (s *VFSStateStore) WriteConfig(config interface{}) error {
configPath := s.location.Join("config")
data, err := utils.YamlMarshal(config)
if err != nil {
return fmt.Errorf("error marshalling configuration: %v", err)
}
err = configPath.WriteFile(data)
if err != nil {
return fmt.Errorf("error writing configuration file %s: %v", configPath, err)
}
return nil
}
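
A condensed sketch of the ReadConfig/WriteConfig round-trip that main() now performs (assuming this commit's package layout):
```
package main

import (
	"log"

	"k8s.io/kube-deploy/upup/pkg/fi"
	"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
	"k8s.io/kube-deploy/upup/pkg/fi/vfs"
)

func main() {
	// Open the state store (dryrun=false), load any previously saved
	// config, overlay a change, recompute derived fields, and persist.
	stateStore, err := fi.NewVFSStateStore(vfs.NewFSPath("./state"), false)
	if err != nil {
		log.Fatalf("error building state store: %v", err)
	}
	config := &cloudup.CloudConfig{}
	if err := stateStore.ReadConfig(config); err != nil {
		log.Fatalf("error loading configuration: %v", err)
	}
	config.NodeCount = 4 // e.g. a CLI flag override
	if err := config.PerformAssignments(); err != nil {
		log.Fatalf("error populating configuration: %v", err)
	}
	if err := stateStore.WriteConfig(config); err != nil {
		log.Fatalf("error writing configuration: %v", err)
	}
}
```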

View File

@ -2,6 +2,7 @@ package utils
import (
"bytes"
"os"
"strings"
)
@ -17,3 +18,11 @@ func SanitizeString(s string) string {
}
return string(out.Bytes())
}
// ExpandPath replaces common path aliases: ~ -> $HOME
func ExpandPath(p string) string {
if strings.HasPrefix(p, "~/") {
p = os.Getenv("HOME") + p[1:]
}
return p
}

View File

@ -1,16 +1,16 @@
package vfs
import (
"golang.org/x/crypto/ssh"
"path"
"github.com/pkg/sftp"
"github.com/golang/glog"
"fmt"
"os"
"bytes"
"sync"
"math/rand"
"fmt"
"github.com/golang/glog"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
"io"
"math/rand"
"os"
"path"
"sync"
)
type SSHPath struct {
@ -26,12 +26,12 @@ func NewSSHPath(client *ssh.Client, server string, path string, sudo bool) *SSHP
return &SSHPath{
client: client,
server: server,
path: path,
sudo: sudo,
path: path,
sudo: sudo,
}
}
func (p*SSHPath) newClient() (*sftp.Client, error) {
func (p *SSHPath) newClient() (*sftp.Client, error) {
if !p.sudo {
sftpClient, err := sftp.NewClient(p.client)
if err != nil {
@ -223,7 +223,6 @@ func (p *SSHPath) Base() string {
return path.Base(p.path)
}
//
//// scpMkdir executes a mkdir against the SSH target, using SCP
//func (s *SSHPath) scpMkdir(dest string, mode os.FileMode) error {

View File

@ -14,6 +14,7 @@ import (
)
type VFSCAStore struct {
dryrun bool
basedir vfs.Path
caCertificate *Certificate
caPrivateKey *PrivateKey
@ -21,8 +22,9 @@ type VFSCAStore struct {
var _ CAStore = &VFSCAStore{}
func NewVFSCAStore(basedir vfs.Path) (CAStore, error) {
func NewVFSCAStore(basedir vfs.Path, dryrun bool) (CAStore, error) {
c := &VFSCAStore{
dryrun: dryrun,
basedir: basedir,
}
//err := os.MkdirAll(path.Join(basedir, "private"), 0700)
@ -134,6 +136,10 @@ func (c *VFSCAStore) loadCertificate(p vfs.Path) (*Certificate, error) {
func (c *VFSCAStore) Cert(id string) (*Certificate, error) {
cert, err := c.FindCert(id)
if err == nil && cert == nil {
if c.dryrun {
glog.Warningf("using empty certificate, because --dryrun specified")
return &Certificate{}, err
}
return nil, fmt.Errorf("cannot find cert %q", id)
}
return cert, err
@ -232,6 +238,10 @@ func (c *VFSCAStore) FindPrivateKey(id string) (*PrivateKey, error) {
func (c *VFSCAStore) PrivateKey(id string) (*PrivateKey, error) {
key, err := c.FindPrivateKey(id)
if err == nil && key == nil {
if c.dryrun {
glog.Warningf("using empty certificate, because --dryrun specified")
return &PrivateKey{}, err
}
return nil, fmt.Errorf("cannot find SSL key %q", id)
}
return key, err

View File

@ -17,7 +17,7 @@ type ClusterAddon struct {
Path string
}
func (c*ClusterAddons) AddonsPath() (vfs.Path, error) {
func (c *ClusterAddons) AddonsPath() (vfs.Path, error) {
// TODO: Close NodeSSH
// TODO: What if endpoint is a load balancer? Query cloud and try to find actual hosts?
@ -66,7 +66,7 @@ func (c *ClusterAddons) ListAddons() (map[string]*ClusterAddon, error) {
return addons, nil
}
func (c *ClusterAddons) CreateAddon(key string, files []vfs.Path) (error) {
func (c *ClusterAddons) CreateAddon(key string, files []vfs.Path) error {
addonsPath, err := c.AddonsPath()
if err != nil {
return err

View File

@ -1,11 +1,11 @@
package kutil
import (
"fmt"
"github.com/golang/glog"
"os"
"os/exec"
"github.com/golang/glog"
"strings"
"fmt"
)
type Kubectl struct {

View File

@ -1,19 +1,19 @@
package kutil
import (
"golang.org/x/crypto/ssh"
"fmt"
"golang.org/x/crypto/ssh"
"io/ioutil"
"k8s.io/kube-deploy/upup/pkg/fi/vfs"
)
type NodeSSH struct {
Hostname string
Hostname string
SSHConfig ssh.ClientConfig
sshClient *ssh.Client
}
func (m*NodeSSH) Root() (*vfs.SSHPath, error) {
func (m *NodeSSH) Root() (*vfs.SSHPath, error) {
client, err := m.GetSSHClient()
if err != nil {
return nil, err
@ -100,4 +100,3 @@ func parsePrivateKeyFile(p string) (ssh.AuthMethod, error) {
}
return ssh.PublicKeys(key), nil
}