mirror of https://github.com/kubernetes/kops.git
Rationalize properties to the minimal set
This commit is contained in:
parent 2700987a0f
commit ee325435e6
@@ -42,16 +42,14 @@ nodeup-tar: gocode

upload: nodeup-tar upup-tar
rm -rf .build/s3
mkdir -p .build/s3/nodeup
cp .build/nodeup.tar.gz .build/s3/nodeup/
cp .build/nodeup.tar.gz.sha1 .build/s3/nodeup/
cp .build/nodeup.tar.gz .build/s3/nodeup/nodeup-1.3.tar.gz
cp .build/nodeup.tar.gz.sha1 .build/s3/nodeup/nodeup-1.3.tar.gz.sha1
mkdir -p .build/s3/upup
cp .build/upup.tar.gz .build/s3/upup/
cp .build/upup.tar.gz.sha1 .build/s3/upup/
cp .build/upup.tar.gz .build/s3/upup/upup-1.3.tar.gz
cp .build/upup.tar.gz.sha1 .build/s3/upup/upup-1.3.tar.gz.sha1
aws s3 sync --acl public-read .build/s3/ s3://kubeupv2/
#aws s3api put-object-acl --bucket kubeupv2 --key nodeup/nodeup.tar.gz --acl public-read
#aws s3api put-object-acl --bucket kubeupv2 --key nodeup/nodeup.tar.gz.sha1 --acl public-read

push: tar
push: nodeup-tar
scp .build/nodeup.tar.gz ${TARGET}:/tmp/
ssh ${TARGET} sudo tar zxf /tmp/nodeup.tar.gz -C /var/cache/kubernetes-install
@@ -40,7 +40,7 @@ you should use Go 1.6 or later)

* Execute:
```
export MYZONE=<kubernetes.myzone.com>
${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE}
${GOPATH}/bin/cloudup --v=0 --logtostderr --cloud=aws --zones=us-east-1c --name=${MYZONE} --state s3://<mybucket>/${MYZONE}
```

If you have problems, please set `--v=8 --logtostderr` and open an issue, and ping justinsb on slack!
@@ -51,7 +51,7 @@ The upup tool is a CLI for doing administrative tasks. You can use it to genera

```
export MYZONE=<kubernetes.myzone.com>
${GOPATH}/bin/upup kubecfg generate --state=state --name=${MYZONE} --cloud=aws
${GOPATH}/bin/upup kubecfg generate --state s3://<mybucket>/${MYZONE}
```

## Delete the cluster
@@ -16,13 +16,13 @@ func buildDefaultCreateCluster() *CreateClusterCmd {

c := &CreateClusterCmd{}

c.Config = &cloudup.CloudConfig{}
c.Config.ClusterName = "testcluster.mydomain.com"
c.Config.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1c"}
c.Config.MasterZones = c.Config.NodeZones
c.ClusterConfig = &cloudup.CloudConfig{}
c.ClusterConfig.ClusterName = "testcluster.mydomain.com"
c.ClusterConfig.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1c"}
c.ClusterConfig.MasterZones = c.ClusterConfig.NodeZones
c.SSHPublicKey = "~/.ssh/id_rsa.pub"

c.Config.CloudProvider = "aws"
c.ClusterConfig.CloudProvider = "aws"

dryrun := false
c.StateStore, err = fi.NewVFSStateStore(vfs.NewFSPath("test-state"), dryrun)
@@ -46,66 +46,66 @@ func expectErrorFromRun(t *testing.T, c *CreateClusterCmd, message string) {

func TestCreateCluster_DuplicateZones(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1b"}
c.Config.MasterZones = []string{"us-east-1a"}
c.ClusterConfig.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1b"}
c.ClusterConfig.MasterZones = []string{"us-east-1a"}
expectErrorFromRun(t, c, "NodeZones contained a duplicate value: us-east-1b")
}

func TestCreateCluster_NoClusterName(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.ClusterName = ""
c.ClusterConfig.ClusterName = ""
expectErrorFromRun(t, c, "-name is required (e.g. mycluster.myzone.com)")
}

func TestCreateCluster_NoCloud(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.CloudProvider = ""
c.ClusterConfig.CloudProvider = ""
expectErrorFromRun(t, c, "-cloud is required (e.g. aws, gce)")
}

func TestCreateCluster_ExtraMasterZone(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.NodeZones = []string{"us-east-1a", "us-east-1c"}
c.Config.MasterZones = []string{"us-east-1a", "us-east-1b", "us-east-1c"}
c.ClusterConfig.NodeZones = []string{"us-east-1a", "us-east-1c"}
c.ClusterConfig.MasterZones = []string{"us-east-1a", "us-east-1b", "us-east-1c"}
expectErrorFromRun(t, c, "All MasterZones must (currently) also be NodeZones")
}

func TestCreateCluster_NoMasterZones(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.MasterZones = []string{}
c.ClusterConfig.MasterZones = []string{}
expectErrorFromRun(t, c, "must specify at least one MasterZone")
}

func TestCreateCluster_NoNodeZones(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.NodeZones = []string{}
c.ClusterConfig.NodeZones = []string{}
expectErrorFromRun(t, c, "must specify at least one NodeZone")
}

func TestCreateCluster_RegionAsZone(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.NodeZones = []string{"us-east-1"}
c.Config.MasterZones = c.Config.NodeZones
c.ClusterConfig.NodeZones = []string{"us-east-1"}
c.ClusterConfig.MasterZones = c.ClusterConfig.NodeZones
expectErrorFromRun(t, c, "Region is not a recognized EC2 region: \"us-east-\" (check you have specified valid zones?)")
}

func TestCreateCluster_BadZone(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.NodeZones = []string{"us-east-1z"}
c.Config.MasterZones = c.Config.NodeZones
c.ClusterConfig.NodeZones = []string{"us-east-1z"}
c.ClusterConfig.MasterZones = c.ClusterConfig.NodeZones
expectErrorFromRun(t, c, "Zone is not a recognized AZ: \"us-east-1z\" (check you have specified a valid zone?)")
}

func TestCreateCluster_MixedRegion(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.NodeZones = []string{"us-west-1a", "us-west-2b", "us-west-2c"}
c.Config.MasterZones = c.Config.NodeZones
c.ClusterConfig.NodeZones = []string{"us-west-1a", "us-west-2b", "us-west-2c"}
c.ClusterConfig.MasterZones = c.ClusterConfig.NodeZones
expectErrorFromRun(t, c, "Clusters cannot span multiple regions")
}

func TestCreateCluster_EvenEtcdClusterSize(t *testing.T) {
c := buildDefaultCreateCluster()
c.Config.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"}
c.Config.MasterZones = c.Config.NodeZones
c.ClusterConfig.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"}
c.ClusterConfig.MasterZones = c.ClusterConfig.NodeZones
expectErrorFromRun(t, c, "There should be an odd number of master-zones, for etcd's quorum. Hint: Use -zone and -master-zone to declare node zones and master zones separately.")
}
@@ -2,21 +2,13 @@ package main

import (
goflag "flag"
"fmt"
"github.com/golang/glog"
"github.com/spf13/pflag"
"io/ioutil"
"k8s.io/kube-deploy/upup/pkg/fi"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/gcetasks"
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform"
"k8s.io/kube-deploy/upup/pkg/fi/fitasks"
"k8s.io/kube-deploy/upup/pkg/fi/loader"
"k8s.io/kube-deploy/upup/pkg/fi/utils"
"k8s.io/kube-deploy/upup/pkg/fi/vfs"
"k8s.io/kubernetes/pkg/util/sets"
"os"
"os/exec"
"path"
@@ -41,7 +33,7 @@ func main() {

cloudProvider := pflag.String("cloud", "", "Cloud provider to use - gce, aws")

zones := pflag.String("zones", "", "Zones in which to run nodes")
zones := pflag.String("zones", "", "Zones in which to run the cluster")
masterZones := pflag.String("master-zones", "", "Zones in which to run masters (must be an odd number)")

project := pflag.String("project", "", "Project to use (must be set on GCE)")
@@ -56,6 +48,8 @@ func main() {

nodeCount := pflag.Int("node-count", 0, "Set the number of nodes")

image := pflag.String("image", "", "Image to use")

dnsZone := pflag.String("dns-zone", "", "DNS hosted zone to use (defaults to last two components of cluster name)")
outDir := pflag.String("out", "", "Path to write any local output")
@@ -90,10 +84,7 @@ func main() {
os.Exit(1)
}

// TODO: Replace all these with a direct binding to the CloudConfig
// (we have plenty of reflection helpers if one isn't already available!)
config := &cloudup.CloudConfig{}
err = stateStore.ReadConfig(config)
cluster, nodeSets, err := cloudup.ReadConfig(stateStore)
if err != nil {
glog.Errorf("error loading configuration: %v", err)
os.Exit(1)
@@ -101,67 +92,135 @@ func main() {

if *zones != "" {
existingZones := make(map[string]*cloudup.ZoneConfig)
for _, zone := range config.NodeZones {
for _, zone := range cluster.Zones {
existingZones[zone.Name] = zone
}

for _, zone := range parseZoneList(*zones) {
if existingZones[zone] == nil {
config.NodeZones = append(config.NodeZones, &cloudup.ZoneConfig{
cluster.Zones = append(cluster.Zones, &cloudup.ZoneConfig{
Name: zone,
})
}
}
}

createMasterVolumes := false
if *masterZones == "" {
if len(config.MasterZones) == 0 {
for _, nodeZone := range config.NodeZones {
config.MasterZones = append(config.MasterZones, nodeZone.Name)
if len(cluster.Masters) == 0 {
// Default to putting into every zone
// TODO: just the first 1 or 3 zones; or should we force users to declare?
for _, zone := range cluster.Zones {
m := &cloudup.MasterConfig{}
m.Zone = zone.Name
m.Name = zone.Name // Subsequent masters (if we support that) could be <zone>-1, <zone>-2
cluster.Masters = append(cluster.Masters, m)
}
createMasterVolumes = true
}
} else {
config.MasterZones = parseZoneList(*masterZones)
if len(cluster.Masters) == 0 {
for _, zone := range parseZoneList(*masterZones) {
m := &cloudup.MasterConfig{}
m.Zone = zone
m.Name = zone
cluster.Masters = append(cluster.Masters, m)
}
createMasterVolumes = true
} else {
// This is hard, because of the etcd cluster
glog.Errorf("Cannot change master-zones from the CLI")
os.Exit(1)
}
}

if createMasterVolumes {
zones := sets.NewString()
for _, m := range cluster.Masters {
zones.Insert(m.Zone)
}
etcdZones := zones.List()
if (len(etcdZones) % 2) == 0 {
// Not technically a requirement, but doesn't really make sense to allow
glog.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use --zone and --master-zone to declare node zones and master zones separately.")
os.Exit(1)
}

for _, zone := range etcdZones {
vol := &cloudup.VolumeConfig{}
vol.Name = "etcd." + zone
vol.Zone = zone
vol.Roles = make(map[string]string)
vol.Roles["etcd/main"] = zone + "/" + strings.Join(etcdZones, ",")
vol.Roles["etcd/events"] = zone + "/" + strings.Join(etcdZones, ",")
cluster.MasterVolumes = append(cluster.MasterVolumes, vol)
}
}

if len(nodeSets) == 0 {
nodeSets = append(nodeSets, &cloudup.NodeSetConfig{})
}

if *nodeSize != "" {
config.NodeMachineType = *nodeSize
for _, ns := range nodeSets {
ns.MachineType = *nodeSize
}
}

if *image != "" {
for _, ns := range nodeSets {
ns.Image = *image
}
for _, master := range cluster.Masters {
master.Image = *image
}
}

if *nodeCount != 0 {
config.NodeCount = *nodeCount
for _, ns := range nodeSets {
ns.MinSize = nodeCount
ns.MaxSize = nodeCount
}
}

if *masterSize != "" {
config.MasterMachineType = *masterSize
for _, master := range cluster.Masters {
master.MachineType = *masterSize
}
}

if *dnsZone != "" {
config.DNSZone = *dnsZone
cluster.DNSZone = *dnsZone
}

if *cloudProvider != "" {
config.CloudProvider = *cloudProvider
cluster.CloudProvider = *cloudProvider
}

if *project != "" {
config.Project = *project
cluster.Project = *project
}

if *clusterName != "" {
config.ClusterName = *clusterName
cluster.ClusterName = *clusterName
}

if *kubernetesVersion != "" {
config.KubernetesVersion = *kubernetesVersion
cluster.KubernetesVersion = *kubernetesVersion
}

err = config.PerformAssignments()
err = cluster.PerformAssignments()
if err != nil {
glog.Errorf("error populating configuration: %v", err)
os.Exit(1)
}
err = cloudup.PerformAssignmentsNodesets(nodeSets)
if err != nil {
glog.Errorf("error populating configuration: %v", err)
os.Exit(1)
}

err = stateStore.WriteConfig(config)
err = cloudup.WriteConfig(stateStore, cluster, nodeSets)
if err != nil {
glog.Errorf("error writing updated configuration: %v", err)
os.Exit(1)
@@ -171,15 +230,16 @@ func main() {
*sshPublicKey = utils.ExpandPath(*sshPublicKey)
}

cmd := &CreateClusterCmd{
Config: config,
ModelStore: *modelsBaseDir,
Models: strings.Split(*models, ","),
StateStore: stateStore,
Target: *target,
NodeModel: *nodeModel,
SSHPublicKey: *sshPublicKey,
OutDir: *outDir,
cmd := &cloudup.CreateClusterCmd{
ClusterConfig: cluster,
NodeSets: nodeSets,
ModelStore: *modelsBaseDir,
Models: strings.Split(*models, ","),
StateStore: stateStore,
Target: *target,
NodeModel: *nodeModel,
SSHPublicKey: *sshPublicKey,
OutDir: *outDir,
}

//if *configFile != "" {
@@ -212,466 +272,3 @@ func parseZoneList(s string) []string {
}
return filtered
}

type CreateClusterCmd struct {
// Config is the cluster configuration
Config *cloudup.CloudConfig
// ModelStore is the location where models are found
ModelStore string
// Models is a list of cloudup models to apply
Models []string
// StateStore is a StateStore in which we store state (such as the PKI tree)
StateStore fi.StateStore
// Target specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
Target string
// The node model to use
NodeModel string
// The SSH public key (file) to use
SSHPublicKey string
// OutDir is a local directory in which we place output, can cache files etc
OutDir string
}

func (c *CreateClusterCmd) LoadConfig(configFile string) error {
conf, err := ioutil.ReadFile(configFile)
if err != nil {
return fmt.Errorf("error loading configuration file %q: %v", configFile, err)
}
err = utils.YamlUnmarshal(conf, c.Config)
if err != nil {
return fmt.Errorf("error parsing configuration file %q: %v", configFile, err)
}
return nil
}

func (c *CreateClusterCmd) Run() error {
// TODO: Make these configurable?
useMasterASG := true
useMasterLB := false

// We (currently) have to use protokube with ASGs
useProtokube := useMasterASG

if c.Config.NodeUp == nil {
c.Config.NodeUp = &cloudup.NodeUpConfig{}
}

if c.Config.ClusterName == "" {
return fmt.Errorf("--name is required (e.g. mycluster.myzone.com)")
}

if c.Config.MasterPublicName == "" {
c.Config.MasterPublicName = "api." + c.Config.ClusterName
}
if c.Config.DNSZone == "" {
tokens := strings.Split(c.Config.MasterPublicName, ".")
c.Config.DNSZone = strings.Join(tokens[len(tokens)-2:], ".")
glog.Infof("Defaulting DNS zone to: %s", c.Config.DNSZone)
}

if len(c.Config.NodeZones) == 0 {
return fmt.Errorf("must specify at least one NodeZone")
}

if len(c.Config.MasterZones) == 0 {
return fmt.Errorf("must specify at least one MasterZone")
}

// Check for master zone duplicates
{
masterZones := make(map[string]bool)
for _, z := range c.Config.MasterZones {
if masterZones[z] {
return fmt.Errorf("MasterZones contained a duplicate value: %v", z)
}
masterZones[z] = true
}
}

// Check for node zone duplicates
{
nodeZones := make(map[string]bool)
for _, z := range c.Config.NodeZones {
if nodeZones[z.Name] {
return fmt.Errorf("NodeZones contained a duplicate value: %v", z)
}
nodeZones[z.Name] = true
}
}

if (len(c.Config.MasterZones) % 2) == 0 {
// Not technically a requirement, but doesn't really make sense to allow
return fmt.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use -zone and -master-zone to declare node zones and master zones separately.")
}

if c.StateStore == nil {
return fmt.Errorf("StateStore is required")
}

if c.Config.CloudProvider == "" {
return fmt.Errorf("--cloud is required (e.g. aws, gce)")
}

tags := make(map[string]struct{})

l := &cloudup.Loader{}
l.Init()

keyStore := c.StateStore.CA()
secretStore := c.StateStore.Secrets()

if vfs.IsClusterReadable(secretStore.VFSPath()) {
vfsPath := secretStore.VFSPath()
c.Config.SecretStore = vfsPath.Path()
if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
if c.Config.MasterPermissions == nil {
c.Config.MasterPermissions = &cloudup.CloudPermissions{}
}
c.Config.MasterPermissions.AddS3Bucket(s3Path.Bucket())
if c.Config.NodePermissions == nil {
c.Config.NodePermissions = &cloudup.CloudPermissions{}
}
c.Config.NodePermissions.AddS3Bucket(s3Path.Bucket())
}
} else {
// We could implement this approach, but it seems better to get all clouds using cluster-readable storage
return fmt.Errorf("secrets path is not cluster readable: %v", secretStore.VFSPath())
}

if vfs.IsClusterReadable(keyStore.VFSPath()) {
vfsPath := keyStore.VFSPath()
c.Config.KeyStore = vfsPath.Path()
if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
if c.Config.MasterPermissions == nil {
c.Config.MasterPermissions = &cloudup.CloudPermissions{}
}
c.Config.MasterPermissions.AddS3Bucket(s3Path.Bucket())
if c.Config.NodePermissions == nil {
c.Config.NodePermissions = &cloudup.CloudPermissions{}
}
c.Config.NodePermissions.AddS3Bucket(s3Path.Bucket())
}
} else {
// We could implement this approach, but it seems better to get all clouds using cluster-readable storage
return fmt.Errorf("keyStore path is not cluster readable: %v", keyStore.VFSPath())
}

if vfs.IsClusterReadable(c.StateStore.VFSPath()) {
c.Config.ConfigStore = c.StateStore.VFSPath().Path()
} else {
// We do support this...
}

if c.Config.KubernetesVersion == "" {
stableURL := "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
b, err := vfs.Context.ReadFile(stableURL)
if err != nil {
return fmt.Errorf("--kubernetes-version not specified, and unable to download latest version from %q: %v", stableURL, err)
}
latestVersion := strings.TrimSpace(string(b))
glog.Infof("Using kubernetes latest stable version: %s", latestVersion)

c.Config.KubernetesVersion = latestVersion
//return fmt.Errorf("Must either specify a KubernetesVersion (-kubernetes-version) or provide an asset with the release bundle")
}

// Normalize k8s version
versionWithoutV := strings.TrimSpace(c.Config.KubernetesVersion)
if strings.HasPrefix(versionWithoutV, "v") {
versionWithoutV = versionWithoutV[1:]
}
if c.Config.KubernetesVersion != versionWithoutV {
glog.Warningf("Normalizing kubernetes version: %q -> %q", c.Config.KubernetesVersion, versionWithoutV)
c.Config.KubernetesVersion = versionWithoutV
}

if len(c.Config.Assets) == 0 {
//defaultReleaseAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/kubernetes-server-linux-amd64.tar.gz", c.Config.KubernetesVersion)
//glog.Infof("Adding default kubernetes release asset: %s", defaultReleaseAsset)

defaultKubeletAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/bin/linux/amd64/kubelet", c.Config.KubernetesVersion)
glog.Infof("Adding default kubelet release asset: %s", defaultKubeletAsset)

defaultKubectlAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/bin/linux/amd64/kubectl", c.Config.KubernetesVersion)
glog.Infof("Adding default kubelet release asset: %s", defaultKubectlAsset)

// TODO: Verify assets exist, get the hash (that will check that KubernetesVersion is valid)

c.Config.Assets = append(c.Config.Assets, defaultKubeletAsset, defaultKubectlAsset)
}

if c.Config.NodeUp.Location == "" {
location := "https://kubeupv2.s3.amazonaws.com/nodeup/nodeup.tar.gz"
glog.Infof("Using default nodeup location: %q", location)
c.Config.NodeUp.Location = location
}

var cloud fi.Cloud

var project string

checkExisting := true

c.Config.NodeUpTags = append(c.Config.NodeUpTags, "_jessie", "_debian_family", "_systemd")

if useProtokube {
tags["_protokube"] = struct{}{}
c.Config.NodeUpTags = append(c.Config.NodeUpTags, "_protokube")
} else {
tags["_not_protokube"] = struct{}{}
c.Config.NodeUpTags = append(c.Config.NodeUpTags, "_not_protokube")
}

if useMasterASG {
tags["_master_asg"] = struct{}{}
} else {
tags["_master_single"] = struct{}{}
}

if useMasterLB {
tags["_master_lb"] = struct{}{}
} else {
tags["_not_master_lb"] = struct{}{}
}

if c.Config.MasterPublicName != "" {
tags["_master_dns"] = struct{}{}
}

l.AddTypes(map[string]interface{}{
"keypair": &fitasks.Keypair{},
"secret": &fitasks.Secret{},
})

switch c.Config.CloudProvider {
case "gce":
{
glog.Fatalf("GCE is (probably) not working currently - please ping @justinsb for cleanup")
tags["_gce"] = struct{}{}
c.Config.NodeUpTags = append(c.Config.NodeUpTags, "_gce")

l.AddTypes(map[string]interface{}{
"persistentDisk": &gcetasks.PersistentDisk{},
"instance": &gcetasks.Instance{},
"instanceTemplate": &gcetasks.InstanceTemplate{},
"network": &gcetasks.Network{},
"managedInstanceGroup": &gcetasks.ManagedInstanceGroup{},
"firewallRule": &gcetasks.FirewallRule{},
"ipAddress": &gcetasks.IPAddress{},
})

// For now a zone to be specified...
// This will be replace with a region when we go full HA
zone := c.Config.NodeZones[0]
if zone.Name == "" {
return fmt.Errorf("Must specify a zone (use -zone)")
}
tokens := strings.Split(zone.Name, "-")
if len(tokens) <= 2 {
return fmt.Errorf("Invalid Zone: %v", zone.Name)
}
region := tokens[0] + "-" + tokens[1]

if c.Config.Region != "" && region != c.Config.Region {
return fmt.Errorf("zone %q is not in region %q", zone, c.Config.Region)
}
c.Config.Region = region

project = c.Config.Project
if project == "" {
return fmt.Errorf("project is required for GCE")
}
gceCloud, err := gce.NewGCECloud(region, project)
if err != nil {
return err
}
cloud = gceCloud
}

case "aws":
{
tags["_aws"] = struct{}{}
c.Config.NodeUpTags = append(c.Config.NodeUpTags, "_aws")

l.AddTypes(map[string]interface{}{
// EC2
"elasticIP": &awstasks.ElasticIP{},
"instance": &awstasks.Instance{},
"instanceElasticIPAttachment": &awstasks.InstanceElasticIPAttachment{},
"instanceVolumeAttachment": &awstasks.InstanceVolumeAttachment{},
"ebsVolume": &awstasks.EBSVolume{},
"sshKey": &awstasks.SSHKey{},

// IAM
"iamInstanceProfile": &awstasks.IAMInstanceProfile{},
"iamInstanceProfileRole": &awstasks.IAMInstanceProfileRole{},
"iamRole": &awstasks.IAMRole{},
"iamRolePolicy": &awstasks.IAMRolePolicy{},

// VPC / Networking
"dhcpOptions": &awstasks.DHCPOptions{},
"internetGateway": &awstasks.InternetGateway{},
"route": &awstasks.Route{},
"routeTable": &awstasks.RouteTable{},
"routeTableAssociation": &awstasks.RouteTableAssociation{},
"securityGroup": &awstasks.SecurityGroup{},
"securityGroupRule": &awstasks.SecurityGroupRule{},
"subnet": &awstasks.Subnet{},
"vpc": &awstasks.VPC{},
"vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{},

// ELB
"loadBalancer": &awstasks.LoadBalancer{},
"loadBalancerAttachment": &awstasks.LoadBalancerAttachment{},
"loadBalancerHealthChecks": &awstasks.LoadBalancerHealthChecks{},

// Autoscaling
"autoscalingGroup": &awstasks.AutoscalingGroup{},
"launchConfiguration": &awstasks.LaunchConfiguration{},

// Route53
"dnsName": &awstasks.DNSName{},
"dnsZone": &awstasks.DNSZone{},
})

if len(c.Config.NodeZones) == 0 {
// TODO: Auto choose zones from region?
return fmt.Errorf("Must specify a zone (use -zone)")
}
if len(c.Config.MasterZones) == 0 {
return fmt.Errorf("Must specify a master zones")
}

nodeZones := make(map[string]bool)
for _, zone := range c.Config.NodeZones {
if len(zone.Name) <= 2 {
return fmt.Errorf("Invalid AWS zone: %q", zone.Name)
}

nodeZones[zone.Name] = true

zoneRegion := zone.Name[:len(zone.Name)-1]
if c.Config.Region != "" && zoneRegion != c.Config.Region {
return fmt.Errorf("Clusters cannot span multiple regions")
}

c.Config.Region = zoneRegion
}

for _, zone := range c.Config.MasterZones {
if !nodeZones[zone] {
// We could relax this, but this seems like a reasonable constraint
return fmt.Errorf("All MasterZones must (currently) also be NodeZones")
}
}

err := awsup.ValidateRegion(c.Config.Region)
if err != nil {
return err
}

if c.SSHPublicKey == "" {
return fmt.Errorf("SSH public key must be specified when running with AWS")
}

cloudTags := map[string]string{awsup.TagClusterName: c.Config.ClusterName}

awsCloud, err := awsup.NewAWSCloud(c.Config.Region, cloudTags)
if err != nil {
return err
}

var nodeZoneNames []string
for _, z := range c.Config.NodeZones {
nodeZoneNames = append(nodeZoneNames, z.Name)
}
err = awsCloud.ValidateZones(nodeZoneNames)
if err != nil {
return err
}
cloud = awsCloud

l.TemplateFunctions["MachineTypeInfo"] = awsup.GetMachineTypeInfo
}

default:
return fmt.Errorf("unknown CloudProvider %q", c.Config.CloudProvider)
}

l.Tags = tags
l.WorkDir = c.OutDir
l.ModelStore = c.ModelStore
l.NodeModel = c.NodeModel
l.OptionsLoader = loader.NewOptionsLoader(c.Config)

l.TemplateFunctions["HasTag"] = func(tag string) bool {
_, found := l.Tags[tag]
return found
}

// TODO: Sort this out...
l.OptionsLoader.TemplateFunctions["HasTag"] = l.TemplateFunctions["HasTag"]

l.TemplateFunctions["CA"] = func() fi.CAStore {
return keyStore
}
l.TemplateFunctions["Secrets"] = func() fi.SecretStore {
return secretStore
}

if c.SSHPublicKey != "" {
authorized, err := ioutil.ReadFile(c.SSHPublicKey)
if err != nil {
return fmt.Errorf("error reading SSH key file %q: %v", c.SSHPublicKey, err)
}

l.Resources["ssh-public-key"] = fi.NewStringResource(string(authorized))
}

taskMap, err := l.Build(c.ModelStore, c.Models)
if err != nil {
glog.Exitf("error building: %v", err)
}

var target fi.Target

switch c.Target {
case "direct":
switch c.Config.CloudProvider {
case "gce":
target = gce.NewGCEAPITarget(cloud.(*gce.GCECloud))
case "aws":
target = awsup.NewAWSAPITarget(cloud.(*awsup.AWSCloud))
default:
return fmt.Errorf("direct configuration not supported with CloudProvider:%q", c.Config.CloudProvider)
}

case "terraform":
checkExisting = false
outDir := path.Join(c.OutDir, "terraform")
target = terraform.NewTerraformTarget(cloud, c.Config.Region, project, outDir)

case "dryrun":
target = fi.NewDryRunTarget(os.Stdout)
default:
return fmt.Errorf("unsupported target type %q", c.Target)
}

context, err := fi.NewContext(target, cloud, keyStore, secretStore, checkExisting)
if err != nil {
glog.Exitf("error building context: %v", err)
}
defer context.Close()

err = context.RunTasks(taskMap)
if err != nil {
glog.Exitf("error running tasks: %v", err)
}

err = target.Finish(taskMap)
if err != nil {
glog.Exitf("error closing target: %v", err)
}

return nil
}
@@ -15,6 +15,8 @@ func main() {
flag.StringVar(&flagConf, "conf", "node.yaml", "configuration location")
var flagAssetDir string
flag.StringVar(&flagAssetDir, "assets", "/var/cache/nodeup", "the location for the local asset cache")
var flagRootFS string
flag.StringVar(&flagRootFS, "rootfs", "/", "the location of the machine root (for running in a container)")

dryrun := false
flag.BoolVar(&dryrun, "dryrun", false, "Don't create cloud resources; just show what would be done")
@@ -32,13 +34,12 @@ func main() {
glog.Exitf("--conf is required")
}

config := &nodeup.NodeConfig{}
cmd := &nodeup.NodeUpCommand{
Config: config,
ConfigLocation: flagConf,
ModelDir: flagModel,
Target: target,
AssetDir: flagAssetDir,
FSRoot: flagRootFS,
}
err := cmd.Run(os.Stdout)
if err != nil {
@@ -43,18 +43,17 @@ func (c *KubecfgGenerateCommand) Run() error {
return fmt.Errorf("error state store: %v", err)
}

config := &cloudup.CloudConfig{}
err = stateStore.ReadConfig(config)
cluster, _, err := cloudup.ReadConfig(stateStore)
if err != nil {
return fmt.Errorf("error reading configuration: %v", err)
}

clusterName := config.ClusterName
clusterName := cluster.ClusterName
if clusterName == "" {
return fmt.Errorf("ClusterName must be set in config")
}

master := config.MasterPublicName
master := cluster.MasterPublicName
if master == "" {
master = "api." + clusterName
}
@@ -44,23 +44,22 @@ func (c *UpgradeClusterCmd) Run() error {
return fmt.Errorf("error state store: %v", err)
}

config := &cloudup.CloudConfig{}
err = stateStore.ReadConfig(config)
cluster, nodeSets, err := cloudup.ReadConfig(stateStore)
if err != nil {
return fmt.Errorf("error reading configuration: %v", err)
}

oldClusterName := config.ClusterName
oldClusterName := cluster.ClusterName
if oldClusterName == "" {
return fmt.Errorf("(Old) ClusterName must be set in configuration")
}

if len(config.NodeZones) == 0 {
return fmt.Errorf("Configuration must include NodeZones")
if len(cluster.Zones) == 0 {
return fmt.Errorf("Configuration must include Zones")
}

region := ""
for _, zone := range config.NodeZones {
for _, zone := range cluster.Zones {
if len(zone.Name) <= 2 {
return fmt.Errorf("Invalid AWS zone: %q", zone.Name)
}
@@ -83,7 +82,8 @@ func (c *UpgradeClusterCmd) Run() error {
d.NewClusterName = c.NewClusterName
d.OldClusterName = oldClusterName
d.Cloud = cloud
d.Config = config
d.ClusterConfig = cluster
d.NodeSets = nodeSets
d.StateStore = stateStore

err = d.Upgrade()
@@ -0,0 +1,19 @@
There is a schema-ed object ClusterConfiguration

Users tweak values in the "specified" ClusterConfiguration

We compute the "complete" ClusterConfiguration by populating defaults and inferring values
* We try to remove any logic from downstream pieces
* This also means that there is one source of truth

Note this is a little different to how kubernetes specs normally work: k8s has a
separation between spec and status, but this is all spec. k8s will auto-populate the spec
and not retain the "user-specified" spec, and this sometimes causes a few problems when it comes to
exports & updates (e.g. ClusterIP). By storing the complete spec separately we ensure that the spec
has all the information - so dependent steps don't have inference logic - but we still only keep the
values that are specified. As a concrete example, we only store the kubernetes version if the user specifies
it; if not, we will follow k8s versions as they come out. (TODO: Not the best example. Maybe instance type?)

The way we store the ClusterConfiguration is an implementation detail, in terms of how it is broken
into files. This might well change in future. For example, we might put NodeSet configuration storage into the
kubernetes API.
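To make the specified-vs-complete distinction concrete, here is a minimal sketch. The field names are drawn from values this commit touches (ClusterName, CloudProvider, Zones, MasterPublicName, DNSZone, KubernetesVersion), but the exact YAML serialization is illustrative, not a documented schema:

```
# "specified" configuration: only what the user set
ClusterName: mycluster.mydomain.com
CloudProvider: aws
Zones:
- Name: us-east-1c
---
# "complete" configuration: the same object after defaults are populated and values inferred
ClusterName: mycluster.mydomain.com
CloudProvider: aws
Zones:
- Name: us-east-1c
MasterPublicName: api.mycluster.mydomain.com  # inferred as "api." + ClusterName
DNSZone: mydomain.com                         # inferred from the last two components of the name
KubernetesVersion: 1.3.0                      # resolved from the latest stable release when unset
```

Only the specified form is persisted; the complete form can always be recomputed from it.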
@@ -13,7 +13,7 @@ is supported, but support for GCS is coming soon, along with encrypted storage.
The state store is just files; you can copy the files down and put them into git (or your preferred version
control system).

## <statestore>/config
## {statestore}/config

One of the most important files in the state store is the top-level config file. This file stores the main
configuration for your cluster (instance types, zones, etc).
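As a rough illustration (the keys are examples, not a fixed schema), this top-level config file holds the user-specified cluster settings described in the design note above, for instance:

```
ClusterName: mycluster.mydomain.com
CloudProvider: aws
KubernetesVersion: 1.3.0
Zones:
- Name: us-east-1c
  CIDR: 172.20.1.0/24
```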
@@ -1,24 +1,16 @@
MasterImage: 282335181503/k8s-1.3-debian-jessie-amd64-hvm-ebs-2016-06-18
{{ if gt .NodeCount 500 }}
{{ if gt TotalNodeCount 500 }}
MasterMachineType: c4.8xlarge
{{ else if gt .NodeCount 250 }}
{{ else if gt TotalNodeCount 250 }}
MasterMachineType: c4.4xlarge
{{ else if gt .NodeCount 100 }}
{{ else if gt TotalNodeCount 100 }}
MasterMachineType: m3.2xlarge
{{ else if gt .NodeCount 10 }}
{{ else if gt TotalNodeCount 10 }}
MasterMachineType: m3.xlarge
{{ else if gt .NodeCount 5 }}
{{ else if gt TotalNodeCount 5 }}
MasterMachineType: m3.large
{{ else }}
MasterMachineType: m3.medium
{{ end }}
MasterVolumeType: gp2

NodeImage: 282335181503/k8s-1.3-debian-jessie-amd64-hvm-ebs-2016-06-18
{{ if gt .NodeCount 150 }}
NodeMachineType: t2.medium
{{ else if gt .NodeCount 50 }}
NodeMachineType: t2.small
{{ else }}
NodeMachineType: t2.micro
{{ end }}
# We just always use a t2.medium by default for the nodes
@@ -1,22 +1,22 @@
{{ range $zone := .MasterZones }}
{{ range $m := Masters }}

# ASG for master
launchConfiguration/kubernetes.master.{{ $zone }}.{{ $.ClusterName }}:
sshKey: sshKey/kubernetes.{{ $.ClusterName }}
launchConfiguration/{{ $m.Name }}.masters.{{ $.ClusterName }}:
sshKey: sshKey/{{ $.ClusterName }}
securityGroups:
- securityGroup/kubernetes.master.{{ $.ClusterName }}
iamInstanceProfile: iamInstanceProfile/kubernetes.master.{{ $.ClusterName }}
imageId: {{ $.MasterImage }}
instanceType: {{ $.MasterMachineType }}
- securityGroup/masters.{{ $.ClusterName }}
iamInstanceProfile: iamInstanceProfile/masters.{{ $.ClusterName }}
imageId: {{ $m.Image }}
instanceType: {{ $m.MachineType }}
associatePublicIP: true
userData: resources/nodeup.sh _kubernetes_master

autoscalingGroup/kubernetes.master.{{ $zone }}.{{ $.ClusterName }}:
autoscalingGroup/{{ $m.Name}}.masters.{{ $.ClusterName }}:
minSize: 1
maxSize: 1
subnets:
- subnet/kubernetes.{{ $zone }}.{{ $.ClusterName }}
launchConfiguration: launchConfiguration/kubernetes.master.{{ $zone }}.{{ $.ClusterName }}
- subnet/{{ $m.Zone }}.{{ $.ClusterName }}
launchConfiguration: launchConfiguration/{{ $m.Name }}.masters.{{ $.ClusterName }}
tags:
k8s.io/role/master: "1"
k8s.io/dns/internal: "api.internal.{{ $.ClusterName }}"
@@ -26,9 +26,9 @@ autoscalingGroup/kubernetes.master.{{ $zone }}.{{ $.ClusterName }}:

{{ if HasTag "_master_lb" }}
# Attach ASG to ELB
loadBalancerAttachment/kubernetes.master.{{ $zone }}.{{ $.ClusterName }}:
loadBalancerAttachment/masters.{{ $m.Name }}.{{ $.ClusterName }}:
loadBalancer: loadBalancer/api.{{ $.ClusterName }}
autoscalingGroup: autoscalingGroup/kubernetes.master.{{ $zone }}.{{ $.ClusterName }}
autoscalingGroup: autoscalingGroup/{{ $m.Name }}.{{ $.ClusterName }}
{{ end }}

{{ end }}
@@ -7,7 +7,7 @@ loadBalancer/api.{{ .ClusterName }}:
- securityGroup/api.{{ .ClusterName }}
subnets:
{{ range $zone := .MasterZones }}
- subnet/kubernetes-{{ $zone }}-{{ $.ClusterName }}
- subnet/{{ $zone }}-{{ $.ClusterName }}
{{ end }}
listeners:
443: { instancePort: 443 }
@@ -23,7 +23,7 @@ loadBalancerHealthChecks/api.{{ .ClusterName }}:

# Security group for master ELB
securityGroup/api.{{ .ClusterName }}:
vpc: vpc/kubernetes-{{ .ClusterName }}
vpc: vpc/{{ .ClusterName }}
description: 'Security group for ELB in front of masters'

# Allow full egress
@@ -42,7 +42,7 @@ securityGroupRule/https-external-to-api:

# Allow HTTPS to the master from the master ELB
securityGroupRule/https-elb-to-master:
securityGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
securityGroup: securityGroup/masters.{{ .ClusterName }}
sourceGroup: securityGroup/api.{{ .ClusterName }}
protocol: tcp
fromPort: 443
@@ -1,7 +1,7 @@
# Master Elastic IP Address
elasticIP/kubernetes.master.{{ .ClusterName }}:
elasticIP/master.{{ .ClusterName }}:
publicIP: {{ .MasterPublicIP }}
tagOnResource: ebsVolume/kubernetes.master.{{ .ClusterName }}
tagOnResource: ebsVolume/master.{{ .ClusterName }}
tagUsingKey: kubernetes.io/master-ip

# Attach master IP to master
@@ -11,16 +11,16 @@ instanceElasticIPAttachment/master-ip-attachment:

# Attach master EBS volume to master
instanceVolumeAttachment/master-volume-attachment:
instance: instance/kubernetes.master.{{ .ClusterName }}
volume: ebsVolume/kubernetes.master.{{ .ClusterName }}
instance: instance/master.{{ .ClusterName }}
volume: ebsVolume/master.{{ .ClusterName }}
device: /dev/sdb

# We only expect one zone..
{{ range $zone := .MasterZones }}

# Master instance
instance/kubernetes.master.{{ $.ClusterName }}:
subnet: subnet/kubernetes.{{ $zone }}.{{ $.ClusterName }}
instance/master.{{ $.ClusterName }}:
subnet: subnet/{{ $zone }}.{{ $.ClusterName }}
privateIpAddress: {{ $.MasterInternalIP }}
tags:
k8s.io/role/master: "1"
@@ -28,17 +28,17 @@ instance/kubernetes.master.{{ $.ClusterName }}:
{{ if not (HasTag "_master_lb") }}
k8s.io/dns/public: "api.{{ $.ClusterName }}"
{{ end }}
sshKey: sshKey/kubernetes.{{ $.ClusterName }}
sshKey: sshKey/{{ $.ClusterName }}
securityGroups:
- securityGroup/kubernetes.master.{{ $.ClusterName }}
iamInstanceProfile: iamInstanceProfile/kubernetes.master.{{ $.ClusterName }}
- securityGroup/master.{{ $.ClusterName }}
iamInstanceProfile: iamInstanceProfile/master.{{ $.ClusterName }}
imageId: {{ $.MasterImage }}
instanceType: {{ $.MasterMachineType }}
associatePublicIP: true
userData: resources/nodeup.sh _kubernetes_master
blockDeviceMappings:
# Map all the ephemeral devices
{{ range $d := (MachineTypeInfo $.NodeMachineType).EphemeralDevices }}
{{ range $d := (MachineTypeInfo $.MasterMachineType).EphemeralDevices }}
- DeviceName: {{ $d.DeviceName }}
VirtualName: {{ $d.VirtualName }}
{{ end }}
@@ -48,13 +48,13 @@ instance/kubernetes.master.{{ $.ClusterName }}:

# Route for master
route/master:
routeTable: routeTable/kubernetes.{{ .ClusterName }}
routeTable: routeTable/{{ .ClusterName }}
cidr: {{ .MasterIPRange }}
instance: instance/kubernetes.master.{{ .ClusterName }}
instance: instance/master.{{ .ClusterName }}

{{ if hasTag _master_lb }}
# Attach instance to ELB
loadBalancerAttachment/master-{{ $zone }}-{{ .ClusterName }}:
loadBalancer: loadBalancer/api.{{ .ClusterName }}
instance: instance/kubernetes.master.{{ .ClusterName }}
instance: instance/master.{{ .ClusterName }}
{{ end }}
@@ -4,7 +4,7 @@

# HTTPS to the master is allowed (for API access)
securityGroupRule/https-external-to-master:
securityGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
securityGroup: securityGroup/masters.{{ .ClusterName }}
cidr: 0.0.0.0/0
protocol: tcp
fromPort: 443
@@ -1,32 +1,32 @@
# IAM configuration
# We create an instance role for the master
iamRole/kubernetes.master.{{ .ClusterName }}:
iamRole/masters.{{ .ClusterName }}:
rolePolicyDocument: resources/iam/kubernetes-master-role.json

iamRolePolicy/kubernetes.master.{{ .ClusterName }}:
role: iamRole/kubernetes.master.{{ .ClusterName }}
iamRolePolicy/masters.{{ .ClusterName }}:
role: iamRole/masters.{{ .ClusterName }}
policyDocument: resources/iam/kubernetes-master-policy.json

iamInstanceProfile/kubernetes.master.{{ .ClusterName }}: {}
iamInstanceProfile/masters.{{ .ClusterName }}: {}

iamInstanceProfileRole/kubernetes.master.{{ .ClusterName }}:
instanceProfile: iamInstanceProfile/kubernetes.master.{{ .ClusterName }}
role: iamRole/kubernetes.master.{{ .ClusterName }}
iamInstanceProfileRole/masters.{{ .ClusterName }}:
instanceProfile: iamInstanceProfile/masters.{{ .ClusterName }}
role: iamRole/masters.{{ .ClusterName }}

# Security group for master
securityGroup/kubernetes.master.{{ .ClusterName }}:
vpc: vpc/kubernetes.{{ .ClusterName }}
securityGroup/masters.{{ .ClusterName }}:
vpc: vpc/{{ .ClusterName }}
description: 'Security group for masters'

# Allow full egress
securityGroupRule/master-egress:
securityGroup: securityGroup/kubernetes.master.{{.ClusterName}}
securityGroup: securityGroup/masters.{{.ClusterName}}
egress: true
cidr: 0.0.0.0/0

# SSH is open to the world
securityGroupRule/ssh-external-to-master:
securityGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
securityGroup: securityGroup/masters.{{ .ClusterName }}
cidr: 0.0.0.0/0
protocol: tcp
fromPort: 22
@@ -34,10 +34,10 @@ securityGroupRule/ssh-external-to-master:

# Masters can talk to masters
securityGroupRule/all-master-to-master:
securityGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
sourceGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
securityGroup: securityGroup/masters.{{ .ClusterName }}
sourceGroup: securityGroup/masters.{{ .ClusterName }}

# Masters can talk to nodes
securityGroupRule/all-master-to-node:
securityGroup: securityGroup/kubernetes.node.{{ .ClusterName }}
sourceGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
securityGroup: securityGroup/nodes.{{ .ClusterName }}
sourceGroup: securityGroup/masters.{{ .ClusterName }}
@@ -1,4 +1,4 @@
vpc/kubernetes.{{ .ClusterName }}:
vpc/{{ .ClusterName }}:
id: {{ .NetworkID }}
shared: {{ .SharedVPC }}
cidr: {{ .NetworkCIDR }}
@@ -7,41 +7,41 @@ vpc/kubernetes.{{ .ClusterName }}:

{{ if not .SharedVPC }}
# TODO: would be good to create these as shared, to verify them
dhcpOptions/kubernetes.{{ .ClusterName }}:
dhcpOptions/{{ .ClusterName }}:
domainNameServers: AmazonProvidedDNS
{{ if eq .Region "us-east-1" }}
{{ if eq Region "us-east-1" }}
domainName: ec2.internal
{{ else }}
domainName: {{ .Region }}.compute.internal
domainName: {{ Region }}.compute.internal
{{ end }}

vpcDHDCPOptionsAssociation/kubernetes.{{ .ClusterName }}:
vpc: vpc/kubernetes.{{ .ClusterName }}
dhcpOptions: dhcpOptions/kubernetes.{{ .ClusterName }}
vpcDHDCPOptionsAssociation/{{ .ClusterName }}:
vpc: vpc/{{ .ClusterName }}
dhcpOptions: dhcpOptions/{{ .ClusterName }}
{{ end }}

internetGateway/kubernetes.{{ .ClusterName }}:
internetGateway/{{ .ClusterName }}:
shared: {{ .SharedVPC }}
vpc: vpc/kubernetes.{{ .ClusterName }}
vpc: vpc/{{ .ClusterName }}

routeTable/kubernetes.{{ .ClusterName }}:
vpc: vpc/kubernetes.{{ .ClusterName }}
routeTable/{{ .ClusterName }}:
vpc: vpc/{{ .ClusterName }}

route/0.0.0.0/0:
routeTable: routeTable/kubernetes.{{ .ClusterName }}
routeTable: routeTable/{{ .ClusterName }}
cidr: 0.0.0.0/0
internetGateway: internetGateway/kubernetes.{{ .ClusterName }}
vpc: vpc/kubernetes.{{ .ClusterName }}
internetGateway: internetGateway/{{ .ClusterName }}
vpc: vpc/{{ .ClusterName }}

{{ range $zone := .NodeZones }}
{{ range $zone := .Zones }}

subnet/kubernetes.{{ $zone.Name }}.{{ $.ClusterName }}:
vpc: vpc/kubernetes.{{ $.ClusterName }}
subnet/{{ $zone.Name }}.{{ $.ClusterName }}:
vpc: vpc/{{ $.ClusterName }}
availabilityZone: {{ $zone.Name }}
cidr: {{ $zone.CIDR }}

routeTableAssociation/kubernetes.{{ $zone.Name }}.{{ $.ClusterName }}:
routeTable: routeTable/kubernetes.{{ $.ClusterName }}
subnet: subnet/kubernetes.{{ $zone.Name }}.{{ $.ClusterName }}
routeTableAssociation/{{ $zone.Name }}.{{ $.ClusterName }}:
routeTable: routeTable/{{ $.ClusterName }}
subnet: subnet/{{ $zone.Name }}.{{ $.ClusterName }}

{{ end }}
@@ -1,32 +1,32 @@
# IAM configuration
# We create an instance role for the nodes
iamRole/kubernetes.node.{{ .ClusterName }}:
iamRole/nodes.{{ .ClusterName }}:
rolePolicyDocument: resources/iam/kubernetes-node-role.json

iamRolePolicy/kubernetes.node.{{ .ClusterName }}:
role: iamRole/kubernetes.node.{{ .ClusterName }}
iamRolePolicy/nodes.{{ .ClusterName }}:
role: iamRole/nodes.{{ .ClusterName }}
policyDocument: resources/iam/kubernetes-node-policy.json

iamInstanceProfile/kubernetes.node.{{ .ClusterName }}: {}
iamInstanceProfile/nodes.{{ .ClusterName }}: {}

iamInstanceProfileRole/kubernetes.node.{{ .ClusterName }}:
instanceProfile: iamInstanceProfile/kubernetes.node.{{ .ClusterName }}
role: iamRole/kubernetes.node.{{ .ClusterName }}
iamInstanceProfileRole/nodes.{{ .ClusterName }}:
instanceProfile: iamInstanceProfile/nodes.{{ .ClusterName }}
role: iamRole/nodes.{{ .ClusterName }}

# Create security group for nodes
securityGroup/kubernetes.node.{{.ClusterName}}:
vpc: vpc/kubernetes.{{ .ClusterName }}
securityGroup/nodes.{{.ClusterName}}:
vpc: vpc/{{ .ClusterName }}
description: 'Security group for nodes'

# Allow full egress
securityGroupRule/node-egress:
securityGroup: securityGroup/kubernetes.node.{{.ClusterName}}
securityGroup: securityGroup/nodes.{{.ClusterName}}
egress: true
cidr: 0.0.0.0/0

# SSH is open to the world
securityGroupRule/ssh-external-to-node:
securityGroup: securityGroup/kubernetes.node.{{.ClusterName}}
securityGroup: securityGroup/nodes.{{.ClusterName}}
cidr: 0.0.0.0/0
protocol: tcp
fromPort: 22
@@ -34,32 +34,36 @@ securityGroupRule/ssh-external-to-node:

# Nodes can talk to nodes
securityGroupRule/all-node-to-node:
securityGroup: securityGroup/kubernetes.node.{{.ClusterName}}
sourceGroup: securityGroup/kubernetes.node.{{.ClusterName}}
securityGroup: securityGroup/nodes.{{.ClusterName}}
sourceGroup: securityGroup/nodes.{{.ClusterName}}

# Nodes can talk masters nodes
securityGroupRule/all-node-to-master:
securityGroup: securityGroup/kubernetes.master.{{ .ClusterName }}
sourceGroup: securityGroup/kubernetes.node.{{.ClusterName}}
securityGroup: securityGroup/masters.{{ .ClusterName }}
sourceGroup: securityGroup/nodes.{{.ClusterName}}

{{ range $nodeset := NodeSets }}

# LaunchConfiguration & ASG for nodes
launchConfiguration/kubernetes.nodes.{{ .ClusterName }}:
sshKey: sshKey/kubernetes.{{ .ClusterName }}
launchConfiguration/{{ $nodeset.Name }}.{{ $.ClusterName }}:
sshKey: sshKey/{{ $.ClusterName }}
securityGroups:
- securityGroup/kubernetes.node.{{ .ClusterName }}
iamInstanceProfile: iamInstanceProfile/kubernetes.node.{{ .ClusterName }}
imageId: {{ .NodeImage }}
instanceType: {{ .NodeMachineType }}
- securityGroup/nodes.{{ $.ClusterName }}
iamInstanceProfile: iamInstanceProfile/nodes.{{ $.ClusterName }}
imageId: {{ $nodeset.Image }}
instanceType: {{ $nodeset.MachineType }}
associatePublicIP: true
userData: resources/nodeup.sh _kubernetes_pool

autoscalingGroup/kubernetes.nodes.{{ .ClusterName }}:
launchConfiguration: launchConfiguration/kubernetes.nodes.{{ .ClusterName }}
minSize: {{ .NodeCount }}
maxSize: {{ .NodeCount }}
autoscalingGroup/{{ $nodeset.Name }}.{{ $.ClusterName }}:
launchConfiguration: launchConfiguration/{{ $nodeset.Name }}.{{ $.ClusterName }}
minSize: {{ or $nodeset.MinSize 2 }}
maxSize: {{ or $nodeset.MaxSize 2 }}
subnets:
{{ range $zone := .NodeZones }}
- subnet/kubernetes.{{ $zone.Name }}.{{ $.ClusterName }}
{{ range $zone := $.Zones }}
- subnet/{{ $zone.Name }}.{{ $.ClusterName }}
{{ end }}
tags:
k8s.io/role: node

{{ end }}
@@ -17,8 +17,8 @@ set -o errexit
set -o nounset
set -o pipefail

NODEUP_TAR_URL={{ .NodeUp.Location }}
NODEUP_TAR_HASH={{ .NodeUp.Hash }}
NODEUP_TAR_URL={{ NodeUpSource }}
NODEUP_TAR_HASH={{ NodeUpSourceHash }}

function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
@@ -1,2 +1,3 @@
sshKey/kubernetes.{{ .ClusterName }}:
sshKey/{{ .ClusterName }}:
name: kubernetes.{{.ClusterName}}
publicKey: resources/ssh-public-key
@@ -1,18 +1,17 @@
MasterImage: k8s-1-2-debian-jessie-amd64-2016-04-17
{{ if gt .NodeCount 500 }}
{{ if gt .TotalNodeCount 500 }}
MasterMachineType: n1-standard-32
{{ else if gt .NodeCount 250 }}
{{ else if gt .TotalNodeCount 250 }}
MasterMachineType: n1-standard-16
{{ else if gt .NodeCount 100 }}
{{ else if gt .TotalNodeCount 100 }}
MasterMachineType: n1-standard-8
{{ else if gt .NodeCount 10 }}
{{ else if gt .TotalNodeCount 10 }}
MasterMachineType: n1-standard-4
{{ else if gt .NodeCount 5 }}
{{ else if gt .TotalNodeCount 5 }}
MasterMachineType: n1-standard-2
{{ else }}
MasterMachineType: n1-standard-1
{{ end }}
MasterVolumeType: pd-ssd

NodeImage: k8s-1-2-debian-jessie-amd64-2016-04-17
NodeMachineType: n1-standard-2
NodeImage: k8s-1-2-debian-jessie-amd64-2016-04-17
@@ -1,12 +1,12 @@
# TODO: Support multiple instance groups
{{ range $nodeset := .NodeSets }}

instanceTemplate/{{ .NodeInstancePrefix }}-template:
instanceTemplate/{{ $nodeset.Key }}-{{ $.NodeInstancePrefix }}-template:
network: network/default
machineType: {{ .NodeMachineType }}
machineType: {{ $nodeset.NodeMachineType }}
# TODO: Make configurable
bootDiskType: pd-standard
bootDiskSizeGB: 100
bootDiskImage: {{ .NodeImage }}
bootDiskImage: {{ $nodeset.NodeImage }}
canIpForward: true
# TODO: Support preemptible nodes?
preemptible: false
@@ -17,7 +17,7 @@ instanceTemplate/{{ .NodeInstancePrefix }}-template:
- storage-ro
metadata:
# kube-env: resources/kube-env
{{ if eq .NodeInit "cloudinit" }}
{{ if eq $.NodeInit "cloudinit" }}
# TODO: we should probably always store the config somewhere
config: resources/cloudinit.yaml _kubernetes_master
{{ else }}
@ -26,13 +26,15 @@ instanceTemplate/{{ .NodeInstancePrefix }}-template:
|
|||
{{ end }}
|
||||
cluster-name: resources/cluster-name
|
||||
tags:
|
||||
- {{ .NodeTag }}
|
||||
- {{ $.NodeTag }}
|
||||
|
||||
managedInstanceGroup/{{ .NodeInstancePrefix }}-group:
|
||||
zone: {{ .Zone }}
|
||||
baseInstanceName: {{ .NodeInstancePrefix }}
|
||||
targetSize: {{ .NodeCount }}
|
||||
instanceTemplate: instanceTemplate/{{ .NodeInstancePrefix }}-template
|
||||
managedInstanceGroup/{{ $nodeset.Key}}-{{ .NodeInstancePrefix }}-group:
|
||||
zone: {{ $.Zone }}
|
||||
baseInstanceName: {{ $nodeset.InstancePrefix }}
|
||||
targetSize: {{ $nodeset.Count }}
|
||||
instanceTemplate: instanceTemplate/{{ $nodeset.Key }}-{{ $nodeset.InstancePrefix }}-template
|
||||
|
||||
{{ end }}
|
||||
|
||||
# Allow traffic from nodes -> nodes
|
||||
firewallRule/{{ .NodeTag }}-all:
|
||||
|
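The instance template and managed instance group are now rendered once per entry in `.NodeSets`, keyed by `$nodeset.Key`. The struct below is only an inferred, hypothetical shape for what each nodeset entry must expose for this template to render; the real NodeSetConfig definition elsewhere in the tree may differ:

```
package cloudup

// NodeSetConfig is sketched here from the fields the template dereferences
// ($nodeset.Key, $nodeset.InstancePrefix, $nodeset.Count, $nodeset.NodeMachineType,
// $nodeset.NodeImage); hypothetical, not the authoritative definition.
type NodeSetConfig struct {
	Key             string // disambiguates per-nodeset resources, e.g. "{{ $nodeset.Key }}-{{ .NodeInstancePrefix }}-template"
	InstancePrefix  string // baseInstanceName of the managed instance group
	Count           int    // targetSize of the managed instance group
	NodeMachineType string // GCE machine type for the instance template
	NodeImage       string // boot disk image for the instance template
}
```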
|
|
@ -17,8 +17,8 @@ set -o errexit
|
|||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
NODEUP_TAR_URL={{ .NodeUp.Location }}
|
||||
NODEUP_TAR_HASH={{ .NodeUp.Hash }}
|
||||
NODEUP_TAR_URL={{ NodeUpSource }}
|
||||
NODEUP_TAR_HASH={{ NodeUpSourceHash }}
|
||||
|
||||
function ensure-basic-networking() {
|
||||
# Deal with GCE networking bring-up race. (We rely on DNS for a lot,
|
||||
|
|
|
@ -14,3 +14,4 @@ APIServer:
|
|||
TokenAuthFile: /srv/kubernetes/known_tokens.csv
|
||||
LogLevel: 2
|
||||
AllowPrivileged: true
|
||||
Image: gcr.io/google_containers/kube-apiserver:v{{ .KubernetesVersion }}
|
|
@ -6,6 +6,8 @@ KubeControllerManager:
|
|||
ServiceAccountPrivateKeyFile: /srv/kubernetes/server.key
|
||||
LogLevel: 2
|
||||
RootCAFile: /srv/kubernetes/ca.crt
|
||||
ClusterName: {{ .ClusterName }}
|
||||
Image: gcr.io/google_containers/kube-controller-manager:v{{ .KubernetesVersion }}
|
||||
# Doesn't seem to be any real downside to always doing a leader election
|
||||
LeaderElection:
|
||||
LeaderElect: true
|
|
@ -1,4 +1,4 @@
|
|||
DNS:
|
||||
KubeDNS:
|
||||
Replicas: 1
|
||||
ServerIP: 10.0.0.10
|
||||
Domain: cluster.local
|
|
@ -7,4 +7,6 @@ KubeProxy:
|
|||
# requests of other per-node add-ons (e.g. fluentd).
|
||||
CPURequest: 20m
|
||||
|
||||
Image: gcr.io/google_containers/kube-proxy:v{{ .KubernetesVersion }}
|
||||
|
||||
Master: https://{{ .MasterInternalName }}
|
|
@ -1,6 +1,7 @@
|
|||
KubeScheduler:
|
||||
Master: 127.0.0.1:8080
|
||||
LogLevel: 2
|
||||
Image: gcr.io/google_containers/kube-scheduler:v{{ .KubernetesVersion }}
|
||||
# Doesn't seem to be any real downside to always doing a leader election
|
||||
LeaderElection:
|
||||
LeaderElect: true
|
|
@ -1,4 +1,4 @@
|
|||
Kubelet:
|
||||
CloudProvider: aws
|
||||
CgroupRoot: docker
|
||||
NonMasqueradeCdir: 10.0.0.0/8
|
||||
NonMasqueradeCidr: 10.0.0.0/8
|
|
@ -8,3 +8,10 @@ Kubelet:
|
|||
ConfigureCBR0: true
|
||||
BabysitDaemons: true
|
||||
APIServers: https://{{ .MasterInternalName }}
|
||||
|
||||
MasterKubelet:
|
||||
RegisterSchedulable: false
|
||||
ReconcileCIDR: false
|
||||
EnableDebuggingHandlers: false
|
||||
HairpinMode: none
|
||||
PodCIDR: 10.123.45.0/30
|
|
@ -1,4 +1,4 @@
|
|||
InstancePrefix: kubernetes
|
||||
#InstancePrefix: kubernetes
|
||||
AllocateNodeCIDRs: true
|
||||
Multizone: true
|
||||
|
||||
|
@ -9,26 +9,21 @@ NetworkProvider: none
|
|||
|
||||
AdmissionControl: NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,PersistentVolumeLabel
|
||||
|
||||
EnableClusterMonitoring: none
|
||||
EnableL7LoadBalancing: none
|
||||
EnableClusterUI: true
|
||||
#EnableClusterMonitoring: none
|
||||
#EnableL7LoadBalancing: none
|
||||
#EnableClusterUI: true
|
||||
|
||||
EnableClusterDNS: true
|
||||
DNSReplicas: 1
|
||||
DNSServerIP: 10.0.0.10
|
||||
#EnableClusterDNS: true
|
||||
#DNSReplicas: 1
|
||||
#DNSServerIP: 100.64.0.10
|
||||
DNSDomain: cluster.local
|
||||
|
||||
EnableClusterLogging: true
|
||||
EnableNodeLogging: true
|
||||
LoggingDestination: elasticsearch
|
||||
ElasticsearchLoggingReplicas: 1
|
||||
#EnableClusterLogging: true
|
||||
#EnableNodeLogging: true
|
||||
#LoggingDestination: elasticsearch
|
||||
#ElasticsearchLoggingReplicas: 1
|
||||
|
||||
MasterName: {{ .InstancePrefix }}-master
|
||||
MasterTag: {{ .InstancePrefix }}-master
|
||||
MasterVolumeSize: 20
|
||||
#MasterVolumeSize: 20
|
||||
|
||||
NodeCount: 2
|
||||
NodeTag: {{ .InstancePrefix }}-minion
|
||||
NodeInstancePrefix: {{ .InstancePrefix }}-minion
|
||||
|
||||
KubeUser: admin
|
|
@ -1,37 +1,15 @@
|
|||
NodeUp:
|
||||
Location: https://kubeupv2.s3.amazonaws.com/nodeup/nodeup.tar.gz
|
||||
|
||||
APIServer:
|
||||
Image: gcr.io/google_containers/kube-apiserver:v{{ .KubernetesVersion }}
|
||||
|
||||
KubeControllerManager:
|
||||
ClusterName: {{ .ClusterName }}
|
||||
Image: gcr.io/google_containers/kube-controller-manager:v{{ .KubernetesVersion }}
|
||||
|
||||
KubeScheduler:
|
||||
Image: gcr.io/google_containers/kube-scheduler:v{{ .KubernetesVersion }}
|
||||
|
||||
KubeProxy:
|
||||
Image: gcr.io/google_containers/kube-proxy:v{{ .KubernetesVersion }}
|
||||
|
||||
MasterInternalName: {{ .MasterInternalName }}
|
||||
DNSZone: {{ .DNSZone }}
|
||||
|
||||
KubeUser: {{ .KubeUser }}
|
||||
|
||||
Tags:
|
||||
{{ range $tag := Args }}
|
||||
- {{ $tag }}
|
||||
{{ end }}
|
||||
{{ range $tag := .NodeUpTags }}
|
||||
{{ range $tag := NodeUpTags }}
|
||||
- {{ $tag }}
|
||||
{{ end }}
|
||||
|
||||
|
||||
Assets:
|
||||
{{ range $asset := .Assets }}
|
||||
{{ range $asset := Assets }}
|
||||
- {{ $asset }}
|
||||
{{ end }}
|
||||
|
||||
KeyStore: {{ .KeyStore }}
|
||||
SecretStore: {{ .SecretStore }}
|
||||
ConfigStore: {{ .ConfigStore }}
|
||||
ClusterLocation: {{ ClusterLocation }}
|
||||
|
|
|
@ -9,9 +9,6 @@ spec:
|
|||
containers:
|
||||
- name: kope-aws
|
||||
image: kope/aws-controller
|
||||
#resources:
|
||||
# requests:
|
||||
# cpu: {{ .KubeProxy.CPURequest }}
|
||||
command:
|
||||
- /usr/bin/aws-controller
|
||||
- --healthz-port=10245
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
/srv/kubernetes - secrets
|
||||
|
||||
/srv/sshproxy - not used in "normal" environments? Contains SSH keypairs for tunnelling. Secrets, really.
|
||||
|
||||
/var/etcd - the etcd data volume. This should be a direct EBS volume
|
|
@ -1,2 +0,0 @@
|
|||
device: /dev/xvdb
|
||||
mountpoint: /mnt/master-pd
|
|
@ -1,2 +0,0 @@
|
|||
device: /dev/disk/by-id/google-master-pd
|
||||
mountpoint: /mnt/master-pd
|
|
@ -1 +0,0 @@
|
|||
directory: true
|
|
@ -1 +0,0 @@
|
|||
# Needed to add empty directory to git
|
|
@ -1 +0,0 @@
|
|||
directory: true
|
|
@ -1 +0,0 @@
|
|||
# Needed to add empty directory to git
|
|
@ -1,4 +0,0 @@
|
|||
owner: etcd
|
||||
group: etcd
|
||||
directory: true
|
||||
mode: "0700"
|
|
@ -1 +0,0 @@
|
|||
# Needed to add empty directory to git
|
|
@ -1 +0,0 @@
|
|||
symlink: /mnt/master-pd/srv/kubernetes
|
|
@ -1 +0,0 @@
|
|||
symlink: /mnt/master-pd/srv/sshproxy
|
|
@ -1 +0,0 @@
|
|||
symlink: /mnt/master-pd/var/etcd
|
|
@ -1 +1 @@
|
|||
{{ GetToken "kube" }},{{ .KubeUser }},admin
|
||||
{{ GetToken "kube" }},admin,admin
|
||||
|
|
|
@ -1,119 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: kube-dns-v10
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: {{ .DNS.Replicas }}
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
version: v10
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: etcd
|
||||
image: gcr.io/google_containers/etcd:2.0.9
|
||||
resources:
|
||||
# keep request = limit to keep this container in guaranteed class
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
command:
|
||||
- /usr/local/bin/etcd
|
||||
- -data-dir
|
||||
- /var/etcd/data
|
||||
- -listen-client-urls
|
||||
- http://127.0.0.1:2379,http://127.0.0.1:4001
|
||||
- -advertise-client-urls
|
||||
- http://127.0.0.1:2379,http://127.0.0.1:4001
|
||||
- -initial-cluster-token
|
||||
- skydns-etcd
|
||||
volumeMounts:
|
||||
- name: etcd-storage
|
||||
mountPath: /var/etcd/data
|
||||
- name: kube2sky
|
||||
image: gcr.io/google_containers/kube2sky:1.12
|
||||
resources:
|
||||
# keep request = limit to keep this container in guaranteed class
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
command:
|
||||
- /kube2sky
|
||||
args:
|
||||
- -domain={{ .DNS.Domain }}
|
||||
- name: skydns
|
||||
image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
|
||||
resources:
|
||||
# keep request = limit to keep this container in guaranteed class
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
command:
|
||||
- /skydns
|
||||
args:
|
||||
- -machines=http://127.0.0.1:4001
|
||||
- -addr=0.0.0.0:53
|
||||
- -ns-rotate=false
|
||||
- -domain={{ .DNS.Domain }}.
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 1
|
||||
timeoutSeconds: 5
|
||||
- name: healthz
|
||||
image: gcr.io/google_containers/exechealthz:1.0
|
||||
resources:
|
||||
# keep request = limit to keep this container in guaranteed class
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
command:
|
||||
- /exechealthz
|
||||
args:
|
||||
- -cmd=nslookup kubernetes.default.svc.{{ .DNS.Domain }} 127.0.0.1 >/dev/null
|
||||
- -port=8080
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
protocol: TCP
|
||||
volumes:
|
||||
- name: etcd-storage
|
||||
emptyDir: {}
|
||||
dnsPolicy: Default # Don't use cluster DNS.
|
|
@ -1,20 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "KubeDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: {{ .DNS.ServerIP }}
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
|
@ -24,7 +24,7 @@ metadata:
|
|||
version: v14
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: {{ .DNS.Replicas }}
|
||||
replicas: {{ .KubeDNS.Replicas }}
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
version: v14
|
||||
|
@ -69,7 +69,7 @@ spec:
|
|||
timeoutSeconds: 5
|
||||
args:
|
||||
# command = "/kube-dns"
|
||||
- --domain={{ .DNS.Domain }}.
|
||||
- --domain={{ .KubeDNS.Domain }}.
|
||||
- --dns-port=10053
|
||||
ports:
|
||||
- containerPort: 10053
|
||||
|
@ -102,7 +102,7 @@ spec:
|
|||
cpu: 10m
|
||||
memory: 20Mi
|
||||
args:
|
||||
- -cmd=nslookup kubernetes.default.svc.{{ .DNS.Domain }} 127.0.0.1 >/dev/null
|
||||
- -cmd=nslookup kubernetes.default.svc.{{ .KubeDNS.Domain }} 127.0.0.1 >/dev/null
|
||||
- -port=8080
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
|
|
|
@ -26,7 +26,7 @@ metadata:
|
|||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: {{ .DNS.ServerIP }}
|
||||
clusterIP: {{ .KubeDNS.ServerIP }}
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
|
|
|
@ -8,14 +8,14 @@ spec:
|
|||
hostNetwork: true
|
||||
containers:
|
||||
- name: kube-proxy
|
||||
image: {{ .KubeProxy.Image }}
|
||||
image: {{ KubeProxy.Image }}
|
||||
resources:
|
||||
requests:
|
||||
cpu: {{ .KubeProxy.CPURequest }}
|
||||
cpu: {{ KubeProxy.CPURequest }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- kube-proxy --kubeconfig=/var/lib/kube-proxy/kubeconfig --resource-container="" {{ BuildFlags .KubeProxy }} 1>>/var/log/kube-proxy.log 2>&1
|
||||
- kube-proxy --kubeconfig=/var/lib/kube-proxy/kubeconfig --resource-container="" {{ BuildFlags KubeProxy }} 1>>/var/log/kube-proxy.log 2>&1
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
|
|
|
@ -1 +1 @@
|
|||
DAEMON_ARGS="{{ BuildFlags .Kubelet }}"
|
||||
DAEMON_ARGS="{{ BuildFlags Kubelet }}"
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
Kubelet:
|
||||
RegisterSchedulable: false
|
||||
ReconcileCIDR: false
|
||||
EnableDebuggingHandlers: false
|
||||
HairpinMode: none
|
||||
PodCIDR: 10.123.45.0/30
|
||||
|
|
@ -1,13 +1,14 @@
|
|||
{{ range $zone := .MasterZones }}
|
||||
{{ range $vol := .MasterVolumes }}
|
||||
|
||||
# EBS volume per zone, for etcd
|
||||
ebsVolume/kubernetes.master.{{$zone}}.{{ $.ClusterName }}:
|
||||
availabilityZone: {{ $zone }}
|
||||
sizeGB: {{ or $.MasterVolumeSize 20 }}
|
||||
volumeType: {{ or $.MasterVolumeType "gp2" }}
|
||||
ebsVolume/{{$vol.Name}}.{{ $.ClusterName }}:
|
||||
availabilityZone: {{ $vol.Zone }}
|
||||
sizeGB: {{ or $vol.Size 20 }}
|
||||
volumeType: {{ or $vol.Type "gp2" }}
|
||||
tags:
|
||||
k8s.io/role/master: "1"
|
||||
k8s.io/etcd/main: "{{ $zone }}/{{ join $.MasterZones "," }}"
|
||||
k8s.io/etcd/events: "{{ $zone }}/{{ join $.MasterZones "," }}"
|
||||
{{ range $k, $v := $vol.Roles }}
|
||||
k8s.io/{{$k}}: "{{$v}}"
|
||||
{{ end }}
|
||||
|
||||
{{ end }}
|
||||
|
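The etcd volume template now ranges over `.MasterVolumes` (declared later in this commit as `MasterVolumes []*VolumeConfig`) instead of `.MasterZones`, with a per-volume name, zone, size, type, and a Roles map rendered into `k8s.io/...` tags. The sketch below infers the fields each volume entry must carry from what the template uses; it is hypothetical, not the authoritative type:

```
package cloudup

// VolumeConfig is sketched from the fields the template uses
// ($vol.Name, $vol.Zone, $vol.Size, $vol.Type, $vol.Roles); hypothetical only.
type VolumeConfig struct {
	Name  string            // rendered as ebsVolume/<name>.<clustername>
	Zone  string            // availability zone for the EBS volume
	Size  int               // size in GB; the template falls back to 20 when unset
	Type  string            // EBS volume type; the template falls back to "gp2"
	Roles map[string]string // rendered as tags, e.g. "role/master": "1"
}
```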
|
|
@ -153,10 +153,14 @@ func (e *LaunchConfiguration) Run(c *fi.Context) error {
|
|||
}
|
||||
|
||||
func (s *LaunchConfiguration) CheckChanges(a, e, changes *LaunchConfiguration) error {
|
||||
if e.ImageID == nil {
|
||||
return fi.RequiredField("ImageID")
|
||||
}
|
||||
if e.InstanceType == nil {
|
||||
return fi.RequiredField("InstanceType")
|
||||
}
|
||||
|
||||
if a != nil {
|
||||
if e.InstanceType == nil {
|
||||
return fi.RequiredField("InstanceType")
|
||||
}
|
||||
if e.Name == nil {
|
||||
return fi.RequiredField("Name")
|
||||
}
|
||||
|
|
|
@ -0,0 +1,575 @@
|
|||
package cloudup
|
||||
|
||||
// Configuration for each component
|
||||
// Wherever possible, we try to use the types & names in https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
type KubeletConfig struct {
|
||||
APIServers string `json:",omitempty" flag:"api-servers"`
|
||||
|
||||
LogLevel *int `json:",omitempty" flag:"v"`
|
||||
|
||||
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
// config is the path to the config file or directory of files
|
||||
Config string `json:"config,omitempty" flag:"config"`
|
||||
//// syncFrequency is the max period between synchronizing running
|
||||
//// containers and config
|
||||
//SyncFrequency unversioned.Duration `json:"syncFrequency"`
|
||||
//// fileCheckFrequency is the duration between checking config files for
|
||||
//// new data
|
||||
//FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"`
|
||||
//// httpCheckFrequency is the duration between checking http for new data
|
||||
//HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"`
|
||||
//// manifestURL is the URL for accessing the container manifest
|
||||
//ManifestURL string `json:"manifestURL"`
|
||||
//// manifestURLHeader is the HTTP header to use when accessing the manifest
|
||||
//// URL, with the key separated from the value with a ':', as in 'key:value'
|
||||
//ManifestURLHeader string `json:"manifestURLHeader"`
|
||||
//// enableServer enables the Kubelet's server
|
||||
//EnableServer bool `json:"enableServer"`
|
||||
//// address is the IP address for the Kubelet to serve on (set to 0.0.0.0
|
||||
//// for all interfaces)
|
||||
//Address string `json:"address"`
|
||||
//// port is the port for the Kubelet to serve on.
|
||||
//Port uint `json:"port"`
|
||||
//// readOnlyPort is the read-only port for the Kubelet to serve on with
|
||||
//// no authentication/authorization (set to 0 to disable)
|
||||
//ReadOnlyPort uint `json:"readOnlyPort"`
|
||||
//// tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert,
|
||||
//// if any, concatenated after server cert). If tlsCertFile and
|
||||
//// tlsPrivateKeyFile are not provided, a self-signed certificate
|
||||
//// and key are generated for the public address and saved to the directory
|
||||
//// passed to certDir.
|
||||
//TLSCertFile string `json:"tLSCertFile"`
|
||||
//// tLSPrivateKeyFile is the file containing x509 private key matching
|
||||
//// tlsCertFile.
|
||||
//TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"`
|
||||
//// certDirectory is the directory where the TLS certs are located (by
|
||||
//// default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile
|
||||
//// are provided, this flag will be ignored.
|
||||
//CertDirectory string `json:"certDirectory"`
|
||||
//// hostnameOverride is the hostname used to identify the kubelet instead
|
||||
//// of the actual hostname.
|
||||
//HostnameOverride string `json:"hostnameOverride"`
|
||||
//// podInfraContainerImage is the image whose network/ipc namespaces
|
||||
//// containers in each pod will use.
|
||||
//PodInfraContainerImage string `json:"podInfraContainerImage"`
|
||||
//// dockerEndpoint is the path to the docker endpoint to communicate with.
|
||||
//DockerEndpoint string `json:"dockerEndpoint"`
|
||||
//// rootDirectory is the directory path to place kubelet files (volume
|
||||
//// mounts,etc).
|
||||
//RootDirectory string `json:"rootDirectory"`
|
||||
//// seccompProfileRoot is the directory path for seccomp profiles.
|
||||
//SeccompProfileRoot string `json:"seccompProfileRoot"`
|
||||
// allowPrivileged enables containers to request privileged mode.
|
||||
// Defaults to false.
|
||||
AllowPrivileged *bool `json:"allowPrivileged,omitempty" flag:"allow-privileged"`
|
||||
//// hostNetworkSources is a comma-separated list of sources from which the
|
||||
//// Kubelet allows pods to use of host network. Defaults to "*".
|
||||
//HostNetworkSources string `json:"hostNetworkSources"`
|
||||
//// hostPIDSources is a comma-separated list of sources from which the
|
||||
//// Kubelet allows pods to use the host pid namespace. Defaults to "*".
|
||||
//HostPIDSources string `json:"hostPIDSources"`
|
||||
//// hostIPCSources is a comma-separated list of sources from which the
|
||||
//// Kubelet allows pods to use the host ipc namespace. Defaults to "*".
|
||||
//HostIPCSources string `json:"hostIPCSources"`
|
||||
//// registryPullQPS is the limit of registry pulls per second. If 0,
|
||||
//// unlimited. Set to 0 for no limit. Defaults to 5.0.
|
||||
//RegistryPullQPS float64 `json:"registryPullQPS"`
|
||||
//// registryBurst is the maximum size of a bursty pulls, temporarily allows
|
||||
//// pulls to burst to this number, while still not exceeding registryQps.
|
||||
//// Only used if registryQps > 0.
|
||||
//RegistryBurst int32 `json:"registryBurst"`
|
||||
//// eventRecordQPS is the maximum event creations per second. If 0, there
|
||||
//// is no limit enforced.
|
||||
//EventRecordQPS float32 `json:"eventRecordQPS"`
|
||||
//// eventBurst is the maximum size of a bursty event records, temporarily
|
||||
//// allows event records to burst to this number, while still not exceeding
|
||||
//// event-qps. Only used if eventQps > 0
|
||||
//EventBurst int32 `json:"eventBurst"`
|
||||
// enableDebuggingHandlers enables server endpoints for log collection
|
||||
// and local running of containers and commands
|
||||
EnableDebuggingHandlers *bool `json:"enableDebuggingHandlers,omitempty" flag:"enable-debugging-handlers"`
|
||||
//// minimumGCAge is the minimum age for a finished container before it is
|
||||
//// garbage collected.
|
||||
//MinimumGCAge unversioned.Duration `json:"minimumGCAge"`
|
||||
//// maxPerPodContainerCount is the maximum number of old instances to
|
||||
//// retain per container. Each container takes up some disk space.
|
||||
//MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"`
|
||||
//// maxContainerCount is the maximum number of old instances of containers
|
||||
//// to retain globally. Each container takes up some disk space.
|
||||
//MaxContainerCount int32 `json:"maxContainerCount"`
|
||||
//// cAdvisorPort is the port of the localhost cAdvisor endpoint
|
||||
//CAdvisorPort uint `json:"cAdvisorPort"`
|
||||
//// healthzPort is the port of the localhost healthz endpoint
|
||||
//HealthzPort int32 `json:"healthzPort"`
|
||||
//// healthzBindAddress is the IP address for the healthz server to serve
|
||||
//// on.
|
||||
//HealthzBindAddress string `json:"healthzBindAddress"`
|
||||
//// oomScoreAdj is The oom-score-adj value for kubelet process. Values
|
||||
//// must be within the range [-1000, 1000].
|
||||
//OOMScoreAdj int32 `json:"oomScoreAdj"`
|
||||
//// registerNode enables automatic registration with the apiserver.
|
||||
//RegisterNode bool `json:"registerNode"`
|
||||
// clusterDomain is the DNS domain for this cluster. If set, kubelet will
|
||||
// configure all containers to search this domain in addition to the
|
||||
// host's search domains.
|
||||
ClusterDomain string `json:"clusterDomain,omitempty" flag:"cluster-domain"`
|
||||
//// masterServiceNamespace is The namespace from which the kubernetes
|
||||
//// master services should be injected into pods.
|
||||
//MasterServiceNamespace string `json:"masterServiceNamespace"`
|
||||
// clusterDNS is the IP address for a cluster DNS server. If set, kubelet
|
||||
// will configure all containers to use this for DNS resolution in
|
||||
// addition to the host's DNS servers
|
||||
ClusterDNS string `json:"clusterDNS,omitempty" flag:"cluster-dns"`
|
||||
//// streamingConnectionIdleTimeout is the maximum time a streaming connection
|
||||
//// can be idle before the connection is automatically closed.
|
||||
//StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"`
|
||||
//// nodeStatusUpdateFrequency is the frequency that kubelet posts node
|
||||
//// status to master. Note: be cautious when changing the constant, it
|
||||
//// must work with nodeMonitorGracePeriod in nodecontroller.
|
||||
//NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"`
|
||||
//// minimumGCAge is the minimum age for an unused image before it is
|
||||
//// garbage collected.
|
||||
//ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"`
|
||||
//// imageGCHighThresholdPercent is the percent of disk usage after which
|
||||
//// image garbage collection is always run.
|
||||
//ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"`
|
||||
//// imageGCLowThresholdPercent is the percent of disk usage before which
|
||||
//// image garbage collection is never run. Lowest disk usage to garbage
|
||||
//// collect to.
|
||||
//ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"`
|
||||
//// lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to
|
||||
//// maintain. When disk space falls below this threshold, new pods would
|
||||
//// be rejected.
|
||||
//LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"`
|
||||
//// How frequently to calculate and cache volume disk usage for all pods
|
||||
//VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"`
|
||||
//// networkPluginName is the name of the network plugin to be invoked for
|
||||
//// various events in kubelet/pod lifecycle
|
||||
//NetworkPluginName string `json:"networkPluginName"`
|
||||
//// networkPluginDir is the full path of the directory in which to search
|
||||
//// for network plugins
|
||||
//NetworkPluginDir string `json:"networkPluginDir"`
|
||||
//// volumePluginDir is the full path of the directory in which to search
|
||||
//// for additional third party volume plugins
|
||||
//VolumePluginDir string `json:"volumePluginDir"`
|
||||
// cloudProvider is the provider for cloud services.
|
||||
CloudProvider string `json:"cloudProvider,omitempty" flag:"cloud-provider"`
|
||||
//// cloudConfigFile is the path to the cloud provider configuration file.
|
||||
//CloudConfigFile string `json:"cloudConfigFile,omitempty"`
|
||||
// KubeletCgroups is the absolute name of cgroups to isolate the kubelet in.
|
||||
KubeletCgroups string `json:"kubeletCgroups,omitempty" flag:"kubelet-cgroups"`
|
||||
// Cgroups that the container runtime is expected to be isolated in.
|
||||
RuntimeCgroups string `json:"runtimeCgroups,omitempty" flag:"runtime-cgroups"`
|
||||
// SystemCgroups is absolute name of cgroups in which to place
|
||||
// all non-kernel processes that are not already in a container. Empty
|
||||
// for no container. Rolling back the flag requires a reboot.
|
||||
SystemCgroups string `json:"systemContainer,omitempty" flag:"system-cgroups"`
|
||||
// cgroupRoot is the root cgroup to use for pods. This is handled by the
|
||||
// container runtime on a best effort basis.
|
||||
CgroupRoot string `json:"cgroupRoot,omitempty" flag:"cgroup-root"`
|
||||
//// containerRuntime is the container runtime to use.
|
||||
//ContainerRuntime string `json:"containerRuntime"`
|
||||
//// rktPath is the path of rkt binary. Leave empty to use the first rkt in
|
||||
//// $PATH.
|
||||
//RktPath string `json:"rktPath,omitempty"`
|
||||
//// rktApiEndpoint is the endpoint of the rkt API service to communicate with.
|
||||
//RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"`
|
||||
//// rktStage1Image is the image to use as stage1. Local paths and
|
||||
//// http/https URLs are supported.
|
||||
//RktStage1Image string `json:"rktStage1Image,omitempty"`
|
||||
//// lockFilePath is the path that kubelet will use to as a lock file.
|
||||
//// It uses this file as a lock to synchronize with other kubelet processes
|
||||
//// that may be running.
|
||||
//LockFilePath string `json:"lockFilePath"`
|
||||
//// ExitOnLockContention is a flag that signifies to the kubelet that it is running
|
||||
//// in "bootstrap" mode. This requires that 'LockFilePath' has been set.
|
||||
//// This will cause the kubelet to listen to inotify events on the lock file,
|
||||
//// releasing it and exiting when another process tries to open that file.
|
||||
//ExitOnLockContention bool `json:"exitOnLockContention"`
|
||||
// configureCBR0 enables the kubelet to configure cbr0 based on
|
||||
// Node.Spec.PodCIDR.
|
||||
ConfigureCBR0 *bool `json:"configureCbr0,omitempty" flag:"configure-cbr0"`
|
||||
// How should the kubelet configure the container bridge for hairpin packets.
|
||||
// Setting this flag allows endpoints in a Service to loadbalance back to
|
||||
// themselves if they should try to access their own Service. Values:
|
||||
// "promiscuous-bridge": make the container bridge promiscuous.
|
||||
// "hairpin-veth": set the hairpin flag on container veth interfaces.
|
||||
// "none": do nothing.
|
||||
// Setting --configure-cbr0 to false implies that to achieve hairpin NAT
|
||||
// one must set --hairpin-mode=veth-flag, because bridge assumes the
|
||||
// existence of a container bridge named cbr0.
|
||||
HairpinMode string `json:"hairpinMode,omitempty" flag:"hairpin-mode"`
|
||||
// The node has babysitter process monitoring docker and kubelet.
|
||||
BabysitDaemons *bool `json:"babysitDaemons,omitempty" flag:"babysit-daemons"`
|
||||
//// maxPods is the number of pods that can run on this Kubelet.
|
||||
//MaxPods int32 `json:"maxPods"`
|
||||
//// nvidiaGPUs is the number of NVIDIA GPU devices on this node.
|
||||
//NvidiaGPUs int32 `json:"nvidiaGPUs"`
|
||||
//// dockerExecHandlerName is the handler to use when executing a command
|
||||
//// in a container. Valid values are 'native' and 'nsenter'. Defaults to
|
||||
//// 'native'.
|
||||
//DockerExecHandlerName string `json:"dockerExecHandlerName"`
|
||||
// The CIDR to use for pod IP addresses, only used in standalone mode.
|
||||
// In cluster mode, this is obtained from the master.
|
||||
PodCIDR string `json:"podCIDR,omitempty" flag:"pod-cidr"`
|
||||
//// ResolverConfig is the resolver configuration file used as the basis
|
||||
//// for the container DNS resolution configuration."), []
|
||||
//ResolverConfig string `json:"resolvConf"`
|
||||
//// cpuCFSQuota is Enable CPU CFS quota enforcement for containers that
|
||||
//// specify CPU limits
|
||||
//CPUCFSQuota bool `json:"cpuCFSQuota"`
|
||||
//// containerized should be set to true if kubelet is running in a container.
|
||||
//Containerized bool `json:"containerized"`
|
||||
//// maxOpenFiles is Number of files that can be opened by Kubelet process.
|
||||
//MaxOpenFiles uint64 `json:"maxOpenFiles"`
|
||||
// reconcileCIDR is Reconcile node CIDR with the CIDR specified by the
|
||||
// API server. No-op if register-node or configure-cbr0 is false.
|
||||
ReconcileCIDR *bool `json:"reconcileCIDR,omitempty" flag:"reconcile-cidr"`
|
||||
// registerSchedulable tells the kubelet to register the node as
|
||||
// schedulable. No-op if register-node is false.
|
||||
RegisterSchedulable *bool `json:"registerSchedulable,omitempty" flag:"register-schedulable"`
|
||||
//// contentType is contentType of requests sent to apiserver.
|
||||
//ContentType string `json:"contentType"`
|
||||
//// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver
|
||||
//KubeAPIQPS float32 `json:"kubeAPIQPS"`
|
||||
//// kubeAPIBurst is the burst to allow while talking with kubernetes
|
||||
//// apiserver
|
||||
//KubeAPIBurst int32 `json:"kubeAPIBurst"`
|
||||
//// serializeImagePulls when enabled, tells the Kubelet to pull images one
|
||||
//// at a time. We recommend *not* changing the default value on nodes that
|
||||
//// run docker daemon with version < 1.9 or an Aufs storage backend.
|
||||
//// Issue #10959 has more details.
|
||||
//SerializeImagePulls bool `json:"serializeImagePulls"`
|
||||
//// experimentalFlannelOverlay enables experimental support for starting the
|
||||
//// kubelet with the default overlay network (flannel). Assumes flanneld
|
||||
//// is already running in client mode.
|
||||
//ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
|
||||
//// outOfDiskTransitionFrequency is duration for which the kubelet has to
|
||||
//// wait before transitioning out of out-of-disk node condition status.
|
||||
//OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"`
|
||||
//// nodeIP is IP address of the node. If set, kubelet will use this IP
|
||||
//// address for the node.
|
||||
//NodeIP string `json:"nodeIP,omitempty"`
|
||||
//// nodeLabels to add when registering the node in the cluster.
|
||||
//NodeLabels map[string]string `json:"nodeLabels"`
|
||||
// nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.
|
||||
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty" flag:"non-masquerade-cidr"`
|
||||
//// enable gathering custom metrics.
|
||||
//EnableCustomMetrics bool `json:"enableCustomMetrics"`
|
||||
//// Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'.
|
||||
//EvictionHard string `json:"evictionHard,omitempty"`
|
||||
//// Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'.
|
||||
//EvictionSoft string `json:"evictionSoft,omitempty"`
|
||||
//// Comma-delimited list of grace periods for each soft eviction signal. For example, 'memory.available=30s'.
|
||||
//EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"`
|
||||
//// Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
|
||||
//EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"`
|
||||
//// Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
|
||||
//EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"`
|
||||
//// Maximum number of pods per core. Cannot exceed MaxPods
|
||||
//PodsPerCore int32 `json:"podsPerCore"`
|
||||
//// enableControllerAttachDetach enables the Attach/Detach controller to
|
||||
//// manage attachment/detachment of volumes scheduled to this node, and
|
||||
//// disables kubelet from executing any attach/detach operations
|
||||
//EnableControllerAttachDetach bool `json:"enableControllerAttachDetach"`
|
||||
|
||||
}
|
||||
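Most fields in these component configuration structs carry a `flag:"..."` tag, which the manifests turn into command-line flags (see the `DAEMON_ARGS="{{ BuildFlags Kubelet }}"` hunk above). Purely as an illustration, and not the project's actual BuildFlags implementation, such tags could be walked with reflection roughly like this:

```
// buildFlags is a hypothetical sketch of turning `flag:"..."` struct tags into
// command-line arguments, in the spirit of the BuildFlags template function.
package main

import (
	"fmt"
	"reflect"
	"strings"
)

func buildFlags(options interface{}) string {
	var args []string

	v := reflect.ValueOf(options)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()

	for i := 0; i < t.NumField(); i++ {
		flagName := t.Field(i).Tag.Get("flag")
		if flagName == "" {
			continue // field has no corresponding command-line flag
		}

		field := v.Field(i)
		if field.Kind() == reflect.Ptr {
			if field.IsNil() {
				continue // unset optional value, e.g. *bool
			}
			field = field.Elem()
		}

		// Skip empty strings so omitempty-style fields stay off the command line.
		if field.Kind() == reflect.String && field.String() == "" {
			continue
		}

		args = append(args, fmt.Sprintf("--%s=%v", flagName, field.Interface()))
	}

	return strings.Join(args, " ")
}

func main() {
	// A reduced copy of a few KubeletConfig fields from the diff above.
	type KubeletConfig struct {
		APIServers    string `json:",omitempty" flag:"api-servers"`
		CloudProvider string `json:"cloudProvider,omitempty" flag:"cloud-provider"`
		ReconcileCIDR *bool  `json:"reconcileCIDR,omitempty" flag:"reconcile-cidr"`
	}

	f := false
	cfg := &KubeletConfig{
		APIServers:    "https://api.internal.example.com", // placeholder endpoint
		CloudProvider: "aws",
		ReconcileCIDR: &f,
	}
	fmt.Println(buildFlags(cfg))
	// Illustrative output: --api-servers=https://api.internal.example.com --cloud-provider=aws --reconcile-cidr=false
}
```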
|
||||
type KubeProxyConfig struct {
|
||||
Image string `json:",omitempty"`
|
||||
// TODO: Better type ?
|
||||
CPURequest string `json:",omitempty"` // e.g. "20m"
|
||||
|
||||
LogLevel int `json:",omitempty" flag:"v"`
|
||||
|
||||
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
//// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
|
||||
//// for all interfaces)
|
||||
//BindAddress string `json:"bindAddress"`
|
||||
//// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
|
||||
//// bridge traffic coming from outside of the cluster. If not provided,
|
||||
//// no off-cluster bridging will be performed.
|
||||
//ClusterCIDR string `json:"clusterCIDR"`
|
||||
//// healthzBindAddress is the IP address for the health check server to serve on,
|
||||
//// defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
|
||||
//HealthzBindAddress string `json:"healthzBindAddress"`
|
||||
//// healthzPort is the port to bind the health check server. Use 0 to disable.
|
||||
//HealthzPort int32 `json:"healthzPort"`
|
||||
//// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
|
||||
//HostnameOverride string `json:"hostnameOverride"`
|
||||
//// iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
|
||||
//// the pure iptables proxy mode. Values must be within the range [0, 31].
|
||||
//IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"`
|
||||
//// iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
|
||||
//// '2h22m'). Must be greater than 0.
|
||||
//IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"`
|
||||
//// kubeconfigPath is the path to the kubeconfig file with authorization information (the
|
||||
//// master location is set by the master flag).
|
||||
//KubeconfigPath string `json:"kubeconfigPath"`
|
||||
//// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
|
||||
//MasqueradeAll bool `json:"masqueradeAll"`
|
||||
// master is the address of the Kubernetes API server (overrides any value in kubeconfig)
|
||||
Master string `json:"master,omitempty" flag:"master"`
|
||||
//// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
|
||||
//// the range [-1000, 1000]
|
||||
//OOMScoreAdj *int32 `json:"oomScoreAdj"`
|
||||
//// mode specifies which proxy mode to use.
|
||||
//Mode ProxyMode `json:"mode"`
|
||||
//// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
|
||||
//// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
|
||||
//PortRange string `json:"portRange"`
|
||||
//// resourceContainer is the absolute name of the resource-only container to create and run
|
||||
//// the Kube-proxy in (Default: /kube-proxy).
|
||||
//ResourceContainer string `json:"resourceContainer"`
|
||||
//// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
|
||||
//// Must be greater than 0. Only applicable for proxyMode=userspace.
|
||||
//UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
|
||||
//// conntrackMax is the maximum number of NAT connections to track (0 to leave as-is)")
|
||||
//ConntrackMax int32 `json:"conntrackMax"`
|
||||
//// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
|
||||
//// (e.g. '250ms', '2s'). Must be greater than 0. Only applicable when proxyMode is Userspace
|
||||
//ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
|
||||
}
|
||||
|
||||
type DockerConfig struct {
|
||||
Bridge string `json:",omitempty" flag:"bridge"`
|
||||
LogLevel string `json:",omitempty" flag:"log-level"`
|
||||
IPTables bool `json:",omitempty" flag:"iptables"`
|
||||
IPMasq bool `json:",omitempty" flag:"ip-masq"`
|
||||
Storage string `json:",omitempty" flag:"s"`
|
||||
}
|
||||
|
||||
type APIServerConfig struct {
|
||||
PathSrvKubernetes string `json:",omitempty"`
|
||||
PathSrvSshproxy string `json:",omitempty"`
|
||||
Image string `json:",omitempty"`
|
||||
|
||||
LogLevel int `json:",omitempty" flag:"v"`
|
||||
|
||||
CloudProvider string `json:",omitempty" flag:"cloud-provider"`
|
||||
SecurePort int `json:",omitempty" flag:"secure-port"`
|
||||
Address string `json:",omitempty" flag:"address"`
|
||||
EtcdServers string `json:",omitempty" flag:"etcd-servers"`
|
||||
EtcdServersOverrides string `json:",omitempty" flag:"etcd-servers-overrides"`
|
||||
// TODO: []string and join with commas?
|
||||
AdmissionControl string `json:",omitempty" flag:"admission-control"`
|
||||
ServiceClusterIPRange string `json:",omitempty" flag:"service-cluster-ip-range"`
|
||||
ClientCAFile string `json:",omitempty" flag:"client-ca-file"`
|
||||
BasicAuthFile string `json:",omitempty" flag:"basic-auth-file"`
|
||||
TLSCertFile string `json:",omitempty" flag:"tls-cert-file"`
|
||||
TLSPrivateKeyFile string `json:",omitempty" flag:"tls-private-key-file"`
|
||||
TokenAuthFile string `json:",omitempty" flag:"token-auth-file"`
|
||||
AllowPrivileged *bool `json:",omitempty" flag:"allow-privileged"`
|
||||
}
|
||||
|
||||
type KubeControllerManagerConfig struct {
|
||||
Master string `json:",omitempty" flag:"master"`
|
||||
LogLevel int `json:",omitempty" flag:"v"`
|
||||
|
||||
ServiceAccountPrivateKeyFile string `json:",omitempty" flag:"service-account-private-key-file"`
|
||||
|
||||
Image string `json:",omitempty"`
|
||||
|
||||
PathSrvKubernetes string `json:",omitempty"`
|
||||
|
||||
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
//// port is the port that the controller-manager's http service runs on.
|
||||
//Port int32 `json:"port"`
|
||||
//// address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
|
||||
//Address string `json:"address"`
|
||||
// cloudProvider is the provider for cloud services.
|
||||
CloudProvider string `json:"cloudProvider,omitempty" flag:"cloud-provider"`
|
||||
//// cloudConfigFile is the path to the cloud provider configuration file.
|
||||
//CloudConfigFile string `json:"cloudConfigFile"`
|
||||
//// concurrentEndpointSyncs is the number of endpoint syncing operations
|
||||
//// that will be done concurrently. Larger number = faster endpoint updating,
|
||||
//// but more CPU (and network) load.
|
||||
//ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"`
|
||||
//// concurrentRSSyncs is the number of replica sets that are allowed to sync
|
||||
//// concurrently. Larger number = more responsive replica management, but more
|
||||
//// CPU (and network) load.
|
||||
//ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"`
|
||||
//// concurrentRCSyncs is the number of replication controllers that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive replica
|
||||
//// management, but more CPU (and network) load.
|
||||
//ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"`
|
||||
//// concurrentResourceQuotaSyncs is the number of resource quotas that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive quota
|
||||
//// management, but more CPU (and network) load.
|
||||
//ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"`
|
||||
//// concurrentDeploymentSyncs is the number of deployment objects that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive deployments,
|
||||
//// but more CPU (and network) load.
|
||||
//ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"`
|
||||
//// concurrentDaemonSetSyncs is the number of daemonset objects that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive daemonset,
|
||||
//// but more CPU (and network) load.
|
||||
//ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"`
|
||||
//// concurrentJobSyncs is the number of job objects that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive jobs,
|
||||
//// but more CPU (and network) load.
|
||||
//ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"`
|
||||
//// concurrentNamespaceSyncs is the number of namespace objects that are
|
||||
//// allowed to sync concurrently.
|
||||
//ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"`
|
||||
//// lookupCacheSizeForRC is the size of lookup cache for replication controllers.
|
||||
//// Larger number = more responsive replica management, but more MEM load.
|
||||
//LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"`
|
||||
//// lookupCacheSizeForRS is the size of lookup cache for replica sets.
|
||||
//// Larger number = more responsive replica management, but more MEM load.
|
||||
//LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"`
|
||||
//// lookupCacheSizeForDaemonSet is the size of lookup cache for daemonsets.
|
||||
//// Larger number = more responsive daemonset, but more MEM load.
|
||||
//LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"`
|
||||
//// serviceSyncPeriod is the period for syncing services with their external
|
||||
//// load balancers.
|
||||
//ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"`
|
||||
//// nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
|
||||
//// periods will result in fewer calls to cloud provider, but may delay addition
|
||||
//// of new nodes to cluster.
|
||||
//NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"`
|
||||
//// resourceQuotaSyncPeriod is the period for syncing quota usage status
|
||||
//// in the system.
|
||||
//ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"`
|
||||
//// namespaceSyncPeriod is the period for syncing namespace life-cycle
|
||||
//// updates.
|
||||
//NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"`
|
||||
//// pvClaimBinderSyncPeriod is the period for syncing persistent volumes
|
||||
//// and persistent volume claims.
|
||||
//PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"`
|
||||
//// minResyncPeriod is the resync period in reflectors; will be random between
|
||||
//// minResyncPeriod and 2*minResyncPeriod.
|
||||
//MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"`
|
||||
//// terminatedPodGCThreshold is the number of terminated pods that can exist
|
||||
//// before the terminated pod garbage collector starts deleting terminated pods.
|
||||
//// If <= 0, the terminated pod garbage collector is disabled.
|
||||
//TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"`
|
||||
//// horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
|
||||
//// pods in horizontal pod autoscaler.
|
||||
//HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"`
|
||||
//// deploymentControllerSyncPeriod is the period for syncing the deployments.
|
||||
//DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"`
|
||||
//// podEvictionTimeout is the grace period for deleting pods on failed nodes.
|
||||
//PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"`
|
||||
//// deletingPodsQps is the number of nodes per second on which pods are deleted in
|
||||
//// case of node failure.
|
||||
//DeletingPodsQps float32 `json:"deletingPodsQps"`
|
||||
//// deletingPodsBurst is the number of nodes on which pods are bursty deleted in
|
||||
//// case of node failure. For more details look into RateLimiter.
|
||||
//DeletingPodsBurst int32 `json:"deletingPodsBurst"`
|
||||
//// nodeMonitorGracePeriod is the amount of time which we allow a running node to be
|
||||
//// unresponsive before marking it unhealthy. Must be N times more than kubelet's
|
||||
//// nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
|
||||
//// to post node status.
|
||||
//NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"`
|
||||
//// registerRetryCount is the number of retries for initial node registration.
|
||||
//// Retry interval equals node-sync-period.
|
||||
//RegisterRetryCount int32 `json:"registerRetryCount"`
|
||||
//// nodeStartupGracePeriod is the amount of time which we allow starting a node to
|
||||
//// be unresponsive before marking it unhealthy.
|
||||
//NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"`
|
||||
//// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
|
||||
//NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"`
|
||||
//// serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
|
||||
//// used to sign service account tokens.
|
||||
//ServiceAccountKeyFile string `json:"serviceAccountKeyFile"`
|
||||
//// enableProfiling enables profiling via web interface host:port/debug/pprof/
|
||||
//EnableProfiling bool `json:"enableProfiling"`
|
||||
// clusterName is the instance prefix for the cluster.
|
||||
ClusterName string `json:"clusterName,omitempty" flag:"cluster-name"`
|
||||
// clusterCIDR is CIDR Range for Pods in cluster.
|
||||
ClusterCIDR string `json:"clusterCIDR,omitempty" flag:"cluster-cidr"`
|
||||
//// serviceCIDR is CIDR Range for Services in cluster.
|
||||
//ServiceCIDR string `json:"serviceCIDR"`
|
||||
//// NodeCIDRMaskSize is the mask size for node cidr in cluster.
|
||||
//NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"`
|
||||
// allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
|
||||
// ConfigureCloudRoutes is true, to be set on the cloud provider.
|
||||
AllocateNodeCIDRs *bool `json:"allocateNodeCIDRs,omitempty" flag:"allocate-node-cidrs"`
|
||||
// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
|
||||
// to be configured on the cloud provider.
|
||||
ConfigureCloudRoutes *bool `json:"configureCloudRoutes,omitempty" flag:"configure-cloud-routes"`
|
||||
// rootCAFile is the root certificate authority that will be included in the service
|
||||
// account's token secret. This must be a valid PEM-encoded CA bundle.
|
||||
RootCAFile string `json:"rootCAFile,omitempty" flag:"root-ca-file"`
|
||||
//// contentType is contentType of requests sent to apiserver.
|
||||
//ContentType string `json:"contentType"`
|
||||
//// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
|
||||
//KubeAPIQPS float32 `json:"kubeAPIQPS"`
|
||||
//// kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
|
||||
//KubeAPIBurst int32 `json:"kubeAPIBurst"`
|
||||
// leaderElection defines the configuration of leader election client.
|
||||
LeaderElection *LeaderElectionConfiguration `json:"leaderElection,omitempty"`
|
||||
//// volumeConfiguration holds configuration for volume related features.
|
||||
//VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"`
|
||||
//// How long to wait between starting controller managers
|
||||
//ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"`
|
||||
//// enables the generic garbage collector. MUST be synced with the
|
||||
//// corresponding flag of the kube-apiserver. WARNING: the generic garbage
|
||||
//// collector is an alpha feature.
|
||||
//EnableGarbageCollector bool `json:"enableGarbageCollector"`
|
||||
}
|
||||
|
||||
type KubeSchedulerConfig struct {
|
||||
Image string `json:",omitempty"`
|
||||
|
||||
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
//// port is the port that the scheduler's http service runs on.
|
||||
//Port int32 `json:"port"`
|
||||
//// address is the IP address to serve on.
|
||||
//Address string `json:"address"`
|
||||
//// algorithmProvider is the scheduling algorithm provider to use.
|
||||
//AlgorithmProvider string `json:"algorithmProvider"`
|
||||
//// policyConfigFile is the filepath to the scheduler policy configuration.
|
||||
//PolicyConfigFile string `json:"policyConfigFile"`
|
||||
//// enableProfiling enables profiling via web interface.
|
||||
//EnableProfiling bool `json:"enableProfiling"`
|
||||
//// contentType is contentType of requests sent to apiserver.
|
||||
//ContentType string `json:"contentType"`
|
||||
//// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
|
||||
//KubeAPIQPS float32 `json:"kubeAPIQPS"`
|
||||
//// kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.
|
||||
//KubeAPIBurst int32 `json:"kubeAPIBurst"`
|
||||
//// schedulerName is name of the scheduler, used to select which pods
|
||||
//// will be processed by this scheduler, based on pod's annotation with
|
||||
//// key 'scheduler.alpha.kubernetes.io/name'.
|
||||
//SchedulerName string `json:"schedulerName"`
|
||||
//// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
|
||||
//// corresponding to every RequiredDuringScheduling affinity rule.
|
||||
//// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
|
||||
//HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
|
||||
//// Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
|
||||
//FailureDomains string `json:"failureDomains"`
|
||||
// leaderElection defines the configuration of leader election client.
|
||||
LeaderElection *LeaderElectionConfiguration `json:"leaderElection,omitempty"`
|
||||
}
|
||||
|
||||
// LeaderElectionConfiguration defines the configuration of leader election
|
||||
// clients for components that can run with leader election enabled.
|
||||
type LeaderElectionConfiguration struct {
|
||||
// leaderElect enables a leader election client to gain leadership
|
||||
// before executing the main loop. Enable this when running replicated
|
||||
// components for high availability.
|
||||
LeaderElect *bool `json:"leaderElect,omitempty" flag:"leader-elect"`
|
||||
//// leaseDuration is the duration that non-leader candidates will wait
|
||||
//// after observing a leadership renewal until attempting to acquire
|
||||
//// leadership of a led but unrenewed leader slot. This is effectively the
|
||||
//// maximum duration that a leader can be stopped before it is replaced
|
||||
//// by another candidate. This is only applicable if leader election is
|
||||
//// enabled.
|
||||
//LeaseDuration unversioned.Duration `json:"leaseDuration"`
|
||||
//// renewDeadline is the interval between attempts by the acting master to
|
||||
//// renew a leadership slot before it stops leading. This must be less
|
||||
//// than or equal to the lease duration. This is only applicable if leader
|
||||
//// election is enabled.
|
||||
//RenewDeadline unversioned.Duration `json:"renewDeadline"`
|
||||
//// retryPeriod is the duration the clients should wait between attempting
|
||||
//// acquisition and renewal of a leadership. This is only applicable if
|
||||
//// leader election is enabled.
|
||||
//RetryPeriod unversioned.Duration `json:"retryPeriod"`
|
||||
}
|
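Both KubeControllerManagerConfig and KubeSchedulerConfig now embed an optional LeaderElection block, matching the `LeaderElect: true` defaults added to the manifests earlier in this commit. A small, illustrative-only usage with trimmed copies of the types (the image tag is a placeholder):

```
package main

import "fmt"

// Trimmed copies of the types added in this commit; illustrative only.
type LeaderElectionConfiguration struct {
	// leaderElect enables a leader election client to gain leadership
	// before executing the main loop.
	LeaderElect *bool `json:"leaderElect,omitempty" flag:"leader-elect"`
}

type KubeSchedulerConfig struct {
	Image          string                       `json:",omitempty"`
	LeaderElection *LeaderElectionConfiguration `json:"leaderElection,omitempty"`
}

func main() {
	enabled := true
	cfg := KubeSchedulerConfig{
		// Placeholder image tag, not taken from the commit.
		Image:          "gcr.io/google_containers/kube-scheduler:v<KubernetesVersion>",
		LeaderElection: &LeaderElectionConfiguration{LeaderElect: &enabled},
	}

	// With a BuildFlags-style helper this pointer-to-bool would render as "--leader-elect=true".
	fmt.Printf("leader-elect=%v\n", *cfg.LeaderElection.LeaderElect)
}
```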
|
@ -7,154 +7,216 @@ import (
|
|||
"math/big"
|
||||
"net"
|
||||
"strconv"
|
||||
//"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
//"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
)
|
||||
|
||||
type CloudConfig struct {
|
||||
type ClusterConfig struct {
|
||||
// The CloudProvider to use (aws or gce)
|
||||
CloudProvider string `json:",omitempty"`
|
||||
|
||||
// The version of kubernetes to install
|
||||
// The version of kubernetes to install (optional, and can be a "spec" like stable)
|
||||
KubernetesVersion string `json:",omitempty"`
|
||||
|
||||
// The Node initializer technique to use: cloudinit or nodeup
|
||||
NodeInit string `json:",omitempty"`
|
||||
//
|
||||
//// The Node initializer technique to use: cloudinit or nodeup
|
||||
//NodeInit string `json:",omitempty"`
|
||||
|
||||
// Configuration of zones we are targeting
|
||||
MasterZones []string `json:",omitempty"`
|
||||
NodeZones []*ZoneConfig `json:",omitempty"`
|
||||
Region string `json:",omitempty"`
|
||||
Project string `json:",omitempty"`
|
||||
Zones []*ZoneConfig `json:",omitempty"`
|
||||
//Region string `json:",omitempty"`
|
||||
|
||||
// Permissions to configure in IAM or GCE
|
||||
// Project is the cloud project we should use, required on GCE
|
||||
Project string `json:",omitempty"`
|
||||
|
||||
// MasterPermissions contains the IAM permissions for the masters
|
||||
MasterPermissions *CloudPermissions `json:",omitempty"`
|
||||
NodePermissions *CloudPermissions `json:",omitempty"`
|
||||
// NodePermissions contains the IAM permissions for the nodes
|
||||
NodePermissions *CloudPermissions `json:",omitempty"`
|
||||
|
||||
// The internal and external names for the master nodes
|
||||
MasterPublicName string `json:",omitempty"`
|
||||
// MasterPublicName is the external DNS name for the master nodes
|
||||
MasterPublicName string `json:",omitempty"`
|
||||
// MasterInternalName is the internal DNS name for the master nodes
|
||||
MasterInternalName string `json:",omitempty"`
|
||||
|
||||
// The CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s
|
||||
// This is a real CIDR, not the internal k8s overlay
|
||||
// This is a real CIDR, not the internal k8s network
|
||||
NetworkCIDR string `json:",omitempty"`
|
||||
NetworkID string `json:",omitempty"`
|
||||
|
||||
// NetworkID is an identifier of a network, if we want to reuse/share an existing network (e.g. an AWS VPC)
|
||||
NetworkID string `json:",omitempty"`
|
||||
|
||||
// SecretStore is the VFS path to where secrets are stored
|
||||
SecretStore string `json:",omitempty"`
|
||||
KeyStore string `json:",omitempty"`
|
||||
// KeyStore is the VFS path to where SSL keys and certificates are stored
|
||||
KeyStore string `json:",omitempty"`
|
||||
// ConfigStore is the VFS path to where the configuration (CloudConfig, NodeSetConfig etc) is stored
|
||||
ConfigStore string `json:",omitempty"`
|
||||
|
||||
// The DNS zone we should use when configuring DNS
|
||||
// DNSZone is the DNS zone we should use when configuring DNS
|
||||
DNSZone string `json:",omitempty"`
|
||||
|
||||
InstancePrefix string `json:",omitempty"`
|
||||
//InstancePrefix string `json:",omitempty"`
|
||||
|
||||
// ClusterName is a unique identifier for the cluster, and currently must be a DNS name
|
||||
ClusterName string `json:",omitempty"`
|
||||
AllocateNodeCIDRs *bool `json:",omitempty"`
|
||||
|
||||
Multizone bool `json:",omitempty"`
|
||||
Multizone *bool `json:",omitempty"`
|
||||
|
||||
ClusterIPRange string `json:",omitempty"`
|
||||
//ClusterIPRange string `json:",omitempty"`
|
||||
|
||||
// ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
|
||||
ServiceClusterIPRange string `json:",omitempty"`
|
||||
MasterIPRange string `json:",omitempty"`
|
||||
NonMasqueradeCidr string `json:",omitempty"`
|
||||
//MasterIPRange string `json:",omitempty"`
|
||||
//NonMasqueradeCidr string `json:",omitempty"`
|
||||
//
|
||||
//NetworkProvider string `json:",omitempty"`
|
||||
//
|
||||
//HairpinMode string `json:",omitempty"`
|
||||
//
|
||||
//OpencontrailTag string `json:",omitempty"`
|
||||
//OpencontrailKubernetesTag string `json:",omitempty"`
|
||||
//OpencontrailPublicSubnet string `json:",omitempty"`
|
||||
//
|
||||
//EnableClusterMonitoring string `json:",omitempty"`
|
||||
//EnableL7LoadBalancing string `json:",omitempty"`
|
||||
//EnableClusterUI *bool `json:",omitempty"`
|
||||
//
|
||||
//EnableClusterDNS *bool `json:",omitempty"`
|
||||
//DNSReplicas int `json:",omitempty"`
|
||||
//DNSServerIP string `json:",omitempty"`
|
||||
|
||||
NetworkProvider string `json:",omitempty"`
|
||||
// DNSDomain is the suffix we use for internal DNS names (normally cluster.local)
|
||||
DNSDomain string `json:",omitempty"`
|
||||
|
||||
HairpinMode string `json:",omitempty"`
|
||||
//EnableClusterLogging *bool `json:",omitempty"`
|
||||
//EnableNodeLogging *bool `json:",omitempty"`
|
||||
//LoggingDestination string `json:",omitempty"`
|
||||
//ElasticsearchLoggingReplicas int `json:",omitempty"`
|
||||
//
|
||||
//EnableClusterRegistry *bool `json:",omitempty"`
|
||||
//ClusterRegistryDisk string `json:",omitempty"`
|
||||
//ClusterRegistryDiskSize int `json:",omitempty"`
|
||||
//
|
||||
//EnableCustomMetrics *bool `json:",omitempty"`
|
||||
//
|
||||
//RegisterMasterKubelet *bool `json:",omitempty"`
|
||||
|
||||
OpencontrailTag string `json:",omitempty"`
|
||||
OpencontrailKubernetesTag string `json:",omitempty"`
|
||||
OpencontrailPublicSubnet string `json:",omitempty"`
|
||||
//// Image is the default image spec to use for the cluster
|
||||
//Image string `json:",omitempty"`
|
||||
|
||||
EnableClusterMonitoring string `json:",omitempty"`
|
||||
EnableL7LoadBalancing string `json:",omitempty"`
|
||||
EnableClusterUI *bool `json:",omitempty"`
|
||||
//KubeUser string `json:",omitempty"`
|
||||
//
|
||||
//// These are moved to CAStore / SecretStore
|
||||
////KubePassword string
|
||||
////KubeletToken string
|
||||
////KubeProxyToken string
|
||||
////BearerToken string
|
||||
////CACert []byte
|
||||
////CAKey []byte
|
||||
////KubeletCert []byte
|
||||
////KubeletKey []byte
|
||||
////MasterCert []byte
|
||||
////MasterKey []byte
|
||||
////KubecfgCert []byte
|
||||
////KubecfgKey []byte
|
||||
//
|
||||
//AdmissionControl string `json:",omitempty"`
|
||||
//RuntimeConfig string `json:",omitempty"`
|
||||
//
|
||||
//KubeImageTag string `json:",omitempty"`
|
||||
//KubeDockerRegistry string `json:",omitempty"`
|
||||
//KubeAddonRegistry string `json:",omitempty"`
|
||||
//
|
||||
//KubeletPort int `json:",omitempty"`
|
||||
//
|
||||
//KubeApiserverRequestTimeout int `json:",omitempty"`
|
||||
//
|
||||
//TerminatedPodGcThreshold string `json:",omitempty"`
|
||||
//
|
||||
//EnableManifestURL *bool `json:",omitempty"`
|
||||
//ManifestURL string `json:",omitempty"`
|
||||
//ManifestURLHeader string `json:",omitempty"`
|
||||
//
|
||||
//TestCluster string `json:",omitempty"`
|
||||
//
|
||||
//DockerOptions string `json:",omitempty"`
|
||||
//DockerStorage string `json:",omitempty"`
|
||||
//ExtraDockerOpts string `json:",omitempty"`
|
||||
//
|
||||
//E2EStorageTestEnvironment string `json:",omitempty"`
|
||||
//KubeletTestArgs string `json:",omitempty"`
|
||||
//KubeletTestLogLevel string `json:",omitempty"`
|
||||
//DockerTestArgs string `json:",omitempty"`
|
||||
//DockerTestLogLevel string `json:",omitempty"`
|
||||
//ApiserverTestArgs string `json:",omitempty"`
|
||||
//ApiserverTestLogLevel string `json:",omitempty"`
|
||||
//ControllerManagerTestArgs string `json:",omitempty"`
|
||||
//ControllerManagerTestLogLevel string `json:",omitempty"`
|
||||
//SchedulerTestArgs string `json:",omitempty"`
|
||||
//SchedulerTestLogLevel string `json:",omitempty"`
|
||||
//KubeProxyTestArgs string `json:",omitempty"`
|
||||
//KubeProxyTestLogLevel string `json:",omitempty"`
|
||||
|
||||
EnableClusterDNS *bool `json:",omitempty"`
|
||||
DNSReplicas int `json:",omitempty"`
|
||||
DNSServerIP string `json:",omitempty"`
|
||||
DNSDomain string `json:",omitempty"`
|
||||
//NodeUp *NodeUpConfig `json:",omitempty"`
|
||||
|
||||
EnableClusterLogging *bool `json:",omitempty"`
|
||||
EnableNodeLogging *bool `json:",omitempty"`
|
||||
LoggingDestination string `json:",omitempty"`
|
||||
ElasticsearchLoggingReplicas int `json:",omitempty"`
|
||||
// nodeSets is a list of all the NodeSets in the cluster.
|
||||
// It is not exported: we populate it from other files
|
||||
//nodeSets []*NodeSetConfig `json:",omitempty"`
|
||||
|
||||
EnableClusterRegistry *bool `json:",omitempty"`
|
||||
ClusterRegistryDisk string `json:",omitempty"`
|
||||
ClusterRegistryDiskSize int `json:",omitempty"`
|
||||
// Masters is the configuration for each master in the cluster
|
||||
Masters []*MasterConfig `json:",omitempty"`
|
||||
|
||||
EnableCustomMetrics *bool `json:",omitempty"`
|
||||
// MasterVolumes stores the configurations for each master data volume
|
||||
MasterVolumes []*VolumeConfig `json:",omitempty"`
|
||||
|
||||
RegisterMasterKubelet *bool `json:",omitempty"`
|
||||
MasterVolumeType string `json:",omitempty"`
|
||||
MasterVolumeSize int `json:",omitempty"`
|
||||
MasterTag string `json:",omitempty"`
|
||||
MasterMachineType string `json:",omitempty"`
|
||||
MasterImage string `json:",omitempty"`
|
||||
// Component configurations
|
||||
Docker *DockerConfig `json:",omitempty"`
|
||||
KubeDNS *KubeDNSConfig `json:",omitempty"`
|
||||
APIServer *APIServerConfig `json:",omitempty"`
|
||||
KubeControllerManager *KubeControllerManagerConfig `json:",omitempty"`
|
||||
KubeScheduler *KubeSchedulerConfig `json:",omitempty"`
|
||||
KubeProxy *KubeProxyConfig `json:",omitempty"`
|
||||
Kubelet *KubeletConfig `json:",omitempty"`
|
||||
MasterKubelet *KubeletConfig `json:",omitempty"`
|
||||
}
|
||||
|
||||
NodeImage string `json:",omitempty"`
|
||||
NodeCount int `json:",omitempty"`
|
||||
NodeInstancePrefix string `json:",omitempty"`
|
||||
NodeLabels string `json:",omitempty"`
|
||||
NodeMachineType string `json:",omitempty"`
|
||||
NodeTag string `json:",omitempty"`
|
||||
type KubeDNSConfig struct {
|
||||
Replicas int `json:",omitempty"`
|
||||
Domain string `json:",omitempty"`
|
||||
ServerIP string `json:",omitempty"`
|
||||
}
|
||||
|
||||
KubeUser string `json:",omitempty"`
|
||||
type MasterConfig struct {
|
||||
Name string `json:",omitempty"`
|
||||
|
||||
// These are moved to CAStore / SecretStore
|
||||
//KubePassword string
|
||||
//KubeletToken string
|
||||
//KubeProxyToken string
|
||||
//BearerToken string
|
||||
//CACert []byte
|
||||
//CAKey []byte
|
||||
//KubeletCert []byte
|
||||
//KubeletKey []byte
|
||||
//MasterCert []byte
|
||||
//MasterKey []byte
|
||||
//KubecfgCert []byte
|
||||
//KubecfgKey []byte
|
||||
Image string `json:",omitempty"`
|
||||
Zone string `json:",omitempty"`
|
||||
MachineType string `json:",omitempty"`
|
||||
}
|
||||
|
||||
AdmissionControl string `json:",omitempty"`
|
||||
RuntimeConfig string `json:",omitempty"`
|
||||
type VolumeConfig struct {
|
||||
Name string `json:",omitempty"`
|
||||
Type string `json:",omitempty"`
|
||||
Size int `json:",omitempty"`
|
||||
|
||||
KubeImageTag string `json:",omitempty"`
|
||||
KubeDockerRegistry string `json:",omitempty"`
|
||||
KubeAddonRegistry string `json:",omitempty"`
|
||||
Zone string `json:",omitempty"`
|
||||
|
||||
KubeletPort int `json:",omitempty"`
|
||||
Roles map[string]string `json:",omitempty"`
|
||||
}
|
||||
|
||||
KubeApiserverRequestTimeout int `json:",omitempty"`
|
||||
type NodeSetConfig struct {
|
||||
Name string `json:",omitempty"`
|
||||
|
||||
TerminatedPodGcThreshold string `json:",omitempty"`
|
||||
|
||||
EnableManifestURL *bool `json:",omitempty"`
|
||||
ManifestURL string `json:",omitempty"`
|
||||
ManifestURLHeader string `json:",omitempty"`
|
||||
|
||||
TestCluster string `json:",omitempty"`
|
||||
|
||||
DockerOptions string `json:",omitempty"`
|
||||
DockerStorage string `json:",omitempty"`
|
||||
ExtraDockerOpts string `json:",omitempty"`
|
||||
|
||||
E2EStorageTestEnvironment string `json:",omitempty"`
|
||||
KubeletTestArgs string `json:",omitempty"`
|
||||
KubeletTestLogLevel string `json:",omitempty"`
|
||||
DockerTestArgs string `json:",omitempty"`
|
||||
DockerTestLogLevel string `json:",omitempty"`
|
||||
ApiserverTestArgs string `json:",omitempty"`
|
||||
ApiserverTestLogLevel string `json:",omitempty"`
|
||||
ControllerManagerTestArgs string `json:",omitempty"`
|
||||
ControllerManagerTestLogLevel string `json:",omitempty"`
|
||||
SchedulerTestArgs string `json:",omitempty"`
|
||||
SchedulerTestLogLevel string `json:",omitempty"`
|
||||
KubeProxyTestArgs string `json:",omitempty"`
|
||||
KubeProxyTestLogLevel string `json:",omitempty"`
|
||||
|
||||
Assets []string `json:",omitempty"`
|
||||
|
||||
NodeUpTags []string `json:",omitempty"`
|
||||
|
||||
NodeUp *NodeUpConfig `json:",omitempty"`
|
||||
Image string `json:",omitempty"`
|
||||
MinSize *int `json:",omitempty"`
|
||||
MaxSize *int `json:",omitempty"`
|
||||
//NodeInstancePrefix string `json:",omitempty"`
|
||||
//NodeLabels string `json:",omitempty"`
|
||||
MachineType string `json:",omitempty"`
|
||||
//NodeTag string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ZoneConfig struct {
|
||||
|
@@ -162,12 +224,17 @@ type ZoneConfig struct {
|
|||
CIDR string `json:"cidr,omitempty"`
|
||||
}
|
||||
|
||||
type NodeUpConfig struct {
|
||||
Location string `json:",omitempty"`
|
||||
Hash string `json:",omitempty"`
|
||||
}
|
||||
//type NodeUpConfig struct {
|
||||
// Source string `json:",omitempty"`
|
||||
// SourceHash string `json:",omitempty"`
|
||||
//
|
||||
// Tags []string `json:",omitempty"`
|
||||
//
|
||||
// // Assets that NodeUp should use. This is a "search-path" for resolving dependencies.
|
||||
// Assets []string `json:",omitempty"`
|
||||
//}
|
||||
|
||||
func (c *CloudConfig) WellKnownServiceIP(id int) (net.IP, error) {
|
||||
func (c *ClusterConfig) WellKnownServiceIP(id int) (net.IP, error) {
|
||||
_, cidr, err := net.ParseCIDR(c.ServiceClusterIPRange)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing ServiceClusterIPRange: %v", err)
|
||||
|
@@ -199,22 +266,53 @@ func (c *CloudConfig) WellKnownServiceIP(id int) (net.IP, error) {
|
|||
return nil, fmt.Errorf("Unexpected IP address type for ServiceClusterIPRange: %s", c.ServiceClusterIPRange)
|
||||
}
|
||||
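For orientation, a minimal sketch of what a "well-known service IP" is: an address computed by adding a small fixed offset to the base of ServiceClusterIPRange (offset 10 is commonly reserved for cluster DNS). The helper below is illustrative only and is not part of this change.

```
package main

import (
	"fmt"
	"net"
)

// wellKnownServiceIP sketches the idea: parse the service CIDR and add a
// small offset to its base address. Offsets are assumed to stay within the
// last octet, which is enough for an illustration.
func wellKnownServiceIP(serviceCIDR string, offset int) (net.IP, error) {
	_, cidr, err := net.ParseCIDR(serviceCIDR)
	if err != nil {
		return nil, fmt.Errorf("error parsing service CIDR: %v", err)
	}
	base := cidr.IP.To4()
	if base == nil {
		return nil, fmt.Errorf("this sketch only handles IPv4 ranges")
	}
	ip := make(net.IP, len(base))
	copy(ip, base)
	ip[3] += byte(offset)
	return ip, nil
}

func main() {
	ip, _ := wellKnownServiceIP("10.0.0.0/16", 10)
	fmt.Println(ip) // 10.0.0.10
}
```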
|
||||
func (c *CloudConfig) PerformAssignments() error {
|
||||
// PerformAssignments populates values that are required and immutable
|
||||
// For example, it assigns stable Keys to NodeSets & Masters, and
|
||||
// it assigns CIDRs to subnets
|
||||
func (c *ClusterConfig) PerformAssignments() error {
|
||||
if c.NetworkCIDR == "" {
|
||||
// TODO: Choose non-overlapping networking CIDRs for VPCs?
|
||||
c.NetworkCIDR = "172.20.0.0/16"
|
||||
}
|
||||
|
||||
for _, zone := range c.NodeZones {
|
||||
for _, zone := range c.Zones {
|
||||
err := zone.performAssignments(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
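The per-zone CIDR computation itself is split across later hunks; purely as a hedged illustration of the idea (not the exact layout this code produces), carving the default 172.20.0.0/16 into one subnet per zone index might look like the sketch below, with index 0 left unused so the lowest range stays free.

```
package main

import "fmt"

// zoneCIDRSketch derives a per-zone subnet from a 1-based zone index.
// The /19 size and the 32*index offset are assumptions for illustration;
// the real assignment lives in assignCIDR further down.
func zoneCIDRSketch(index int) string {
	return fmt.Sprintf("172.20.%d.0/19", 32*index)
}

func main() {
	for index := 1; index <= 3; index++ {
		fmt.Println(zoneCIDRSketch(index))
	}
	// 172.20.32.0/19
	// 172.20.64.0/19
	// 172.20.96.0/19
}
```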
|
||||
func (z *ZoneConfig) performAssignments(c *CloudConfig) error {
|
||||
// performAssignmentsNodesets populates NodeSets with default values
|
||||
func PerformAssignmentsNodesets(nodeSets []*NodeSetConfig) error {
|
||||
keys := map[string]bool{}
|
||||
for _, n := range nodeSets {
|
||||
keys[n.Name] = true
|
||||
}
|
||||
|
||||
for _, n := range nodeSets {
|
||||
// We want to give them a stable Key as soon as possible
|
||||
if n.Name == "" {
|
||||
// Loop to find the first unassigned name like `nodes-%d`
|
||||
i := 0
|
||||
for {
|
||||
key := fmt.Sprintf("nodes-%d", i)
|
||||
if !keys[key] {
|
||||
n.Name = key
|
||||
keys[key] = true
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
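A small sketch of the naming behaviour above: NodeSets without a Name get the first unused nodes-%d key, and NodeSets that already have a Name are untouched.

```
package cloudup

import "fmt"

// ExampleNodeSetNaming is a sketch, not a test shipped with this change.
func ExampleNodeSetNaming() {
	nodeSets := []*NodeSetConfig{
		{Name: "nodes-0"},
		{}, // becomes "nodes-1"
		{}, // becomes "nodes-2"
	}
	if err := PerformAssignmentsNodesets(nodeSets); err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, ns := range nodeSets {
		fmt.Println(ns.Name)
	}
	// Output:
	// nodes-0
	// nodes-1
	// nodes-2
}
```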
|
||||
func (z *ZoneConfig) performAssignments(c *ClusterConfig) error {
|
||||
if z.CIDR == "" {
|
||||
cidr, err := z.assignCIDR(c)
|
||||
if err != nil {
|
||||
|
@@ -227,19 +325,19 @@ func (z *ZoneConfig) performAssignments(c *CloudConfig) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (z *ZoneConfig) assignCIDR(c *CloudConfig) (string, error) {
|
||||
func (z *ZoneConfig) assignCIDR(c *ClusterConfig) (string, error) {
|
||||
// TODO: We probably could query for the existing subnets & allocate appropriately
|
||||
// for now we'll require users to set CIDRs themselves
|
||||
|
||||
lastCharMap := make(map[byte]bool)
|
||||
for _, nodeZone := range c.NodeZones {
|
||||
for _, nodeZone := range c.Zones {
|
||||
lastChar := nodeZone.Name[len(nodeZone.Name)-1]
|
||||
lastCharMap[lastChar] = true
|
||||
}
|
||||
|
||||
index := -1
|
||||
|
||||
if len(lastCharMap) == len(c.NodeZones) {
|
||||
if len(lastCharMap) == len(c.Zones) {
|
||||
// Last char of zones are unique (GCE, AWS)
|
||||
// At least on AWS, we also want 'a' to be 1, so that we don't collide with the lowest range,
|
||||
// because kube-up uses that range
|
||||
|
@@ -247,7 +345,7 @@ func (z *ZoneConfig) assignCIDR(c *CloudConfig) (string, error) {
|
|||
} else {
|
||||
glog.Warningf("Last char of zone names not unique")
|
||||
|
||||
for i, nodeZone := range c.NodeZones {
|
||||
for i, nodeZone := range c.Zones {
|
||||
if nodeZone.Name == z.Name {
|
||||
index = i
|
||||
break
|
||||
|
@@ -285,7 +383,7 @@ func (z *ZoneConfig) assignCIDR(c *CloudConfig) (string, error) {
|
|||
}
|
||||
|
||||
// SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
|
||||
func (c *CloudConfig) SharedVPC() bool {
|
||||
func (c *ClusterConfig) SharedVPC() bool {
|
||||
return c.NetworkID != ""
|
||||
}
|
||||
|
||||
|
@@ -304,3 +402,76 @@ func (p *CloudPermissions) AddS3Bucket(bucket string) {
|
|||
|
||||
p.S3Buckets = append(p.S3Buckets, bucket)
|
||||
}
|
||||
|
||||
//
|
||||
//// findImage finds the default image
|
||||
//func (c*NodeSetConfig) resolveImage() error {
|
||||
// cloud.(*awsup.AWSCloud).ResolveImage()
|
||||
//
|
||||
// if n.Image == "" {
|
||||
// if defaultImage == "" {
|
||||
// image, err := c.determineImage()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// defaultImage = image
|
||||
// }
|
||||
// n.Image = defaultImage
|
||||
// }
|
||||
//
|
||||
//
|
||||
// return nil
|
||||
//}
|
||||
|
||||
func WriteConfig(stateStore fi.StateStore, cluster *ClusterConfig, nodeSets []*NodeSetConfig) error {
|
||||
// Check for nodeset Name duplicates before writing
|
||||
{
|
||||
names := map[string]bool{}
|
||||
for i, ns := range nodeSets {
|
||||
if ns.Name == "" {
|
||||
return fmt.Errorf("NodeSet #%d did not have Name set", i+1)
|
||||
}
|
||||
if names[ns.Name] {
|
||||
return fmt.Errorf("Duplicate NodeSet Name found: %q", ns.Name)
|
||||
}
|
||||
names[ns.Name] = true
|
||||
}
|
||||
}
|
||||
err := stateStore.WriteConfig("config", cluster)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing updated cluster configuration: %v", err)
|
||||
}
|
||||
|
||||
for _, ns := range nodeSets {
|
||||
err = stateStore.WriteConfig("nodeset/"+ns.Name, ns)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing updated nodeset configuration: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReadConfig(stateStore fi.StateStore) (*ClusterConfig, []*NodeSetConfig, error) {
|
||||
cluster := &ClusterConfig{}
|
||||
err := stateStore.ReadConfig("config", cluster)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error reading cluster configuration: %v", err)
|
||||
}
|
||||
|
||||
var nodeSets []*NodeSetConfig
|
||||
keys, err := stateStore.ListChildren("nodeset")
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error listing nodesets in state store: %v", err)
|
||||
}
|
||||
for _, key := range keys {
|
||||
ns := &NodeSetConfig{}
|
||||
err = stateStore.ReadConfig("nodeset/"+key, ns)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error reading nodeset configuration %q: %v", key, err)
|
||||
}
|
||||
nodeSets = append(nodeSets, ns)
|
||||
}
|
||||
|
||||
return cluster, nodeSets, nil
|
||||
}
|
||||
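A hedged usage sketch of the two helpers above: write a cluster plus its nodesets, then read them back from the same state store. How the fi.StateStore is constructed is left to the caller; all names and values are placeholders.

```
package cloudup

import (
	"fmt"

	"k8s.io/kube-deploy/upup/pkg/fi"
)

// roundTripConfig is illustrative only.
func roundTripConfig(stateStore fi.StateStore) error {
	cluster := &ClusterConfig{ClusterName: "example.mydomain.com"}
	nodeSets := []*NodeSetConfig{{Name: "nodes-0"}}

	if err := WriteConfig(stateStore, cluster, nodeSets); err != nil {
		return err
	}

	readCluster, readNodeSets, err := ReadConfig(stateStore)
	if err != nil {
		return err
	}
	fmt.Printf("read cluster %q with %d nodeset(s)\n", readCluster.ClusterName, len(readNodeSets))
	return nil
}
```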
|
|
|
@@ -0,0 +1,648 @@
|
|||
package cloudup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/golang/glog"
|
||||
"io/ioutil"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awstasks"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/awsup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/gcetasks"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/fitasks"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/loader"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/utils"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/vfs"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const DefaultNodeTypeAWS = "t2.medium"
|
||||
const DefaultNodeTypeGCE = "n1-standard-2"
|
||||
|
||||
// Path for completed cluster spec in the state store
|
||||
const PathClusterCompleted = "cluster.spec"
|
||||
|
||||
type CreateClusterCmd struct {
|
||||
// ClusterConfig is the cluster configuration
|
||||
ClusterConfig *ClusterConfig
|
||||
|
||||
// NodeSets is the configuration for each NodeSet (group of nodes)
|
||||
NodeSets []*NodeSetConfig
|
||||
|
||||
//// NodeUp stores the configuration we are going to pass to nodeup
|
||||
//NodeUpConfig *nodeup.NodeConfig
|
||||
|
||||
// NodeUpSource is the location from which we download nodeup
|
||||
NodeUpSource string
|
||||
|
||||
// Tags to pass to NodeUp
|
||||
NodeUpTags []string
|
||||
|
||||
// ModelStore is the location where models are found
|
||||
ModelStore string
|
||||
// Models is a list of cloudup models to apply
|
||||
Models []string
|
||||
// StateStore is a StateStore in which we store state (such as the PKI tree)
|
||||
StateStore fi.StateStore
|
||||
// Target specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
|
||||
Target string
|
||||
// The node model to use
|
||||
NodeModel string
|
||||
// The SSH public key (file) to use
|
||||
SSHPublicKey string
|
||||
// OutDir is a local directory in which we place output, can cache files etc
|
||||
OutDir string
|
||||
|
||||
// Assets is a list of sources for files (primarily when not using everything containerized)
|
||||
Assets []string
|
||||
}
|
||||
|
||||
func (c *CreateClusterCmd) LoadConfig(configFile string) error {
|
||||
conf, err := ioutil.ReadFile(configFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading configuration file %q: %v", configFile, err)
|
||||
}
|
||||
err = utils.YamlUnmarshal(conf, c.ClusterConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing configuration file %q: %v", configFile, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
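A sketch of driving LoadConfig from a file on disk. The YAML keys in such a file would follow the JSON field names of ClusterConfig (for example ClusterName, CloudProvider), since the struct tags are bare `json:",omitempty"`; treat the exact key casing as an assumption.

```
package cloudup

import "github.com/golang/glog"

// loadExampleConfig is illustrative; the path and field values are placeholders.
func loadExampleConfig(path string) *ClusterConfig {
	c := &CreateClusterCmd{ClusterConfig: &ClusterConfig{}}
	if err := c.LoadConfig(path); err != nil {
		glog.Exitf("error loading configuration: %v", err)
	}
	glog.Infof("loaded cluster %q", c.ClusterConfig.ClusterName)
	return c.ClusterConfig
}
```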
|
||||
func (c *CreateClusterCmd) Run() error {
|
||||
// TODO: Make these configurable?
|
||||
useMasterASG := true
|
||||
useMasterLB := false
|
||||
|
||||
//// We (currently) have to use protokube with ASGs
|
||||
//useProtokube := useMasterASG
|
||||
|
||||
//if c.NodeUpConfig == nil {
|
||||
// c.NodeUpConfig = &nodeup.NodeConfig{}
|
||||
//}
|
||||
|
||||
if c.ClusterConfig.ClusterName == "" {
|
||||
return fmt.Errorf("--name is required (e.g. mycluster.myzone.com)")
|
||||
}
|
||||
|
||||
if c.ClusterConfig.MasterPublicName == "" {
|
||||
c.ClusterConfig.MasterPublicName = "api." + c.ClusterConfig.ClusterName
|
||||
}
|
||||
if c.ClusterConfig.DNSZone == "" {
|
||||
tokens := strings.Split(c.ClusterConfig.MasterPublicName, ".")
|
||||
c.ClusterConfig.DNSZone = strings.Join(tokens[len(tokens)-2:], ".")
|
||||
glog.Infof("Defaulting DNS zone to: %s", c.ClusterConfig.DNSZone)
|
||||
}
|
||||
|
||||
if len(c.ClusterConfig.Zones) == 0 {
|
||||
// TODO: Auto choose zones from region?
|
||||
return fmt.Errorf("must configuration at least one Zone (use --zones)")
|
||||
}
|
||||
|
||||
if len(c.NodeSets) == 0 {
|
||||
return fmt.Errorf("must configure at least one NodeSet")
|
||||
}
|
||||
|
||||
if len(c.ClusterConfig.Masters) == 0 {
|
||||
return fmt.Errorf("must configure at least one Master")
|
||||
}
|
||||
|
||||
// Check basic master configuration
|
||||
{
|
||||
masterZones := make(map[string]string)
|
||||
for i, m := range c.ClusterConfig.Masters {
|
||||
k := m.Name
|
||||
if k == "" {
|
||||
return fmt.Errorf("Master #%d did not have a key specified", i)
|
||||
}
|
||||
|
||||
z := m.Zone
|
||||
if z == "" {
|
||||
return fmt.Errorf("Master %s did not specify a zone", k)
|
||||
}
|
||||
if masterZones[z] != "" {
|
||||
return fmt.Errorf("Masters %s and %s are in the same zone", k, masterZones[z])
|
||||
}
|
||||
masterZones[z] = k
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
zones := make(map[string]bool)
|
||||
for _, z := range c.ClusterConfig.Zones {
|
||||
if zones[z.Name] {
|
||||
return fmt.Errorf("Zones contained a duplicate value: %v", z.Name)
|
||||
}
|
||||
zones[z.Name] = true
|
||||
}
|
||||
|
||||
for _, m := range c.ClusterConfig.Masters {
|
||||
if !zones[m.Zone] {
|
||||
// We could relax this, but this seems like a reasonable constraint
|
||||
return fmt.Errorf("Master %q is configured in %q, but this is not configured as a Zone", m.Name, m.Zone)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (len(c.ClusterConfig.Masters) % 2) == 0 {
|
||||
// Not technically a requirement, but doesn't really make sense to allow
|
||||
return fmt.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use --zone and --master-zone to declare node zones and master zones separately.")
|
||||
}
|
||||
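The odd-number check above comes from etcd quorum arithmetic: quorum is floor(n/2)+1, so an even master count tolerates no more failures than the next smaller odd count. A quick illustration:

```
package main

import "fmt"

func main() {
	for _, n := range []int{1, 2, 3, 4, 5} {
		quorum := n/2 + 1
		fmt.Printf("masters=%d quorum=%d tolerated failures=%d\n", n, quorum, n-quorum)
	}
	// masters=2 tolerates 0 failures, the same as masters=1,
	// which is why even counts are rejected.
}
```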
|
||||
if c.StateStore == nil {
|
||||
return fmt.Errorf("StateStore is required")
|
||||
}
|
||||
|
||||
if c.ClusterConfig.CloudProvider == "" {
|
||||
return fmt.Errorf("--cloud is required (e.g. aws, gce)")
|
||||
}
|
||||
|
||||
tags := make(map[string]struct{})
|
||||
|
||||
l := &Loader{}
|
||||
l.Init()
|
||||
|
||||
keyStore := c.StateStore.CA()
|
||||
secretStore := c.StateStore.Secrets()
|
||||
|
||||
if vfs.IsClusterReadable(secretStore.VFSPath()) {
|
||||
vfsPath := secretStore.VFSPath()
|
||||
c.ClusterConfig.SecretStore = vfsPath.Path()
|
||||
if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
|
||||
if c.ClusterConfig.MasterPermissions == nil {
|
||||
c.ClusterConfig.MasterPermissions = &CloudPermissions{}
|
||||
}
|
||||
c.ClusterConfig.MasterPermissions.AddS3Bucket(s3Path.Bucket())
|
||||
if c.ClusterConfig.NodePermissions == nil {
|
||||
c.ClusterConfig.NodePermissions = &CloudPermissions{}
|
||||
}
|
||||
c.ClusterConfig.NodePermissions.AddS3Bucket(s3Path.Bucket())
|
||||
}
|
||||
} else {
|
||||
// We could implement this approach, but it seems better to get all clouds using cluster-readable storage
|
||||
return fmt.Errorf("secrets path is not cluster readable: %v", secretStore.VFSPath())
|
||||
}
|
||||
|
||||
if vfs.IsClusterReadable(keyStore.VFSPath()) {
|
||||
vfsPath := keyStore.VFSPath()
|
||||
c.ClusterConfig.KeyStore = vfsPath.Path()
|
||||
if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
|
||||
if c.ClusterConfig.MasterPermissions == nil {
|
||||
c.ClusterConfig.MasterPermissions = &CloudPermissions{}
|
||||
}
|
||||
c.ClusterConfig.MasterPermissions.AddS3Bucket(s3Path.Bucket())
|
||||
if c.ClusterConfig.NodePermissions == nil {
|
||||
c.ClusterConfig.NodePermissions = &CloudPermissions{}
|
||||
}
|
||||
c.ClusterConfig.NodePermissions.AddS3Bucket(s3Path.Bucket())
|
||||
}
|
||||
} else {
|
||||
// We could implement this approach, but it seems better to get all clouds using cluster-readable storage
|
||||
return fmt.Errorf("keyStore path is not cluster readable: %v", keyStore.VFSPath())
|
||||
}
|
||||
|
||||
if vfs.IsClusterReadable(c.StateStore.VFSPath()) {
|
||||
c.ClusterConfig.ConfigStore = c.StateStore.VFSPath().Path()
|
||||
} else {
|
||||
// We do support this...
|
||||
}
|
||||
|
||||
if c.ClusterConfig.KubernetesVersion == "" {
|
||||
stableURL := "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
|
||||
b, err := vfs.Context.ReadFile(stableURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("--kubernetes-version not specified, and unable to download latest version from %q: %v", stableURL, err)
|
||||
}
|
||||
latestVersion := strings.TrimSpace(string(b))
|
||||
glog.Infof("Using kubernetes latest stable version: %s", latestVersion)
|
||||
|
||||
c.ClusterConfig.KubernetesVersion = latestVersion
|
||||
//return fmt.Errorf("Must either specify a KubernetesVersion (-kubernetes-version) or provide an asset with the release bundle")
|
||||
}
|
||||
|
||||
// Normalize k8s version
|
||||
versionWithoutV := strings.TrimSpace(c.ClusterConfig.KubernetesVersion)
|
||||
if strings.HasPrefix(versionWithoutV, "v") {
|
||||
versionWithoutV = versionWithoutV[1:]
|
||||
}
|
||||
if c.ClusterConfig.KubernetesVersion != versionWithoutV {
|
||||
glog.Warningf("Normalizing kubernetes version: %q -> %q", c.ClusterConfig.KubernetesVersion, versionWithoutV)
|
||||
c.ClusterConfig.KubernetesVersion = versionWithoutV
|
||||
}
|
||||
|
||||
if len(c.Assets) == 0 {
|
||||
//defaultReleaseAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/kubernetes-server-linux-amd64.tar.gz", c.Config.KubernetesVersion)
|
||||
//glog.Infof("Adding default kubernetes release asset: %s", defaultReleaseAsset)
|
||||
|
||||
defaultKubeletAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/bin/linux/amd64/kubelet", c.ClusterConfig.KubernetesVersion)
|
||||
glog.Infof("Adding default kubelet release asset: %s", defaultKubeletAsset)
|
||||
|
||||
defaultKubectlAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/bin/linux/amd64/kubectl", c.ClusterConfig.KubernetesVersion)
|
||||
glog.Infof("Adding default kubelet release asset: %s", defaultKubectlAsset)
|
||||
|
||||
// TODO: Verify assets exist, get the hash (that will check that KubernetesVersion is valid)
|
||||
|
||||
c.Assets = append(c.Assets, defaultKubeletAsset, defaultKubectlAsset)
|
||||
}
|
||||
|
||||
if c.NodeUpSource == "" {
|
||||
location := "https://kubeupv2.s3.amazonaws.com/nodeup/nodeup-1.3.tar.gz"
|
||||
glog.Infof("Using default nodeup location: %q", location)
|
||||
c.NodeUpSource = location
|
||||
}
|
||||
|
||||
var cloud fi.Cloud
|
||||
|
||||
var project string
|
||||
|
||||
checkExisting := true
|
||||
|
||||
//c.NodeUpConfig.Tags = append(c.NodeUpConfig.Tags, "_jessie", "_debian_family", "_systemd")
|
||||
//
|
||||
//if useProtokube {
|
||||
// tags["_protokube"] = struct{}{}
|
||||
// c.NodeUpConfig.Tags = append(c.NodeUpConfig.Tags, "_protokube")
|
||||
//} else {
|
||||
// tags["_not_protokube"] = struct{}{}
|
||||
// c.NodeUpConfig.Tags = append(c.NodeUpConfig.Tags, "_not_protokube")
|
||||
//}
|
||||
|
||||
c.NodeUpTags = append(c.NodeUpTags, "_protokube")
|
||||
|
||||
if useMasterASG {
|
||||
tags["_master_asg"] = struct{}{}
|
||||
} else {
|
||||
tags["_master_single"] = struct{}{}
|
||||
}
|
||||
|
||||
if useMasterLB {
|
||||
tags["_master_lb"] = struct{}{}
|
||||
} else {
|
||||
tags["_not_master_lb"] = struct{}{}
|
||||
}
|
||||
|
||||
if c.ClusterConfig.MasterPublicName != "" {
|
||||
tags["_master_dns"] = struct{}{}
|
||||
}
|
||||
|
||||
l.AddTypes(map[string]interface{}{
|
||||
"keypair": &fitasks.Keypair{},
|
||||
"secret": &fitasks.Secret{},
|
||||
})
|
||||
|
||||
region := ""
|
||||
|
||||
switch c.ClusterConfig.CloudProvider {
|
||||
case "gce":
|
||||
{
|
||||
glog.Fatalf("GCE is (probably) not working currently - please ping @justinsb for cleanup")
|
||||
tags["_gce"] = struct{}{}
|
||||
c.NodeUpTags = append(c.NodeUpTags, "_gce")
|
||||
|
||||
l.AddTypes(map[string]interface{}{
|
||||
"persistentDisk": &gcetasks.PersistentDisk{},
|
||||
"instance": &gcetasks.Instance{},
|
||||
"instanceTemplate": &gcetasks.InstanceTemplate{},
|
||||
"network": &gcetasks.Network{},
|
||||
"managedInstanceGroup": &gcetasks.ManagedInstanceGroup{},
|
||||
"firewallRule": &gcetasks.FirewallRule{},
|
||||
"ipAddress": &gcetasks.IPAddress{},
|
||||
})
|
||||
|
||||
nodeZones := make(map[string]bool)
|
||||
for _, zone := range c.ClusterConfig.Zones {
|
||||
nodeZones[zone.Name] = true
|
||||
|
||||
tokens := strings.Split(zone.Name, "-")
|
||||
if len(tokens) <= 2 {
|
||||
return fmt.Errorf("Invalid GCE Zone: %v", zone.Name)
|
||||
}
|
||||
zoneRegion := tokens[0] + "-" + tokens[1]
|
||||
if region != "" && zoneRegion != region {
|
||||
return fmt.Errorf("Clusters cannot span multiple regions")
|
||||
}
|
||||
|
||||
region = zoneRegion
|
||||
}
|
||||
|
||||
//err := awsup.ValidateRegion(region)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
project = c.ClusterConfig.Project
|
||||
if project == "" {
|
||||
return fmt.Errorf("project is required for GCE")
|
||||
}
|
||||
gceCloud, err := gce.NewGCECloud(region, project)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//var zoneNames []string
|
||||
//for _, z := range c.Config.Zones {
|
||||
// zoneNames = append(zoneNames, z.Name)
|
||||
//}
|
||||
//err = gceCloud.ValidateZones(zoneNames)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
cloud = gceCloud
|
||||
}
|
||||
|
||||
case "aws":
|
||||
{
|
||||
tags["_aws"] = struct{}{}
|
||||
c.NodeUpTags = append(c.NodeUpTags, "_aws")
|
||||
|
||||
l.AddTypes(map[string]interface{}{
|
||||
// EC2
|
||||
"elasticIP": &awstasks.ElasticIP{},
|
||||
"instance": &awstasks.Instance{},
|
||||
"instanceElasticIPAttachment": &awstasks.InstanceElasticIPAttachment{},
|
||||
"instanceVolumeAttachment": &awstasks.InstanceVolumeAttachment{},
|
||||
"ebsVolume": &awstasks.EBSVolume{},
|
||||
"sshKey": &awstasks.SSHKey{},
|
||||
|
||||
// IAM
|
||||
"iamInstanceProfile": &awstasks.IAMInstanceProfile{},
|
||||
"iamInstanceProfileRole": &awstasks.IAMInstanceProfileRole{},
|
||||
"iamRole": &awstasks.IAMRole{},
|
||||
"iamRolePolicy": &awstasks.IAMRolePolicy{},
|
||||
|
||||
// VPC / Networking
|
||||
"dhcpOptions": &awstasks.DHCPOptions{},
|
||||
"internetGateway": &awstasks.InternetGateway{},
|
||||
"route": &awstasks.Route{},
|
||||
"routeTable": &awstasks.RouteTable{},
|
||||
"routeTableAssociation": &awstasks.RouteTableAssociation{},
|
||||
"securityGroup": &awstasks.SecurityGroup{},
|
||||
"securityGroupRule": &awstasks.SecurityGroupRule{},
|
||||
"subnet": &awstasks.Subnet{},
|
||||
"vpc": &awstasks.VPC{},
|
||||
"vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{},
|
||||
|
||||
// ELB
|
||||
"loadBalancer": &awstasks.LoadBalancer{},
|
||||
"loadBalancerAttachment": &awstasks.LoadBalancerAttachment{},
|
||||
"loadBalancerHealthChecks": &awstasks.LoadBalancerHealthChecks{},
|
||||
|
||||
// Autoscaling
|
||||
"autoscalingGroup": &awstasks.AutoscalingGroup{},
|
||||
"launchConfiguration": &awstasks.LaunchConfiguration{},
|
||||
|
||||
// Route53
|
||||
"dnsName": &awstasks.DNSName{},
|
||||
"dnsZone": &awstasks.DNSZone{},
|
||||
})
|
||||
|
||||
nodeZones := make(map[string]bool)
|
||||
for _, zone := range c.ClusterConfig.Zones {
|
||||
if len(zone.Name) <= 2 {
|
||||
return fmt.Errorf("Invalid AWS zone: %q", zone.Name)
|
||||
}
|
||||
|
||||
nodeZones[zone.Name] = true
|
||||
|
||||
zoneRegion := zone.Name[:len(zone.Name)-1]
|
||||
if region != "" && zoneRegion != region {
|
||||
return fmt.Errorf("Clusters cannot span multiple regions")
|
||||
}
|
||||
|
||||
region = zoneRegion
|
||||
}
|
||||
|
||||
err := awsup.ValidateRegion(region)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.SSHPublicKey == "" {
|
||||
return fmt.Errorf("SSH public key must be specified when running with AWS")
|
||||
}
|
||||
|
||||
cloudTags := map[string]string{awsup.TagClusterName: c.ClusterConfig.ClusterName}
|
||||
|
||||
awsCloud, err := awsup.NewAWSCloud(region, cloudTags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var zoneNames []string
|
||||
for _, z := range c.ClusterConfig.Zones {
|
||||
zoneNames = append(zoneNames, z.Name)
|
||||
}
|
||||
err = awsCloud.ValidateZones(zoneNames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cloud = awsCloud
|
||||
|
||||
l.TemplateFunctions["MachineTypeInfo"] = awsup.GetMachineTypeInfo
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown CloudProvider %q", c.ClusterConfig.CloudProvider)
|
||||
}
|
||||
|
||||
l.Tags = tags
|
||||
l.WorkDir = c.OutDir
|
||||
l.ModelStore = c.ModelStore
|
||||
l.NodeModel = c.NodeModel
|
||||
l.OptionsLoader = loader.NewOptionsLoader(c.ClusterConfig)
|
||||
|
||||
l.TemplateFunctions["HasTag"] = func(tag string) bool {
|
||||
_, found := l.Tags[tag]
|
||||
return found
|
||||
}
|
||||
|
||||
l.TemplateFunctions["CA"] = func() fi.CAStore {
|
||||
return keyStore
|
||||
}
|
||||
l.TemplateFunctions["Secrets"] = func() fi.SecretStore {
|
||||
return secretStore
|
||||
}
|
||||
|
||||
l.TemplateFunctions["NodeUpTags"] = func() []string {
|
||||
return c.NodeUpTags
|
||||
}
|
||||
|
||||
// TotalNodeCount computes the total count of nodes
|
||||
l.TemplateFunctions["TotalNodeCount"] = func() (int, error) {
|
||||
count := 0
|
||||
for _, nodeset := range c.NodeSets {
|
||||
if nodeset.MaxSize != nil {
|
||||
count += *nodeset.MaxSize
|
||||
} else if nodeset.MinSize != nil {
|
||||
count += *nodeset.MinSize
|
||||
} else {
|
||||
// Guestimate
|
||||
count += 5
|
||||
}
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
l.TemplateFunctions["Region"] = func() string {
|
||||
return region
|
||||
}
|
||||
l.TemplateFunctions["NodeSets"] = c.populateNodeSets
|
||||
l.TemplateFunctions["Masters"] = c.populateMasters
|
||||
//l.TemplateFunctions["NodeUp"] = c.populateNodeUpConfig
|
||||
l.TemplateFunctions["NodeUpSource"] = func() string { return c.NodeUpSource }
|
||||
l.TemplateFunctions["NodeUpSourceHash"] = func() string { return "" }
|
||||
l.TemplateFunctions["ClusterLocation"] = func() string { return c.StateStore.VFSPath().Join(PathClusterCompleted).Path() }
|
||||
l.TemplateFunctions["Assets"] = func() []string { return c.Assets }
|
||||
|
||||
// TODO: Fix this duplication
|
||||
l.OptionsLoader.TemplateFunctions["HasTag"] = l.TemplateFunctions["HasTag"]
|
||||
l.OptionsLoader.TemplateFunctions["TotalNodeCount"] = l.TemplateFunctions["TotalNodeCount"]
|
||||
l.OptionsLoader.TemplateFunctions["Assets"] = l.TemplateFunctions["Assets"]
|
||||
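These functions are invoked from the cloudup model templates via Go's text/template. The snippet below is a made-up, self-contained illustration of how such calls look; the template text is not a real model file.

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// Hypothetical fragment in the style of a cloudup model template.
	tmpl := `nodeup source: {{ NodeUpSource }}
has _aws tag: {{ HasTag "_aws" }}
assets:{{ range Assets }} {{ . }}{{ end }}
`
	funcs := template.FuncMap{
		"NodeUpSource": func() string { return "https://example.com/nodeup.tar.gz" },
		"HasTag":       func(tag string) bool { return tag == "_aws" },
		"Assets":       func() []string { return []string{"kubelet", "kubectl"} },
	}
	t := template.Must(template.New("example").Funcs(funcs).Parse(tmpl))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```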
|
||||
if c.SSHPublicKey != "" {
|
||||
authorized, err := ioutil.ReadFile(c.SSHPublicKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading SSH key file %q: %v", c.SSHPublicKey, err)
|
||||
}
|
||||
|
||||
l.Resources["ssh-public-key"] = fi.NewStringResource(string(authorized))
|
||||
}
|
||||
|
||||
completed, err := l.BuildCompleteSpec(c.ModelStore, c.Models)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building complete spec: %v", err)
|
||||
}
|
||||
|
||||
taskMap, err := l.BuildTasks(c.ModelStore, c.Models)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building tasks: %v", err)
|
||||
}
|
||||
|
||||
err = c.StateStore.WriteConfig(PathClusterCompleted, completed)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing cluster spec: %v", err)
|
||||
}
|
||||
|
||||
var target fi.Target
|
||||
|
||||
switch c.Target {
|
||||
case "direct":
|
||||
switch c.ClusterConfig.CloudProvider {
|
||||
case "gce":
|
||||
target = gce.NewGCEAPITarget(cloud.(*gce.GCECloud))
|
||||
case "aws":
|
||||
target = awsup.NewAWSAPITarget(cloud.(*awsup.AWSCloud))
|
||||
default:
|
||||
return fmt.Errorf("direct configuration not supported with CloudProvider:%q", c.ClusterConfig.CloudProvider)
|
||||
}
|
||||
|
||||
case "terraform":
|
||||
checkExisting = false
|
||||
outDir := path.Join(c.OutDir, "terraform")
|
||||
target = terraform.NewTerraformTarget(cloud, region, project, outDir)
|
||||
|
||||
case "dryrun":
|
||||
target = fi.NewDryRunTarget(os.Stdout)
|
||||
default:
|
||||
return fmt.Errorf("unsupported target type %q", c.Target)
|
||||
}
|
||||
|
||||
context, err := fi.NewContext(target, cloud, keyStore, secretStore, checkExisting)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building context: %v", err)
|
||||
}
|
||||
defer context.Close()
|
||||
|
||||
err = context.RunTasks(taskMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running tasks: %v", err)
|
||||
}
|
||||
|
||||
err = target.Finish(taskMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error closing target: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// populateNodeSets returns the NodeSets with values populated from defaults or top-level config
|
||||
func (c *CreateClusterCmd) populateNodeSets() ([]*NodeSetConfig, error) {
|
||||
var results []*NodeSetConfig
|
||||
for _, src := range c.NodeSets {
|
||||
n := &NodeSetConfig{}
|
||||
*n = *src
|
||||
|
||||
if n.MachineType == "" {
|
||||
n.MachineType = c.defaultMachineType()
|
||||
}
|
||||
|
||||
if n.Image == "" {
|
||||
n.Image = c.defaultImage()
|
||||
}
|
||||
|
||||
results = append(results, n)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// populateMasters returns the Masters with values populated from defaults or top-level config
|
||||
func (c *CreateClusterCmd) populateMasters() ([]*MasterConfig, error) {
|
||||
cluster := c.ClusterConfig
|
||||
|
||||
var results []*MasterConfig
|
||||
for _, src := range cluster.Masters {
|
||||
m := &MasterConfig{}
|
||||
*m = *src
|
||||
|
||||
if m.MachineType == "" {
|
||||
m.MachineType = c.defaultMachineType()
|
||||
}
|
||||
|
||||
if m.Image == "" {
|
||||
m.Image = c.defaultImage()
|
||||
}
|
||||
|
||||
results = append(results, m)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
//// populateNodeUpConfig returns the NodeUpConfig with values populated from defaults or top-level config
|
||||
//func (c*CreateClusterCmd) populateNodeUpConfig() (*nodeup.NodeConfig, error) {
|
||||
// conf := &nodeup.NodeConfig{}
|
||||
// *conf = *c.NodeUpConfig
|
||||
//
|
||||
// return conf, nil
|
||||
//}
|
||||
|
||||
// defaultMachineType returns the default MachineType, based on the cloudprovider
|
||||
func (c *CreateClusterCmd) defaultMachineType() string {
|
||||
cluster := c.ClusterConfig
|
||||
switch cluster.CloudProvider {
|
||||
case "aws":
|
||||
return DefaultNodeTypeAWS
|
||||
case "gce":
|
||||
return DefaultNodeTypeGCE
|
||||
default:
|
||||
glog.V(2).Infof("Cannot set default MachineType for CloudProvider=%q", cluster.CloudProvider)
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// defaultImage returns the default Image, based on the cloudprovider
|
||||
func (c *CreateClusterCmd) defaultImage() string {
|
||||
// TODO: Use spec
|
||||
cluster := c.ClusterConfig
|
||||
switch cluster.CloudProvider {
|
||||
case "aws":
|
||||
return "282335181503/k8s-1.3-debian-jessie-amd64-hvm-ebs-2016-06-18"
|
||||
default:
|
||||
glog.V(2).Infof("Cannot set default Image for CloudProvider=%q", cluster.CloudProvider)
|
||||
return ""
|
||||
}
|
||||
}
|
|
@@ -9,7 +9,6 @@ import (
|
|||
"io"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/loader"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/nodeup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/utils"
|
||||
"os"
|
||||
"path"
|
||||
|
@@ -37,7 +36,7 @@ type Loader struct {
|
|||
typeMap map[string]reflect.Type
|
||||
|
||||
templates []*template.Template
|
||||
config interface{}
|
||||
config *ClusterConfig
|
||||
|
||||
Resources map[string]fi.Resource
|
||||
//deferred []*deferredBinding
|
||||
|
@@ -104,9 +103,9 @@ func (l *Loader) executeTemplate(key string, d string, args []string) (string, e
|
|||
funcMap["Args"] = func() []string {
|
||||
return args
|
||||
}
|
||||
funcMap["BuildNodeConfig"] = func(target string, configResourceName string, args []string) (string, error) {
|
||||
return l.buildNodeConfig(target, configResourceName, args)
|
||||
}
|
||||
//funcMap["BuildNodeConfig"] = func(target string, configResourceName string, args []string) (string, error) {
|
||||
// return l.buildNodeConfig(target, configResourceName, args)
|
||||
//}
|
||||
funcMap["RenderResource"] = func(resourceName string, args []string) (string, error) {
|
||||
return l.renderResource(resourceName, args)
|
||||
}
|
||||
|
@@ -143,7 +142,7 @@ func ignoreHandler(i *loader.TreeWalkItem) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (l *Loader) Build(modelStore string, models []string) (map[string]fi.Task, error) {
|
||||
func (l *Loader) BuildCompleteSpec(modelStore string, models []string) (*ClusterConfig, error) {
|
||||
// First pass: load options
|
||||
tw := &loader.TreeWalker{
|
||||
DefaultHandler: ignoreHandler,
|
||||
|
@@ -163,15 +162,25 @@ func (l *Loader) Build(modelStore string, models []string) (map[string]fi.Task,
|
|||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
l.config, err = l.OptionsLoader.Build()
|
||||
loaded, err := l.OptionsLoader.Build()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
glog.V(1).Infof("options: %s", fi.DebugAsJsonStringIndent(l.config))
|
||||
l.config = loaded.(*ClusterConfig)
|
||||
|
||||
// Master kubelet config = (base kubelet config + master kubelet config)
|
||||
masterKubelet := &KubeletConfig{}
|
||||
utils.JsonMergeStruct(masterKubelet, l.config.Kubelet)
|
||||
utils.JsonMergeStruct(masterKubelet, l.config.MasterKubelet)
|
||||
l.config.MasterKubelet = masterKubelet
|
||||
|
||||
glog.V(1).Infof("options: %s", fi.DebugAsJsonStringIndent(l.config))
|
||||
return l.config, nil
|
||||
}
|
||||
|
||||
func (l *Loader) BuildTasks(modelStore string, models []string) (map[string]fi.Task, error) {
|
||||
// Second pass: load everything else
|
||||
tw = &loader.TreeWalker{
|
||||
tw := &loader.TreeWalker{
|
||||
DefaultHandler: l.objectHandler,
|
||||
Contexts: map[string]loader.Handler{
|
||||
"resources": l.resourceHandler,
|
||||
|
@@ -184,13 +193,13 @@ func (l *Loader) Build(modelStore string, models []string) (map[string]fi.Task,
|
|||
|
||||
for _, model := range models {
|
||||
modelDir := path.Join(modelStore, model)
|
||||
err = tw.Walk(modelDir)
|
||||
err := tw.Walk(modelDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
err = l.processDeferrals()
|
||||
err := l.processDeferrals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -325,14 +334,16 @@ func (l *Loader) objectHandler(i *loader.TreeWalkItem) error {
|
|||
|
||||
func (l *Loader) loadYamlObjects(key string, data string) (map[string]interface{}, error) {
|
||||
var o map[string]interface{}
|
||||
err := utils.YamlUnmarshal([]byte(data), &o)
|
||||
if err != nil {
|
||||
// TODO: It would be nice if yaml returned us the line number here
|
||||
glog.Infof("error parsing yaml. yaml follows:")
|
||||
for i, line := range strings.Split(string(data), "\n") {
|
||||
fmt.Fprintf(os.Stderr, "%3d: %s\n", i, line)
|
||||
if strings.TrimSpace(data) != "" {
|
||||
err := utils.YamlUnmarshal([]byte(data), &o)
|
||||
if err != nil {
|
||||
// TODO: It would be nice if yaml returned us the line number here
|
||||
glog.Infof("error parsing yaml. yaml follows:")
|
||||
for i, line := range strings.Split(string(data), "\n") {
|
||||
fmt.Fprintf(os.Stderr, "%3d: %s\n", i, line)
|
||||
}
|
||||
return nil, fmt.Errorf("error parsing yaml %q: %v", key, err)
|
||||
}
|
||||
return nil, fmt.Errorf("error parsing yaml %q: %v", key, err)
|
||||
}
|
||||
|
||||
return l.loadObjectMap(key, o)
|
||||
|
@@ -421,36 +432,36 @@ func (l *Loader) populateResource(rh *fi.ResourceHolder, resource fi.Resource, a
|
|||
return nil
|
||||
}
|
||||
|
||||
func (l *Loader) buildNodeConfig(target string, configResourceName string, args []string) (string, error) {
|
||||
assetDir := path.Join(l.WorkDir, "node/assets")
|
||||
|
||||
confData, err := l.renderResource(configResourceName, args)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
config := &nodeup.NodeConfig{}
|
||||
err = utils.YamlUnmarshal([]byte(confData), config)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error parsing configuration %q: %v", configResourceName, err)
|
||||
}
|
||||
|
||||
cmd := &nodeup.NodeUpCommand{
|
||||
Config: config,
|
||||
ConfigLocation: "",
|
||||
ModelDir: path.Join(l.ModelStore, l.NodeModel),
|
||||
Target: target,
|
||||
AssetDir: assetDir,
|
||||
}
|
||||
|
||||
var buff bytes.Buffer
|
||||
err = cmd.Run(&buff)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error building node configuration: %v", err)
|
||||
}
|
||||
|
||||
return buff.String(), nil
|
||||
}
|
||||
//func (l *Loader) buildNodeConfig(target string, configResourceName string, args []string) (string, error) {
|
||||
// assetDir := path.Join(l.WorkDir, "node/assets")
|
||||
//
|
||||
// confData, err := l.renderResource(configResourceName, args)
|
||||
// if err != nil {
|
||||
// return "", err
|
||||
// }
|
||||
//
|
||||
// config := &nodeup.NodeConfig{}
|
||||
// err = utils.YamlUnmarshal([]byte(confData), config)
|
||||
// if err != nil {
|
||||
// return "", fmt.Errorf("error parsing configuration %q: %v", configResourceName, err)
|
||||
// }
|
||||
//
|
||||
// cmd := &nodeup.NodeUpCommand{
|
||||
// Config: config,
|
||||
// ConfigLocation: "",
|
||||
// ModelDir: path.Join(l.ModelStore, l.NodeModel),
|
||||
// Target: target,
|
||||
// AssetDir: assetDir,
|
||||
// }
|
||||
//
|
||||
// var buff bytes.Buffer
|
||||
// err = cmd.Run(&buff)
|
||||
// if err != nil {
|
||||
// return "", fmt.Errorf("error building node configuration: %v", err)
|
||||
// }
|
||||
//
|
||||
// return buff.String(), nil
|
||||
//}
|
||||
|
||||
func (l *Loader) renderResource(resourceName string, args []string) (string, error) {
|
||||
resourceKey := strings.TrimSuffix(resourceName, ".template")
|
||||
|
|
|
@@ -62,36 +62,22 @@ func (l *OptionsLoader) AddTemplate(t *OptionsTemplate) {
|
|||
l.templates = append(l.templates, t)
|
||||
}
|
||||
|
||||
// copyFromStruct merges src into dest
|
||||
// It uses a JSON marshal & unmarshal, so only fields that are JSON-visible will be copied
|
||||
func copyFromStruct(dest, src interface{}) {
|
||||
// Not the most efficient approach, but simple & relatively well defined
|
||||
j, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
glog.Fatalf("error marshalling config: %v", err)
|
||||
}
|
||||
err = json.Unmarshal(j, dest)
|
||||
if err != nil {
|
||||
glog.Fatalf("error unmarshalling config: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// iterate performs a single iteration of all the templates, executing each template in order
|
||||
func (l *OptionsLoader) iterate(inConfig interface{}) (interface{}, error) {
|
||||
func (l *OptionsLoader) iterate(userConfig interface{}, current interface{}) (interface{}, error) {
|
||||
sort.Sort(l.templates)
|
||||
|
||||
t := reflect.TypeOf(inConfig).Elem()
|
||||
t := reflect.TypeOf(current).Elem()
|
||||
|
||||
options := reflect.New(t).Interface()
|
||||
next := reflect.New(t).Interface()
|
||||
|
||||
// Copy the provided values before applying rules; they act as defaults (and overrides below)
|
||||
copyFromStruct(options, inConfig)
|
||||
// Copy the current state before applying rules; they act as defaults
|
||||
utils.JsonMergeStruct(next, current)
|
||||
|
||||
for _, t := range l.templates {
|
||||
glog.V(2).Infof("executing template %s (tags=%s)", t.Name, t.Tags)
|
||||
|
||||
var buffer bytes.Buffer
|
||||
err := t.Template.ExecuteTemplate(&buffer, t.Name, inConfig)
|
||||
err := t.Template.ExecuteTemplate(&buffer, t.Name, current)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error executing template %q: %v", t.Name, err)
|
||||
}
|
||||
|
@@ -108,16 +94,16 @@ func (l *OptionsLoader) iterate(inConfig interface{}) (interface{}, error) {
|
|||
return nil, fmt.Errorf("error parsing yaml %q: %v", t.Name, err)
|
||||
}
|
||||
|
||||
err = json.Unmarshal(jsonBytes, options)
|
||||
err = json.Unmarshal(jsonBytes, next)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing yaml (converted to JSON) %q: %v", t.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Also copy the provided values after applying rules; they act as overrides now
|
||||
copyFromStruct(options, inConfig)
|
||||
// Also copy the user-provided values after applying rules; they act as overrides now
|
||||
utils.JsonMergeStruct(next, userConfig)
|
||||
|
||||
return options, nil
|
||||
return next, nil
|
||||
}
|
||||
|
||||
// Build executes the options configuration templates, until they converge
|
||||
|
@@ -126,7 +112,7 @@ func (l *OptionsLoader) Build() (interface{}, error) {
|
|||
options := l.config
|
||||
iteration := 0
|
||||
for {
|
||||
nextOptions, err := l.iterate(options)
|
||||
nextOptions, err := l.iterate(l.config, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
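Build applies iterate repeatedly until the options stop changing. A self-contained sketch of that fixed-point pattern follows; the data and the iteration cap are made up, not the loader's actual values.

```
package main

import (
	"fmt"
	"reflect"
)

// fixedPoint applies step until the value stops changing, mirroring the
// shape of OptionsLoader.Build; details here are illustrative only.
func fixedPoint(initial map[string]string, step func(map[string]string) map[string]string) map[string]string {
	current := initial
	for i := 0; i < 100; i++ { // illustrative cap
		next := step(current)
		if reflect.DeepEqual(next, current) {
			return next
		}
		current = next
	}
	panic("options did not converge")
}

func main() {
	out := fixedPoint(map[string]string{"MasterPublicName": ""}, func(in map[string]string) map[string]string {
		next := map[string]string{}
		for k, v := range in {
			next[k] = v
		}
		if next["MasterPublicName"] == "" {
			next["MasterPublicName"] = "api.example.com" // a rule filling in a default
		}
		return next
	})
	fmt.Println(out["MasterPublicName"])
}
```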
@@ -5,6 +5,7 @@ import (
|
|||
"github.com/golang/glog"
|
||||
"io"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/nodeup/cloudinit"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/nodeup/local"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/utils"
|
||||
|
@@ -12,37 +13,76 @@ import (
|
|||
)
|
||||
|
||||
type NodeUpCommand struct {
|
||||
Config *NodeConfig
|
||||
config *NodeUpConfig
|
||||
cluster *cloudup.ClusterConfig
|
||||
ConfigLocation string
|
||||
ModelDir string
|
||||
AssetDir string
|
||||
Target string
|
||||
FSRoot string
|
||||
}
|
||||
|
||||
func (c *NodeUpCommand) Run(out io.Writer) error {
|
||||
if c.FSRoot == "" {
|
||||
return fmt.Errorf("FSRoot is required")
|
||||
}
|
||||
|
||||
if c.ConfigLocation != "" {
|
||||
config, err := vfs.Context.ReadFile(c.ConfigLocation)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading configuration %q: %v", c.ConfigLocation, err)
|
||||
}
|
||||
|
||||
err = utils.YamlUnmarshal(config, c.Config)
|
||||
err = utils.YamlUnmarshal(config, &c.config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing configuration %q: %v", c.ConfigLocation, err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("ConfigLocation is required")
|
||||
}
|
||||
|
||||
if c.AssetDir == "" {
|
||||
return fmt.Errorf("AssetDir is required")
|
||||
}
|
||||
assets := fi.NewAssetStore(c.AssetDir)
|
||||
for _, asset := range c.Config.Assets {
|
||||
for _, asset := range c.config.Assets {
|
||||
err := assets.Add(asset)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error adding asset %q: %v", asset, err)
|
||||
}
|
||||
}
|
||||
|
||||
//c.nodeset = &cloudup.NodeSetConfig{}
|
||||
//if c.config.NodeSetLocation != "" {
|
||||
// b, err := vfs.Context.ReadFile(c.config.NodeSetLocation)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error loading NodeSet %q: %v", c.config.NodeSetLocation, err)
|
||||
// }
|
||||
//
|
||||
// err = utils.YamlUnmarshal(b, c.nodeset)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error parsing NodeSet %q: %v", c.config.NodeSetLocation, err)
|
||||
// }
|
||||
//} else {
|
||||
// return fmt.Errorf("NodeSetLocation is required")
|
||||
//}
|
||||
|
||||
c.cluster = &cloudup.ClusterConfig{}
|
||||
if c.config.ClusterLocation != "" {
|
||||
b, err := vfs.Context.ReadFile(c.config.ClusterLocation)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading Cluster %q: %v", c.config.ClusterLocation, err)
|
||||
}
|
||||
|
||||
err = utils.YamlUnmarshal(b, c.cluster)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing Cluster %q: %v", c.config.ClusterLocation, err)
|
||||
}
|
||||
} else {
|
||||
// TODO Infer this from NodeSetLocation?
|
||||
return fmt.Errorf("ClusterLocation is required")
|
||||
}
|
||||
|
||||
//if c.Config.ConfigurationStore != "" {
|
||||
// // TODO: If we ever delete local files, we need to filter so we only copy
|
||||
// // certain directories (i.e. not secrets / keys), because dest is a parent dir!
|
||||
|
@@ -63,12 +103,26 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
|
|||
// c.Config.Tags = append(c.Config.Tags, "_not_config_store")
|
||||
//}
|
||||
|
||||
loader := NewLoader(c.Config, assets)
|
||||
osTags, err := FindOSTags(c.FSRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error determining OS tags: %v", err)
|
||||
}
|
||||
|
||||
err := buildTemplateFunctions(c.Config, loader.TemplateFunctions)
|
||||
tags := make(map[string]struct{})
|
||||
for _, tag := range osTags {
|
||||
tags[tag] = struct{}{}
|
||||
}
|
||||
for _, tag := range c.config.Tags {
|
||||
tags[tag] = struct{}{}
|
||||
}
|
||||
|
||||
loader := NewLoader(c.config, c.cluster, assets, tags)
|
||||
|
||||
tf, err := newTemplateFunctions(c.config, c.cluster, tags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initializing: %v", err)
|
||||
}
|
||||
tf.populate(loader.TemplateFunctions)
|
||||
|
||||
taskMap, err := loader.Build(c.ModelDir)
|
||||
if err != nil {
|
||||
|
|
|
@@ -1,618 +1,52 @@
|
|||
package nodeup
|
||||
|
||||
import (
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
//"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
)
|
||||
|
||||
type NodeUpConfig struct {
|
||||
// Tags enable/disable chunks of the model
|
||||
Tags []string `json:",omitempty"`
|
||||
// Assets are locations where we can find files to be installed
|
||||
// TODO: Remove once everything is in containers?
|
||||
Assets []string `json:",omitempty"`
|
||||
|
||||
// ClusterLocation is the VFS path to the cluster spec
|
||||
ClusterLocation string `json:",omitempty"`
|
||||
}
|
||||
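A sketch of the minimal shape of the new NodeUpConfig: tags, asset locations, and a pointer to the full cluster spec in the state store. All values below are placeholders, not defaults set by this change.

```
package nodeup

import "fmt"

// exampleNodeUpConfig is illustrative only.
func exampleNodeUpConfig() *NodeUpConfig {
	c := &NodeUpConfig{
		Tags:            []string{"_debian_family", "_systemd", "_protokube"},
		Assets:          []string{"https://example.com/kubelet", "https://example.com/kubectl"},
		ClusterLocation: "s3://mybucket/mycluster/cluster.spec",
	}
	fmt.Printf("nodeup will read the cluster spec from %s\n", c.ClusterLocation)
	return c
}
```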
|
||||
// Our client configuration structure
|
||||
// Wherever possible, we try to use the types & names in https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
type NodeConfig struct {
|
||||
Kubelet *KubeletConfig `json:",omitempty"`
|
||||
KubeProxy *KubeProxyConfig `json:",omitempty"`
|
||||
KubeControllerManager *KubeControllerManagerConfig `json:",omitempty"`
|
||||
KubeScheduler *KubeSchedulerConfig `json:",omitempty"`
|
||||
Docker *DockerConfig `json:",omitempty"`
|
||||
APIServer *APIServerConfig `json:",omitempty"`
|
||||
|
||||
DNS *DNSConfig `json:",omitempty"`
|
||||
|
||||
// NodeConfig can directly access a store of secrets, keys or configuration
|
||||
// (for example on S3) and then configure based on that
|
||||
// This supports (limited) dynamic reconfiguration also
|
||||
SecretStore string `json:",omitempty"`
|
||||
KeyStore string `json:",omitempty"`
|
||||
ConfigStore string `json:",omitempty"`
|
||||
|
||||
KubeUser string `json:",omitempty"`
|
||||
|
||||
Tags []string `json:",omitempty"`
|
||||
Assets []string `json:",omitempty"`
|
||||
|
||||
MasterInternalName string `json:",omitempty"`
|
||||
|
||||
// The DNS zone to use if configuring a cloud provided DNS zone
|
||||
DNSZone string `json:",omitempty"`
|
||||
|
||||
// Deprecated in favor of KeyStore / SecretStore
|
||||
Tokens map[string]string `json:",omitempty"`
|
||||
Certificates map[string]*fi.Certificate `json:",omitempty"`
|
||||
PrivateKeys map[string]*fi.PrivateKey `json:",omitempty"`
|
||||
}
|
||||
|
||||
type DNSConfig struct {
|
||||
Replicas int `json:",omitempty"`
|
||||
Domain string `json:",omitempty"`
|
||||
ServerIP string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type KubeletConfig struct {
|
||||
APIServers string `json:",omitempty" flag:"api-servers"`
|
||||
|
||||
LogLevel *int `json:",omitempty" flag:"v"`
|
||||
|
||||
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
// config is the path to the config file or directory of files
|
||||
Config string `json:"config,omitempty" flag:"config"`
|
||||
//// syncFrequency is the max period between synchronizing running
|
||||
//// containers and config
|
||||
//SyncFrequency unversioned.Duration `json:"syncFrequency"`
|
||||
//// fileCheckFrequency is the duration between checking config files for
|
||||
//// new data
|
||||
//FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"`
|
||||
//// httpCheckFrequency is the duration between checking http for new data
|
||||
//HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"`
|
||||
//// manifestURL is the URL for accessing the container manifest
|
||||
//ManifestURL string `json:"manifestURL"`
|
||||
//// manifestURLHeader is the HTTP header to use when accessing the manifest
|
||||
//// URL, with the key separated from the value with a ':', as in 'key:value'
|
||||
//ManifestURLHeader string `json:"manifestURLHeader"`
|
||||
//// enableServer enables the Kubelet's server
|
||||
//EnableServer bool `json:"enableServer"`
|
||||
//// address is the IP address for the Kubelet to serve on (set to 0.0.0.0
|
||||
//// for all interfaces)
|
||||
//Address string `json:"address"`
|
||||
//// port is the port for the Kubelet to serve on.
|
||||
//Port uint `json:"port"`
|
||||
//// readOnlyPort is the read-only port for the Kubelet to serve on with
|
||||
//// no authentication/authorization (set to 0 to disable)
|
||||
//ReadOnlyPort uint `json:"readOnlyPort"`
|
||||
//// tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert,
|
||||
//// if any, concatenated after server cert). If tlsCertFile and
|
||||
//// tlsPrivateKeyFile are not provided, a self-signed certificate
|
||||
//// and key are generated for the public address and saved to the directory
|
||||
//// passed to certDir.
|
||||
//TLSCertFile string `json:"tLSCertFile"`
|
||||
//// tLSPrivateKeyFile is the file containing x509 private key matching
|
||||
//// tlsCertFile.
|
||||
//TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"`
|
||||
//// certDirectory is the directory where the TLS certs are located (by
|
||||
//// default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile
|
||||
//// are provided, this flag will be ignored.
|
||||
//CertDirectory string `json:"certDirectory"`
|
||||
//// hostnameOverride is the hostname used to identify the kubelet instead
|
||||
//// of the actual hostname.
|
||||
//HostnameOverride string `json:"hostnameOverride"`
|
||||
//// podInfraContainerImage is the image whose network/ipc namespaces
|
||||
//// containers in each pod will use.
|
||||
//PodInfraContainerImage string `json:"podInfraContainerImage"`
|
||||
//// dockerEndpoint is the path to the docker endpoint to communicate with.
|
||||
//DockerEndpoint string `json:"dockerEndpoint"`
|
||||
//// rootDirectory is the directory path to place kubelet files (volume
|
||||
//// mounts,etc).
|
||||
//RootDirectory string `json:"rootDirectory"`
|
||||
//// seccompProfileRoot is the directory path for seccomp profiles.
|
||||
//SeccompProfileRoot string `json:"seccompProfileRoot"`
|
||||
// allowPrivileged enables containers to request privileged mode.
|
||||
// Defaults to false.
|
||||
AllowPrivileged *bool `json:"allowPrivileged,omitempty" flag:"allow-privileged"`
|
||||
//// hostNetworkSources is a comma-separated list of sources from which the
|
||||
//// Kubelet allows pods to use the host network. Defaults to "*".
|
||||
//HostNetworkSources string `json:"hostNetworkSources"`
|
||||
//// hostPIDSources is a comma-separated list of sources from which the
|
||||
//// Kubelet allows pods to use the host pid namespace. Defaults to "*".
|
||||
//HostPIDSources string `json:"hostPIDSources"`
|
||||
//// hostIPCSources is a comma-separated list of sources from which the
|
||||
//// Kubelet allows pods to use the host ipc namespace. Defaults to "*".
|
||||
//HostIPCSources string `json:"hostIPCSources"`
|
||||
//// registryPullQPS is the limit of registry pulls per second. If 0,
|
||||
//// unlimited. Set to 0 for no limit. Defaults to 5.0.
|
||||
//RegistryPullQPS float64 `json:"registryPullQPS"`
|
||||
//// registryBurst is the maximum size of bursty pulls; it temporarily allows
|
||||
//// pulls to burst to this number, while still not exceeding registryQps.
|
||||
//// Only used if registryQps > 0.
|
||||
//RegistryBurst int32 `json:"registryBurst"`
|
||||
//// eventRecordQPS is the maximum event creations per second. If 0, there
|
||||
//// is no limit enforced.
|
||||
//EventRecordQPS float32 `json:"eventRecordQPS"`
|
||||
//// eventBurst is the maximum size of bursty event records; it temporarily
|
||||
//// allows event records to burst to this number, while still not exceeding
|
||||
//// event-qps. Only used if eventQps > 0
|
||||
//EventBurst int32 `json:"eventBurst"`
|
||||
// enableDebuggingHandlers enables server endpoints for log collection
|
||||
// and local running of containers and commands
|
||||
EnableDebuggingHandlers *bool `json:"enableDebuggingHandlers,omitempty" flag:"enable-debugging-handlers"`
|
||||
//// minimumGCAge is the minimum age for a finished container before it is
|
||||
//// garbage collected.
|
||||
//MinimumGCAge unversioned.Duration `json:"minimumGCAge"`
|
||||
//// maxPerPodContainerCount is the maximum number of old instances to
|
||||
//// retain per container. Each container takes up some disk space.
|
||||
//MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"`
|
||||
//// maxContainerCount is the maximum number of old instances of containers
|
||||
//// to retain globally. Each container takes up some disk space.
|
||||
//MaxContainerCount int32 `json:"maxContainerCount"`
|
||||
//// cAdvisorPort is the port of the localhost cAdvisor endpoint
|
||||
//CAdvisorPort uint `json:"cAdvisorPort"`
|
||||
//// healthzPort is the port of the localhost healthz endpoint
|
||||
//HealthzPort int32 `json:"healthzPort"`
|
||||
//// healthzBindAddress is the IP address for the healthz server to serve
|
||||
//// on.
|
||||
//HealthzBindAddress string `json:"healthzBindAddress"`
|
||||
//// oomScoreAdj is The oom-score-adj value for kubelet process. Values
|
||||
//// must be within the range [-1000, 1000].
|
||||
//OOMScoreAdj int32 `json:"oomScoreAdj"`
|
||||
//// registerNode enables automatic registration with the apiserver.
|
||||
//RegisterNode bool `json:"registerNode"`
|
||||
// clusterDomain is the DNS domain for this cluster. If set, kubelet will
|
||||
// configure all containers to search this domain in addition to the
|
||||
// host's search domains.
|
||||
ClusterDomain string `json:"clusterDomain,omitempty" flag:"cluster-domain"`
|
||||
//// masterServiceNamespace is The namespace from which the kubernetes
|
||||
//// master services should be injected into pods.
|
||||
//MasterServiceNamespace string `json:"masterServiceNamespace"`
|
||||
// clusterDNS is the IP address for a cluster DNS server. If set, kubelet
|
||||
// will configure all containers to use this for DNS resolution in
|
||||
// addition to the host's DNS servers
|
||||
ClusterDNS string `json:"clusterDNS,omitempty" flag:"cluster-dns"`
|
||||
//// streamingConnectionIdleTimeout is the maximum time a streaming connection
|
||||
//// can be idle before the connection is automatically closed.
|
||||
//StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"`
|
||||
//// nodeStatusUpdateFrequency is the frequency that kubelet posts node
|
||||
//// status to master. Note: be cautious when changing the constant, it
|
||||
//// must work with nodeMonitorGracePeriod in nodecontroller.
|
||||
//NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"`
|
||||
//// imageMinimumGCAge is the minimum age for an unused image before it is
|
||||
//// garbage collected.
|
||||
//ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"`
|
||||
//// imageGCHighThresholdPercent is the percent of disk usage after which
|
||||
//// image garbage collection is always run.
|
||||
//ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"`
|
||||
//// imageGCLowThresholdPercent is the percent of disk usage before which
|
||||
//// image garbage collection is never run. Lowest disk usage to garbage
|
||||
//// collect to.
|
||||
//ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"`
|
||||
//// lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to
|
||||
//// maintain. When disk space falls below this threshold, new pods would
|
||||
//// be rejected.
|
||||
//LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"`
|
||||
//// How frequently to calculate and cache volume disk usage for all pods
|
||||
//VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"`
|
||||
//// networkPluginName is the name of the network plugin to be invoked for
|
||||
//// various events in kubelet/pod lifecycle
|
||||
//NetworkPluginName string `json:"networkPluginName"`
|
||||
//// networkPluginDir is the full path of the directory in which to search
|
||||
//// for network plugins
|
||||
//NetworkPluginDir string `json:"networkPluginDir"`
|
||||
//// volumePluginDir is the full path of the directory in which to search
|
||||
//// for additional third party volume plugins
|
||||
//VolumePluginDir string `json:"volumePluginDir"`
|
||||
// cloudProvider is the provider for cloud services.
|
||||
CloudProvider string `json:"cloudProvider,omitempty" flag:"cloud-provider"`
|
||||
//// cloudConfigFile is the path to the cloud provider configuration file.
|
||||
//CloudConfigFile string `json:"cloudConfigFile,omitempty"`
|
||||
// KubeletCgroups is the absolute name of cgroups to isolate the kubelet in.
|
||||
KubeletCgroups string `json:"kubeletCgroups,omitempty" flag:"kubelet-cgroups"`
|
||||
// Cgroups that container runtime is expected to be isolated in.
|
||||
RuntimeCgroups string `json:"runtimeCgroups,omitempty" flag:"runtime-cgroups"`
|
||||
// SystemCgroups is the absolute name of cgroups in which to place
|
||||
// all non-kernel processes that are not already in a container. Empty
|
||||
// for no container. Rolling back the flag requires a reboot.
|
||||
SystemCgroups string `json:"systemContainer,omitempty" flag:"system-cgroups"`
|
||||
// cgroupRoot is the root cgroup to use for pods. This is handled by the
|
||||
// container runtime on a best effort basis.
|
||||
CgroupRoot string `json:"cgroupRoot,omitempty" flag:"cgroup-root"`
|
||||
//// containerRuntime is the container runtime to use.
|
||||
//ContainerRuntime string `json:"containerRuntime"`
|
||||
//// rktPath is the path of rkt binary. Leave empty to use the first rkt in
|
||||
//// $PATH.
|
||||
//RktPath string `json:"rktPath,omitempty"`
|
||||
//// rktApiEndpoint is the endpoint of the rkt API service to communicate with.
|
||||
//RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"`
|
||||
//// rktStage1Image is the image to use as stage1. Local paths and
|
||||
//// http/https URLs are supported.
|
||||
//RktStage1Image string `json:"rktStage1Image,omitempty"`
|
||||
//// lockFilePath is the path that kubelet will use to as a lock file.
|
||||
//// It uses this file as a lock to synchronize with other kubelet processes
|
||||
//// that may be running.
|
||||
//LockFilePath string `json:"lockFilePath"`
|
||||
//// ExitOnLockContention is a flag that signifies to the kubelet that it is running
|
||||
//// in "bootstrap" mode. This requires that 'LockFilePath' has been set.
|
||||
//// This will cause the kubelet to listen to inotify events on the lock file,
|
||||
//// releasing it and exiting when another process tries to open that file.
|
||||
//ExitOnLockContention bool `json:"exitOnLockContention"`
|
||||
// configureCBR0 enables the kubelet to configure cbr0 based on
|
||||
// Node.Spec.PodCIDR.
|
||||
ConfigureCBR0 *bool `json:"configureCbr0,omitempty" flag:"configure-cbr0"`
|
||||
// How should the kubelet configure the container bridge for hairpin packets.
|
||||
// Setting this flag allows endpoints in a Service to loadbalance back to
|
||||
// themselves if they should try to access their own Service. Values:
|
||||
// "promiscuous-bridge": make the container bridge promiscuous.
|
||||
// "hairpin-veth": set the hairpin flag on container veth interfaces.
|
||||
// "none": do nothing.
|
||||
// Setting --configure-cbr0 to false implies that to achieve hairpin NAT
|
||||
// one must set --hairpin-mode=veth-flag, because bridge assumes the
|
||||
// existence of a container bridge named cbr0.
|
||||
HairpinMode string `json:"hairpinMode,omitempty" flag:"hairpin-mode"`
|
||||
// The node has a babysitter process monitoring docker and kubelet.
|
||||
BabysitDaemons *bool `json:"babysitDaemons,omitempty" flag:"babysit-daemons"`
|
||||
//// maxPods is the number of pods that can run on this Kubelet.
|
||||
//MaxPods int32 `json:"maxPods"`
|
||||
//// nvidiaGPUs is the number of NVIDIA GPU devices on this node.
|
||||
//NvidiaGPUs int32 `json:"nvidiaGPUs"`
|
||||
//// dockerExecHandlerName is the handler to use when executing a command
|
||||
//// in a container. Valid values are 'native' and 'nsenter'. Defaults to
|
||||
//// 'native'.
|
||||
//DockerExecHandlerName string `json:"dockerExecHandlerName"`
|
||||
// The CIDR to use for pod IP addresses, only used in standalone mode.
|
||||
// In cluster mode, this is obtained from the master.
|
||||
PodCIDR string `json:"podCIDR,omitempty" flag:"pod-cidr"`
|
||||
//// ResolverConfig is the resolver configuration file used as the basis
|
||||
//// for the container DNS resolution configuration."), []
|
||||
//ResolverConfig string `json:"resolvConf"`
|
||||
//// cpuCFSQuota is Enable CPU CFS quota enforcement for containers that
|
||||
//// specify CPU limits
|
||||
//CPUCFSQuota bool `json:"cpuCFSQuota"`
|
||||
//// containerized should be set to true if kubelet is running in a container.
|
||||
//Containerized bool `json:"containerized"`
|
||||
//// maxOpenFiles is Number of files that can be opened by Kubelet process.
|
||||
//MaxOpenFiles uint64 `json:"maxOpenFiles"`
|
||||
// reconcileCIDR reconciles the node CIDR with the CIDR specified by the
|
||||
// API server. No-op if register-node or configure-cbr0 is false.
|
||||
ReconcileCIDR *bool `json:"reconcileCIDR,omitempty" flag:"reconcile-cidr"`
|
||||
// registerSchedulable tells the kubelet to register the node as
|
||||
// schedulable. No-op if register-node is false.
|
||||
RegisterSchedulable *bool `json:"registerSchedulable,omitempty" flag:"register-schedulable"`
|
||||
//// contentType is contentType of requests sent to apiserver.
|
||||
//ContentType string `json:"contentType"`
|
||||
//// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver
|
||||
//KubeAPIQPS float32 `json:"kubeAPIQPS"`
|
||||
//// kubeAPIBurst is the burst to allow while talking with kubernetes
|
||||
//// apiserver
|
||||
//KubeAPIBurst int32 `json:"kubeAPIBurst"`
|
||||
//// serializeImagePulls when enabled, tells the Kubelet to pull images one
|
||||
//// at a time. We recommend *not* changing the default value on nodes that
|
||||
//// run docker daemon with version < 1.9 or an Aufs storage backend.
|
||||
//// Issue #10959 has more details.
|
||||
//SerializeImagePulls bool `json:"serializeImagePulls"`
|
||||
//// experimentalFlannelOverlay enables experimental support for starting the
|
||||
//// kubelet with the default overlay network (flannel). Assumes flanneld
|
||||
//// is already running in client mode.
|
||||
//ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
|
||||
//// outOfDiskTransitionFrequency is duration for which the kubelet has to
|
||||
//// wait before transitioning out of out-of-disk node condition status.
|
||||
//OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"`
|
||||
//// nodeIP is IP address of the node. If set, kubelet will use this IP
|
||||
//// address for the node.
|
||||
//NodeIP string `json:"nodeIP,omitempty"`
|
||||
//// nodeLabels to add when registering the node in the cluster.
|
||||
//NodeLabels map[string]string `json:"nodeLabels"`
|
||||
// nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.
|
||||
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty" flag:"non-masquerade-cidr"`
|
||||
//// enable gathering custom metrics.
|
||||
//EnableCustomMetrics bool `json:"enableCustomMetrics"`
|
||||
//// Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'.
|
||||
//EvictionHard string `json:"evictionHard,omitempty"`
|
||||
//// Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'.
|
||||
//EvictionSoft string `json:"evictionSoft,omitempty"`
|
||||
//// Comma-delimited list of grace periods for each soft eviction signal. For example, 'memory.available=30s'.
|
||||
//EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"`
|
||||
//// Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
|
||||
//EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"`
|
||||
//// Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
|
||||
//EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"`
|
||||
//// Maximum number of pods per core. Cannot exceed MaxPods
|
||||
//PodsPerCore int32 `json:"podsPerCore"`
|
||||
//// enableControllerAttachDetach enables the Attach/Detach controller to
|
||||
//// manage attachment/detachment of volumes scheduled to this node, and
|
||||
//// disables kubelet from executing any attach/detach operations
|
||||
//EnableControllerAttachDetach bool `json:"enableControllerAttachDetach"`
|
||||
|
||||
}
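The `flag:"..."` struct tags above are what nodeup's templates render into kubelet command-line arguments via the `BuildFlags` helper registered further down in this change. A minimal sketch of that idea, assuming flags are built by reflecting over the tags (hypothetical helper, not the project's actual implementation):

```
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// buildFlagsSketch renders fields that carry a `flag` tag as --name=value
// arguments, skipping zero values and nil pointers. It only handles the
// comparable scalar types used in this example.
func buildFlagsSketch(options interface{}) string {
	var args []string
	v := reflect.ValueOf(options)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		name := t.Field(i).Tag.Get("flag")
		if name == "" {
			continue
		}
		fv := v.Field(i)
		if fv.Kind() == reflect.Ptr {
			if fv.IsNil() {
				continue
			}
			fv = fv.Elem()
		}
		if fv.Interface() == reflect.Zero(fv.Type()).Interface() {
			continue
		}
		args = append(args, fmt.Sprintf("--%s=%v", name, fv.Interface()))
	}
	return strings.Join(args, " ")
}

func main() {
	allow := true
	kubelet := struct {
		APIServers      string `flag:"api-servers"`
		AllowPrivileged *bool  `flag:"allow-privileged"`
	}{APIServers: "https://127.0.0.1", AllowPrivileged: &allow}

	fmt.Println(buildFlagsSketch(kubelet))
	// Output: --api-servers=https://127.0.0.1 --allow-privileged=true
}
```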
|
||||
|
||||
type KubeProxyConfig struct {
|
||||
Image string `json:",omitempty"`
|
||||
// TODO: Better type ?
|
||||
CPURequest string `json:",omitempty"` // e.g. "20m"
|
||||
|
||||
LogLevel int `json:",omitempty" flag:"v"`
|
||||
|
||||
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
//// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
|
||||
//// for all interfaces)
|
||||
//BindAddress string `json:"bindAddress"`
|
||||
//// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
|
||||
//// bridge traffic coming from outside of the cluster. If not provided,
|
||||
//// no off-cluster bridging will be performed.
|
||||
//ClusterCIDR string `json:"clusterCIDR"`
|
||||
//// healthzBindAddress is the IP address for the health check server to serve on,
|
||||
//// defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
|
||||
//HealthzBindAddress string `json:"healthzBindAddress"`
|
||||
//// healthzPort is the port to bind the health check server. Use 0 to disable.
|
||||
//HealthzPort int32 `json:"healthzPort"`
|
||||
//// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
|
||||
//HostnameOverride string `json:"hostnameOverride"`
|
||||
//// iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
|
||||
//// the pure iptables proxy mode. Values must be within the range [0, 31].
|
||||
//IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"`
|
||||
//// iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
|
||||
//// '2h22m'). Must be greater than 0.
|
||||
//IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"`
|
||||
//// kubeconfigPath is the path to the kubeconfig file with authorization information (the
|
||||
//// master location is set by the master flag).
|
||||
//KubeconfigPath string `json:"kubeconfigPath"`
|
||||
//// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
|
||||
//MasqueradeAll bool `json:"masqueradeAll"`
|
||||
// master is the address of the Kubernetes API server (overrides any value in kubeconfig)
|
||||
Master string `json:"master,omitempty" flag:"master"`
|
||||
//// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
|
||||
//// the range [-1000, 1000]
|
||||
//OOMScoreAdj *int32 `json:"oomScoreAdj"`
|
||||
//// mode specifies which proxy mode to use.
|
||||
//Mode ProxyMode `json:"mode"`
|
||||
//// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
|
||||
//// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
|
||||
//PortRange string `json:"portRange"`
|
||||
//// resourceContainer is the absolute name of the resource-only container to create and run
|
||||
//// the Kube-proxy in (Default: /kube-proxy).
|
||||
//ResourceContainer string `json:"resourceContainer"`
|
||||
//// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
|
||||
//// Must be greater than 0. Only applicable for proxyMode=userspace.
|
||||
//UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
|
||||
//// conntrackMax is the maximum number of NAT connections to track (0 to leave as-is)")
|
||||
//ConntrackMax int32 `json:"conntrackMax"`
|
||||
//// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
|
||||
//// (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxyMode is Userspace
|
||||
//ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
|
||||
}
|
||||
|
||||
type DockerConfig struct {
|
||||
Bridge string `json:",omitempty" flag:"bridge"`
|
||||
LogLevel string `json:",omitempty" flag:"log-level"`
|
||||
IPTables bool `json:",omitempty" flag:"iptables"`
|
||||
IPMasq bool `json:",omitempty" flag:"ip-masq"`
|
||||
Storage string `json:",omitempty" flag:"s"`
|
||||
}
|
||||
|
||||
type APIServerConfig struct {
|
||||
PathSrvKubernetes string `json:",omitempty"`
|
||||
PathSrvSshproxy string `json:",omitempty"`
|
||||
Image string `json:",omitempty"`
|
||||
|
||||
LogLevel int `json:",omitempty" flag:"v"`
|
||||
|
||||
CloudProvider string `json:",omitempty" flag:"cloud-provider"`
|
||||
SecurePort int `json:",omitempty" flag:"secure-port"`
|
||||
Address string `json:",omitempty" flag:"address"`
|
||||
EtcdServers string `json:",omitempty" flag:"etcd-servers"`
|
||||
EtcdServersOverrides string `json:",omitempty" flag:"etcd-servers-overrides"`
|
||||
// TODO: []string and join with commas?
|
||||
AdmissionControl string `json:",omitempty" flag:"admission-control"`
|
||||
ServiceClusterIPRange string `json:",omitempty" flag:"service-cluster-ip-range"`
|
||||
ClientCAFile string `json:",omitempty" flag:"client-ca-file"`
|
||||
BasicAuthFile string `json:",omitempty" flag:"basic-auth-file"`
|
||||
TLSCertFile string `json:",omitempty" flag:"tls-cert-file"`
|
||||
TLSPrivateKeyFile string `json:",omitempty" flag:"tls-private-key-file"`
|
||||
TokenAuthFile string `json:",omitempty" flag:"token-auth-file"`
|
||||
AllowPrivileged *bool `json:",omitempty" flag:"allow-privileged"`
|
||||
}
|
||||
|
||||
type KubeControllerManagerConfig struct {
|
||||
Master string `json:",omitempty" flag:"master"`
|
||||
LogLevel int `json:",omitempty" flag:"v"`
|
||||
|
||||
ServiceAccountPrivateKeyFile string `json:",omitempty" flag:"service-account-private-key-file"`
|
||||
|
||||
Image string `json:",omitempty"`
|
||||
|
||||
PathSrvKubernetes string `json:",omitempty"`
|
||||
|
||||
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
//// port is the port that the controller-manager's http service runs on.
|
||||
//Port int32 `json:"port"`
|
||||
//// address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
|
||||
//Address string `json:"address"`
|
||||
// cloudProvider is the provider for cloud services.
|
||||
CloudProvider string `json:"cloudProvider,omitempty" flag:"cloud-provider"`
|
||||
//// cloudConfigFile is the path to the cloud provider configuration file.
|
||||
//CloudConfigFile string `json:"cloudConfigFile"`
|
||||
//// concurrentEndpointSyncs is the number of endpoint syncing operations
|
||||
//// that will be done concurrently. Larger number = faster endpoint updating,
|
||||
//// but more CPU (and network) load.
|
||||
//ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"`
|
||||
//// concurrentRSSyncs is the number of replica sets that are allowed to sync
|
||||
//// concurrently. Larger number = more responsive replica management, but more
|
||||
//// CPU (and network) load.
|
||||
//ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"`
|
||||
//// concurrentRCSyncs is the number of replication controllers that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive replica
|
||||
//// management, but more CPU (and network) load.
|
||||
//ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"`
|
||||
//// concurrentResourceQuotaSyncs is the number of resource quotas that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive quota
|
||||
//// management, but more CPU (and network) load.
|
||||
//ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"`
|
||||
//// concurrentDeploymentSyncs is the number of deployment objects that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive deployments,
|
||||
//// but more CPU (and network) load.
|
||||
//ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"`
|
||||
//// concurrentDaemonSetSyncs is the number of daemonset objects that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive daemonset,
|
||||
//// but more CPU (and network) load.
|
||||
//ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"`
|
||||
//// concurrentJobSyncs is the number of job objects that are
|
||||
//// allowed to sync concurrently. Larger number = more responsive jobs,
|
||||
//// but more CPU (and network) load.
|
||||
//ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"`
|
||||
//// concurrentNamespaceSyncs is the number of namespace objects that are
|
||||
//// allowed to sync concurrently.
|
||||
//ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"`
|
||||
//// lookupCacheSizeForRC is the size of lookup cache for replication controllers.
|
||||
//// Larger number = more responsive replica management, but more MEM load.
|
||||
//LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"`
|
||||
//// lookupCacheSizeForRS is the size of lookup cache for replica sets.
|
||||
//// Larger number = more responsive replica management, but more MEM load.
|
||||
//LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"`
|
||||
//// lookupCacheSizeForDaemonSet is the size of lookup cache for daemonsets.
|
||||
//// Larger number = more responsive daemonset, but more MEM load.
|
||||
//LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"`
|
||||
//// serviceSyncPeriod is the period for syncing services with their external
|
||||
//// load balancers.
|
||||
//ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"`
|
||||
//// nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
|
||||
//// periods will result in fewer calls to cloud provider, but may delay addition
|
||||
//// of new nodes to cluster.
|
||||
//NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"`
|
||||
//// resourceQuotaSyncPeriod is the period for syncing quota usage status
|
||||
//// in the system.
|
||||
//ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"`
|
||||
//// namespaceSyncPeriod is the period for syncing namespace life-cycle
|
||||
//// updates.
|
||||
//NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"`
|
||||
//// pvClaimBinderSyncPeriod is the period for syncing persistent volumes
|
||||
//// and persistent volume claims.
|
||||
//PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"`
|
||||
//// minResyncPeriod is the resync period in reflectors; will be random between
|
||||
//// minResyncPeriod and 2*minResyncPeriod.
|
||||
//MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"`
|
||||
//// terminatedPodGCThreshold is the number of terminated pods that can exist
|
||||
//// before the terminated pod garbage collector starts deleting terminated pods.
|
||||
//// If <= 0, the terminated pod garbage collector is disabled.
|
||||
//TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"`
|
||||
//// horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
|
||||
//// pods in horizontal pod autoscaler.
|
||||
//HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"`
|
||||
//// deploymentControllerSyncPeriod is the period for syncing the deployments.
|
||||
//DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"`
|
||||
//// podEvictionTimeout is the grace period for deleting pods on failed nodes.
|
||||
//PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"`
|
||||
//// deletingPodsQps is the number of nodes per second on which pods are deleted in
|
||||
//// case of node failure.
|
||||
//DeletingPodsQps float32 `json:"deletingPodsQps"`
|
||||
//// deletingPodsBurst is the number of nodes on which pods are bursty deleted in
|
||||
//// case of node failure. For more details look into RateLimiter.
|
||||
//DeletingPodsBurst int32 `json:"deletingPodsBurst"`
|
||||
//// nodeMonitorGracePeriod is the amount of time which we allow a running node to be
|
||||
//// unresponsive before marking it unhealthy. Must be N times more than kubelet's
|
||||
//// nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
|
||||
//// to post node status.
|
||||
//NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"`
|
||||
//// registerRetryCount is the number of retries for initial node registration.
|
||||
//// Retry interval equals node-sync-period.
|
||||
//RegisterRetryCount int32 `json:"registerRetryCount"`
|
||||
//// nodeStartupGracePeriod is the amount of time which we allow starting a node to
|
||||
//// be unresponsive before marking it unhealthy.
|
||||
//NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"`
|
||||
//// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
|
||||
//NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"`
|
||||
//// serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
|
||||
//// used to sign service account tokens.
|
||||
//ServiceAccountKeyFile string `json:"serviceAccountKeyFile"`
|
||||
//// enableProfiling enables profiling via web interface host:port/debug/pprof/
|
||||
//EnableProfiling bool `json:"enableProfiling"`
|
||||
// clusterName is the instance prefix for the cluster.
|
||||
ClusterName string `json:"clusterName,omitempty" flag:"cluster-name"`
|
||||
// clusterCIDR is CIDR Range for Pods in cluster.
|
||||
ClusterCIDR string `json:"clusterCIDR,omitempty" flag:"cluster-cidr"`
|
||||
//// serviceCIDR is CIDR Range for Services in cluster.
|
||||
//ServiceCIDR string `json:"serviceCIDR"`
|
||||
//// NodeCIDRMaskSize is the mask size for node cidr in cluster.
|
||||
//NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"`
|
||||
// allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
|
||||
// ConfigureCloudRoutes is true, to be set on the cloud provider.
|
||||
AllocateNodeCIDRs *bool `json:"allocateNodeCIDRs,omitempty" flag:"allocate-node-cidrs"`
|
||||
// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
|
||||
// to be configured on the cloud provider.
|
||||
ConfigureCloudRoutes *bool `json:"configureCloudRoutes,omitempty" flag:"configure-cloud-routes"`
|
||||
// rootCAFile is the root certificate authority that will be included in the service
|
||||
// account's token secret. This must be a valid PEM-encoded CA bundle.
|
||||
RootCAFile string `json:"rootCAFile,omitempty" flag:"root-ca-file"`
|
||||
//// contentType is contentType of requests sent to apiserver.
|
||||
//ContentType string `json:"contentType"`
|
||||
//// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
|
||||
//KubeAPIQPS float32 `json:"kubeAPIQPS"`
|
||||
//// kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
|
||||
//KubeAPIBurst int32 `json:"kubeAPIBurst"`
|
||||
// leaderElection defines the configuration of leader election client.
|
||||
LeaderElection *LeaderElectionConfiguration `json:"leaderElection,omitempty"`
|
||||
//// volumeConfiguration holds configuration for volume related features.
|
||||
//VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"`
|
||||
//// How long to wait between starting controller managers
|
||||
//ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"`
|
||||
//// enables the generic garbage collector. MUST be synced with the
|
||||
//// corresponding flag of the kube-apiserver. WARNING: the generic garbage
|
||||
//// collector is an alpha feature.
|
||||
//EnableGarbageCollector bool `json:"enableGarbageCollector"`
|
||||
}
|
||||
|
||||
type KubeSchedulerConfig struct {
|
||||
Image string `json:",omitempty"`
|
||||
|
||||
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
|
||||
|
||||
//// port is the port that the scheduler's http service runs on.
|
||||
//Port int32 `json:"port"`
|
||||
//// address is the IP address to serve on.
|
||||
//Address string `json:"address"`
|
||||
//// algorithmProvider is the scheduling algorithm provider to use.
|
||||
//AlgorithmProvider string `json:"algorithmProvider"`
|
||||
//// policyConfigFile is the filepath to the scheduler policy configuration.
|
||||
//PolicyConfigFile string `json:"policyConfigFile"`
|
||||
//// enableProfiling enables profiling via web interface.
|
||||
//EnableProfiling bool `json:"enableProfiling"`
|
||||
//// contentType is contentType of requests sent to apiserver.
|
||||
//ContentType string `json:"contentType"`
|
||||
//// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
|
||||
//KubeAPIQPS float32 `json:"kubeAPIQPS"`
|
||||
//// kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.
|
||||
//KubeAPIBurst int32 `json:"kubeAPIBurst"`
|
||||
//// schedulerName is name of the scheduler, used to select which pods
|
||||
//// will be processed by this scheduler, based on pod's annotation with
|
||||
//// key 'scheduler.alpha.kubernetes.io/name'.
|
||||
//SchedulerName string `json:"schedulerName"`
|
||||
//// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
|
||||
//// corresponding to every RequiredDuringScheduling affinity rule.
|
||||
//// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
|
||||
//HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
|
||||
//// Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
|
||||
//FailureDomains string `json:"failureDomains"`
|
||||
// leaderElection defines the configuration of leader election client.
|
||||
LeaderElection *LeaderElectionConfiguration `json:"leaderElection,omitempty"`
|
||||
}
|
||||
|
||||
// LeaderElectionConfiguration defines the configuration of leader election
|
||||
// clients for components that can run with leader election enabled.
|
||||
type LeaderElectionConfiguration struct {
|
||||
// leaderElect enables a leader election client to gain leadership
|
||||
// before executing the main loop. Enable this when running replicated
|
||||
// components for high availability.
|
||||
LeaderElect *bool `json:"leaderElect,omitempty" flag:"leader-elect"`
|
||||
//// leaseDuration is the duration that non-leader candidates will wait
|
||||
//// after observing a leadership renewal until attempting to acquire
|
||||
//// leadership of a led but unrenewed leader slot. This is effectively the
|
||||
//// maximum duration that a leader can be stopped before it is replaced
|
||||
//// by another candidate. This is only applicable if leader election is
|
||||
//// enabled.
|
||||
//LeaseDuration unversioned.Duration `json:"leaseDuration"`
|
||||
//// renewDeadline is the interval between attempts by the acting master to
|
||||
//// renew a leadership slot before it stops leading. This must be less
|
||||
//// than or equal to the lease duration. This is only applicable if leader
|
||||
//// election is enabled.
|
||||
//RenewDeadline unversioned.Duration `json:"renewDeadline"`
|
||||
//// retryPeriod is the duration the clients should wait between attempting
|
||||
//// acquisition and renewal of a leadership. This is only applicable if
|
||||
//// leader election is enabled.
|
||||
//RetryPeriod unversioned.Duration `json:"retryPeriod"`
|
||||
}
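For illustration, a hypothetical snippet showing how these shared structs compose when leader election is enabled on the controller manager (the values are placeholders, not defaults):

```
package main

import "fmt"

// Trimmed mirrors of the structs above, limited to the fields used here.
type LeaderElectionConfiguration struct {
	LeaderElect *bool `json:"leaderElect,omitempty" flag:"leader-elect"`
}

type KubeControllerManagerConfig struct {
	Master         string                       `json:",omitempty" flag:"master"`
	ClusterName    string                       `json:"clusterName,omitempty" flag:"cluster-name"`
	LeaderElection *LeaderElectionConfiguration `json:"leaderElection,omitempty"`
}

func main() {
	leaderElect := true
	c := &KubeControllerManagerConfig{
		Master:      "127.0.0.1:8080",
		ClusterName: "testcluster.mydomain.com",
		LeaderElection: &LeaderElectionConfiguration{
			LeaderElect: &leaderElect,
		},
	}
	fmt.Println("leader-elect:", *c.LeaderElection.LeaderElect)
}
```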
|
||||
//type NodeConfig struct {
|
||||
// Kubelet *KubeletConfig `json:",omitempty"`
|
||||
// KubeProxy *KubeProxyConfig `json:",omitempty"`
|
||||
// KubeControllerManager *KubeControllerManagerConfig `json:",omitempty"`
|
||||
// KubeScheduler *KubeSchedulerConfig `json:",omitempty"`
|
||||
// Docker *DockerConfig `json:",omitempty"`
|
||||
// APIServer *APIServerConfig `json:",omitempty"`
|
||||
//
|
||||
// DNS *DNSConfig `json:",omitempty"`
|
||||
//
|
||||
// // NodeConfig can directly access a store of secrets, keys or configuration
|
||||
// // (for example on S3) and then configure based on that
|
||||
// // This supports (limited) dynamic reconfiguration also
|
||||
// SecretStore string `json:",omitempty"`
|
||||
// KeyStore string `json:",omitempty"`
|
||||
// ConfigStore string `json:",omitempty"`
|
||||
//
|
||||
// KubeUser string `json:",omitempty"`
|
||||
//
|
||||
// Tags []string `json:",omitempty"`
|
||||
// Assets []string `json:",omitempty"`
|
||||
//
|
||||
// MasterInternalName string `json:",omitempty"`
|
||||
//
|
||||
// // The DNS zone to use if configuring a cloud provided DNS zone
|
||||
// DNSZone string `json:",omitempty"`
|
||||
//
|
||||
// // Deprecated in favor of KeyStore / SecretStore
|
||||
// Tokens map[string]string `json:",omitempty"`
|
||||
// Certificates map[string]*fi.Certificate `json:",omitempty"`
|
||||
// PrivateKeys map[string]*fi.PrivateKey `json:",omitempty"`
|
||||
//}
|
||||
|
|
|
@ -2,11 +2,11 @@ package nodeup
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/loader"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/nodeup/nodetasks"
|
||||
"os"
|
||||
|
@ -15,25 +15,25 @@ import (
|
|||
)
|
||||
|
||||
type Loader struct {
|
||||
templates []*template.Template
|
||||
optionsLoader *loader.OptionsLoader
|
||||
config *NodeConfig
|
||||
templates []*template.Template
|
||||
config *NodeUpConfig
|
||||
cluster *cloudup.ClusterConfig
|
||||
|
||||
assets *fi.AssetStore
|
||||
tasks map[string]fi.Task
|
||||
|
||||
tags map[string]struct{}
|
||||
|
||||
tags map[string]struct{}
|
||||
TemplateFunctions template.FuncMap
|
||||
}
|
||||
|
||||
func NewLoader(config *NodeConfig, assets *fi.AssetStore) *Loader {
|
||||
func NewLoader(config *NodeUpConfig, cluster *cloudup.ClusterConfig, assets *fi.AssetStore, tags map[string]struct{}) *Loader {
|
||||
l := &Loader{}
|
||||
l.assets = assets
|
||||
l.tasks = make(map[string]fi.Task)
|
||||
l.optionsLoader = loader.NewOptionsLoader(config)
|
||||
l.config = config
|
||||
l.cluster = cluster
|
||||
l.TemplateFunctions = make(template.FuncMap)
|
||||
l.tags = tags
|
||||
|
||||
return l
|
||||
}
|
||||
|
@ -42,20 +42,12 @@ func (l *Loader) executeTemplate(key string, d string) (string, error) {
|
|||
t := template.New(key)
|
||||
|
||||
funcMap := make(template.FuncMap)
|
||||
funcMap["BuildFlags"] = buildFlags
|
||||
funcMap["Base64Encode"] = func(s string) string {
|
||||
return base64.StdEncoding.EncodeToString([]byte(s))
|
||||
}
|
||||
funcMap["HasTag"] = func(tag string) bool {
|
||||
_, found := l.tags[tag]
|
||||
return found
|
||||
}
|
||||
for k, fn := range l.TemplateFunctions {
|
||||
funcMap[k] = fn
|
||||
}
|
||||
t.Funcs(funcMap)
|
||||
|
||||
context := l.config
|
||||
context := l.cluster
|
||||
|
||||
_, err := t.Parse(d)
|
||||
if err != nil {
|
||||
|
@ -78,25 +70,17 @@ func ignoreHandler(i *loader.TreeWalkItem) error {
|
|||
}
|
||||
|
||||
func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) {
|
||||
tags := make(map[string]struct{})
|
||||
for _, tag := range l.config.Tags {
|
||||
tags[tag] = struct{}{}
|
||||
}
|
||||
|
||||
l.tags = tags
|
||||
|
||||
// First pass: load options
|
||||
tw := &loader.TreeWalker{
|
||||
DefaultHandler: ignoreHandler,
|
||||
Contexts: map[string]loader.Handler{
|
||||
"options": l.optionsLoader.HandleOptions,
|
||||
"files": ignoreHandler,
|
||||
"disks": ignoreHandler,
|
||||
"packages": ignoreHandler,
|
||||
"services": ignoreHandler,
|
||||
"users": ignoreHandler,
|
||||
},
|
||||
Tags: tags,
|
||||
Tags: l.tags,
|
||||
}
|
||||
|
||||
err := tw.Walk(baseDir)
|
||||
|
@ -104,25 +88,17 @@ func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
config, err := l.optionsLoader.Build()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.config = config.(*NodeConfig)
|
||||
glog.V(4).Infof("options: %s", fi.DebugAsJsonStringIndent(l.config))
|
||||
|
||||
// Second pass: load everything else
|
||||
tw = &loader.TreeWalker{
|
||||
DefaultHandler: l.handleFile,
|
||||
Contexts: map[string]loader.Handler{
|
||||
"options": ignoreHandler,
|
||||
"files": l.handleFile,
|
||||
"disks": l.newTaskHandler("disk/", nodetasks.NewMountDiskTask),
|
||||
"packages": l.newTaskHandler("package/", nodetasks.NewPackage),
|
||||
"services": l.newTaskHandler("service/", nodetasks.NewService),
|
||||
"users": l.newTaskHandler("user/", nodetasks.NewUserTask),
|
||||
},
|
||||
Tags: tags,
|
||||
Tags: l.tags,
|
||||
}
|
||||
|
||||
err = tw.Walk(baseDir)
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
package nodeup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/golang/glog"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FindOSTags infers tags from the current distro
|
||||
// We will likely remove this when everything is containerized
|
||||
func FindOSTags(rootfs string) ([]string, error) {
|
||||
debianVersionBytes, err := ioutil.ReadFile(path.Join(rootfs, "etc/debian_version"))
|
||||
if err == nil {
|
||||
debianVersion := strings.TrimSpace(string(debianVersionBytes))
|
||||
if strings.HasPrefix(debianVersion, "8.") {
|
||||
return []string{"_jessie", "_debian_family", "_systemd"}, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("unhandled debian version %q", debianVersion)
|
||||
}
|
||||
} else if !os.IsNotExist(err) {
|
||||
glog.Infof("error reading /etc/debian_version: %v", err)
|
||||
}
|
||||
return nil, fmt.Errorf("cannot identify distro")
|
||||
}
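A hedged sketch of how a caller might fold the inferred OS tags into the tag map that NewLoader now accepts; the caller and the import path are assumptions, not part of this commit:

```
package main

import (
	"github.com/golang/glog"

	"k8s.io/kube-deploy/upup/pkg/fi/nodeup"
)

func main() {
	osTags, err := nodeup.FindOSTags("/")
	if err != nil {
		glog.Fatalf("error inferring OS tags: %v", err)
	}

	tags := make(map[string]struct{})
	for _, t := range osTags {
		tags[t] = struct{}{}
	}
	// On Debian 8 this yields "_jessie", "_debian_family" and "_systemd";
	// the map can then be handed to nodeup.NewLoader(config, cluster, assets, tags).
	glog.Infof("inferred tags: %v", osTags)
}
```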
|
||||
|
||||
//// FindCloudTags infers tags from the cloud environment
|
||||
//func FindCloudTags(rootfs string) ([]string, error) {
|
||||
// productVersionBytes, err := ioutil.ReadFile(path.Join(rootfs, "sys/class/dmi/id/product_version"))
|
||||
// if err == nil {
|
||||
// productVersion := strings.TrimSpace(string(productVersionBytes))
|
||||
// switch productVersion {
|
||||
// case "amazon":
|
||||
// return findCloudTagsAWS(rootfs)
|
||||
// default:
|
||||
// glog.V(2).Infof("Unknown /sys/class/dmi/id/product_version %q", productVersion)
|
||||
// }
|
||||
// } else if !os.IsNotExist(err) {
|
||||
// glog.Infof("error reading /sys/class/dmi/id/product_version: %v", err)
|
||||
// }
|
||||
// return nil, fmt.Errorf("cannot identify cloud")
|
||||
//}
|
||||
//
|
||||
//type awsIAMInfo struct {
|
||||
// Code string
|
||||
// LastUpdated string
|
||||
// InstanceProfileArn string
|
||||
// InstanceProfileId string
|
||||
//}
|
||||
//
|
||||
//// findAWSCloudTags infers cloud tags once we have determined we are on AWS
|
||||
//func findCloudTagsAWS(rootfs string) ([]string, error) {
|
||||
// tags := []string{"_aws"}
|
||||
//
|
||||
// // We can't get the tags, annoyingly
|
||||
//
|
||||
// iamInfoBytes, err := vfs.Context.ReadFile("http://169.254.169.254/2016-04-19/meta-data/iam/info")
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("error querying for iam info: %v", err)
|
||||
// }
|
||||
//
|
||||
// iamInfo := &awsIAMInfo{}
|
||||
// if err := json.Unmarshal(iamInfoBytes, iamInfo); err != nil {
|
||||
// glog.Infof("Invalid IAM info: %q", string(iamInfoBytes))
|
||||
// return nil, fmt.Errorf("error decoding iam info: %v", err)
|
||||
// }
|
||||
//
|
||||
// arn := iamInfo.InstanceProfileArn
|
||||
// if strings.HasSuffix(arn, "-masters") {
|
||||
// tags = append(tags, "_master")
|
||||
// } else if strings.HasSuffix(arn, "-nodes") {
|
||||
// tags = append(tags, "_master")
|
||||
// } else {
|
||||
// return nil, fmt.Errorf("unexpected IAM role name %q", arn)
|
||||
// }
|
||||
//
|
||||
// return tags, nil
|
||||
//}
|
||||
//
|
||||
//
|
|
@ -1,55 +1,74 @@
|
|||
package nodeup
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi"
|
||||
"text/template"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kube-deploy/upup/pkg/fi/vfs"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
const TagMaster = "_kubernetes_master"
|
||||
|
||||
// templateFunctions is a simple helper-class for the functions accessible to templates
|
||||
type templateFunctions struct {
|
||||
config *NodeConfig
|
||||
nodeupConfig *NodeUpConfig
|
||||
cluster *cloudup.ClusterConfig
|
||||
// keyStore is populated with a KeyStore, if KeyStore is set
|
||||
keyStore fi.CAStore
|
||||
// secretStore is populated with a SecretStore, if SecretStore is set
|
||||
secretStore fi.SecretStore
|
||||
|
||||
tags map[string]struct{}
|
||||
}
|
||||
|
||||
func buildTemplateFunctions(config *NodeConfig, dest template.FuncMap) error {
|
||||
// newTemplateFunctions is the constructor for templateFunctions
|
||||
func newTemplateFunctions(nodeupConfig *NodeUpConfig, cluster *cloudup.ClusterConfig, tags map[string]struct{}) (*templateFunctions, error) {
|
||||
t := &templateFunctions{
|
||||
config: config,
|
||||
nodeupConfig: nodeupConfig,
|
||||
cluster: cluster,
|
||||
tags: tags,
|
||||
}
|
||||
|
||||
if config.SecretStore != "" {
|
||||
glog.Infof("Building SecretStore at %q", config.SecretStore)
|
||||
p, err := vfs.Context.BuildVfsPath(config.SecretStore)
|
||||
if cluster.SecretStore != "" {
|
||||
glog.Infof("Building SecretStore at %q", cluster.SecretStore)
|
||||
p, err := vfs.Context.BuildVfsPath(cluster.SecretStore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building secret store path: %v", err)
|
||||
return nil, fmt.Errorf("error building secret store path: %v", err)
|
||||
}
|
||||
|
||||
secretStore, err := fi.NewVFSSecretStore(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building secret store: %v", err)
|
||||
return nil, fmt.Errorf("error building secret store: %v", err)
|
||||
}
|
||||
|
||||
t.secretStore = secretStore
|
||||
} else {
|
||||
return nil, fmt.Errorf("SecretStore not set")
|
||||
}
|
||||
|
||||
if config.KeyStore != "" {
|
||||
glog.Infof("Building KeyStore at %q", config.KeyStore)
|
||||
p, err := vfs.Context.BuildVfsPath(config.KeyStore)
|
||||
if cluster.KeyStore != "" {
|
||||
glog.Infof("Building KeyStore at %q", cluster.KeyStore)
|
||||
p, err := vfs.Context.BuildVfsPath(cluster.KeyStore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building key store path: %v", err)
|
||||
return nil, fmt.Errorf("error building key store path: %v", err)
|
||||
}
|
||||
|
||||
keyStore, err := fi.NewVFSCAStore(p, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building key store: %v", err)
|
||||
return nil, fmt.Errorf("error building key store: %v", err)
|
||||
}
|
||||
t.keyStore = keyStore
|
||||
} else {
|
||||
return nil, fmt.Errorf("KeyStore not set")
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (t *templateFunctions) populate(dest template.FuncMap) {
|
||||
dest["CACertificatePool"] = t.CACertificatePool
|
||||
dest["CACertificate"] = t.CACertificate
|
||||
dest["PrivateKey"] = t.PrivateKey
|
||||
|
@ -57,18 +76,58 @@ func buildTemplateFunctions(config *NodeConfig, dest template.FuncMap) error {
|
|||
dest["AllTokens"] = t.AllTokens
|
||||
dest["GetToken"] = t.GetToken
|
||||
|
||||
return nil
|
||||
dest["BuildFlags"] = buildFlags
|
||||
dest["Base64Encode"] = func(s string) string {
|
||||
return base64.StdEncoding.EncodeToString([]byte(s))
|
||||
}
|
||||
dest["HasTag"] = t.HasTag
|
||||
dest["IsMaster"] = t.IsMaster
|
||||
|
||||
// TODO: We may want to move these to a nodeset / masterset specific thing
|
||||
dest["KubeDNS"] = func() *cloudup.KubeDNSConfig {
|
||||
return t.cluster.KubeDNS
|
||||
}
|
||||
dest["KubeScheduler"] = func() *cloudup.KubeSchedulerConfig {
|
||||
return t.cluster.KubeScheduler
|
||||
}
|
||||
dest["APIServer"] = func() *cloudup.APIServerConfig {
|
||||
return t.cluster.APIServer
|
||||
}
|
||||
dest["KubeControllerManager"] = func() *cloudup.KubeControllerManagerConfig {
|
||||
return t.cluster.KubeControllerManager
|
||||
}
|
||||
dest["KubeProxy"] = func() *cloudup.KubeProxyConfig {
|
||||
return t.cluster.KubeProxy
|
||||
}
|
||||
dest["Kubelet"] = func() *cloudup.KubeletConfig {
|
||||
if t.IsMaster() {
|
||||
return t.cluster.MasterKubelet
|
||||
} else {
|
||||
return t.cluster.Kubelet
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IsMaster returns true if we are tagged as a master
|
||||
func (t *templateFunctions) IsMaster() bool {
|
||||
return t.HasTag(TagMaster)
|
||||
}
|
||||
|
||||
// HasTag returns true if we are tagged with the specified tag
|
||||
func (t *templateFunctions) HasTag(tag string) bool {
|
||||
_, found := t.tags[tag]
|
||||
return found
|
||||
}
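These registered functions are what the nodeup templates call at render time. A minimal, self-contained sketch of the same wiring, with an illustrative fragment and tag names (real templates also use BuildFlags, Kubelet, CACertificate, and friends):

```
package main

import (
	"os"
	"text/template"
)

func main() {
	tags := map[string]struct{}{"_kubernetes_master": {}, "_systemd": {}}

	funcMap := template.FuncMap{
		"HasTag": func(tag string) bool {
			_, found := tags[tag]
			return found
		},
		"IsMaster": func() bool {
			_, found := tags["_kubernetes_master"]
			return found
		},
	}

	// Funcs must be registered before Parse so the template can resolve them.
	const fragment = `{{ if IsMaster }}master{{ else }}node{{ end }} systemd={{ HasTag "_systemd" }}
`
	t := template.Must(template.New("example").Funcs(funcMap).Parse(fragment))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Output: master systemd=true
}
```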
|
||||
|
||||
// CACertificatePool returns the set of valid CA certificates for the cluster
|
||||
func (c *templateFunctions) CACertificatePool() (*fi.CertificatePool, error) {
|
||||
if c.keyStore != nil {
|
||||
return c.keyStore.CertificatePool(fi.CertificateId_CA)
|
||||
func (t *templateFunctions) CACertificatePool() (*fi.CertificatePool, error) {
|
||||
if t.keyStore != nil {
|
||||
return t.keyStore.CertificatePool(fi.CertificateId_CA)
|
||||
}
|
||||
|
||||
// Fallback to direct properties
|
||||
glog.Infof("Falling back to direct configuration for keystore")
|
||||
cert, err := c.CACertificate()
|
||||
cert, err := t.CACertificate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -81,86 +140,45 @@ func (c *templateFunctions) CACertificatePool() (*fi.CertificatePool, error) {
|
|||
}
|
||||
|
||||
// CACertificate returns the primary CA certificate for the cluster
|
||||
func (c *templateFunctions) CACertificate() (*fi.Certificate, error) {
|
||||
if c.keyStore != nil {
|
||||
return c.keyStore.Cert(fi.CertificateId_CA)
|
||||
}
|
||||
|
||||
// Fallback to direct properties
|
||||
return c.Certificate(fi.CertificateId_CA)
|
||||
func (t *templateFunctions) CACertificate() (*fi.Certificate, error) {
|
||||
return t.keyStore.Cert(fi.CertificateId_CA)
|
||||
}
|
||||
|
||||
// PrivateKey returns the specified private key
|
||||
func (c *templateFunctions) PrivateKey(id string) (*fi.PrivateKey, error) {
|
||||
if c.keyStore != nil {
|
||||
return c.keyStore.PrivateKey(id)
|
||||
}
|
||||
|
||||
// Fallback to direct properties
|
||||
glog.Infof("Falling back to direct configuration for keystore")
|
||||
k := c.config.PrivateKeys[id]
|
||||
if k == nil {
|
||||
return nil, fmt.Errorf("private key not found: %q (with fallback)", id)
|
||||
}
|
||||
return k, nil
|
||||
func (t *templateFunctions) PrivateKey(id string) (*fi.PrivateKey, error) {
|
||||
return t.keyStore.PrivateKey(id)
|
||||
}
|
||||
|
||||
// Certificate returns the specified certificate
|
||||
func (c *templateFunctions) Certificate(id string) (*fi.Certificate, error) {
|
||||
if c.keyStore != nil {
|
||||
return c.keyStore.Cert(id)
|
||||
}
|
||||
|
||||
// Fallback to direct properties
|
||||
glog.Infof("Falling back to direct configuration for keystore")
|
||||
cert := c.config.Certificates[id]
|
||||
if cert == nil {
|
||||
return nil, fmt.Errorf("certificate not found: %q (with fallback)", id)
|
||||
}
|
||||
return cert, nil
|
||||
func (t *templateFunctions) Certificate(id string) (*fi.Certificate, error) {
|
||||
return t.keyStore.Cert(id)
|
||||
}
|
||||
|
||||
// AllTokens returns a map of all tokens
|
||||
func (n *templateFunctions) AllTokens() (map[string]string, error) {
|
||||
if n.secretStore != nil {
|
||||
tokens := make(map[string]string)
|
||||
ids, err := n.secretStore.ListSecrets()
|
||||
func (t *templateFunctions) AllTokens() (map[string]string, error) {
|
||||
tokens := make(map[string]string)
|
||||
ids, err := t.secretStore.ListSecrets()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, id := range ids {
|
||||
token, err := t.secretStore.FindSecret(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, id := range ids {
|
||||
token, err := n.secretStore.FindSecret(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tokens[id] = string(token.Data)
|
||||
}
|
||||
return tokens, nil
|
||||
tokens[id] = string(token.Data)
|
||||
}
|
||||
|
||||
// Fallback to direct configuration
|
||||
glog.Infof("Falling back to direct configuration for secrets")
|
||||
return n.config.Tokens, nil
|
||||
return tokens, nil
|
||||
}
|
||||
|
||||
// GetToken returns the specified token
|
||||
func (n *templateFunctions) GetToken(key string) (string, error) {
|
||||
if n.secretStore != nil {
|
||||
token, err := n.secretStore.FindSecret(key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if token == nil {
|
||||
return "", fmt.Errorf("token not found: %q", key)
|
||||
}
|
||||
return string(token.Data), nil
|
||||
func (t *templateFunctions) GetToken(key string) (string, error) {
|
||||
token, err := t.secretStore.FindSecret(key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Fallback to direct configuration
|
||||
glog.Infof("Falling back to direct configuration for secrets")
|
||||
token := n.config.Tokens[key]
|
||||
if token == "" {
|
||||
if token == nil {
|
||||
return "", fmt.Errorf("token not found: %q", key)
|
||||
}
|
||||
return token, nil
|
||||
return string(token.Data), nil
|
||||
}
|
||||
|
|
|
@ -15,8 +15,12 @@ type StateStore interface {
|
|||
CA() CAStore
|
||||
Secrets() SecretStore
|
||||
|
||||
ReadConfig(config interface{}) error
|
||||
WriteConfig(config interface{}) error
|
||||
ReadConfig(path string, config interface{}) error
|
||||
WriteConfig(path string, config interface{}) error
|
||||
|
||||
// ListChildren returns a list of all (direct) children of the specified path
|
||||
// It only returns the raw names, not the prefixes
|
||||
ListChildren(pathPrefix string) ([]string, error)
|
||||
}
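A sketch of the new path-based contract, round-tripping a cluster config through a VFS-backed state store; the "config" key, the dryrun argument, and the surrounding wiring are assumptions for illustration:

```
package main

import (
	"github.com/golang/glog"

	"k8s.io/kube-deploy/upup/pkg/fi"
	"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
	"k8s.io/kube-deploy/upup/pkg/fi/vfs"
)

func main() {
	dryrun := false
	stateStore, err := fi.NewVFSStateStore(vfs.NewFSPath("test-state"), dryrun)
	if err != nil {
		glog.Fatalf("error building state store: %v", err)
	}

	cluster := &cloudup.ClusterConfig{ClusterName: "testcluster.mydomain.com"}
	if err := stateStore.WriteConfig("config", cluster); err != nil {
		glog.Fatalf("error writing cluster config: %v", err)
	}

	read := &cloudup.ClusterConfig{}
	if err := stateStore.ReadConfig("config", read); err != nil {
		glog.Fatalf("error reading cluster config: %v", err)
	}
	glog.Infof("round-tripped cluster %q", read.ClusterName)
}
```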
|
||||
|
||||
type VFSStateStore struct {
|
||||
|
@ -56,8 +60,25 @@ func (s *VFSStateStore) Secrets() SecretStore {
|
|||
return s.secrets
|
||||
}
|
||||
|
||||
func (s *VFSStateStore) ReadConfig(config interface{}) error {
|
||||
configPath := s.location.Join("config")
|
||||
func (s *VFSStateStore) ListChildren(pathPrefix string) ([]string, error) {
|
||||
vfsPath := s.location.Join(pathPrefix)
|
||||
children, err := vfsPath.ReadDir()
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("error listing children of %s: %v", pathPrefix, err)
|
||||
}
|
||||
|
||||
var names []string
|
||||
for _, child := range children {
|
||||
names = append(names, child.Base())
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (s *VFSStateStore) ReadConfig(path string, config interface{}) error {
|
||||
configPath := s.location.Join(path)
|
||||
data, err := configPath.ReadFile()
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
|
@ -80,8 +101,8 @@ func (s *VFSStateStore) ReadConfig(config interface{}) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *VFSStateStore) WriteConfig(config interface{}) error {
|
||||
configPath := s.location.Join("config")
|
||||
func (s *VFSStateStore) WriteConfig(path string, config interface{}) error {
|
||||
configPath := s.location.Join(path)
|
||||
|
||||
data, err := utils.YamlMarshal(config)
|
||||
if err != nil {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/golang/glog"
|
||||
|
@ -9,6 +10,20 @@ import (
|
|||
|
||||
var SkipReflection = errors.New("skip this value")
|
||||
|
||||
// JsonMergeStruct merges src into dest
|
||||
// It uses a JSON marshal & unmarshal, so only fields that are JSON-visible will be copied
|
||||
func JsonMergeStruct(dest, src interface{}) {
|
||||
// Not the most efficient approach, but simple & relatively well defined
|
||||
j, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
glog.Fatalf("error marshalling config: %v", err)
|
||||
}
|
||||
err = json.Unmarshal(j, dest)
|
||||
if err != nil {
|
||||
glog.Fatalf("error unmarshalling config: %v", err)
|
||||
}
|
||||
}
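A small usage sketch (the settings struct and import path are hypothetical): because the merge round-trips through JSON, only exported, JSON-visible fields are copied, and fields omitted from src's JSON leave dest untouched:

```
package main

import (
	"fmt"

	"k8s.io/kube-deploy/upup/pkg/fi/utils"
)

type settings struct {
	LogLevel      int    `json:"logLevel,omitempty"`
	CloudProvider string `json:"cloudProvider,omitempty"`
	scratch       string // unexported: invisible to JSON, never copied
}

func main() {
	dest := &settings{LogLevel: 2, scratch: "kept"}
	src := &settings{CloudProvider: "aws"}

	utils.JsonMergeStruct(dest, src)

	fmt.Println(dest.LogLevel, dest.CloudProvider, dest.scratch)
	// 2 aws kept: LogLevel survives because src omitted it; scratch is untouched.
}
```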
|
||||
|
||||
// InvokeMethod calls the specified method by reflection
|
||||
func InvokeMethod(target interface{}, name string, args ...interface{}) ([]reflect.Value, error) {
|
||||
v := reflect.ValueOf(target)
|
||||
|
|
|
@@ -1,21 +1,20 @@
package vfs

import (
"strings"
"io/ioutil"
"net/url"
"fmt"
"net/http"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/golang/glog"
"io/ioutil"
"net/http"
"net/url"
"strings"
)

// VFSContext is a 'context' for VFS, that is normally a singleton
// but allows us to configure S3 credentials, for example
type VFSContext struct {
}

var Context VFSContext

@@ -24,7 +23,7 @@ var Context VFSContext
// It supports additional schemes which don't (yet) have full VFS implementations:
// metadata: reads from instance metadata on GCE/AWS
// http / https: reads from HTTP
func (c*VFSContext) ReadFile(location string) ([]byte, error) {
func (c *VFSContext) ReadFile(location string) ([]byte, error) {
if strings.Contains(location, "://") {
// Handle our special case schemas
u, err := url.Parse(location)

@@ -61,7 +60,7 @@ func (c*VFSContext) ReadFile(location string) ([]byte, error) {
return p.ReadFile()
}

func (c*VFSContext) BuildVfsPath(p string) (Path, error) {
func (c *VFSContext) BuildVfsPath(p string) (Path, error) {
if !strings.Contains(p, "://") {
return NewFSPath(p), nil
}

@@ -73,7 +72,7 @@ func (c*VFSContext) BuildVfsPath(p string) (Path, error) {
return nil, fmt.Errorf("unknown / unhandled path type: %q", p)
}

func (c*VFSContext) readHttpLocation(httpURL string, httpHeaders map[string]string) ([]byte, error) {
func (c *VFSContext) readHttpLocation(httpURL string, httpHeaders map[string]string) ([]byte, error) {
req, err := http.NewRequest("GET", httpURL, nil)
if err != nil {
return nil, err

@@ -95,7 +94,7 @@ func (c*VFSContext) readHttpLocation(httpURL string, httpHeaders map[string]stri
return body, nil
}

func (c*VFSContext) buildS3Path(p string) (*S3Path, error) {
func (c *VFSContext) buildS3Path(p string) (*S3Path, error) {
u, err := url.Parse(p)
if err != nil {
return nil, fmt.Errorf("invalid s3 path: %q", err)

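A short usage sketch for the VFSContext methods touched above (a fragment: it assumes the caller imports this vfs package plus fmt and glog, and the bucket/key below are placeholders):

```
// The singleton Context dispatches on the location scheme: paths without "://"
// are treated as local filesystem paths, while s3://, http(s):// and metadata:
// locations are handled by their own readers.
data, err := vfs.Context.ReadFile("s3://example-bucket/cluster.example.com/config")
if err != nil {
	return fmt.Errorf("error reading cluster config: %v", err)
}

// BuildVfsPath returns a Path supporting Join and ReadFile, which is what the
// VFSStateStore earlier in this commit builds on.
base, err := vfs.Context.BuildVfsPath("s3://example-bucket/cluster.example.com")
if err != nil {
	return fmt.Errorf("error building path: %v", err)
}
glog.Infof("read %d bytes; state base is %v", len(data), base)
```
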
@@ -36,8 +36,9 @@ type ResourceTracker struct {
Type string
ID string

blocks []string
done bool
blocks []string
blocked []string
done bool

deleter func(cloud fi.Cloud, tracker *ResourceTracker) error
}

@@ -147,6 +148,10 @@ func (c *DeleteCluster) DeleteResources(resources map[string]*ResourceTracker) e
depMap[block] = append(depMap[block], k)
}

for _, blocked := range t.blocked {
depMap[k] = append(depMap[k], blocked)
}

if t.done {
done[k] = t
}

@@ -700,9 +705,16 @@ func ListRouteTables(cloud fi.Cloud, clusterName string) ([]*ResourceTracker, er
}

var blocks []string
var blocked []string

blocks = append(blocks, "vpc:"+aws.StringValue(rt.VpcId))

for _, a := range rt.Associations {
blocked = append(blocked, "subnet:"+aws.StringValue(a.SubnetId))
}

tracker.blocks = blocks
tracker.blocked = blocked

trackers = append(trackers, tracker)
}

@@ -970,7 +982,7 @@ func ListAutoScalingGroups(cloud fi.Cloud, clusterName string) ([]*ResourceTrack
}
blocks = append(blocks, "subnet:"+subnet)
}
blocks = append(blocks, "autoscaling-launchconfiguration:"+aws.StringValue(asg.LaunchConfigurationName))
blocks = append(blocks, "launchconfig:"+aws.StringValue(asg.LaunchConfigurationName))

tracker.blocks = blocks

@@ -1014,12 +1026,12 @@ func ListAutoScalingLaunchConfigurations(cloud fi.Cloud, clusterName string) ([]
tracker := &ResourceTracker{
Name: aws.StringValue(t.LaunchConfigurationName),
ID: aws.StringValue(t.LaunchConfigurationName),
Type: "autoscaling-launchconfiguration",
Type: "launchconfig",
deleter: DeleteAutoscalingLaunchConfiguration,
}

var blocks []string
//blocks = append(blocks, "autoscaling-launchconfiguration:" + aws.StringValue(asg.LaunchConfigurationName))
//blocks = append(blocks, "launchconfig:" + aws.StringValue(asg.LaunchConfigurationName))

tracker.blocks = blocks

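The new blocked field is the inverse of blocks: blocks names resources that cannot be deleted until this one is gone, while blocked names resources that must be deleted first. A self-contained sketch of the dependency map this feeds; the depMap handling mirrors the DeleteResources hunk above, everything else (types, keys, IDs) is illustrative:

```
package main

import "fmt"

// tracker is a pared-down stand-in for ResourceTracker, keeping only the
// dependency fields this commit touches.
type tracker struct {
	blocks  []string // keys that cannot be deleted until this resource is gone
	blocked []string // keys that must be deleted before this resource
}

// buildDepMap mirrors the bookkeeping in DeleteResources above:
// depMap[key] ends up listing everything that must be deleted before key.
func buildDepMap(resources map[string]*tracker) map[string][]string {
	depMap := make(map[string][]string)
	for k, t := range resources {
		for _, block := range t.blocks {
			depMap[block] = append(depMap[block], k)
		}
		for _, blocked := range t.blocked {
			depMap[k] = append(depMap[k], blocked)
		}
	}
	return depMap
}

func main() {
	// Keys use the same "type:id" convention as the trackers above; the IDs are made up.
	resources := map[string]*tracker{
		"route-table:rtb-1": {
			blocks:  []string{"vpc:vpc-1"},       // the VPC waits for this route table
			blocked: []string{"subnet:subnet-1"}, // the subnet association must go first
		},
		"subnet:subnet-1": {blocks: []string{"vpc:vpc-1"}},
		"vpc:vpc-1":       {},
	}

	depMap := buildDepMap(resources)
	fmt.Println(depMap["vpc:vpc-1"])         // waits on the route table and the subnet
	fmt.Println(depMap["route-table:rtb-1"]) // waits on the subnet
}
```
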
@@ -29,34 +29,37 @@ func (x *ExportCluster) ReverseAWS() error {
return fmt.Errorf("ClusterName must be specified")
}

k8s := &cloudup.CloudConfig{}
k8s.CloudProvider = "aws"
k8s.ClusterName = clusterName
clusterConfig := &cloudup.ClusterConfig{}
clusterConfig.CloudProvider = "aws"
clusterConfig.ClusterName = clusterName

masterConfig := &cloudup.MasterConfig{}
clusterConfig.Masters = append(clusterConfig.Masters, masterConfig)

instances, err := findInstances(awsCloud)
if err != nil {
return fmt.Errorf("error finding instances: %v", err)
}

var master *ec2.Instance
var masterInstance *ec2.Instance
for _, instance := range instances {
role, _ := awsup.FindEC2Tag(instance.Tags, "Role")
if role == clusterName+"-master" {
if master != nil {
if masterInstance != nil {
return fmt.Errorf("found multiple masters")
}
master = instance
masterInstance = instance
}
}
if master == nil {
if masterInstance == nil {
return fmt.Errorf("could not find master node")
}
masterInstanceID := aws.StringValue(master.InstanceId)
masterInstanceID := aws.StringValue(masterInstance.InstanceId)
glog.Infof("Found master: %q", masterInstanceID)

k8s.MasterMachineType = aws.StringValue(master.InstanceType)
masterConfig.MachineType = aws.StringValue(masterInstance.InstanceType)

masterSubnetID := aws.StringValue(master.SubnetId)
masterSubnetID := aws.StringValue(masterInstance.SubnetId)

subnets, err := DescribeSubnets(x.Cloud)
if err != nil {

@@ -75,17 +78,19 @@ func (x *ExportCluster) ReverseAWS() error {
return fmt.Errorf("cannot find subnet %q", masterSubnetID)
}

vpcID := aws.StringValue(master.VpcId)
k8s.NetworkID = vpcID
vpcID := aws.StringValue(masterInstance.VpcId)
clusterConfig.NetworkID = vpcID

az := aws.StringValue(masterSubnet.AvailabilityZone)
k8s.MasterZones = []string{az}
k8s.NodeZones = append(k8s.NodeZones, &cloudup.ZoneConfig{
masterConfig.Zone = az
clusterConfig.Zones = append(clusterConfig.Zones, &cloudup.ZoneConfig{
Name: az,

// We will allocate a new CIDR
//CIDR: aws.StringValue(masterSubnet.CidrBlock),
})

userData, err := GetInstanceUserData(awsCloud, aws.StringValue(master.InstanceId))
userData, err := GetInstanceUserData(awsCloud, aws.StringValue(masterInstance.InstanceId))
if err != nil {
return fmt.Errorf("error getting master user-data: %v", err)
}

@@ -122,8 +127,6 @@ func (x *ExportCluster) ReverseAWS() error {
return fmt.Errorf("INSTANCE_PREFIX %q did not match cluster name %q", instancePrefix, clusterName)
}

k8s.NodeMachineType = k8s.MasterMachineType

//k8s.NodeMachineType, err = InstanceType(node)
//if err != nil {
// return fmt.Errorf("cannot determine node instance type: %v", err)

@@ -132,57 +135,62 @@ func (x *ExportCluster) ReverseAWS() error {
// We want to upgrade!
// k8s.ImageId = ""

k8s.ClusterIPRange = conf.Settings["CLUSTER_IP_RANGE"]
k8s.AllocateNodeCIDRs = conf.ParseBool("ALLOCATE_NODE_CIDRS")
k8s.KubeUser = conf.Settings["KUBE_USER"]
k8s.ServiceClusterIPRange = conf.Settings["SERVICE_CLUSTER_IP_RANGE"]
k8s.EnableClusterMonitoring = conf.Settings["ENABLE_CLUSTER_MONITORING"]
k8s.EnableClusterLogging = conf.ParseBool("ENABLE_CLUSTER_LOGGING")
k8s.EnableNodeLogging = conf.ParseBool("ENABLE_NODE_LOGGING")
k8s.LoggingDestination = conf.Settings["LOGGING_DESTINATION"]
k8s.ElasticsearchLoggingReplicas, err = parseInt(conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"])
if err != nil {
return fmt.Errorf("cannot parse ELASTICSEARCH_LOGGING_REPLICAS=%q: %v", conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"], err)
}
k8s.EnableClusterDNS = conf.ParseBool("ENABLE_CLUSTER_DNS")
k8s.EnableClusterUI = conf.ParseBool("ENABLE_CLUSTER_UI")
k8s.DNSReplicas, err = parseInt(conf.Settings["DNS_REPLICAS"])
if err != nil {
return fmt.Errorf("cannot parse DNS_REPLICAS=%q: %v", conf.Settings["DNS_REPLICAS"], err)
}
k8s.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
k8s.DNSDomain = conf.Settings["DNS_DOMAIN"]
k8s.AdmissionControl = conf.Settings["ADMISSION_CONTROL"]
k8s.MasterIPRange = conf.Settings["MASTER_IP_RANGE"]
k8s.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
k8s.DockerStorage = conf.Settings["DOCKER_STORAGE"]
//clusterConfig.ClusterIPRange = conf.Settings["CLUSTER_IP_RANGE"]
clusterConfig.AllocateNodeCIDRs = conf.ParseBool("ALLOCATE_NODE_CIDRS")
//clusterConfig.KubeUser = conf.Settings["KUBE_USER"]
clusterConfig.ServiceClusterIPRange = conf.Settings["SERVICE_CLUSTER_IP_RANGE"]
//clusterConfig.EnableClusterMonitoring = conf.Settings["ENABLE_CLUSTER_MONITORING"]
//clusterConfig.EnableClusterLogging = conf.ParseBool("ENABLE_CLUSTER_LOGGING")
//clusterConfig.EnableNodeLogging = conf.ParseBool("ENABLE_NODE_LOGGING")
//clusterConfig.LoggingDestination = conf.Settings["LOGGING_DESTINATION"]
//clusterConfig.ElasticsearchLoggingReplicas, err = parseInt(conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"])
//if err != nil {
// return fmt.Errorf("cannot parse ELASTICSEARCH_LOGGING_REPLICAS=%q: %v", conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"], err)
//}
//clusterConfig.EnableClusterDNS = conf.ParseBool("ENABLE_CLUSTER_DNS")
//clusterConfig.EnableClusterUI = conf.ParseBool("ENABLE_CLUSTER_UI")
//clusterConfig.DNSReplicas, err = parseInt(conf.Settings["DNS_REPLICAS"])
//if err != nil {
// return fmt.Errorf("cannot parse DNS_REPLICAS=%q: %v", conf.Settings["DNS_REPLICAS"], err)
//}
//clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
clusterConfig.DNSDomain = conf.Settings["DNS_DOMAIN"]
//clusterConfig.AdmissionControl = conf.Settings["ADMISSION_CONTROL"]
//clusterConfig.MasterIPRange = conf.Settings["MASTER_IP_RANGE"]
//clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
//clusterConfig.DockerStorage = conf.Settings["DOCKER_STORAGE"]
//k8s.MasterExtraSans = conf.Settings["MASTER_EXTRA_SANS"] // Not user set
k8s.NodeCount, err = parseInt(conf.Settings["NUM_MINIONS"])

primaryNodeSet := &cloudup.NodeSetConfig{}
nodeSets := []*cloudup.NodeSetConfig{primaryNodeSet}
primaryNodeSet.MinSize, err = conf.ParseInt("NUM_MINIONS")
if err != nil {
return fmt.Errorf("cannot parse NUM_MINIONS=%q: %v", conf.Settings["NUM_MINIONS"], err)
}
primaryNodeSet.MaxSize = primaryNodeSet.MinSize
//primaryNodeSet.NodeMachineType = k8s.MasterMachineType

if conf.Version == "1.1" {
// If users went with defaults on some things, clear them out so they get the new defaults
if k8s.AdmissionControl == "NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" {
// More admission controllers in 1.2
k8s.AdmissionControl = ""
}
if k8s.MasterMachineType == "t2.micro" {
//if clusterConfig.AdmissionControl == "NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" {
// // More admission controllers in 1.2
// clusterConfig.AdmissionControl = ""
//}
if masterConfig.MachineType == "t2.micro" {
// Different defaults in 1.2
k8s.MasterMachineType = ""
masterConfig.MachineType = ""
}
if k8s.NodeMachineType == "t2.micro" {
if primaryNodeSet.MachineType == "t2.micro" {
// Encourage users to pick something better...
k8s.NodeMachineType = ""
primaryNodeSet.MachineType = ""
}
}
if conf.Version == "1.2" {
// If users went with defaults on some things, clear them out so they get the new defaults
if k8s.AdmissionControl == "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,ResourceQuota" {
// More admission controllers in 1.2
k8s.AdmissionControl = ""
}
//if clusterConfig.AdmissionControl == "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,ResourceQuota" {
// // More admission controllers in 1.2
// clusterConfig.AdmissionControl = ""
//}
}

//if masterInstance.PublicIpAddress != nil {

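This hunk is the heart of the rationalization: most of the legacy kube-up settings are commented out, and the surviving knobs move onto a smaller ClusterConfig plus per-role MasterConfig and NodeSetConfig objects. A rough sketch of the shape implied by the fields used in this diff; the field names come from the hunks, but the struct layout itself is a reconstruction for orientation, not the actual definitions in the cloudup package:

```
// Reconstruction only, based on the fields referenced in this commit.
type ClusterConfig struct {
	ClusterName           string
	CloudProvider         string
	NetworkID             string
	ServiceClusterIPRange string
	DNSDomain             string
	AllocateNodeCIDRs     *bool

	Zones   []*ZoneConfig
	Masters []*MasterConfig
}

type ZoneConfig struct {
	Name string
	CIDR string
}

type MasterConfig struct {
	Zone        string
	MachineType string
}

type NodeSetConfig struct {
	MinSize     *int
	MaxSize     *int
	MachineType string
}
```
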
@@ -283,7 +291,7 @@ func (x *ExportCluster) ReverseAWS() error {
//kubeletToken = conf.Settings["KUBELET_TOKEN"]
//kubeProxyToken = conf.Settings["KUBE_PROXY_TOKEN"]

err = x.StateStore.WriteConfig(k8s)
err = cloudup.WriteConfig(x.StateStore, clusterConfig, nodeSets)
if err != nil {
return err
}

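Writing the configuration now goes through a cloudup.WriteConfig helper that takes the state store, the cluster config, and the node sets, rather than pushing a single blob through the store. A purely illustrative sketch of what such a helper could look like on top of the path-parameterized state store shown earlier; only the call signature comes from the diff, the key layout and loop are assumptions:

```
// Sketch only: key names and error wrapping are assumptions, not the commit's code.
func WriteConfig(stateStore fi.StateStore, cluster *ClusterConfig, nodeSets []*NodeSetConfig) error {
	// The cluster spec itself goes under a well-known key...
	if err := stateStore.WriteConfig("config", cluster); err != nil {
		return fmt.Errorf("error writing cluster config: %v", err)
	}
	// ...and each node set is written under its own key.
	for i, ns := range nodeSets {
		key := fmt.Sprintf("nodeset-%d", i) // illustrative key; the real layout may differ
		if err := stateStore.WriteConfig(key, ns); err != nil {
			return fmt.Errorf("error writing nodeset config: %v", err)
		}
	}
	return nil
}
```
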
@@ -499,6 +507,20 @@ func (u *UserDataConfiguration) ParseBool(key string) *bool {
return fi.Bool(false)
}

func (u *UserDataConfiguration) ParseInt(key string) (*int, error) {
s := u.Settings[key]
if s == "" {
return nil, nil
}

n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return nil, fmt.Errorf("error parsing key %q=%q", key, s)
}

return fi.Int(int(n)), nil
}

func (u *UserDataConfiguration) ParseCert(key string) (*fi.Certificate, error) {
s := u.Settings[key]
if s == "" {

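A small usage sketch for the new UserDataConfiguration.ParseInt helper (a fragment; it assumes Settings is the exported map[string]string of kube-up settings that the other Parse helpers read from):

```
conf := &UserDataConfiguration{
	Settings: map[string]string{
		"NUM_MINIONS": "3",
		// "DNS_REPLICAS" deliberately absent
	},
}

minSize, err := conf.ParseInt("NUM_MINIONS")
if err != nil {
	return fmt.Errorf("cannot parse NUM_MINIONS: %v", err)
}
// minSize is a *int holding 3, suitable for NodeSetConfig.MinSize

dnsReplicas, err := conf.ParseInt("DNS_REPLICAS")
// missing or empty keys come back as (nil, nil) rather than an error
_ = dnsReplicas
_ = minSize
```
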
@@ -18,13 +18,14 @@ type UpgradeCluster struct {

StateStore fi.StateStore

Config *cloudup.CloudConfig
ClusterConfig *cloudup.ClusterConfig
NodeSets []*cloudup.NodeSetConfig
}

func (x *UpgradeCluster) Upgrade() error {
awsCloud := x.Cloud.(*awsup.AWSCloud)

config := x.Config
clusterConfig := x.ClusterConfig

newClusterName := x.NewClusterName
if newClusterName == "" {

@@ -98,7 +99,7 @@ func (x *UpgradeCluster) Upgrade() error {
// Retag VPC
// We have to be careful because VPCs can be shared
{
vpcID := config.NetworkID
vpcID := clusterConfig.NetworkID
if vpcID != "" {
tags, err := awsCloud.GetTags(vpcID)
if err != nil {

@@ -163,8 +164,8 @@ func (x *UpgradeCluster) Upgrade() error {
}
}

config.ClusterName = newClusterName
err = x.StateStore.WriteConfig(config)
clusterConfig.ClusterName = newClusterName
err = cloudup.WriteConfig(x.StateStore, clusterConfig, x.NodeSets)
if err != nil {
return fmt.Errorf("error writing updated configuration: %v", err)
}