mirror of https://github.com/kubernetes/kops.git
merge from upstream
This commit is contained in:
commit
aa6693a6ed
.gitignore (52 lines changed)
@@ -1,6 +1,52 @@
-.build/
-# IntelliJ
-*.iml
-.idea/
+# OSX leaves these everywhere on SMB shares
+._*
+
+# OSX trash
+.DS_Store
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
+.idea/
+*.iml
+
+# Vscode files
+.vscode
+
+# Emacs save files
+*~
+\#*\#
+.\#*
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
+
+# Mercurial files
+**/.hg
+**/.hg*
+
+# Vagrant
+.vagrant
+network_closure.sh
+
+# make-related metadata
+/.make/
+
+# Terraform plans get put here
+/out/
+
+# Docker _src sync
+/docker/_src
+
+# build stuff
+.build
CHANGES.md (17 lines changed)
@@ -1,3 +1,20 @@
+## Aug 11 2016
+
+Reworked SSH keys and support for running CI builds
+
+* SSH keys are now stored as secrets. The key specified by `--ssh-public-key` is imported when you run `kops create cluster`.
+  You no longer need to specify `--ssh-public-key` when you do an update, but if you do it will be imported.
+* An SSH public key must exist for AWS; if you do not have one, you can import one with:
+  `kops create secret --name $CLUSTER_NAME sshpublickey admin -i ~/.ssh/id_rsa.pub`
+* For AWS, only a single SSH key can be used; you can delete extra keys with `kops delete secret`.
+* To support changing SSH keys reliably, the name of the imported AWS SSH keypair will change to include
+  the OpenSSH key fingerprint. Existing clusters will continue to work, but you will likely be prompted to
+  do a rolling update when you would otherwise not have to. I suggest waiting until you next upgrade Kubernetes.
+
+* Builds that are not published as Docker images can be run. `kops` will pass a list of images in the NodeUp
+  configuration, and NodeUp will download and `docker load` these images. For examples, see the
+  [testing tips](docs/testing.md).
+
 ## Jul 21 2016
 
 More rational model/UX - `kops create cluster` just creates spec, `kops update cluster` does real creation:
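Taken together, the SSH-key handling above reduces to a handful of commands. The following is a minimal sketch, assuming `$CLUSTER_NAME` holds your cluster name and your keys live under `~/.ssh/`; the individual commands are the ones quoted in this entry and in the security notes added later in this commit.

```shell
# Import an SSH public key as a secret (required on AWS before instances are created)
kops create secret --name $CLUSTER_NAME sshpublickey admin -i ~/.ssh/id_rsa.pub

# Replace the key later: delete the old secret, import the new one,
# then push the change out to the auto-scaling groups
kops delete secret --name $CLUSTER_NAME sshpublickey admin
kops create secret --name $CLUSTER_NAME sshpublickey admin -i ~/.ssh/newkey.pub
kops update cluster --yes

# Optionally roll the machines immediately so they pick up the new key
kops rolling-update --yes
```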
Makefile (51 lines changed)
@@ -1,22 +1,28 @@
-all: gocode
+all: kops
 
 DOCKER_REGISTRY=gcr.io/must-override/
 S3_BUCKET=s3://must-override/
 GOPATH_1ST=$(shell echo ${GOPATH} | cut -d : -f 1)
 
 ifndef VERSION
 VERSION := git-$(shell git rev-parse --short HEAD)
 endif
 
-gocode:
-	GO15VENDOREXPERIMENT=1 go install -ldflags "-X main.BuildVersion=${VERSION}" k8s.io/kops/cmd/...
+kops:
+	GO15VENDOREXPERIMENT=1 go install -ldflags "-X main.BuildVersion=${VERSION}" k8s.io/kops/cmd/kops/...
 	ln -sfn ${GOPATH_1ST}/src/k8s.io/kops/upup/models/ ${GOPATH_1ST}/bin/models
 
-# Build in a docker container with golang 1.5
-# Used to test we have not broken 1.5
+# Build in a docker container with golang 1.X
+# Used to test we have not broken 1.X
 check-builds-in-go15:
 	docker run -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.5 make -f /go/src/k8s.io/kops/Makefile gocode
 
+check-builds-in-go16:
+	docker run -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.6 make -f /go/src/k8s.io/kops/Makefile gocode
+
+check-builds-in-go17:
+	docker run -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.7 make -f /go/src/k8s.io/kops/Makefile gocode
+
 codegen:
 	GO15VENDOREXPERIMENT=1 go install k8s.io/kops/upup/tools/generators/...
 	GO15VENDOREXPERIMENT=1 go generate k8s.io/kops/upup/pkg/fi/cloudup/awstasks

@@ -38,7 +44,7 @@ gofmt:
 	gofmt -w -s dns-controller/cmd
 	gofmt -w -s dns-controller/pkg
 
-kops-tar: gocode
+kops-tar: kops
 	rm -rf .build/kops/tar
 	mkdir -p .build/kops/tar/kops/
 	cp ${GOPATH_1ST}/bin/kops .build/kops/tar/kops/kops

@@ -47,15 +53,6 @@ kops-tar: gocode
 	tar tvf .build/kops.tar.gz
 	(sha1sum .build/kops.tar.gz | cut -d' ' -f1) > .build/kops.tar.gz.sha1
 
-nodeup-tar: gocode
-	rm -rf .build/nodeup/tar
-	mkdir -p .build/nodeup/tar/nodeup/root
-	cp ${GOPATH_1ST}/bin/nodeup .build/nodeup/tar/nodeup/root
-	cp -r upup/models/nodeup/ .build/nodeup/tar/nodeup/root/model/
-	tar czvf .build/nodeup.tar.gz -C .build/nodeup/tar/ .
-	tar tvf .build/nodeup.tar.gz
-	(sha1sum .build/nodeup.tar.gz | cut -d' ' -f1) > .build/nodeup.tar.gz.sha1
-
 upload: nodeup-tar kops-tar
 	rm -rf .build/s3
 	mkdir -p .build/s3/nodeup

@@ -101,6 +98,27 @@ protokube-push: protokube-image
 
+nodeup: nodeup-tar
+
+nodeup-gocode:
+	go install -ldflags "-X main.BuildVersion=${VERSION}" k8s.io/kops/cmd/nodeup
+
+nodeup-builder-image:
+	docker build -f images/nodeup-builder/Dockerfile -t nodeup-builder .
+
+nodeup-build-in-docker: nodeup-builder-image
+	docker run -it -v `pwd`:/src nodeup-builder /onbuild.sh
+
+nodeup-tar: nodeup-build-in-docker
+	rm -rf .build/nodeup/tar
+	mkdir -p .build/nodeup/tar/nodeup/root
+	cp .build/artifacts/nodeup .build/nodeup/tar/nodeup/root
+	cp -r upup/models/nodeup/ .build/nodeup/tar/nodeup/root/model/
+	tar czvf .build/nodeup.tar.gz -C .build/nodeup/tar/ .
+	tar tvf .build/nodeup.tar.gz
+	(sha1sum .build/nodeup.tar.gz | cut -d' ' -f1) > .build/nodeup.tar.gz.sha1
+
 dns-controller-gocode:
 	go install k8s.io/kops/dns-controller/cmd/dns-controller

@@ -121,4 +139,3 @@ dns-controller-push: dns-controller-image
 
 copydeps:
 	rsync -avz _vendor/ vendor/ --exclude vendor/ --exclude .git
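As a quick orientation, here is a sketch of how the targets above are typically invoked from the repository root; the target names are exactly the ones defined in the Makefile, and the Docker-based targets assume a working Docker daemon.

```shell
# Default target: build the kops CLI into ${GOPATH}/bin and symlink the models directory
make kops

# Build nodeup inside the builder image and package it as .build/nodeup.tar.gz
make nodeup-tar

# Verify the tree still compiles with older Go toolchains
make check-builds-in-go16
make check-builds-in-go17
```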
README.md (41 lines changed)
@@ -15,6 +15,10 @@ Some of the more interesting features:
 
 ## Recent changes
 
+Improvements are being made almost daily, but bigger changes are described here (until we get to
+a more formal release process):
+
+* Reworked SSH keys and support for running CI builds [Aug 11 2016](CHANGES.md#aug-11-2016)
 * Create command was split into create and update [Jul 21 2016](CHANGES.md#jul-21-2016)
 
 ## Installation

@@ -133,38 +137,11 @@ Each file in the tree describes a Task.
 On the nodeup side, Tasks can manage files, systemd services, packages etc.
 On the `kops update cluster` side, Tasks manage cloud resources: instances, networks, disks etc.
 
-## Workaround for terraform bug
+## Generate a terraform configuration
 
-Terraform currently has a bug where it can't create AWS tags containing a dot. Until this is fixed,
-you can't use terraform to build EC2 resources that are tagged with `k8s.io/...` tags. Thankfully this is only
-the volumes, and it isn't the worst idea to build these separately anyway.
-
-We divide the cloudup model into three parts:
-* models/config which contains all the options - this is run automatically by "create cluster"
-* models/proto which sets up the volumes and other data which would be hard to recover (e.g. likely keys & secrets in the near future)
-* models/cloudup which is the main cloud model for configuring everything else
-
-So you don't use terraform for the 'proto' phase (you can't anyway, because of the bug!):
-
-```
-export KOPS_STATE_STORE=s3://<somes3bucket>
-export NAME=<kubernetes.mydomain.com>
-${GOPATH}/bin/kops create cluster --v=0 --zones=us-east-1c ${NAME}
-${GOPATH}/bin/kops update cluster --v=0 ${NAME} --model=proto --yes
-```
-
-And then you can use terraform to do the remainder of the installation:
-
-```
-export CLUSTER_NAME=<kubernetes.mydomain.com>
-${GOPATH}/bin/kops update cluster --v=0 ${NAME} --model=cloudup --target=terraform
-```
-
-Then, to apply using terraform:
-
-```
-cd out/terraform
-terraform plan
-terraform apply
-```
+Kops can also generate a terraform configuration, which you can then apply with terraform to build a Kubernetes
+cluster.
+
+If you are using a version of terraform prior to 0.7, please read about the [workaround for earlier versions of terraform](docs/terraform.md).
+
+For more details, please read [how to use terraform to create a Kubernetes cluster](docs/terraform.md).
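The replacement section above defers the details to docs/terraform.md (added later in this commit); as a sketch, the flow it describes looks like this, with `<somes3bucket>` and `<kubernetes.mydomain.com>` as placeholders:

```shell
export KOPS_STATE_STORE=s3://<somes3bucket>
export NAME=<kubernetes.mydomain.com>

# Write a terraform configuration instead of creating resources directly
kops create cluster ${NAME} --zones us-east-1c
kops update cluster ${NAME} --target=terraform

# Apply it with terraform
cd out/terraform
terraform plan
terraform apply
```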
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+	"io/ioutil"
 	"k8s.io/kops/upup/pkg/api"
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/cloudup"

@@ -15,24 +16,25 @@ import (
 )
 
 type CreateClusterCmd struct {
 	Yes                 bool
 	Target              string
 	Models              string
 	Cloud               string
 	Zones               string
 	MasterZones         string
 	NodeSize            string
 	MasterSize          string
 	NodeCount           int
 	Project             string
 	KubernetesVersion   string
 	OutDir              string
 	Image               string
 	SSHPublicKey        string
 	VPCID               string
 	NetworkCIDR         string
 	DNSZone             string
 	AdminAccess         string
+	Networking          string
 	NoAssociatePublicIP bool
 }
 
@@ -78,6 +80,8 @@ func init() {
 	cmd.Flags().StringVar(&createCluster.Image, "image", "", "Image to use")
 
+	cmd.Flags().StringVar(&createCluster.Networking, "networking", "classic", "Networking mode to use. kubenet, classic, external. This currently defaults to classic, but will likely default to kubenet soon.")
+
 	cmd.Flags().StringVar(&createCluster.DNSZone, "dns-zone", "", "DNS hosted zone to use (defaults to last two components of cluster name)")
 	cmd.Flags().StringVar(&createCluster.OutDir, "out", "", "Path to write any local output")
 	cmd.Flags().StringVar(&createCluster.AdminAccess, "admin-access", "", "Restrict access to admin endpoints (SSH, HTTPS) to this CIDR. If not set, access will not be restricted by IP.")

@@ -93,15 +97,16 @@ func (c *CreateClusterCmd) Run(args []string) error {
 	isDryrun := false
 	// direct requires --yes (others do not, because they don't make changes)
+	targetName := c.Target
 	if c.Target == cloudup.TargetDirect {
 		if !c.Yes {
 			isDryrun = true
-			c.Target = cloudup.TargetDryRun
+			targetName = cloudup.TargetDryRun
 		}
 	}
 	if c.Target == cloudup.TargetDryRun {
 		isDryrun = true
-		c.Target = cloudup.TargetDryRun
+		targetName = cloudup.TargetDryRun
 	}
 
 	clusterName := rootCommand.clusterName

@@ -132,6 +137,17 @@ func (c *CreateClusterCmd) Run(args []string) error {
 		cluster = &api.Cluster{}
 		var instanceGroups []*api.InstanceGroup
 
+		cluster.Spec.Networking = &api.NetworkingSpec{}
+		switch c.Networking {
+		case "classic":
+			cluster.Spec.Networking.Classic = &api.ClassicNetworkingSpec{}
+		case "kubenet":
+			cluster.Spec.Networking.Kubenet = &api.KubenetNetworkingSpec{}
+		case "external":
+			cluster.Spec.Networking.External = &api.ExternalNetworkingSpec{}
+		default:
+			return fmt.Errorf("unknown networking mode %q", c.Networking)
+		}
 		if c.Zones != "" {
 			existingZones := make(map[string]*api.ClusterZoneSpec)
 			for _, zone := range cluster.Spec.Zones {

@@ -304,8 +320,14 @@ func (c *CreateClusterCmd) Run(args []string) error {
 		}
 	}
 
+	sshPublicKeys := make(map[string][]byte)
 	if c.SSHPublicKey != "" {
 		c.SSHPublicKey = utils.ExpandPath(c.SSHPublicKey)
+		authorized, err := ioutil.ReadFile(c.SSHPublicKey)
+		if err != nil {
+			return fmt.Errorf("error reading SSH key file %q: %v", c.SSHPublicKey, err)
+		}
+		sshPublicKeys[fi.SecretNameSSHPrimary] = authorized
 	}
 
 	if c.AdminAccess != "" {

@@ -357,6 +379,14 @@ func (c *CreateClusterCmd) Run(args []string) error {
 		return fmt.Errorf("error writing completed cluster spec: %v", err)
 	}
 
+	for k, data := range sshPublicKeys {
+		keyStore := clusterRegistry.KeyStore(cluster.Name)
+		err = keyStore.AddSSHPublicKey(k, data)
+		if err != nil {
+			return fmt.Errorf("error addding SSH public key: %v", err)
+		}
+	}
+
 	if isDryrun {
 		fmt.Println("Previewing changes that will be made:\n")
 	}

@@ -366,8 +396,7 @@ func (c *CreateClusterCmd) Run(args []string) error {
 		InstanceGroups:  fullInstanceGroups,
 		Models:          strings.Split(c.Models, ","),
 		ClusterRegistry: clusterRegistry,
-		Target:          c.Target,
-		SSHPublicKey:    c.SSHPublicKey,
+		TargetName:      targetName,
 		OutDir:          c.OutDir,
 		DryRun:          isDryrun,
 	}

@@ -399,6 +428,7 @@ func (c *CreateClusterCmd) Run(args []string) error {
 	x := &kutil.CreateKubecfg{
 		ClusterName:      cluster.Name,
 		KeyStore:         clusterRegistry.KeyStore(cluster.Name),
+		SecretStore:      clusterRegistry.SecretStore(cluster.Name),
 		MasterPublicName: cluster.Spec.MasterPublicName,
 	}
 	defer x.Close()
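The new `--networking` flag above accepts `classic`, `kubenet`, or `external`; any other value is rejected by the switch statement. A hedged usage sketch, with the cluster name and zone as placeholders:

```shell
# Defaults to --networking classic when the flag is omitted
kops create cluster --zones us-east-1c --networking kubenet <kubernetes.mydomain.com>

# Any other value fails with: unknown networking mode "<value>"
```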
@@ -56,7 +56,7 @@ func (c *DeleteClusterCmd) Run(args []string) error {
 	clusterName := rootCommand.clusterName
 	if clusterName == "" {
-		return fmt.Errorf("--name is required (when --external)")
+		return fmt.Errorf("--name is required (for safety)")
 	}
 
 	var cloud fi.Cloud
@@ -57,7 +57,7 @@ func (c *DeleteSecretCmd) Run(args []string) error {
 	}
 
 	if len(secrets) == 0 {
-		return fmt.Errorf("secret %q not found")
+		return fmt.Errorf("secret not found")
 	}
 
 	if len(secrets) != 1 {
@@ -45,6 +45,11 @@ func (c *EditClusterCmd) Run(args []string) error {
 		return err
 	}
 
+	err = oldCluster.FillDefaults()
+	if err != nil {
+		return err
+	}
+
 	instanceGroupRegistry, err := rootCommand.InstanceGroupRegistry()
 	if err != nil {
 		return err
@@ -54,6 +54,7 @@ func (c *ExportKubecfgCommand) Run(args []string) error {
 	x := &kutil.CreateKubecfg{
 		ClusterName:      clusterName,
 		KeyStore:         clusterRegistry.KeyStore(clusterName),
+		SecretStore:      clusterRegistry.SecretStore(cluster.Name),
 		MasterPublicName: master,
 	}
 	defer x.Close()
@@ -4,7 +4,6 @@ import (
 	"os"
 
 	"fmt"
-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
 	"k8s.io/kops/upup/pkg/api"
 	"strings"

@@ -25,7 +24,7 @@ func init() {
 		Run: func(cmd *cobra.Command, args []string) {
 			err := getClustersCmd.Run(args)
 			if err != nil {
-				glog.Exitf("%v", err)
+				exitWithError(err)
 			}
 		},
 	}
@@ -1,16 +0,0 @@
-package main
-
-type kubectlConfig struct {
-	Kind       string                    `json:"kind`
-	ApiVersion string                    `json:"apiVersion`
-	Clusters   []*kubectlClusterWithName `json:"clusters`
-}
-
-type kubectlClusterWithName struct {
-	Name    string         `json:"name`
-	Cluster kubectlCluster `json:"cluster`
-}
-
-type kubectlCluster struct {
-	Server string `json:"server`
-}
@@ -5,14 +5,14 @@ import (
 	"os"
 	"strconv"
 
-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
 	"k8s.io/kops/upup/pkg/fi/cloudup"
 	"k8s.io/kops/upup/pkg/kutil"
 )
 
 type RollingUpdateClusterCmd struct {
 	Yes   bool
+	Force bool
 
 	cobraCommand *cobra.Command
 }

@@ -29,12 +29,13 @@ func init() {
 	cmd := rollingupdateCluster.cobraCommand
 	rollingUpdateCommand.cobraCommand.AddCommand(cmd)
 
-	cmd.Flags().BoolVar(&rollingupdateCluster.Yes, "yes", false, "Rollingupdate without confirmation")
+	cmd.Flags().BoolVar(&rollingupdateCluster.Yes, "yes", false, "perform rolling update without confirmation")
+	cmd.Flags().BoolVar(&rollingupdateCluster.Force, "force", false, "Force rolling update, even if no changes")
 
 	cmd.Run = func(cmd *cobra.Command, args []string) {
 		err := rollingupdateCluster.Run()
 		if err != nil {
-			glog.Exitf("%v", err)
+			exitWithError(err)
 		}
 	}
 }

@@ -107,8 +108,7 @@ func (c *RollingUpdateClusterCmd) Run() error {
 		}
 	}
 
-	if !needUpdate {
-		// TODO: Allow --force option to force even if not needed?
+	if !needUpdate && !c.Force {
 		fmt.Printf("\nNo rolling-update required\n")
 		return nil
 	}

@@ -118,5 +118,5 @@ func (c *RollingUpdateClusterCmd) Run() error {
 		return nil
 	}
 
-	return d.RollingUpdate(groups)
+	return d.RollingUpdate(groups, c.Force)
 }
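A sketch of the new flag in use; the `rolling-update cluster` spelling is inferred from the command wiring above, and selecting the cluster with `--name` is an assumption:

```shell
# Roll the instance groups even when kops reports that no changes are needed
kops rolling-update cluster --name $CLUSTER_NAME --yes --force
```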
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"os"
 
-	"encoding/json"
 	"github.com/golang/glog"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"

@@ -114,7 +113,7 @@ func (c *RootCmd) ClusterName() string {
 	return c.clusterName
 }
 
-func readKubectlClusterConfig() (*kubectlClusterWithName, error) {
+func readKubectlClusterConfig() (*kutil.KubectlClusterWithName, error) {
 	kubectl := &kutil.Kubectl{}
 	context, err := kubectl.GetCurrentContext()
 	if err != nil {

@@ -122,18 +121,12 @@ func readKubectlClusterConfig() (*kubectlClusterWithName, error) {
 	}
 	glog.V(4).Infof("context = %q", context)
 
-	configString, err := kubectl.GetConfig(true, "json")
+	config, err := kubectl.GetConfig(true)
 	if err != nil {
 		return nil, fmt.Errorf("error getting current config from kubectl: %v", err)
 	}
-	glog.V(8).Infof("config = %q", configString)
-
-	config := &kubectlConfig{}
-	err = json.Unmarshal([]byte(configString), config)
-	if err != nil {
-		return nil, fmt.Errorf("cannot parse current config from kubectl: %v", err)
-	}
 
+	// Minify should have done this
 	if len(config.Clusters) != 1 {
 		return nil, fmt.Errorf("expected exactly one cluster in kubectl config, found %d", len(config.Clusters))
 	}

@@ -155,6 +148,10 @@ func (c *RootCmd) ClusterRegistry() (*api.ClusterRegistry, error) {
 		return nil, fmt.Errorf("error building state store path for %q: %v", c.stateLocation, err)
 	}
 
+	if !vfs.IsClusterReadable(basePath) {
+		return nil, fmt.Errorf("State store %q is not cloud-reachable - please use an S3 bucket", c.stateLocation)
+	}
+
 	clusterRegistry := api.NewClusterRegistry(basePath)
 	c.clusterRegistry = clusterRegistry
 	return clusterRegistry, nil

@@ -186,7 +183,7 @@ func (c *RootCmd) Cluster() (*api.ClusterRegistry, *api.Cluster, error) {
 }
 
 func (c *RootCmd) InstanceGroupRegistry() (*api.InstanceGroupRegistry, error) {
-	clusterStore, err := c.ClusterRegistry()
+	clusterRegistry, err := c.ClusterRegistry()
 	if err != nil {
 		return nil, err
 	}

@@ -196,11 +193,11 @@ func (c *RootCmd) InstanceGroupRegistry() (*api.InstanceGroupRegistry, error) {
 		return nil, fmt.Errorf("--name is required")
 	}
 
-	return clusterStore.InstanceGroups(clusterName)
+	return clusterRegistry.InstanceGroups(clusterName)
 }
 
 func (c *RootCmd) SecretStore() (fi.SecretStore, error) {
-	clusterStore, err := c.ClusterRegistry()
+	clusterRegistry, err := c.ClusterRegistry()
 	if err != nil {
 		return nil, err
 	}

@@ -210,11 +207,11 @@ func (c *RootCmd) SecretStore() (fi.SecretStore, error) {
 		return nil, fmt.Errorf("--name is required")
 	}
 
-	return clusterStore.SecretStore(clusterName), nil
+	return clusterRegistry.SecretStore(clusterName), nil
 }
 
 func (c *RootCmd) KeyStore() (fi.CAStore, error) {
-	clusterStore, err := c.ClusterRegistry()
+	clusterRegistry, err := c.ClusterRegistry()
 	if err != nil {
 		return nil, err
 	}

@@ -224,5 +221,5 @@ func (c *RootCmd) KeyStore() (fi.CAStore, error) {
 		return nil, fmt.Errorf("--name is required")
 	}
 
-	return clusterStore.KeyStore(clusterName), nil
+	return clusterRegistry.KeyStore(clusterName), nil
 }
@@ -0,0 +1,15 @@
+package main
+
+import (
+	"github.com/spf13/cobra"
+)
+
+// toolboxCmd represents the toolbox command
+var toolboxCmd = &cobra.Command{
+	Use:   "toolbox",
+	Short: "Misc infrequently used commands",
+}
+
+func init() {
+	rootCommand.AddCommand(toolboxCmd)
+}
@@ -0,0 +1,100 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+	"k8s.io/kops/upup/pkg/api"
+	"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
+	"k8s.io/kops/upup/pkg/kutil"
+)
+
+type ConvertImportedCmd struct {
+	NewClusterName string
+}
+
+var convertImported ConvertImportedCmd
+
+func init() {
+	cmd := &cobra.Command{
+		Use:   "convert-imported",
+		Short: "Convert an imported cluster into a kops cluster",
+		Run: func(cmd *cobra.Command, args []string) {
+			err := convertImported.Run()
+			if err != nil {
+				exitWithError(err)
+			}
+		},
+	}
+
+	toolboxCmd.AddCommand(cmd)
+
+	cmd.Flags().StringVar(&convertImported.NewClusterName, "newname", "", "new cluster name")
+}
+
+func (c *ConvertImportedCmd) Run() error {
+	clusterRegistry, cluster, err := rootCommand.Cluster()
+	if err != nil {
+		return err
+	}
+
+	instanceGroupRegistry, err := rootCommand.InstanceGroupRegistry()
+	if err != nil {
+		return err
+	}
+
+	instanceGroups, err := instanceGroupRegistry.ReadAll()
+
+	if cluster.Annotations[api.AnnotationNameManagement] != api.AnnotationValueManagementImported {
+		return fmt.Errorf("cluster %q does not appear to be a cluster imported using kops import", cluster.Name)
+	}
+
+	if c.NewClusterName == "" {
+		return fmt.Errorf("--newname is required for converting an imported cluster")
+	}
+
+	oldClusterName := cluster.Name
+	if oldClusterName == "" {
+		return fmt.Errorf("(Old) ClusterName must be set in configuration")
+	}
+
+	// TODO: Switch to cloudup.BuildCloud
+	if len(cluster.Spec.Zones) == 0 {
+		return fmt.Errorf("Configuration must include Zones")
+	}
+
+	region := ""
+	for _, zone := range cluster.Spec.Zones {
+		if len(zone.Name) <= 2 {
+			return fmt.Errorf("Invalid AWS zone: %q", zone.Name)
+		}
+
+		zoneRegion := zone.Name[:len(zone.Name)-1]
+		if region != "" && zoneRegion != region {
+			return fmt.Errorf("Clusters cannot span multiple regions")
+		}
+
+		region = zoneRegion
+	}
+
+	tags := map[string]string{"KubernetesCluster": oldClusterName}
+	cloud, err := awsup.NewAWSCloud(region, tags)
+	if err != nil {
+		return fmt.Errorf("error initializing AWS client: %v", err)
+	}
+
+	d := &kutil.ConvertKubeupCluster{}
+	d.NewClusterName = c.NewClusterName
+	d.OldClusterName = oldClusterName
+	d.Cloud = cloud
+	d.ClusterConfig = cluster
+	d.InstanceGroups = instanceGroups
+	d.ClusterRegistry = clusterRegistry
+
+	err = d.Upgrade()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
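Based on the wiring above, the new subcommand lives under `kops toolbox`; a usage sketch, with placeholder cluster names, might look like this:

```shell
# The cluster must carry the "imported" management annotation (i.e. it came from kops import)
kops toolbox convert-imported --name <imported-cluster-name> --newname <new-cluster-name>
```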
@@ -4,10 +4,13 @@ import (
 	"fmt"
 	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+	"io/ioutil"
 	"k8s.io/kops/upup/pkg/api"
+	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/cloudup"
 	"k8s.io/kops/upup/pkg/fi/utils"
 	"k8s.io/kops/upup/pkg/kutil"
+	"os"
 	"strings"
 )

@@ -39,7 +42,7 @@ func init() {
 	cmd.Flags().BoolVar(&updateCluster.Yes, "yes", false, "Actually create cloud resources")
 	cmd.Flags().StringVar(&updateCluster.Target, "target", "direct", "Target - direct, terraform")
 	cmd.Flags().StringVar(&updateCluster.Models, "model", "config,proto,cloudup", "Models to apply (separate multiple models with commas)")
-	cmd.Flags().StringVar(&updateCluster.SSHPublicKey, "ssh-public-key", "~/.ssh/id_rsa.pub", "SSH public key to use")
+	cmd.Flags().StringVar(&updateCluster.SSHPublicKey, "ssh-public-key", "", "SSH public key to use (deprecated: use kops create secret instead)")
 	cmd.Flags().StringVar(&updateCluster.OutDir, "out", "", "Path to write any local output")
 }

@@ -50,16 +53,18 @@ func (c *UpdateClusterCmd) Run(args []string) error {
 	}
 
 	isDryrun := false
+	targetName := c.Target
+
 	// direct requires --yes (others do not, because they don't do anything!)
 	if c.Target == cloudup.TargetDirect {
 		if !c.Yes {
 			isDryrun = true
-			c.Target = cloudup.TargetDryRun
+			targetName = cloudup.TargetDryRun
 		}
 	}
 	if c.Target == cloudup.TargetDryRun {
 		isDryrun = true
-		c.Target = cloudup.TargetDryRun
+		targetName = cloudup.TargetDryRun
 	}
 
 	if c.OutDir == "" {

@@ -87,7 +92,21 @@ func (c *UpdateClusterCmd) Run(args []string) error {
 	}
 
 	if c.SSHPublicKey != "" {
+		fmt.Fprintf(os.Stderr, "--ssh-public-key on update is deprecated - please use `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub` instead\n", cluster.Name)
+
 		c.SSHPublicKey = utils.ExpandPath(c.SSHPublicKey)
+		authorized, err := ioutil.ReadFile(c.SSHPublicKey)
+		if err != nil {
+			return fmt.Errorf("error reading SSH key file %q: %v", c.SSHPublicKey, err)
+		}
+		keyStore, err := rootCommand.KeyStore()
+		if err != nil {
+			return err
+		}
+		err = keyStore.AddSSHPublicKey(fi.SecretNameSSHPrimary, authorized)
+		if err != nil {
+			return fmt.Errorf("error addding SSH public key: %v", err)
+		}
 	}
 
 	strict := false

@@ -101,8 +120,7 @@ func (c *UpdateClusterCmd) Run(args []string) error {
 		InstanceGroups:  fullInstanceGroups,
 		Models:          strings.Split(c.Models, ","),
 		ClusterRegistry: clusterRegistry,
-		Target:          c.Target,
-		SSHPublicKey:    c.SSHPublicKey,
+		TargetName:      targetName,
 		OutDir:          c.OutDir,
 		DryRun:          isDryrun,
 	}

@@ -112,12 +130,23 @@ func (c *UpdateClusterCmd) Run(args []string) error {
 	}
 
 	if isDryrun {
-		fmt.Printf("Must specify --yes to apply changes\n")
+		target := applyCmd.Target.(*fi.DryRunTarget)
+		if target.HasChanges() {
+			fmt.Printf("Must specify --yes to apply changes\n")
+		} else {
+			fmt.Printf("No changes need to be applied\n")
+		}
 		return nil
 	}
 
 	// TODO: Only if not yet set?
 	if !isDryrun {
+		hasKubecfg, err := hasKubecfg(cluster.Name)
+		if err != nil {
+			glog.Warningf("error reading kubecfg: %v", err)
+			hasKubecfg = true
+		}
+
 		keyStore := clusterRegistry.KeyStore(cluster.Name)
 
 		kubecfgCert, err := keyStore.FindCert("kubecfg")

@@ -131,6 +160,7 @@ func (c *UpdateClusterCmd) Run(args []string) error {
 		x := &kutil.CreateKubecfg{
 			ClusterName:      cluster.Name,
 			KeyStore:         keyStore,
+			SecretStore:      clusterRegistry.SecretStore(cluster.Name),
 			MasterPublicName: cluster.Spec.MasterPublicName,
 		}
 		defer x.Close()

@@ -142,7 +172,35 @@ func (c *UpdateClusterCmd) Run(args []string) error {
 		} else {
 			glog.Infof("kubecfg cert not found; won't export kubecfg")
 		}
+
+		if !hasKubecfg {
+			// Assume initial creation
+			fmt.Printf("\n")
+			fmt.Printf("Cluster is starting. It should be ready in a few minutes.\n")
+			fmt.Printf("\n")
+			fmt.Printf("Suggestions:\n")
+			fmt.Printf(" * list nodes: kubectl get nodes --show-labels\n")
+			fmt.Printf(" * ssh to the master: ssh -i ~/.ssh/id_rsa admin@%s\n", cluster.Spec.MasterPublicName)
+			fmt.Printf(" * read about installing addons: https://github.com/kubernetes/kops/blob/master/docs/addons.md\n")
+			fmt.Printf("\n")
+		}
 	}
 
 	return nil
 }
+
+func hasKubecfg(contextName string) (bool, error) {
+	kubectl := &kutil.Kubectl{}
+
+	config, err := kubectl.GetConfig(false)
+	if err != nil {
+		return false, fmt.Errorf("error getting config from kubectl: %v", err)
+	}
+
+	for _, context := range config.Contexts {
+		if context.Name == contextName {
+			return true, nil
+		}
+	}
+	return false, nil
+}
@@ -3,14 +3,14 @@ package main
 import (
 	"fmt"
 
-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
-	"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
-	"k8s.io/kops/upup/pkg/kutil"
+	"k8s.io/kops/upup/pkg/api"
+	"k8s.io/kops/upup/pkg/fi/cloudup"
+	"os"
 )
 
 type UpgradeClusterCmd struct {
-	NewClusterName string
+	Yes bool
 }
 
 var upgradeCluster UpgradeClusterCmd

@@ -23,21 +23,26 @@ func init() {
 		Run: func(cmd *cobra.Command, args []string) {
 			err := upgradeCluster.Run()
 			if err != nil {
-				glog.Exitf("%v", err)
+				exitWithError(err)
 			}
 		},
 	}
 
-	upgradeCmd.AddCommand(cmd)
+	cmd.Flags().BoolVar(&upgradeCluster.Yes, "yes", false, "Apply update")
 
-	cmd.Flags().StringVar(&upgradeCluster.NewClusterName, "newname", "", "new cluster name")
+	upgradeCmd.AddCommand(cmd)
+}
+
+type upgradeAction struct {
+	Item     string
+	Property string
+	Old      string
+	New      string
+
+	apply func()
 }
 
 func (c *UpgradeClusterCmd) Run() error {
-	if c.NewClusterName == "" {
-		return fmt.Errorf("--newname is required")
-	}
-
 	clusterRegistry, cluster, err := rootCommand.Cluster()
 	if err != nil {
 		return err

@@ -50,47 +55,96 @@ func (c *UpgradeClusterCmd) Run() error {
 
 	instanceGroups, err := instanceGroupRegistry.ReadAll()
 
-	oldClusterName := cluster.Name
-	if oldClusterName == "" {
-		return fmt.Errorf("(Old) ClusterName must be set in configuration")
+	if cluster.Annotations[api.AnnotationNameManagement] == api.AnnotationValueManagementImported {
+		return fmt.Errorf("upgrade is not for use with imported clusters (did you mean `kops toolbox convert-imported`?)")
 	}
 
-	if len(cluster.Spec.Zones) == 0 {
-		return fmt.Errorf("Configuration must include Zones")
-	}
-
-	region := ""
-	for _, zone := range cluster.Spec.Zones {
-		if len(zone.Name) <= 2 {
-			return fmt.Errorf("Invalid AWS zone: %q", zone.Name)
-		}
-
-		zoneRegion := zone.Name[:len(zone.Name)-1]
-		if region != "" && zoneRegion != region {
-			return fmt.Errorf("Clusters cannot span multiple regions")
-		}
-
-		region = zoneRegion
-	}
-
-	tags := map[string]string{"KubernetesCluster": oldClusterName}
-	cloud, err := awsup.NewAWSCloud(region, tags)
-	if err != nil {
-		return fmt.Errorf("error initializing AWS client: %v", err)
-	}
-
-	d := &kutil.UpgradeCluster{}
-	d.NewClusterName = c.NewClusterName
-	d.OldClusterName = oldClusterName
-	d.Cloud = cloud
-	d.ClusterConfig = cluster
-	d.InstanceGroups = instanceGroups
-	d.ClusterRegistry = clusterRegistry
-
-	err = d.Upgrade()
+	latestKubernetesVersion, err := api.FindLatestKubernetesVersion()
 	if err != nil {
 		return err
 	}
 
+	var actions []*upgradeAction
+	if cluster.Spec.KubernetesVersion != latestKubernetesVersion {
+		actions = append(actions, &upgradeAction{
+			Item:     "Cluster",
+			Property: "KubernetesVersion",
+			Old:      cluster.Spec.KubernetesVersion,
+			New:      latestKubernetesVersion,
+			apply: func() {
+				cluster.Spec.KubernetesVersion = latestKubernetesVersion
+			},
+		})
+	}
+
+	if len(actions) == 0 {
+		// TODO: Allow --force option to force even if not needed?
+		// Note stderr - we try not to print to stdout if no update is needed
+		fmt.Fprintf(os.Stderr, "\nNo upgrade required\n")
+		return nil
+	}
+
+	{
+		t := &Table{}
+		t.AddColumn("ITEM", func(a *upgradeAction) string {
+			return a.Item
+		})
+		t.AddColumn("PROPERTY", func(a *upgradeAction) string {
+			return a.Property
+		})
+		t.AddColumn("OLD", func(a *upgradeAction) string {
+			return a.Old
+		})
+		t.AddColumn("NEW", func(a *upgradeAction) string {
+			return a.New
+		})
+
+		err := t.Render(actions, os.Stdout, "ITEM", "PROPERTY", "OLD", "NEW")
+		if err != nil {
+			return err
+		}
+	}
+
+	if !c.Yes {
+		fmt.Printf("\nMust specify --yes to perform upgrade\n")
+		return nil
+	} else {
+		for _, action := range actions {
+			action.apply()
+		}
+
+		// TODO: DRY this chunk
+		err = cluster.PerformAssignments()
+		if err != nil {
+			return fmt.Errorf("error populating configuration: %v", err)
+		}
+
+		fullCluster, err := cloudup.PopulateClusterSpec(cluster, clusterRegistry)
+		if err != nil {
+			return err
+		}
+
+		err = api.DeepValidate(fullCluster, instanceGroups, true)
+		if err != nil {
+			return err
+		}
+
+		// Note we perform as much validation as we can, before writing a bad config
+		err = clusterRegistry.Update(cluster)
+		if err != nil {
+			return err
+		}
+
+		err = clusterRegistry.WriteCompletedConfig(fullCluster)
+		if err != nil {
+			return fmt.Errorf("error writing completed cluster spec: %v", err)
+		}
+
+		fmt.Printf("\nUpdates applied to configuration.\n")
+
+		// TODO: automate this step
+		fmt.Printf("You can now apply these changes, using `kops update cluster %s`\n", cluster.Name)
+	}
+
 	return nil
 }
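The rewritten command above now compares the cluster's KubernetesVersion against the latest available version and prints a table of proposed changes instead of renaming clusters. A sketch of the resulting workflow, assuming the subcommand is spelled `kops upgrade cluster`:

```shell
# Preview: prints an ITEM / PROPERTY / OLD / NEW table, or "No upgrade required"
kops upgrade cluster --name $CLUSTER_NAME

# Apply the change to the cluster spec, then push it out to the cloud resources
kops upgrade cluster --name $CLUSTER_NAME --yes
kops update cluster $CLUSTER_NAME --yes
```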
@@ -6,17 +6,29 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kops/upup/pkg/fi/nodeup"
 	"os"
+	"time"
 )
 
+var (
+	// value overwritten during build. This can be used to resolve issues.
+	BuildVersion = "0.1"
+)
+
+const retryInterval = 30 * time.Second
+
 func main() {
+	fmt.Printf("nodeup version %s\n", BuildVersion)
+
 	flagModel := "model"
 	flag.StringVar(&flagModel, "model", flagModel, "directory to use as model for desired configuration")
 	var flagConf string
 	flag.StringVar(&flagConf, "conf", "node.yaml", "configuration location")
-	var flagAssetDir string
-	flag.StringVar(&flagAssetDir, "assets", "/var/cache/nodeup", "the location for the local asset cache")
+	var flagCacheDir string
+	flag.StringVar(&flagCacheDir, "cache", "/var/cache/nodeup", "the location for the local asset cache")
 	var flagRootFS string
 	flag.StringVar(&flagRootFS, "rootfs", "/", "the location of the machine root (for running in a container)")
+	var flagRetries int
+	flag.IntVar(&flagRetries, "retries", -1, "maximum number of retries on failure: -1 means retry forever")
 
 	dryrun := false
 	flag.BoolVar(&dryrun, "dryrun", false, "Don't create cloud resources; just show what would be done")

@@ -34,17 +46,32 @@ func main() {
 		glog.Exitf("--conf is required")
 	}
 
-	cmd := &nodeup.NodeUpCommand{
-		ConfigLocation: flagConf,
-		ModelDir:       flagModel,
-		Target:         target,
-		AssetDir:       flagAssetDir,
-		FSRoot:         flagRootFS,
-	}
-	err := cmd.Run(os.Stdout)
-	if err != nil {
-		glog.Exitf("error running nodeup: %v", err)
-		os.Exit(1)
-	}
-	fmt.Printf("success")
+	retries := flagRetries
+
+	for {
+		cmd := &nodeup.NodeUpCommand{
+			ConfigLocation: flagConf,
+			ModelDir:       flagModel,
+			Target:         target,
+			CacheDir:       flagCacheDir,
+			FSRoot:         flagRootFS,
+		}
+		err := cmd.Run(os.Stdout)
+		if err == nil {
+			fmt.Printf("success")
+			os.Exit(0)
+		}
+
+		if retries == 0 {
+			glog.Exitf("error running nodeup: %v", err)
+			os.Exit(1)
+		}
+
+		if retries > 0 {
+			retries--
+		}
+
+		glog.Warningf("got error running nodeup (will retry in %s): %v", retryInterval, err)
+		time.Sleep(retryInterval)
+	}
 }

@@ -0,0 +1,26 @@
+FROM golang:1.6-alpine
+
+ARG KOPS_SRC=_src
+COPY $KOPS_SRC/ /go/src/k8s.io/kops/
+
+# KUBECTL_SOURCE: Change to kubernetes-dev/ci for CI
+ARG KUBECTL_SOURCE=kubernetes-release/release
+
+# KUBECTL_TRACK: Currently latest from KUBECTL_SOURCE. Change to latest-1.3.txt, etc. if desired.
+ARG KUBECTL_TRACK=latest.txt
+
+ARG KUBECTL_ARCH=linux/amd64
+
+RUN apk add --no-cache --update build-base curl git mercurial --virtual .kops-deps && \
+    cd "${GOPATH}/src/k8s.io/kops" && make && \
+    MODELS=$(readlink "${GOPATH}/bin/models") && mv "${MODELS}" /usr/local/bin && rm "${GOPATH}/bin/models" && mv ${GOPATH}/bin/* /usr/local/bin && \
+    GITISH=$(git describe --always) && \
+    KUBECTL_VERSION=${KUBECTL_VERSION:-$(curl -SsL --retry 5 "https://storage.googleapis.com/${KUBECTL_SOURCE}/${KUBECTL_TRACK}")} && \
+    echo "=== Fetching kubectl ${KUBECTL_VERSION} ===" && \
+    curl -SsL --retry 5 "https://storage.googleapis.com/${KUBECTL_SOURCE}/${KUBECTL_VERSION}/${KUBECTL_ARCH}/kubectl" > /usr/local/bin/kubectl && \
+    chmod +x /usr/local/bin/kubectl && \
+    /usr/local/bin/kubectl version --client && \
+    cd / && rm -rf "${GOPATH}" && rm -rf /usr/local/go && rm /usr/local/bin/go-wrapper && apk del .kops-deps && \
+    echo "=== Built kops at ${GITISH}, fetched kubectl ${KUBECTL_VERSION} ==="
+
+CMD "/go/bin/kops"

@@ -0,0 +1,15 @@
+# Running Kops in Docker
+
+The Dockerfile here is offered primarily as a way to build continuous
+integration versions of `kops` until we figure out how we want to
+release/package it.
+
+To use it, e.g. (assumes your `$HOME` is correct and that `$KOPS_STATE_STORE` is correct):
+
+```shell
+$ docker build -t kops .
+$ KOPS="docker run -v $HOME/.aws:/root/.aws:ro -v $HOME/.ssh:/root/.ssh:ro -v $HOME/.kube:/root/.kube -it kops kops --state=$KOPS_STATE_STORE"
+```
+
+This creates a shell variable that runs the `kops` container with `~/.aws` mounted in (for AWS credentials), `~/.ssh` mounted in (for SSH keys, for AWS specifically), and `~/.kube` mounted in (so `kubectl` can add newly created clusters).
+
+After this, you can just use `$KOPS` where you would generally use `kops`, e.g. `$KOPS get cluster`.

@@ -0,0 +1,77 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+readonly DOCKER_ROOT=$(dirname "${BASH_SOURCE}")
+readonly GITISH="$(git describe --always)"
+readonly ARCH=${BUILD_ARCH:-"linux/amd64"}
+readonly NAME=${BUILD_NAME:-"ci-${GITISH}-${ARCH/\//-}"}  # e.g. ci-bef7faf-linux-amd64
+readonly TMPNAME="${NAME}-$(date +%s)"  # e.g. ci-bef7faf-linux-amd64-12345678
+readonly TAG=${BUILD_DOCKER_TAG:-"b.gcr.io/kops-ci/kops:${NAME}"}
+readonly PUSH_TAG=${BUILD_PUSH_TAG:-"no"}
+readonly CLEAN_TAG=${BUILD_CLEAN_TAG:-"yes"}
+readonly TMPTAG="${TAG}-$(date +%s)"
+readonly LINK=${BUILD_LINK:-}  # Also pushes to e.g. ci-{BUILD_LINK}-linux-amd64, i.e. for "latest"
+readonly SYMBOLIC_TAG=${BUILD_SYMBOLIC_TAG:-"b.gcr.io/kops-ci/kops:ci-${LINK}-${ARCH/\//-}"}
+
+if [[ "${ARCH}" != "linux/amd64" ]]; then
+  echo "!!! Alternate architecture build not supported yet. !!!"
+  exit 1
+fi
+
+if [[ -z "${GITISH}" ]]; then
+  echo "!!! git hash not found, are you sure you're in a git tree and git is installed? !!!"
+  git config -l
+  exit 1
+fi
+
+echo
+echo "=== Copying src to docker/_src ==="
+echo
+
+rsync -a --exclude=/docker/ "${DOCKER_ROOT}/.." "${DOCKER_ROOT}/_src"
+
+echo
+echo "=== Building at ${GITISH} for ${ARCH} ==="
+echo
+
+# Build -> $TMPTAG
+docker build -t "${TMPTAG}" --build-arg "KUBECTL_ARCH=${ARCH}" --force-rm=true --rm=true --pull=true --no-cache=true "${DOCKER_ROOT}"
+
+# Squash -> $TAG
+docker create --name="${TMPNAME}" "${TMPTAG}"
+docker export "${TMPNAME}" | docker import - "${TAG}"
+
+if [[ "${PUSH_TAG}" == "yes" ]]; then
+  echo
+  echo "=== Pushing ${TAG} ==="
+  echo
+
+  gcloud docker push "${TAG}"
+fi
+
+if [[ -n "${LINK}" ]]; then
+  echo
+  echo "=== Pushing ${SYMBOLIC_TAG} ==="
+  echo
+  docker tag "${TAG}" "${SYMBOLIC_TAG}"
+  gcloud docker push "${SYMBOLIC_TAG}"
+fi
+
+echo
+echo "=== Cleaning up ==="
+echo
+docker rm "${TMPNAME}" || true
+docker rmi -f "${TMPTAG}" || true
+if [[ -n "${LINK}" ]]; then
+  docker rmi -f "${SYMBOLIC_TAG}" || true
+fi
+if [[ "${CLEAN_TAG}" == "yes" ]]; then
+  docker rmi -f "${TAG}" || true
+else
+  echo
+  echo "=== ${TAG} leaked (BUILD_CLEAN_TAG not set) ==="
+  echo
+fi
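A sketch of how the CI build script above might be driven; the script's path is not shown in this diff, so `./docker/build` is only a stand-in, and the `BUILD_*` variables are the ones the script reads:

```shell
# Build an image tagged ci-<githash>-linux-amd64, push it, keep the tag,
# and also push a symbolic "latest" link
BUILD_PUSH_TAG=yes BUILD_LINK=latest BUILD_CLEAN_TAG=no ./docker/build
```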
@@ -17,14 +17,14 @@ Install using:
 kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/dashboard/v1.1.0.yaml
 ```
 
-And then navigate to `https://<clustername>/ui`
+And then navigate to `https://api.<clustername>/ui`
 
 (`/ui` is an alias to `https://<clustername>/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard`)
 
 The login credentials are:
 
 * Username: `admin`
-* Password: get by running `kops get secrets kube --type secret -oplaintext`
+* Password: get by running `kops get secrets kube --type secret -oplaintext` or `kubectl config view --minify`
 
 
 ### Monitoring - Standalone
@ -11,4 +11,13 @@ Because k8s modifies the AWS routing table, this means that realistically kubern
routing table, and thus it requires its own subnet. It is theoretically possible to share a routing table
with other infrastructure (but not a second cluster!), but this is not really recommended.

kops will support other networking options as they add support for the daemonset method of deployment.

+kops currently supports 3 networking modes:
+
+* `classic` kubernetes native networking, done in-process
+* `kubenet` kubernetes native networking via a CNI plugin. Also has less reliance on Docker's networking.
+* `external` networking is done via a Daemonset
+
+TODO: Explain the difference between pod networking & inter-pod networking.

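As a purely illustrative sketch (not part of this change), selecting one of the three modes listed above would look roughly like the following fragment of `kops edit cluster <clustername>` output. The `networking` / `classic` / `kubenet` / `external` field names come from the json tags on the NetworkingSpec added later in this commit; the cluster name is a placeholder.

```
spec:
  networking:
    kubenet: {}
```

Leaving `networking` unset falls back to `classic`, per the defaulting logic added to `cluster.go` further down this diff.
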
@ -0,0 +1,38 @@
## Security Notes for Kubernetes

## SSH Access

SSH is allowed to the masters and the nodes, by default from anywhere.

To change the CIDR allowed to access SSH (and HTTPS), set AdminAccess on the cluster spec.

When using the default images, the SSH username will be `admin`, and the SSH private key is
the private key corresponding to the public key in `kops get secrets --type sshpublickey admin`. When
creating a new cluster, the SSH public key can be specified with the `--ssh-public-key` option, and it
defaults to `~/.ssh/id_rsa.pub`.

To change the SSH public key on an existing cluster:

* `kops delete secret --name <clustername> sshpublickey admin`
* `kops create secret --name <clustername> sshpublickey admin -i ~/.ssh/newkey.pub`
* `kops update cluster --yes` to reconfigure the auto-scaling groups
* `kops rolling-update --yes` to immediately roll all the machines so they have the new key (optional)

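For convenience, the four rotation steps above can be run as one hedged shell sequence; the cluster name and key path below are placeholders, not values taken from this change:

```
# Rotate the primary SSH key (placeholder values; adjust to your cluster)
export CLUSTER_NAME=<clustername>

kops delete secret --name ${CLUSTER_NAME} sshpublickey admin
kops create secret --name ${CLUSTER_NAME} sshpublickey admin -i ~/.ssh/newkey.pub
kops update cluster ${CLUSTER_NAME} --yes           # reconfigure the auto-scaling groups
kops rolling-update cluster ${CLUSTER_NAME} --yes   # optional: roll the machines immediately
```
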
## Kubernetes API

(this section is a work in progress)

Kubernetes has a number of authentication mechanisms:

### API Bearer Token

The API bearer token is a secret named 'admin'.

`kops get secrets admin -oplaintext` will show it

### Admin Access

Access to the administrative API is stored in a secret named 'kube':

`kops get secrets kube -oplaintext` or `kubectl config view --minify` to reveal

@ -0,0 +1,74 @@
## Building Kubernetes clusters with terraform

Kops can generate terraform configurations, and you can then apply them using the terraform plan/apply tools.
This is very handy if you are already using terraform, or if you want to check the terraform output into
version control.

The terraform output should be reasonably stable (i.e. the text files should only change where something has actually
changed - items should appear in the same order etc).

### Using terraform

To use terraform, you simply run `kops update cluster` with `--target=terraform` (but see below for a workaround for a bug
if you are using a terraform version before 0.7)

For example, a complete setup might be:

```
export KOPS_STATE_STORE=s3://<somes3bucket>
export CLUSTER_NAME=<kubernetes.mydomain.com>
${GOPATH}/bin/kops create cluster ${CLUSTER_NAME} --zones us-east-1c
${GOPATH}/bin/kops update cluster ${CLUSTER_NAME} --target=terraform

cd out/terraform
terraform plan
terraform apply
```

When you eventually `terraform destroy` the cluster, you should still run `kops delete cluster ${CLUSTER_NAME}`,
to remove the kops cluster specification and any dynamically created Kubernetes resources (ELBs or volumes).

### Workaround for Terraform versions before 0.7

Before terraform version 0.7, there was a bug where it could not create AWS tags containing a dot.

We recommend upgrading to version 0.7 or later, which fixes this bug.

However, if you need to use an earlier version:

This issue only affects the volumes.

We divide the cloudup model into three parts:

* models/config which contains all the options - this is run automatically by "create cluster"
* models/proto which sets up the volumes and other data which would be hard to recover (e.g. likely keys & secrets in the near future)
* models/cloudup which is the main cloud model for configuring everything else

So the workaround is that you don't use terraform for the `proto` phase (you can't anyway, because of the bug!):

```
export KOPS_STATE_STORE=s3://<somes3bucket>
export CLUSTER_NAME=<kubernetes.mydomain.com>
${GOPATH}/bin/kops create cluster ${CLUSTER_NAME} --zones=us-east-1c
${GOPATH}/bin/kops update cluster ${CLUSTER_NAME} --model=proto --yes
```

And then you can use terraform to do the remainder of the installation:

```
export CLUSTER_NAME=<kubernetes.mydomain.com>
${GOPATH}/bin/kops update cluster ${CLUSTER_NAME} --model=cloudup --target=terraform
```

Then, to apply using terraform:

```
cd out/terraform

terraform plan
terraform apply
```

You should still run `kops delete cluster ${CLUSTER_NAME}`, to remove the kops cluster specification and any
dynamically created Kubernetes resources (ELBs or volumes), but under this workaround also to remove the primary
EBS volumes from the `proto` phase.

@ -0,0 +1,48 @@
## Testing tips

If you are running kops as part of an e2e test, the following tips may be useful.

### CI Kubernetes Build

Set the KubernetesVersion to an `http://` or `https://` base url, such as `https://storage.googleapis.com/kubernetes-release-dev/ci/v1.4.0-alpha.2.677+ea69570f61af8e/`

We expect the base url to have a `bin/linux/amd64` directory containing:

* kubelet
* kubelet.sha1
* kubectl
* kubectl.sha1
* kube-apiserver.docker_tag
* kube-apiserver.tar
* kube-apiserver.tar.sha1
* kube-controller-manager.docker_tag
* kube-controller-manager.tar
* kube-controller-manager.tar.sha1
* kube-proxy.docker_tag
* kube-proxy.tar
* kube-proxy.tar.sha1
* kube-scheduler.docker_tag
* kube-scheduler.tar
* kube-scheduler.tar.sha1

Do this with `kops edit cluster <clustername>`. The spec should look like

```
...
spec:
  kubernetesVersion: "https://storage.googleapis.com/kubernetes-release-dev/ci/v1.4.0-alpha.2.677+ea69570f61af8e/"
  cloudProvider: aws
  etcdClusters:
  - etcdMembers:
    - name: us-east-1c
      zone: us-east-1c
    name: main
...
```

### Running the kubernetes e2e test suite

The [e2e](../e2e/README.md) directory has a docker image and some scripts which make it easy to run
the kubernetes e2e tests, using kops.

@ -0,0 +1,8 @@
## How to update Kops - Kubernetes Ops

Update to the latest source code from kubernetes/kops:

```
cd ${GOPATH}/src/k8s.io/kops/
git pull && make
```

@ -0,0 +1,28 @@
## Upgrading Kubernetes

Upgrading Kubernetes is easy with kops. The cluster spec contains a KubernetesVersion, so you
can simply edit it with `kops edit`, and apply the updated configuration to your cluster.
The `kops upgrade` command also automates checking for and applying updates.

Note: if you want to upgrade from a `kube-up` installation, please see the instructions for
[how to upgrade kubernetes installed with kube-up](upgrade_from_k8s_12.md).

### Manual update

* `kops edit cluster $NAME`
* set the KubernetesVersion to the target version (e.g. `v1.3.5`)
* `kops update cluster $NAME` to preview, then `kops update cluster $NAME --yes`
* `kops rolling-update cluster $NAME` to preview, then `kops rolling-update cluster $NAME --yes`

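Put together, the manual flow above looks roughly like the following; the cluster name is a placeholder, and the target version is whatever you set in the editor:

```
export NAME=<kubernetes.mydomain.com>

kops edit cluster $NAME                    # set the KubernetesVersion to the target version
kops update cluster $NAME                  # preview the changes
kops update cluster $NAME --yes            # apply them
kops rolling-update cluster $NAME          # preview the rolling update
kops rolling-update cluster $NAME --yes    # roll the machines onto the new version
```
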
### Automated update

* `kops upgrade cluster $NAME` to preview, then `kops upgrade cluster $NAME --yes`

In the future the upgrade step will likely perform the update immediately (and possibly even without a
node restart), but currently you must:

* `kops update cluster $NAME` to preview, then `kops update cluster $NAME --yes`
* `kops rolling-update cluster $NAME` to preview, then `kops rolling-update cluster $NAME --yes`

`kops upgrade` uses the latest Kubernetes stable release, published at `https://storage.googleapis.com/kubernetes-release/release/stable.txt`

@ -44,7 +44,7 @@ Now have a look at the cluster configuration, to make sure it looks right. If i
open an issue.

```
-kops edit cluster ${OLD_NAME}
+kops get cluster ${OLD_NAME} -oyaml
````

## Move resources to a new cluster

@ -62,7 +62,7 @@ The upgrade procedure forces you to choose a new cluster name (e.g. `k8s.mydomai

```
export NEW_NAME=k8s.mydomain.com
-kops upgrade cluster --newname ${NEW_NAME} --name ${OLD_NAME}
+kops toolbox convert-imported --newname ${NEW_NAME} --name ${OLD_NAME}
```

If you now list the clusters, you should see both the old cluster & the new cluster

@ -73,6 +73,14 @@ kops get clusters

You can also list the instance groups: `kops get ig --name ${NEW_NAME}`

+## Import the SSH public key
+
+The SSH public key is not easily retrieved from the old cluster, so you must add it:
+
+```
+kops create secret --name ${NEW_NAME} sshpublickey admin -i ~/.ssh/id_rsa.pub
+```
+
## Bring up the new cluster

Use the update command to bring up the new cluster:

@ -132,6 +140,10 @@ kubectl delete secret --namespace kube-system default-token-lhfkx

Then restart the kube-dns pod so it picks up a valid secret:
`kubectl delete pods --namespace kube-system --selector "k8s-app=kube-dns"`

+## Other fixes
+
+* If you're using a manually created ELB, the auto-scaling groups change, so you will need to reconfigure
+your ELBs to include the new auto-scaling group(s).
+
## Delete remaining resources of the old cluster

@ -0,0 +1,14 @@
FROM debian:jessie

RUN apt-get update && apt-get install --yes curl python-pip openssh-client
RUN pip install awscli

RUN curl https://sdk.cloud.google.com | bash

RUN curl https://storage.googleapis.com/golang/go1.6.3.linux-amd64.tar.gz | tar -C /usr/local -xzf -

ENV PATH /root/google-cloud-sdk/bin:/usr/local/go/bin:$PATH

ADD runtests.sh /

ADD conf /conf

@ -0,0 +1,22 @@
#JOB_NAME=kubernetes-e2e-kops-aws
#KUBERNETES_VERSION=v1.3.5
#DNS_DOMAIN="mydomain.com"
#JENKINS_GCS_LOGS_PATH=gs://kopeio-kubernetes-e2e/logs
#KOPS_STATE_STORE=s3://mys3bucket

test: image
	docker run -v ${HOME}/.ssh/id_rsa.pub:/root/.ssh/id_rsa.pub \
	  -v ${HOME}/.aws:/root/.aws \
	  -v ${HOME}/.gsutil:/root/.gsutil \
	  -v ${HOME}/.config/gcloud:/root/.config/gcloud \
	  -e "DNS_DOMAIN=${DNS_DOMAIN}" \
	  -e "JENKINS_GCS_LOGS_PATH=${JENKINS_GCS_LOGS_PATH}" \
	  -e "KOPS_STATE_STORE=${KOPS_STATE_STORE}" \
	  -e "JOB_NAME=${JOB_NAME}" \
	  -e "KUBERNETES_VERSION=${KUBERNETES_VERSION}" \
	  kubernetes-e2e-runner-kops \
	  /runtests.sh

image:
	docker build -t kubernetes-e2e-runner-kops .

@ -0,0 +1,18 @@
## Run kubernetes e2e tests

This docker image lets you run the kubernetes e2e tests very easily, using kops to create the cluster.

You simply call make, specifying some variables that control the build.

An example:

`make JOB_NAME=kubernetes-e2e-kops-aws KUBERNETES_VERSION=v1.3.5 DNS_DOMAIN=e2e.mydomain.com JENKINS_GCS_LOGS_PATH=gs://kopeio-kubernetes-e2e/logs KOPS_STATE_STORE=s3://clusters.mydomain.com`

Variables:

* `JOB_NAME` the e2e job to run. Corresponds to a conf file in the conf directory.
* `KUBERNETES_VERSION` the version of kubernetes to run. Either a version like `v1.3.5`, or a URL prefix like `https://storage.googleapis.com/kubernetes-release-dev/ci/v1.4.0-alpha.2.677+ea69570f61af8e/`. See [testing docs](../docs/testing.md)
* `DNS_DOMAIN` the dns domain name to use for the cluster. Must be a real domain name, with a zone registered in DNS (route53)
* `JENKINS_GCS_LOGS_PATH` the gs bucket where we should upload the results of the build. Note these will be publicly readable.
* `KOPS_STATE_STORE` the url where the kops registry (store of cluster information) lives.

@ -0,0 +1,12 @@
##=============================================================
# AWS Settings
export AWS_DEFAULT_REGION=${AWS_REGION}
#export KUBE_AWS_ZONE="us-west-2a"
#export PROJECT="k8s-jkns-e2e-aws"
export AWS_CONFIG_FILE=~/.aws/credentials
export AWS_SSH_KEY=~/.ssh/kube_aws_rsa
export KUBE_SSH_USER=admin
# This is needed to be able to create PD from the e2e test
export AWS_SHARED_CREDENTIALS_FILE=~/.aws/credentials

export AWS_PROFILE=kubernetes-e2e

@ -0,0 +1,5 @@
# GCE variables
#export INSTANCE_PREFIX="${E2E_NAME:-jenkins-e2e}"
#export KUBE_GCE_NETWORK="${E2E_NAME:-jenkins-e2e}"
#export KUBE_GCE_INSTANCE_PREFIX="${E2E_NAME:-jenkins-e2e}"
#export GCE_SERVICE_ACCOUNT=$(gcloud auth list 2> /dev/null | grep active | cut -f3 -d' ')

@ -0,0 +1,12 @@
export KUBERNETES_PROVIDER="aws"

export E2E_MIN_STARTUP_PODS="1"
export MASTER_SIZE="m3.large"
export NODE_SIZE="m3.large"
export NUM_NODES="3"
export GINKGO_TEST_ARGS="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"
export GINKGO_PARALLEL="y"

# Central only has 2 AZs, so we use it for the non-HA test
export NODE_ZONES="eu-central-1a"
export AWS_REGION="eu-central-1"

@ -0,0 +1,4 @@
. conf/kubernetes-e2e-upup-aws

export NODE_ZONES="eu-west-1a,eu-west-1b,eu-west-1c"
export AWS_REGION="eu-west-1"

@ -0,0 +1,2 @@
# Used to set site-specific settings
# Most of these probably are better in the Makefile

@ -0,0 +1,215 @@
|
||||||
|
#!/bin/bash -ex
|
||||||
|
|
||||||
|
if [[ -z "${JOB_NAME}" ]]; then
|
||||||
|
echo "Must specify JOB_NAME env var"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [[ -z "${KUBERNETES_VERSION}" ]]; then
|
||||||
|
echo "Must specify KUBERNETES_VERSION env var"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [[ -z "${DNS_DOMAIN}" ]]; then
|
||||||
|
echo "Must specify DNS_DOMAIN env var"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [[ -z "${KOPS_STATE_STORE}" ]]; then
|
||||||
|
echo "Must specify KOPS_STATE_STORE env var"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
# TODO: Maybe skip if we don't want to upload logs?
|
||||||
|
if [[ -z "${JENKINS_GCS_LOGS_PATH}" ]]; then
|
||||||
|
echo "Must specify JENKINS_GCS_LOGS_PATH env var"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "JOB_NAME=${JOB_NAME}"
|
||||||
|
echo "Loading conf/${JOB_NAME}"
|
||||||
|
|
||||||
|
. conf/${JOB_NAME}
|
||||||
|
|
||||||
|
echo "Loading conf/cloud/${KUBERNETES_PROVIDER}"
|
||||||
|
. conf/cloud/${KUBERNETES_PROVIDER}
|
||||||
|
|
||||||
|
echo "Loading conf/site"
|
||||||
|
. conf/site
|
||||||
|
|
||||||
|
##=============================================================
|
||||||
|
# Global settings
|
||||||
|
export KUBE_GCS_RELEASE_BUCKET=kubernetes-release
|
||||||
|
|
||||||
|
# We download the binaries ourselves
|
||||||
|
# TODO: No way to tell e2e to use a particular release?
|
||||||
|
# TODO: Maybe download and then bring up the cluster?
|
||||||
|
export JENKINS_USE_EXISTING_BINARIES=y
|
||||||
|
|
||||||
|
# This actually just skips kube-up master detection
|
||||||
|
export KUBERNETES_CONFORMANCE_TEST=y
|
||||||
|
|
||||||
|
##=============================================================
|
||||||
|
# System settings (emulate jenkins)
|
||||||
|
export USER=root
|
||||||
|
export WORKSPACE=$HOME
|
||||||
|
# Nothing should want Jenkins $HOME
|
||||||
|
export HOME=${WORKSPACE}
|
||||||
|
export BUILD_NUMBER=`date -u +%Y%m%d%H%M%S`
|
||||||
|
export JENKINS_HOME=${HOME}
|
||||||
|
|
||||||
|
# We'll directly up & down the cluster
|
||||||
|
export E2E_UP="${E2E_UP:-false}"
|
||||||
|
export E2E_TEST="${E2E_TEST:-true}"
|
||||||
|
export E2E_DOWN="${E2E_DOWN:-false}"
|
||||||
|
|
||||||
|
# Skip gcloud update checking
|
||||||
|
export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true
|
||||||
|
|
||||||
|
|
||||||
|
##=============================================================
|
||||||
|
|
||||||
|
branch=master
|
||||||
|
|
||||||
|
build_dir=${JENKINS_HOME}/jobs/${JOB_NAME}/builds/${BUILD_NUMBER}/
|
||||||
|
rm -rf ${build_dir}
|
||||||
|
mkdir -p ${build_dir}/workspace
|
||||||
|
|
||||||
|
cd ${build_dir}/workspace
|
||||||
|
|
||||||
|
# Sanity check
|
||||||
|
#gsutil ls ${JENKINS_GCS_LOGS_PATH}
|
||||||
|
|
||||||
|
exit_code=0
|
||||||
|
SECONDS=0 # magic bash timer variable
|
||||||
|
curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" > /tmp/e2e.sh
|
||||||
|
chmod +x /tmp/e2e.sh
|
||||||
|
|
||||||
|
# We need kubectl to write kubecfg from kops
|
||||||
|
curl -fsS --retry 3 "https://storage.googleapis.com/kubernetes-release/release/v1.3.5/bin/linux/amd64/kubectl" > /usr/local/bin/kubectl
|
||||||
|
chmod +x /usr/local/bin/kubectl
|
||||||
|
|
||||||
|
curl -fsS --retry 3 "https://kubeupv2.s3.amazonaws.com/kops/kops-1.3.tar.gz" > /tmp/kops.tar.gz
|
||||||
|
tar zxf /tmp/kops.tar.gz -C /opt
|
||||||
|
|
||||||
|
if [[ ! -e ${AWS_SSH_KEY} ]]; then
|
||||||
|
echo "Creating ssh key ${AWS_SSH_KEY}"
|
||||||
|
ssh-keygen -N "" -t rsa -f ${AWS_SSH_KEY}
|
||||||
|
fi
|
||||||
|
|
||||||
|
function fetch_tars_from_gcs() {
|
||||||
|
local -r bucket="${1}"
|
||||||
|
local -r build_version="${2}"
|
||||||
|
echo "Pulling binaries from GCS; using server version ${bucket}/${build_version}."
|
||||||
|
gsutil -mq cp \
|
||||||
|
"gs://${KUBE_GCS_RELEASE_BUCKET}/${bucket}/${build_version}/kubernetes.tar.gz" \
|
||||||
|
"gs://${KUBE_GCS_RELEASE_BUCKET}/${bucket}/${build_version}/kubernetes-test.tar.gz" \
|
||||||
|
.
|
||||||
|
}
|
||||||
|
|
||||||
|
function unpack_binaries() {
|
||||||
|
md5sum kubernetes*.tar.gz
|
||||||
|
tar -xzf kubernetes.tar.gz
|
||||||
|
tar -xzf kubernetes-test.tar.gz
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
fetch_tars_from_gcs release ${KUBERNETES_VERSION}
|
||||||
|
unpack_binaries
|
||||||
|
|
||||||
|
# Clean up everything when we're done
|
||||||
|
function finish {
|
||||||
|
/opt/kops/kops delete cluster \
|
||||||
|
--name ${JOB_NAME}.${DNS_DOMAIN} \
|
||||||
|
--yes 2>&1 | tee -a ${build_dir}/build-log.txt
|
||||||
|
}
|
||||||
|
trap finish EXIT
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Create the cluster spec
|
||||||
|
pushd /opt/kops
|
||||||
|
/opt/kops/kops create cluster \
|
||||||
|
--name ${JOB_NAME}.${DNS_DOMAIN} \
|
||||||
|
--cloud ${KUBERNETES_PROVIDER} \
|
||||||
|
--zones ${NODE_ZONES} \
|
||||||
|
--node-size ${NODE_SIZE} \
|
||||||
|
--master-size ${MASTER_SIZE} \
|
||||||
|
--ssh-public-key ${AWS_SSH_KEY}.pub \
|
||||||
|
--kubernetes-version ${KUBERNETES_VERSION} \
|
||||||
|
--v=4 2>&1 | tee -a ${build_dir}/build-log.txt
|
||||||
|
exit_code=${PIPESTATUS[0]}
|
||||||
|
popd
|
||||||
|
|
||||||
|
# Apply the cluster spec
|
||||||
|
if [[ ${exit_code} == 0 ]]; then
|
||||||
|
pushd /opt/kops
|
||||||
|
/opt/kops/kops update cluster \
|
||||||
|
--name ${JOB_NAME}.${DNS_DOMAIN} \
|
||||||
|
--yes --v=4 2>&1 | tee -a ${build_dir}/build-log.txt
|
||||||
|
exit_code=${PIPESTATUS[0]}
|
||||||
|
popd
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Wait for kubectl to begin responding (at least master up)
|
||||||
|
if [[ ${exit_code} == 0 ]]; then
|
||||||
|
attempt=0
|
||||||
|
while true; do
|
||||||
|
kubectl get nodes --show-labels 2>&1 | tee -a ${build_dir}/build-log.txt
|
||||||
|
exit_code=${PIPESTATUS[0]}
|
||||||
|
|
||||||
|
if [[ ${exit_code} == 0 ]]; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
if (( attempt > 60 )); then
|
||||||
|
echo "Unable to connect to API in 15 minutes (master did not launch?)"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
attempt=$(($attempt+1))
|
||||||
|
sleep 15
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# TODO: can we get rid of this?
|
||||||
|
echo "API responded; waiting 450 seconds for DNS to settle"
|
||||||
|
for ((i=1;i<=15;i++)); do
|
||||||
|
kubectl get nodes --show-labels 2>&1 | tee -a ${build_dir}/build-log.txt
|
||||||
|
sleep 30
|
||||||
|
done
|
||||||
|
|
||||||
|
|
||||||
|
# Run e2e tests
|
||||||
|
if [[ ${exit_code} == 0 ]]; then
|
||||||
|
/tmp/e2e.sh 2>&1 | tee -a ${build_dir}/build-log.txt
|
||||||
|
exit_code=${PIPESTATUS[0]}
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Try to clean up normally so it goes into the logs
|
||||||
|
# (we have an exit hook for abnormal termination, but that does not get logged)
|
||||||
|
finish
|
||||||
|
|
||||||
|
duration=$SECONDS
|
||||||
|
set +e
|
||||||
|
|
||||||
|
if [[ ${exit_code} == 0 ]]; then
|
||||||
|
success="true"
|
||||||
|
else
|
||||||
|
success="false"
|
||||||
|
fi
|
||||||
|
|
||||||
|
version=`cat kubernetes/version`
|
||||||
|
|
||||||
|
gcs_acl="public-read"
|
||||||
|
gcs_job_path="${JENKINS_GCS_LOGS_PATH}/${JOB_NAME}"
|
||||||
|
gcs_build_path="${gcs_job_path}/${BUILD_NUMBER}"
|
||||||
|
|
||||||
|
gsutil -q cp -a "${gcs_acl}" -z txt "${build_dir}/build-log.txt" "${gcs_build_path}/"
|
||||||
|
|
||||||
|
curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/upload-to-gcs.sh" | bash -
|
||||||
|
|
||||||
|
|
||||||
|
curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/upload-finished.sh" > upload-finished.sh
|
||||||
|
chmod +x upload-finished.sh
|
||||||
|
|
||||||
|
if [[ ${exit_code} == 0 ]]; then
|
||||||
|
./upload-finished.sh SUCCESS
|
||||||
|
else
|
||||||
|
./upload-finished.sh UNSTABLE
|
||||||
|
fi
|
||||||
|
|
|
@ -0,0 +1,13 @@
|
||||||
|
FROM debian:jessie
|
||||||
|
|
||||||
|
# Install packages:
|
||||||
|
# curl (to download golang)
|
||||||
|
# git (for getting the current head)
|
||||||
|
# gcc make (for compilation)
|
||||||
|
RUN apt-get update && apt-get install --yes curl git gcc make
|
||||||
|
|
||||||
|
# Install golang
|
||||||
|
RUN curl -L https://storage.googleapis.com/golang/go1.6.3.linux-amd64.tar.gz | tar zx -C /usr/local
|
||||||
|
ENV PATH $PATH:/usr/local/go/bin
|
||||||
|
|
||||||
|
COPY images/nodeup-builder/onbuild.sh /onbuild.sh
|
|
@ -0,0 +1,13 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
mkdir -p /go
|
||||||
|
export GOPATH=/go
|
||||||
|
|
||||||
|
mkdir -p /go/src/k8s.io
|
||||||
|
ln -s /src/ /go/src/k8s.io/kops
|
||||||
|
|
||||||
|
cd /go/src/k8s.io/kops/
|
||||||
|
make nodeup-gocode
|
||||||
|
|
||||||
|
mkdir -p /src/.build/artifacts/
|
||||||
|
cp /go/bin/nodeup /src/.build/artifacts/
|
|
@ -7,7 +7,7 @@ FROM debian:jessie
|
||||||
RUN apt-get update && apt-get install --yes curl git gcc make
|
RUN apt-get update && apt-get install --yes curl git gcc make
|
||||||
|
|
||||||
# Install golang
|
# Install golang
|
||||||
RUN curl -L https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz | tar zx -C /usr/local
|
RUN curl -L https://storage.googleapis.com/golang/go1.6.3.linux-amd64.tar.gz | tar zx -C /usr/local
|
||||||
ENV PATH $PATH:/usr/local/go/bin
|
ENV PATH $PATH:/usr/local/go/bin
|
||||||
|
|
||||||
COPY images/protokube-builder/onbuild.sh /onbuild.sh
|
COPY images/protokube-builder/onbuild.sh /onbuild.sh
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
|
|
||||||
# ASG for master
|
# ASG for master
|
||||||
launchConfiguration/{{ $m.Name }}.masters.{{ ClusterName }}:
|
launchConfiguration/{{ $m.Name }}.masters.{{ ClusterName }}:
|
||||||
sshKey: sshKey/{{ ClusterName }}
|
sshKey: sshKey/{{ SSHKeyName }}
|
||||||
securityGroups:
|
securityGroups:
|
||||||
- securityGroup/masters.{{ ClusterName }}
|
- securityGroup/masters.{{ ClusterName }}
|
||||||
iamInstanceProfile: iamInstanceProfile/masters.{{ ClusterName }}
|
iamInstanceProfile: iamInstanceProfile/masters.{{ ClusterName }}
|
||||||
|
|
|
@ -28,7 +28,7 @@ instance/master.{{ ClusterName }}:
|
||||||
{{ if not (HasTag "_master_lb") }}
|
{{ if not (HasTag "_master_lb") }}
|
||||||
k8s.io/dns/public: "api.{{ ClusterName }}"
|
k8s.io/dns/public: "api.{{ ClusterName }}"
|
||||||
{{ end }}
|
{{ end }}
|
||||||
sshKey: sshKey/{{ ClusterName }}
|
sshKey: sshKey/{{ SSHKeyName }}
|
||||||
securityGroups:
|
securityGroups:
|
||||||
- securityGroup/master.{{ ClusterName }}
|
- securityGroup/master.{{ ClusterName }}
|
||||||
iamInstanceProfile: iamInstanceProfile/master.{{ ClusterName }}
|
iamInstanceProfile: iamInstanceProfile/master.{{ ClusterName }}
|
||||||
|
|
|
@ -46,7 +46,7 @@ securityGroupRule/all-node-to-master:
|
||||||
|
|
||||||
# LaunchConfiguration & ASG for nodes
|
# LaunchConfiguration & ASG for nodes
|
||||||
launchConfiguration/{{ $nodeset.Name }}.{{ ClusterName }}:
|
launchConfiguration/{{ $nodeset.Name }}.{{ ClusterName }}:
|
||||||
sshKey: sshKey/{{ ClusterName }}
|
sshKey: sshKey/{{ SSHKeyName }}
|
||||||
securityGroups:
|
securityGroups:
|
||||||
- securityGroup/nodes.{{ ClusterName }}
|
- securityGroup/nodes.{{ ClusterName }}
|
||||||
iamInstanceProfile: iamInstanceProfile/nodes.{{ ClusterName }}
|
iamInstanceProfile: iamInstanceProfile/nodes.{{ ClusterName }}
|
||||||
|
|
|
@ -113,7 +113,7 @@ echo "== nodeup node config starting =="
|
||||||
ensure-install-dir
|
ensure-install-dir
|
||||||
|
|
||||||
cat > kube_env.yaml << __EOF_KUBE_ENV
|
cat > kube_env.yaml << __EOF_KUBE_ENV
|
||||||
{{ RenderResource "resources/config.yaml" Args }}
|
{{ RenderNodeUpConfig Args }}
|
||||||
__EOF_KUBE_ENV
|
__EOF_KUBE_ENV
|
||||||
|
|
||||||
download-release
|
download-release
|
||||||
|
|
|
@ -1,3 +1,2 @@
|
||||||
sshKey/{{ ClusterName }}:
|
sshKey/{{ SSHKeyName}}:
|
||||||
name: kubernetes.{{ClusterName}}
|
|
||||||
publicKey: resources/ssh-public-key
|
publicKey: resources/ssh-public-key
|
||||||
|
|
|
@ -1,13 +1 @@
-Tags:
-{{ range $tag := ComputeNodeTags Args }}
-- {{ $tag }}
-{{ end }}
-
-Assets:
-{{ range $asset := Assets }}
-- {{ $asset }}
-{{ end }}
-
-ClusterName: {{ ClusterName }}
-ClusterLocation: {{ ClusterLocation }}
+{{ RenderNodeUpConfig Args }}

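For orientation, the `kube_env.yaml` that `RenderNodeUpConfig` now writes in place of the old template above might look roughly like this. This is a hedged illustration only: the field names come from the old template and from the `nodeup.NodeUpConfig` fields populated in `apply_cluster.go` further down this diff, and every concrete value is invented.

```
Tags:
- _kubernetes_pool
- _protokube
- _aws
Assets:
- <sha1>@https://storage.googleapis.com/kubernetes-release/release/v1.3.5/bin/linux/amd64/kubelet
- <sha1>@https://storage.googleapis.com/kubernetes-release/release/v1.3.5/bin/linux/amd64/kubectl
ClusterName: mycluster.example.com
ClusterLocation: <path to the cluster configuration in the state store>
```
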
@ -1,5 +0,0 @@
|
||||||
{% set log_level = "--log-level=warn" -%}
|
|
||||||
{% if pillar['docker_test_log_level'] is defined -%}
|
|
||||||
{% set log_level = pillar['docker_test_log_level'] -%}
|
|
||||||
{% endif -%}
|
|
||||||
docker.bridge=
|
|
|
@ -0,0 +1,2 @@
|
||||||
|
Docker:
|
||||||
|
Bridge: ""
|
|
@ -14,4 +14,4 @@ KubeAPIServer:
|
||||||
TokenAuthFile: /srv/kubernetes/known_tokens.csv
|
TokenAuthFile: /srv/kubernetes/known_tokens.csv
|
||||||
LogLevel: 2
|
LogLevel: 2
|
||||||
AllowPrivileged: true
|
AllowPrivileged: true
|
||||||
Image: gcr.io/google_containers/kube-apiserver:v{{ .KubernetesVersion }}
|
Image: {{ Image "kube-apiserver" }}
|
|
@ -0,0 +1,2 @@
|
||||||
|
KubeControllerManager:
|
||||||
|
ConfigureCloudRoutes: true
|
|
@ -0,0 +1,2 @@
|
||||||
|
KubeControllerManager:
|
||||||
|
ConfigureCloudRoutes: false
|
|
@ -0,0 +1,2 @@
|
||||||
|
KubeControllerManager:
|
||||||
|
ConfigureCloudRoutes: true
|
|
@ -2,11 +2,12 @@ KubeControllerManager:
|
||||||
PathSrvKubernetes: /srv/kubernetes
|
PathSrvKubernetes: /srv/kubernetes
|
||||||
Master: 127.0.0.1:8080
|
Master: 127.0.0.1:8080
|
||||||
AllocateNodeCIDRs: true
|
AllocateNodeCIDRs: true
|
||||||
|
ConfigureCloudRoutes: false
|
||||||
ServiceAccountPrivateKeyFile: /srv/kubernetes/server.key
|
ServiceAccountPrivateKeyFile: /srv/kubernetes/server.key
|
||||||
LogLevel: 2
|
LogLevel: 2
|
||||||
RootCAFile: /srv/kubernetes/ca.crt
|
RootCAFile: /srv/kubernetes/ca.crt
|
||||||
ClusterName: {{ ClusterName }}
|
ClusterName: {{ ClusterName }}
|
||||||
Image: gcr.io/google_containers/kube-controller-manager:v{{ .KubernetesVersion }}
|
Image: {{ Image "kube-controller-manager" }}
|
||||||
# Doesn't seem to be any real downside to always doing a leader election
|
# Doesn't seem to be any real downside to always doing a leader election
|
||||||
LeaderElection:
|
LeaderElection:
|
||||||
LeaderElect: true
|
LeaderElect: true
|
||||||
|
|
|
@ -1,4 +1,6 @@
|
||||||
KubeDNS:
|
KubeDNS:
|
||||||
Replicas: 1
|
Replicas: 2
|
||||||
ServerIP: {{ WellKnownServiceIP 10 }}
|
ServerIP: {{ WellKnownServiceIP 10 }}
|
||||||
Domain: {{ .ClusterDNSDomain }}
|
Domain: {{ .ClusterDNSDomain }}
|
||||||
|
# TODO: Once we start shipping more images, start using them
|
||||||
|
Image: gcr.io/google_containers/kubedns-amd64:1.3
|
||||||
|
|
|
@ -7,6 +7,6 @@ KubeProxy:
|
||||||
# requests of other per-node add-ons (e.g. fluentd).
|
# requests of other per-node add-ons (e.g. fluentd).
|
||||||
CPURequest: 20m
|
CPURequest: 20m
|
||||||
|
|
||||||
Image: gcr.io/google_containers/kube-proxy:v{{ .KubernetesVersion }}
|
Image: {{ Image "kube-proxy" }}
|
||||||
|
|
||||||
Master: https://{{ .MasterInternalName }}
|
Master: https://{{ .MasterInternalName }}
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
KubeScheduler:
|
KubeScheduler:
|
||||||
Master: 127.0.0.1:8080
|
Master: 127.0.0.1:8080
|
||||||
LogLevel: 2
|
LogLevel: 2
|
||||||
Image: gcr.io/google_containers/kube-scheduler:v{{ .KubernetesVersion }}
|
Image: {{ Image "kube-scheduler" }}
|
||||||
# Doesn't seem to be any real downside to always doing a leader election
|
# Doesn't seem to be any real downside to always doing a leader election
|
||||||
LeaderElection:
|
LeaderElection:
|
||||||
LeaderElect: true
|
LeaderElect: true
|
||||||
|
|
|
@ -0,0 +1,3 @@
|
||||||
|
Kubelet:
|
||||||
|
NetworkPluginName: kubenet
|
||||||
|
ReconcileCIDR: true
|
|
@ -5,8 +5,8 @@ After=docker.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
EnvironmentFile=/etc/sysconfig/protokube
|
EnvironmentFile=/etc/sysconfig/protokube
|
||||||
ExecStartPre=/usr/bin/docker pull kope/protokube:1.3
|
ExecStartPre=/usr/bin/docker pull {{ ProtokubeImage }}
|
||||||
ExecStart=/usr/bin/docker run -v /:/rootfs/ --privileged kope/protokube:1.3 /usr/bin/protokube "$DAEMON_ARGS"
|
ExecStart=/usr/bin/docker run -v /:/rootfs/ --privileged {{ ProtokubeImage }} /usr/bin/protokube "$DAEMON_ARGS"
|
||||||
Restart=always
|
Restart=always
|
||||||
RestartSec=2s
|
RestartSec=2s
|
||||||
StartLimitInterval=0
|
StartLimitInterval=0
|
|
@ -1,3 +0,0 @@
|
||||||
{
|
|
||||||
"manageState": false
|
|
||||||
}
|
|
|
@ -0,0 +1,2 @@
|
||||||
|
{
|
||||||
|
}
|
|
@ -0,0 +1,3 @@
|
||||||
|
{
|
||||||
|
"mode": "0755"
|
||||||
|
}
|
|
@ -0,0 +1,2 @@
|
||||||
|
{
|
||||||
|
}
|
|
@ -0,0 +1,3 @@
|
||||||
|
{
|
||||||
|
"mode": "0755"
|
||||||
|
}
|
|
@ -0,0 +1,2 @@
|
||||||
|
{
|
||||||
|
}
|
|
@ -0,0 +1,3 @@
|
||||||
|
{
|
||||||
|
"mode": "0755"
|
||||||
|
}
|
|
@ -0,0 +1,2 @@
|
||||||
|
{
|
||||||
|
}
|
|
@ -0,0 +1,3 @@
|
||||||
|
{
|
||||||
|
"mode": "0755"
|
||||||
|
}
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
"k8s.io/kops/upup/pkg/fi/vfs"
|
||||||
k8sapi "k8s.io/kubernetes/pkg/api"
|
k8sapi "k8s.io/kubernetes/pkg/api"
|
||||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||||
"net"
|
"net"
|
||||||
|
@ -76,8 +77,6 @@ type ClusterSpec struct {
|
||||||
// ClusterName is a unique identifier for the cluster, and currently must be a DNS name
|
// ClusterName is a unique identifier for the cluster, and currently must be a DNS name
|
||||||
//ClusterName string `json:",omitempty"`
|
//ClusterName string `json:",omitempty"`
|
||||||
|
|
||||||
//AllocateNodeCIDRs *bool `json:"allocateNodeCIDRs,omitempty"`
|
|
||||||
|
|
||||||
Multizone *bool `json:"multizone,omitempty"`
|
Multizone *bool `json:"multizone,omitempty"`
|
||||||
|
|
||||||
//ClusterIPRange string `json:",omitempty"`
|
//ClusterIPRange string `json:",omitempty"`
|
||||||
|
@ -103,8 +102,6 @@ type ClusterSpec struct {
|
||||||
// * enable debugging handlers on the master, so kubectl logs works
|
// * enable debugging handlers on the master, so kubectl logs works
|
||||||
IsolateMasters *bool `json:"isolateMasters,omitempty"`
|
IsolateMasters *bool `json:"isolateMasters,omitempty"`
|
||||||
|
|
||||||
//NetworkProvider string `json:",omitempty"`
|
|
||||||
//
|
|
||||||
//HairpinMode string `json:",omitempty"`
|
//HairpinMode string `json:",omitempty"`
|
||||||
//
|
//
|
||||||
//OpencontrailTag string `json:",omitempty"`
|
//OpencontrailTag string `json:",omitempty"`
|
||||||
|
@ -209,9 +206,15 @@ type ClusterSpec struct {
|
||||||
KubeProxy *KubeProxyConfig `json:"kubeProxy,omitempty"`
|
KubeProxy *KubeProxyConfig `json:"kubeProxy,omitempty"`
|
||||||
Kubelet *KubeletConfig `json:"kubelet,omitempty"`
|
Kubelet *KubeletConfig `json:"kubelet,omitempty"`
|
||||||
MasterKubelet *KubeletConfig `json:"masterKubelet,omitempty"`
|
MasterKubelet *KubeletConfig `json:"masterKubelet,omitempty"`
|
||||||
|
|
||||||
|
// Networking configuration
|
||||||
|
Networking *NetworkingSpec `json:"networking,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type KubeDNSConfig struct {
|
type KubeDNSConfig struct {
|
||||||
|
// Image is the name of the docker image to run
|
||||||
|
Image string `json:"image,omitempty"`
|
||||||
|
|
||||||
Replicas int `json:"replicas,omitempty"`
|
Replicas int `json:"replicas,omitempty"`
|
||||||
Domain string `json:"domain,omitempty"`
|
Domain string `json:"domain,omitempty"`
|
||||||
ServerIP string `json:"serverIP,omitempty"`
|
ServerIP string `json:"serverIP,omitempty"`
|
||||||
|
@ -265,6 +268,7 @@ type ClusterZoneSpec struct {
|
||||||
// PerformAssignments populates values that are required and immutable
|
// PerformAssignments populates values that are required and immutable
|
||||||
// For example, it assigns stable Keys to NodeSets & Masters, and
|
// For example, it assigns stable Keys to NodeSets & Masters, and
|
||||||
// it assigns CIDRs to subnets
|
// it assigns CIDRs to subnets
|
||||||
|
// We also assign KubernetesVersion, because we want it to be explicit
|
||||||
func (c *Cluster) PerformAssignments() error {
|
func (c *Cluster) PerformAssignments() error {
|
||||||
if c.Spec.NetworkCIDR == "" && !c.SharedVPC() {
|
if c.Spec.NetworkCIDR == "" && !c.SharedVPC() {
|
||||||
// TODO: Choose non-overlapping networking CIDRs for VPCs?
|
// TODO: Choose non-overlapping networking CIDRs for VPCs?
|
||||||
|
@ -286,6 +290,11 @@ func (c *Cluster) PerformAssignments() error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
err := c.ensureKubernetesVersion()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -297,9 +306,55 @@ func (c *Cluster) FillDefaults() error {
|
||||||
c.Spec.AdminAccess = append(c.Spec.AdminAccess, "0.0.0.0/0")
|
c.Spec.AdminAccess = append(c.Spec.AdminAccess, "0.0.0.0/0")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.Spec.Networking == nil {
|
||||||
|
c.Spec.Networking = &NetworkingSpec{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Spec.Networking.Classic != nil {
|
||||||
|
// OK
|
||||||
|
} else if c.Spec.Networking.Kubenet != nil {
|
||||||
|
// OK
|
||||||
|
} else if c.Spec.Networking.External != nil {
|
||||||
|
// OK
|
||||||
|
} else {
|
||||||
|
// No networking model selected; choose Classic
|
||||||
|
c.Spec.Networking.Classic = &ClassicNetworkingSpec{}
|
||||||
|
}
|
||||||
|
|
||||||
|
err := c.ensureKubernetesVersion()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ensureKubernetesVersion populates KubernetesVersion, if it is not already set
|
||||||
|
// It will be populated with the latest stable kubernetes version
|
||||||
|
func (c *Cluster) ensureKubernetesVersion() error {
|
||||||
|
if c.Spec.KubernetesVersion == "" {
|
||||||
|
latestVersion, err := FindLatestKubernetesVersion()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
glog.Infof("Using kubernetes latest stable version: %s", latestVersion)
|
||||||
|
c.Spec.KubernetesVersion = latestVersion
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindLatestKubernetesVersion returns the latest kubernetes version,
|
||||||
|
// as stored at https://storage.googleapis.com/kubernetes-release/release/stable.txt
|
||||||
|
func FindLatestKubernetesVersion() (string, error) {
|
||||||
|
stableURL := "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
|
||||||
|
b, err := vfs.Context.ReadFile(stableURL)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("KubernetesVersion not specified, and unable to download latest version from %q: %v", stableURL, err)
|
||||||
|
}
|
||||||
|
latestVersion := strings.TrimSpace(string(b))
|
||||||
|
return latestVersion, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (z *ClusterZoneSpec) performAssignments(c *Cluster) error {
|
func (z *ClusterZoneSpec) performAssignments(c *Cluster) error {
|
||||||
if z.CIDR == "" {
|
if z.CIDR == "" {
|
||||||
cidr, err := z.assignCIDR(c)
|
cidr, err := z.assignCIDR(c)
|
||||||
|
|
|
@ -147,9 +147,9 @@ type KubeletConfig struct {
|
||||||
//LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"`
|
//LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"`
|
||||||
//// How frequently to calculate and cache volume disk usage for all pods
|
//// How frequently to calculate and cache volume disk usage for all pods
|
||||||
//VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"`
|
//VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"`
|
||||||
//// networkPluginName is the name of the network plugin to be invoked for
|
// networkPluginName is the name of the network plugin to be invoked for
|
||||||
//// various events in kubelet/pod lifecycle
|
// various events in kubelet/pod lifecycle
|
||||||
//NetworkPluginName string `json:"networkPluginName"`
|
NetworkPluginName string `json:"networkPluginName,omitempty" flag:"network-plugin"`
|
||||||
//// networkPluginDir is the full path of the directory in which to search
|
//// networkPluginDir is the full path of the directory in which to search
|
||||||
//// for network plugins
|
//// for network plugins
|
||||||
//NetworkPluginDir string `json:"networkPluginDir"`
|
//NetworkPluginDir string `json:"networkPluginDir"`
|
||||||
|
@ -337,11 +337,13 @@ type KubeProxyConfig struct {
}

type DockerConfig struct {
	Bridge   string `json:"bridge,omitempty" flag:"bridge"`
	LogLevel string `json:"logLevel,omitempty" flag:"log-level"`
-	IPTables bool `json:"ipTables,omitempty" flag:"iptables"`
+	IPTables *bool `json:"ipTables,omitempty" flag:"iptables"`
-	IPMasq bool `json:"ipMasq,omitempty" flag:"ip-masq"`
+	IPMasq *bool `json:"ipMasq,omitempty" flag:"ip-masq"`
-	Storage string `json:"storage,omitempty" flag:"s"`
+	Storage string `json:"storage,omitempty" flag:"storage-driver"`
+	InsecureRegistry string `json:"insecureRegistry,omitempty" flag:"insecure-registry"`
+	MTU *int `json:"mtu,omitempty" flag:"mtu"`
}

type KubeAPIServerConfig struct {

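To make the new fields concrete, a hypothetical options fragment in the same style as the `Docker:` / `Bridge: ""` model file added earlier in this diff might look like the following; the field names mirror the DockerConfig struct above, and the values are purely illustrative assumptions, not defaults shipped by this change.

```
Docker:
  Storage: "overlay"
  InsecureRegistry: "registry.example.com:5000"
  MTU: 1500
  IPMasq: true
```
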
@ -0,0 +1,7 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
// AnnotationNameManagement is the annotation that indicates that a cluster is under external or non-standard management
|
||||||
|
const AnnotationNameManagement = "kops.kubernetes.io/management"
|
||||||
|
|
||||||
|
// AnnotationValueManagementImported is the annotation value that indicates a cluster was imported, typically as part of an upgrade
|
||||||
|
const AnnotationValueManagementImported = "imported"
|
|
@ -0,0 +1,22 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
// NetworkingSpec allows selection and configuration of a networking plugin
|
||||||
|
type NetworkingSpec struct {
|
||||||
|
Classic *ClassicNetworkingSpec `json:"classic,omitempty"`
|
||||||
|
Kubenet *KubenetNetworkingSpec `json:"kubenet,omitempty"`
|
||||||
|
External *ExternalNetworkingSpec `json:"external,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClassicNetworkingSpec is the specification of classic networking mode, integrated into kubernetes
|
||||||
|
type ClassicNetworkingSpec struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// KubenetNetworkingSpec is the specification for kubenet networking, largely integrated but intended to replace classic
|
||||||
|
type KubenetNetworkingSpec struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExternalNetworkingSpec is the specification for networking that is implemented by a Daemonset
|
||||||
|
// Networking is not managed by kops - we can create options here that directly configure e.g. weave
|
||||||
|
// but this is useful for arbitrary network modes or for modes that don't need additional configuration.
|
||||||
|
type ExternalNetworkingSpec struct {
|
||||||
|
}
|
|
@ -2,8 +2,10 @@ package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"k8s.io/kubernetes/pkg/util/validation"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (c *Cluster) Validate(strict bool) error {
|
func (c *Cluster) Validate(strict bool) error {
|
||||||
|
@ -13,6 +15,18 @@ func (c *Cluster) Validate(strict bool) error {
|
||||||
return fmt.Errorf("Cluster Name is required (e.g. --name=mycluster.myzone.com)")
|
return fmt.Errorf("Cluster Name is required (e.g. --name=mycluster.myzone.com)")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Must be a dns name
|
||||||
|
errs := validation.IsDNS1123Subdomain(c.Name)
|
||||||
|
if len(errs) != 0 {
|
||||||
|
return fmt.Errorf("Cluster Name must be a valid DNS name (e.g. --name=mycluster.myzone.com) errors: %s", strings.Join(errs, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(c.Name, ".") {
|
||||||
|
return fmt.Errorf("Cluster Name must be a fully-qualified DNS name (e.g. --name=mycluster.myzone.com)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if len(c.Spec.Zones) == 0 {
|
if len(c.Spec.Zones) == 0 {
|
||||||
// TODO: Auto choose zones from region?
|
// TODO: Auto choose zones from region?
|
||||||
return fmt.Errorf("must configure at least one Zone (use --zones)")
|
return fmt.Errorf("must configure at least one Zone (use --zones)")
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/kubernetes/pkg/util/validation"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_Validate_DNS(t *testing.T) {
|
||||||
|
for _, name := range []string{"test.-", "!", "-"} {
|
||||||
|
errs := validation.IsDNS1123Subdomain(name)
|
||||||
|
if len(errs) == 0 {
|
||||||
|
t.Fatalf("Expected errors validating name %q", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -69,13 +69,13 @@ func (r *assetResource) GetSource() *Source {
|
||||||
}
|
}
|
||||||
|
|
||||||
type AssetStore struct {
|
type AssetStore struct {
|
||||||
assetDir string
|
cacheDir string
|
||||||
assets []*asset
|
assets []*asset
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewAssetStore(assetDir string) *AssetStore {
|
func NewAssetStore(cacheDir string) *AssetStore {
|
||||||
a := &AssetStore{
|
a := &AssetStore{
|
||||||
assetDir: assetDir,
|
cacheDir: cacheDir,
|
||||||
}
|
}
|
||||||
return a
|
return a
|
||||||
}
|
}
|
||||||
|
@ -163,7 +163,7 @@ func (a *AssetStore) addURL(url string, hash *hashing.Hash) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
localFile := path.Join(a.assetDir, hash.String()+"_"+utils.SanitizeString(url))
|
localFile := path.Join(a.cacheDir, hash.String()+"_"+utils.SanitizeString(url))
|
||||||
_, err = DownloadURL(url, localFile, hash)
|
_, err = DownloadURL(url, localFile, hash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -237,7 +237,7 @@ func (a *AssetStore) addURL(url string, hash *hashing.Hash) error {
|
||||||
//}
|
//}
|
||||||
|
|
||||||
func (a *AssetStore) addArchive(archiveSource *Source, archiveFile string) error {
|
func (a *AssetStore) addArchive(archiveSource *Source, archiveFile string) error {
|
||||||
extracted := path.Join(a.assetDir, "extracted/"+path.Base(archiveFile))
|
extracted := path.Join(a.cacheDir, "extracted/"+path.Base(archiveFile))
|
||||||
|
|
||||||
// TODO: Use a temp file so this is atomic
|
// TODO: Use a temp file so this is atomic
|
||||||
if _, err := os.Stat(extracted); os.IsNotExist(err) {
|
if _, err := os.Stat(extracted); os.IsNotExist(err) {
|
||||||
|
|
|
@ -32,6 +32,9 @@ const (
|
||||||
SecretTypeSSHPublicKey = "SSHPublicKey"
|
SecretTypeSSHPublicKey = "SSHPublicKey"
|
||||||
SecretTypeKeypair = "Keypair"
|
SecretTypeKeypair = "Keypair"
|
||||||
SecretTypeSecret = "Secret"
|
SecretTypeSecret = "Secret"
|
||||||
|
|
||||||
|
// Name for the primary SSH key
|
||||||
|
SecretNameSSHPrimary = "admin"
|
||||||
)
|
)
|
||||||
|
|
||||||
type KeystoreItem struct {
|
type KeystoreItem struct {
|
||||||
|
|
|
@ -3,7 +3,6 @@ package cloudup
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
"io/ioutil"
|
|
||||||
"k8s.io/kops/upup/pkg/api"
|
"k8s.io/kops/upup/pkg/api"
|
||||||
"k8s.io/kops/upup/pkg/fi"
|
"k8s.io/kops/upup/pkg/fi"
|
||||||
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
|
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
|
||||||
|
@ -12,11 +11,16 @@ import (
|
||||||
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
|
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
|
||||||
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
|
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
|
||||||
"k8s.io/kops/upup/pkg/fi/fitasks"
|
"k8s.io/kops/upup/pkg/fi/fitasks"
|
||||||
|
"k8s.io/kops/upup/pkg/fi/hashing"
|
||||||
|
"k8s.io/kops/upup/pkg/fi/nodeup"
|
||||||
|
"k8s.io/kops/upup/pkg/fi/vfs"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const MaxAttemptsWithNoProgress = 3
|
||||||
|
|
||||||
type ApplyClusterCmd struct {
|
type ApplyClusterCmd struct {
|
||||||
Cluster *api.Cluster
|
Cluster *api.Cluster
|
||||||
|
|
||||||
|
@ -28,12 +32,12 @@ type ApplyClusterCmd struct {
|
||||||
// Models is a list of cloudup models to apply
|
// Models is a list of cloudup models to apply
|
||||||
Models []string
|
Models []string
|
||||||
|
|
||||||
// Target specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
|
// TargetName specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
|
||||||
Target string
|
TargetName string
|
||||||
//// The node model to use
|
|
||||||
//NodeModel string
|
// Target is the fi.Target we will operate against
|
||||||
// The SSH public key (file) to use
|
Target fi.Target
|
||||||
SSHPublicKey string
|
|
||||||
// OutDir is a local directory in which we place output, can cache files etc
|
// OutDir is a local directory in which we place output, can cache files etc
|
||||||
OutDir string
|
OutDir string
|
||||||
|
|
||||||
|
@ -56,10 +60,6 @@ func (c *ApplyClusterCmd) Run() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Make these configurable?
|
|
||||||
useMasterASG := true
|
|
||||||
useMasterLB := false
|
|
||||||
|
|
||||||
err = api.DeepValidate(c.Cluster, c.InstanceGroups, true)
|
err = api.DeepValidate(c.Cluster, c.InstanceGroups, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -78,8 +78,6 @@ func (c *ApplyClusterCmd) Run() error {
|
||||||
return fmt.Errorf("ClusterRegistry is required")
|
return fmt.Errorf("ClusterRegistry is required")
|
||||||
}
|
}
|
||||||
|
|
||||||
tags := make(map[string]struct{})
|
|
||||||
|
|
||||||
l := &Loader{}
|
l := &Loader{}
|
||||||
l.Init()
|
l.Init()
|
||||||
l.Cluster = c.Cluster
|
l.Cluster = c.Cluster
|
||||||
|
@ -99,8 +97,16 @@ func (c *ApplyClusterCmd) Run() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(c.Assets) == 0 {
|
if len(c.Assets) == 0 {
|
||||||
|
var baseURL string
|
||||||
|
if isBaseURL(cluster.Spec.KubernetesVersion) {
|
||||||
|
baseURL = cluster.Spec.KubernetesVersion
|
||||||
|
} else {
|
||||||
|
baseURL = "https://storage.googleapis.com/kubernetes-release/release/v" + cluster.Spec.KubernetesVersion
|
||||||
|
}
|
||||||
|
baseURL = strings.TrimSuffix(baseURL, "/")
|
||||||
|
|
||||||
{
|
{
|
||||||
defaultKubeletAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/bin/linux/amd64/kubelet", cluster.Spec.KubernetesVersion)
|
defaultKubeletAsset := baseURL + "/bin/linux/amd64/kubelet"
|
||||||
glog.Infof("Adding default kubelet release asset: %s", defaultKubeletAsset)
|
glog.Infof("Adding default kubelet release asset: %s", defaultKubeletAsset)
|
||||||
|
|
||||||
hash, err := findHash(defaultKubeletAsset)
|
hash, err := findHash(defaultKubeletAsset)
|
||||||
|
@ -111,7 +117,7 @@ func (c *ApplyClusterCmd) Run() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
defaultKubectlAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/bin/linux/amd64/kubectl", cluster.Spec.KubernetesVersion)
|
defaultKubectlAsset := baseURL + "/bin/linux/amd64/kubectl"
|
||||||
glog.Infof("Adding default kubectl release asset: %s", defaultKubectlAsset)
|
glog.Infof("Adding default kubectl release asset: %s", defaultKubectlAsset)
|
||||||
|
|
||||||
hash, err := findHash(defaultKubectlAsset)
|
hash, err := findHash(defaultKubectlAsset)
|
||||||
|
@ -120,39 +126,34 @@ func (c *ApplyClusterCmd) Run() error {
|
||||||
}
|
}
|
||||||
c.Assets = append(c.Assets, hash.Hex()+"@"+defaultKubectlAsset)
|
c.Assets = append(c.Assets, hash.Hex()+"@"+defaultKubectlAsset)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if usesCNI(cluster) {
|
||||||
|
defaultCNIAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/network-plugins/cni-8a936732094c0941e1543ef5d292a1f4fffa1ac5.tar.gz")
|
||||||
|
glog.Infof("Adding default CNI asset: %s", defaultCNIAsset)
|
||||||
|
|
||||||
|
hashString := "86966c78cc9265ee23f7892c5cad0ec7590cec93"
|
||||||
|
//hash, err := findHash(defaultCNIAsset)
|
||||||
|
//if err != nil {
|
||||||
|
// return err
|
||||||
|
//}
|
||||||
|
//hashString := hash.Hex()
|
||||||
|
c.Assets = append(c.Assets, hashString+"@"+defaultCNIAsset)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.NodeUpSource == "" {
|
if c.NodeUpSource == "" {
|
||||||
location := "https://kubeupv2.s3.amazonaws.com/nodeup/nodeup-1.3.tar.gz"
|
location := os.Getenv("NODEUP_URL")
|
||||||
glog.Infof("Using default nodeup location: %q", location)
|
if location == "" {
|
||||||
|
location = "https://kubeupv2.s3.amazonaws.com/nodeup/nodeup-1.3.tar.gz"
|
||||||
|
glog.Infof("Using default nodeup location: %q", location)
|
||||||
|
} else {
|
||||||
|
glog.Warningf("Using nodeup location from NODEUP_URL env var: %q", location)
|
||||||
|
}
|
||||||
c.NodeUpSource = location
|
c.NodeUpSource = location
|
||||||
}
|
}
|
||||||
|
|
||||||
checkExisting := true
|
checkExisting := true
|
||||||
|
|
||||||
var nodeUpTags []string
|
|
||||||
nodeUpTags = append(nodeUpTags, "_protokube")
|
|
||||||
|
|
||||||
if useMasterASG {
|
|
||||||
tags["_master_asg"] = struct{}{}
|
|
||||||
} else {
|
|
||||||
tags["_master_single"] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if useMasterLB {
|
|
||||||
tags["_master_lb"] = struct{}{}
|
|
||||||
} else {
|
|
||||||
tags["_not_master_lb"] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cluster.Spec.MasterPublicName != "" {
|
|
||||||
tags["_master_dns"] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi.BoolValue(cluster.Spec.IsolateMasters) {
|
|
||||||
tags["_isolate_masters"] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
l.AddTypes(map[string]interface{}{
|
l.AddTypes(map[string]interface{}{
|
||||||
"keypair": &fitasks.Keypair{},
|
"keypair": &fitasks.Keypair{},
|
||||||
"secret": &fitasks.Secret{},
|
"secret": &fitasks.Secret{},
|
||||||
|
@ -166,6 +167,18 @@ func (c *ApplyClusterCmd) Run() error {
|
||||||
region := ""
|
region := ""
|
||||||
project := ""
|
project := ""
|
||||||
|
|
||||||
|
var sshPublicKeys [][]byte
|
||||||
|
{
|
||||||
|
keys, err := keyStore.FindSSHPublicKeys(fi.SecretNameSSHPrimary)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error retrieving SSH public key %q: %v", fi.SecretNameSSHPrimary, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, k := range keys {
|
||||||
|
sshPublicKeys = append(sshPublicKeys, k.Data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
switch cluster.Spec.CloudProvider {
|
switch cluster.Spec.CloudProvider {
|
||||||
case "gce":
|
case "gce":
|
||||||
{
|
{
|
||||||
|
@ -174,8 +187,6 @@ func (c *ApplyClusterCmd) Run() error {
|
||||||
project = gceCloud.Project
|
project = gceCloud.Project
|
||||||
|
|
||||||
glog.Fatalf("GCE is (probably) not working currently - please ping @justinsb for cleanup")
|
glog.Fatalf("GCE is (probably) not working currently - please ping @justinsb for cleanup")
|
||||||
tags["_gce"] = struct{}{}
|
|
||||||
nodeUpTags = append(nodeUpTags, "_gce")
|
|
||||||
|
|
||||||
l.AddTypes(map[string]interface{}{
|
l.AddTypes(map[string]interface{}{
|
||||||
"persistentDisk": &gcetasks.PersistentDisk{},
|
"persistentDisk": &gcetasks.PersistentDisk{},
|
||||||
|
@ -193,9 +204,6 @@ func (c *ApplyClusterCmd) Run() error {
|
||||||
awsCloud := cloud.(*awsup.AWSCloud)
|
awsCloud := cloud.(*awsup.AWSCloud)
|
||||||
region = awsCloud.Region
|
region = awsCloud.Region
|
||||||
|
|
||||||
tags["_aws"] = struct{}{}
|
|
||||||
nodeUpTags = append(nodeUpTags, "_aws")
|
|
||||||
|
|
||||||
l.AddTypes(map[string]interface{}{
|
l.AddTypes(map[string]interface{}{
|
||||||
// EC2
|
// EC2
|
||||||
"elasticIP": &awstasks.ElasticIP{},
|
"elasticIP": &awstasks.ElasticIP{},
|
||||||
|
@@ -237,8 +245,25 @@ func (c *ApplyClusterCmd) Run() error {
 				"dnsZone": &awstasks.DNSZone{},
 			})

-			if c.SSHPublicKey == "" {
-				return fmt.Errorf("SSH public key must be specified when running with AWS")
+			if len(sshPublicKeys) == 0 {
+				return fmt.Errorf("SSH public key must be specified when running with AWS (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.Name)
 			}

+			if len(sshPublicKeys) != 1 {
+				return fmt.Errorf("Exactly one 'admin' SSH public key can be specified when running with AWS; please delete a key using `kops delete secret`")
+			} else {
+				l.Resources["ssh-public-key"] = fi.NewStringResource(string(sshPublicKeys[0]))
+
+				// SSHKeyName computes a unique SSH key name, combining the cluster name and the SSH public key fingerprint
+				l.TemplateFunctions["SSHKeyName"] = func() (string, error) {
+					fingerprint, err := awstasks.ComputeOpenSSHKeyFingerprint(string(sshPublicKeys[0]))
+					if err != nil {
+						return "", err
+					}
+
+					name := "kubernetes." + cluster.Name + "-" + fingerprint
+					return name, nil
+				}
+			}

 			l.TemplateFunctions["MachineTypeInfo"] = awsup.GetMachineTypeInfo
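For orientation, a small standalone sketch (all names below are invented) of the identifiers this change produces: the imported EC2 key pair name embeds the OpenSSH MD5 fingerprint of the admin public key, and the Terraform rendering later in this commit strips the colons to obtain a valid resource name.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical cluster; the fingerprint is the OpenSSH MD5 form used by SSHKeyName.
	keyName := "kubernetes.mycluster.example.com-be:ba:ec:2b:9e:a0:68:b8:19:6b:9a:26:cc:b1:58:ff"
	tfName := strings.Replace(keyName, ":", "", -1)
	fmt.Println(keyName)
	fmt.Println(tfName) // kubernetes.mycluster.example.com-bebaec2b9ea068b8196b9a26ccb158ff
}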
@@ -248,13 +273,18 @@ func (c *ApplyClusterCmd) Run() error {
 		return fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)
 	}

+	clusterTags, err := buildClusterTags(cluster)
+	if err != nil {
+		return err
+	}
+
 	tf := &TemplateFunctions{
 		cluster: cluster,
-		tags:    tags,
+		tags:    clusterTags,
 		region:  region,
 	}

-	l.Tags = tags
+	l.Tags = clusterTags
 	l.WorkDir = c.OutDir
 	l.ModelStore = modelStore
@@ -265,26 +295,98 @@ func (c *ApplyClusterCmd) Run() error {
 		return secretStore
 	}

(The ComputeNodeTags template function, which merged nodeUpTags with the caller's args and appended _kubernetes_pool for masters that are not isolated, is removed; its role handling moves to buildNodeupTags in the new tags.go below. In its place:)

+	// RenderNodeUpConfig returns the NodeUp config, in YAML format
+	l.TemplateFunctions["RenderNodeUpConfig"] = func(args []string) (string, error) {
+		var role api.InstanceGroupRole
+		for _, arg := range args {
+			if arg == "_kubernetes_master" {
+				if role != "" {
+					return "", fmt.Errorf("found duplicate role tags in args: %v", args)
+				}
+				role = api.InstanceGroupRoleMaster
+			}
+			if arg == "_kubernetes_pool" {
+				if role != "" {
+					return "", fmt.Errorf("found duplicate role tags in args: %v", args)
+				}
+				role = api.InstanceGroupRoleNode
+			}
+		}
+		if role == "" {
+			return "", fmt.Errorf("cannot determine role from args: %v", args)
+		}
+
+		nodeUpTags, err := buildNodeupTags(role, tf.cluster, tf.tags)
+		if err != nil {
+			return "", err
+		}
+
+		config := &nodeup.NodeUpConfig{}
+		for _, tag := range args {
+			config.Tags = append(config.Tags, tag)
+		}
+		for _, tag := range nodeUpTags {
+			config.Tags = append(config.Tags, tag)
+		}
+
+		config.Assets = c.Assets
+		config.ClusterName = cluster.Name
+
+		configPath, err := c.ClusterRegistry.ConfigurationPath(cluster.Name)
+		if err != nil {
+			return "", err
+		}
+		config.ClusterLocation = configPath.Path()
+
+		var images []*nodeup.Image
+
+		if isBaseURL(cluster.Spec.KubernetesVersion) {
+			baseURL := cluster.Spec.KubernetesVersion
+			baseURL = strings.TrimSuffix(baseURL, "/")
+
+			// TODO: pull kube-dns image
+			// When using a custom version, we want to preload the images over http
+			components := []string{"kube-proxy"}
+			if role == api.InstanceGroupRoleMaster {
+				components = append(components, "kube-apiserver", "kube-controller-manager", "kube-scheduler")
+			}
+			for _, component := range components {
+				imagePath := baseURL + "/bin/linux/amd64/" + component + ".tar"
+				glog.Infof("Adding docker image: %s", imagePath)
+
+				hash, err := findHash(imagePath)
+				if err != nil {
+					return "", err
+				}
+				image := &nodeup.Image{
+					Source: imagePath,
+					Hash:   hash.Hex(),
+				}
+				images = append(images, image)
+			}
+		}
+
+		config.Images = images
+
+		{
+			protokubeImage := os.Getenv("PROTOKUBE_IMAGE")
+			if protokubeImage != "" {
+				glog.Warningf("Using protokube image specified in PROTOKUBE_IMAGE env var: %q", protokubeImage)
+			} else {
+				protokubeImage = nodeup.DefaultProtokubeImage
+			}
+			config.ProtokubeImage = &nodeup.Image{
+				Source: protokubeImage,
+			}
+		}
+
+		yaml, err := api.ToYaml(config)
+		if err != nil {
+			return "", err
+		}
+
+		return string(yaml), nil
 	}

 	//// TotalNodeCount computes the total count of nodes
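To make the hand-off concrete, here is a rough standalone sketch (not the repo's own api.ToYaml helper) of the YAML document such a config serializes to. The field set mirrors the NodeUpConfig struct updated at the end of this commit; the values and yaml tags below are invented for illustration.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type image struct {
	Source string `yaml:"source,omitempty"`
	Hash   string `yaml:"hash,omitempty"`
}

type nodeUpConfig struct {
	Tags            []string `yaml:"tags,omitempty"`
	Assets          []string `yaml:"assets,omitempty"`
	Images          []*image `yaml:"images,omitempty"`
	ClusterName     string   `yaml:"clusterName,omitempty"`
	ClusterLocation string   `yaml:"clusterLocation,omitempty"`
	ProtokubeImage  *image   `yaml:"protokubeImage,omitempty"`
}

func main() {
	cfg := &nodeUpConfig{
		Tags:            []string{"_kubernetes_pool", "_protokube", "_aws"},
		ClusterName:     "mycluster.example.com",
		ClusterLocation: "s3://my-state-store/mycluster.example.com/config",
		ProtokubeImage:  &image{Source: "protokube:latest"},
	}
	b, err := yaml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
}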
@@ -335,28 +437,9 @@ func (c *ApplyClusterCmd) Run() error {
 	l.TemplateFunctions["NodeUpSourceHash"] = func() string {
 		return ""
 	}
-	l.TemplateFunctions["ClusterLocation"] = func() (string, error) {
-		configPath, err := c.ClusterRegistry.ConfigurationPath(cluster.Name)
-		if err != nil {
-			return "", err
-		}
-		return configPath.Path(), nil
-	}
-	l.TemplateFunctions["Assets"] = func() []string {
-		return c.Assets
-	}

 	tf.AddTo(l.TemplateFunctions)

-	if c.SSHPublicKey != "" {
-		authorized, err := ioutil.ReadFile(c.SSHPublicKey)
-		if err != nil {
-			return fmt.Errorf("error reading SSH key file %q: %v", c.SSHPublicKey, err)
-		}
-
-		l.Resources["ssh-public-key"] = fi.NewStringResource(string(authorized))
-	}
-
 	taskMap, err := l.BuildTasks(modelStore, c.Models)
 	if err != nil {
 		return fmt.Errorf("error building tasks: %v", err)

@@ -364,7 +447,7 @@ func (c *ApplyClusterCmd) Run() error {
 	var target fi.Target

-	switch c.Target {
+	switch c.TargetName {
 	case TargetDirect:
 		switch cluster.Spec.CloudProvider {
 		case "gce":

@@ -383,8 +466,9 @@ func (c *ApplyClusterCmd) Run() error {
 	case TargetDryRun:
 		target = fi.NewDryRunTarget(os.Stdout)
 	default:
-		return fmt.Errorf("unsupported target type %q", c.Target)
+		return fmt.Errorf("unsupported target type %q", c.TargetName)
 	}
+	c.Target = target

 	context, err := fi.NewContext(target, cloud, keyStore, secretStore, checkExisting)
 	if err != nil {

@@ -392,7 +476,7 @@ func (c *ApplyClusterCmd) Run() error {
 	}
 	defer context.Close()

-	err = context.RunTasks(taskMap)
+	err = context.RunTasks(taskMap, MaxAttemptsWithNoProgress)
 	if err != nil {
 		return fmt.Errorf("error running tasks: %v", err)
 	}
@@ -404,3 +488,23 @@ func (c *ApplyClusterCmd) Run() error {
 	return nil
 }
+
+func isBaseURL(kubernetesVersion string) bool {
+	return strings.HasPrefix(kubernetesVersion, "http:") || strings.HasPrefix(kubernetesVersion, "https:")
+}
+
+func findHash(url string) (*hashing.Hash, error) {
+	for _, ext := range []string{".sha1"} {
+		hashURL := url + ext
+		b, err := vfs.Context.ReadFile(hashURL)
+		if err != nil {
+			glog.Infof("error reading hash file %q: %v", hashURL, err)
+			continue
+		}
+		hashString := strings.TrimSpace(string(b))
+		glog.Infof("Found hash %q for %q", hashString, url)
+
+		return hashing.FromString(hashString)
+	}
+	return nil, fmt.Errorf("cannot determine hash for %v (have you specified a valid KubernetesVersion?)", url)
+}
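findHash relies on a ".sha1" sidecar file published next to each asset. A standalone sketch of that convention over plain HTTP (placeholder URL, minimal error handling; the real code goes through vfs.Context so it also works for non-HTTP paths):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func fetchSHA1(assetURL string) (string, error) {
	resp, err := http.Get(assetURL + ".sha1")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return "", fmt.Errorf("unexpected status fetching hash: %s", resp.Status)
	}
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(b)), nil
}

func main() {
	hash, err := fetchSHA1("https://example.com/kubernetes/bin/linux/amd64/kube-proxy.tar")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("expected sha1:", hash)
}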
@@ -93,7 +93,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) {
 	request.Attribute = aws.String("userData")
 	response, err := cloud.EC2.DescribeInstanceAttribute(request)
 	if err != nil {
-		return nil, fmt.Errorf("error querying EC2 for user metadata for instance %q: %v", *i.InstanceId)
+		return nil, fmt.Errorf("error querying EC2 for user metadata for instance %q: %v", *i.InstanceId, err)
 	}
 	if response.UserData != nil {
 		b, err := base64.StdEncoding.DecodeString(aws.StringValue(response.UserData.Value))
@@ -79,30 +79,28 @@ func (e *SSHKey) Find(c *fi.Context) (*SSHKey, error) {
 	return actual, nil
 }

(The old single computeAWSKeyFingerprint — which parsed the authorized_keys line, converted the key to DER, took the MD5 and colon-formatted it inline — is split into two helpers plus two fingerprint functions, one of which is now exported:)

+// parseSSHPublicKey parses the SSH public key string
+func parseSSHPublicKey(publicKey string) (ssh.PublicKey, error) {
+	tokens := strings.Fields(publicKey)
+	if len(tokens) < 2 {
+		return nil, fmt.Errorf("error parsing SSH public key: %q", publicKey)
+	}
+
+	sshPublicKeyBytes, err := base64.StdEncoding.DecodeString(tokens[1])
+	if len(tokens) < 2 {
+		return nil, fmt.Errorf("error decoding SSH public key: %q", publicKey)
+	}
+
+	sshPublicKey, err := ssh.ParsePublicKey(sshPublicKeyBytes)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing SSH public key: %v", err)
+	}
+	return sshPublicKey, nil
+}
+
+// colonSeparatedHex formats the byte slice SSH-fingerprint style: hex bytes separated by colons
+func colonSeparatedHex(data []byte) string {
+	sshKeyFingerprint := fmt.Sprintf("%x", data)
+
+	var colonSeparated bytes.Buffer
+	for i := 0; i < len(sshKeyFingerprint); i++ {
+		if (i%2) == 0 && i != 0 {
+			colonSeparated.WriteByte(':')
+		}
+		colonSeparated.WriteByte(sshKeyFingerprint[i])
+	}
+	return colonSeparated.String()
+}
+
+// computeAWSKeyFingerprint computes the AWS-specific fingerprint of the SSH public key
+func computeAWSKeyFingerprint(publicKey string) (string, error) {
+	sshPublicKey, err := parseSSHPublicKey(publicKey)
+	if err != nil {
+		return "", err
+	}
+
+	der, err := toDER(sshPublicKey)
+	if err != nil {
+		return "", fmt.Errorf("error computing fingerprint for SSH public key: %v", err)
+	}
+	h := md5.Sum(der)
+
+	return colonSeparatedHex(h[:]), nil
+}
+
+// ComputeOpenSSHKeyFingerprint computes the OpenSSH fingerprint of the SSH public key
+func ComputeOpenSSHKeyFingerprint(publicKey string) (string, error) {
+	sshPublicKey, err := parseSSHPublicKey(publicKey)
+	if err != nil {
+		return "", err
+	}
+
+	h := md5.Sum(sshPublicKey.Marshal())
+	return colonSeparatedHex(h[:]), nil
+}

 // toDER gets the DER encoding of the SSH public key

@@ -195,6 +220,7 @@ func (_ *SSHKey) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *SSHKey) error {
 		e.KeyFingerprint = response.KeyFingerprint
 	}

+	// No tags on SSH public key
 	return nil //return output.AddAWSTags(cloud.Tags(), v, "vpc")
 }

@@ -204,7 +230,8 @@ type terraformSSHKey struct {
 }

 func (_ *SSHKey) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *SSHKey) error {
-	publicKey, err := t.AddFile("aws_key_pair", *e.Name, "public_key", e.PublicKey)
+	tfName := strings.Replace(*e.Name, ":", "", -1)
+	publicKey, err := t.AddFile("aws_key_pair", tfName, "public_key", e.PublicKey)
 	if err != nil {
 		return fmt.Errorf("error rendering PublicKey: %v", err)
 	}

@@ -214,9 +241,10 @@ func (_ *SSHKey) RenderTerraform
 		PublicKey: publicKey,
 	}

-	return t.RenderResource("aws_key_pair", *e.Name, tf)
+	return t.RenderResource("aws_key_pair", tfName, tf)
 }

 func (e *SSHKey) TerraformLink() *terraform.Literal {
-	return terraform.LiteralProperty("aws_key_pair", *e.Name, "id")
+	tfName := strings.Replace(*e.Name, ":", "", -1)
+	return terraform.LiteralProperty("aws_key_pair", tfName, "id")
 }
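The same OpenSSH-style MD5 fingerprint can be cross-checked with the authorized_keys parser from golang.org/x/crypto/ssh. A minimal sketch, not part of this commit, which for the RSA key in the tests that follow should agree with Test_OpenSSHFingerprint_RsaKey1:

package sshfp

import (
	"crypto/md5"
	"fmt"
	"strings"

	"golang.org/x/crypto/ssh"
)

// openSSHFingerprint returns the MD5 fingerprint of an authorized_keys line as
// colon-separated hex, matching ComputeOpenSSHKeyFingerprint above.
func openSSHFingerprint(authorizedKey string) (string, error) {
	pub, _, _, _, err := ssh.ParseAuthorizedKey([]byte(authorizedKey))
	if err != nil {
		return "", err
	}
	sum := md5.Sum(pub.Marshal()) // MD5 over the wire-format key
	parts := make([]string, len(sum))
	for i, b := range sum {
		parts[i] = fmt.Sprintf("%02x", b)
	}
	return strings.Join(parts, ":"), nil
}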
@@ -6,7 +6,7 @@ import (
 	"testing"
 )

-func checkFingerprintEqual(t *testing.T, publicKey string, fingerprint string) {
+func checkAWSFingerprintEqual(t *testing.T, publicKey string, fingerprint string) {
 	actual, err := computeAWSKeyFingerprint(publicKey)
 	if err != nil {
 		t.Fatalf("Unexpected error computing AWS key fingerprint: %v", err)

@@ -16,7 +16,7 @@
 	}
 }

-func checkFingerprintError(t *testing.T, publicKey string, message string) {
+func checkAWSFingerprintError(t *testing.T, publicKey string, message string) {
 	_, err := computeAWSKeyFingerprint(publicKey)
 	if err == nil {
 		t.Fatalf("Expected error %q computing AWS key fingerprint", message)

@@ -27,29 +27,44 @@
 	}
 }

The existing fingerprint tests are renamed to call the AWS-specific helpers, keeping their key material and expected values unchanged:
TestFingerprint_RsaKey1 becomes Test_AWSFingerprint_RsaKey1 (checkAWSFingerprintEqual, "85:a6:f4:64:b7:8f:4a:75:f1:ed:f9:26:1b:67:5f:f2");
TestFingerprint_RsaKeyEncrypted becomes Test_AWSFingerprint_RsaKeyEncrypted ("c9:c5:05:5e:ea:54:fc:a4:7c:7c:75:5c:d2:71:5e:40");
TestFingerprint_TrickyWhitespace becomes Test_AWSFingerprint_TrickyWhitespace ("85:a6:f4:64:b7:8f:4a:75:f1:ed:f9:26:1b:67:5f:f2");
TestFingerprint_DsaKey and TestFingerprint_Ed25519Key become Test_AWSFingerprint_DsaKey and Test_AWSFingerprint_Ed25519Key (checkAWSFingerprintError, "AWS can only import RSA keys").

New OpenSSH-fingerprint coverage is added:

+func checkOpenSSHFingerprintEqual(t *testing.T, publicKey string, fingerprint string) {
+	actual, err := ComputeOpenSSHKeyFingerprint(publicKey)
+	if err != nil {
+		t.Fatalf("Unexpected error computing OpenSSH key fingerprint: %v", err)
+	}
+	if actual != fingerprint {
+		t.Fatalf("Expected fingerprint %q, got %q", fingerprint, actual)
+	}
+}
+
+func Test_OpenSSHFingerprint_RsaKey1(t *testing.T) {
+	key := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCySdqIU+FhCWl3BNrAvPaOe5VfL2aCARUWwy91ZP+T7LBwFa9lhdttfjp/VX1D1/PVwntn2EhN079m8c2kfdmiZ/iCHqrLyIGSd+BOiCz0lT47znvANSfxYjLUuKrWWWeaXqerJkOsAD4PHchRLbZGPdbfoBKwtb/WT4GMRQmb9vmiaZYjsfdPPM9KkWI9ECoWFGjGehA8D+iYIPR711kRacb1xdYmnjHqxAZHFsb5L8wDWIeAyhy49cBD+lbzTiioq2xWLorXuFmXh6Do89PgzvHeyCLY6816f/kCX6wIFts8A2eaEHFL4rAOsuh6qHmSxGCR9peSyuRW8DxV725x justin@test"
+	checkOpenSSHFingerprintEqual(t, key, "be:ba:ec:2b:9e:a0:68:b8:19:6b:9a:26:cc:b1:58:ff")
+}
@@ -78,7 +78,7 @@ func (c *AWSCloud) Tags() map[string]string {
 func isTagsEventualConsistencyError(err error) bool {
 	if awsErr, ok := err.(awserr.Error); ok {
 		switch awsErr.Code() {
-		case "InvalidInstanceID.NotFound", "InvalidRouteTableID.NotFound", "InvalidVpcID.NotFound", "InvalidGroup.NotFound", "InvalidSubnetID.NotFound", "InvalidInternetGatewayID.NotFound":
+		case "InvalidInstanceID.NotFound", "InvalidRouteTableID.NotFound", "InvalidVpcID.NotFound", "InvalidGroup.NotFound", "InvalidSubnetID.NotFound", "InvalidInternetGatewayID.NotFound", "InvalidDhcpOptionsID.NotFound":
 			return true
 		default:

@@ -520,5 +520,5 @@ func (c *AWSCloud) FindDNSHostedZone(clusterDNSName string) (string, error) {
 		return id, nil
 	}

-	return "", fmt.Errorf("Found multiple hosted zones matching cluster %q; please specify the ID of the zone to use")
+	return "", fmt.Errorf("Found multiple hosted zones matching cluster %q; please specify the ID of the zone to use", clusterDNSName)
 }
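Adding "InvalidDhcpOptionsID.NotFound" matters because tagging retries on these eventually-consistent NotFound codes. A generic sketch of that retry shape (helper names and attempt counts invented; only the awserr.Code() check mirrors the code above):

package awsretry

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// retryableNotFound mirrors the idea of isTagsEventualConsistencyError: a freshly created
// resource can briefly report a *.NotFound code until EC2 becomes consistent.
func retryableNotFound(err error) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		switch awsErr.Code() {
		case "InvalidInstanceID.NotFound", "InvalidDhcpOptionsID.NotFound":
			return true
		}
	}
	return false
}

// withRetry re-runs op a few times while it keeps failing with a retryable code.
func withRetry(op func() error) error {
	var err error
	for attempt := 0; attempt < 5; attempt++ {
		if err = op(); err == nil || !retryableNotFound(err) {
			return err
		}
		time.Sleep(2 * time.Second)
	}
	return err
}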
@@ -239,6 +239,71 @@ var MachineTypes []AWSMachineTypeInfo = []AWSMachineTypeInfo{
 		EphemeralDisks: nil,
 	},

+	// cc2 family
+	{Name: "cc2.8xlarge", MemoryGB: 60.5, ECU: 88, Cores: 32, EphemeralDisks: []int{840, 840, 840, 840}},
+
+	// cg1 family
+	{Name: "cg1.4xlarge", MemoryGB: 22.5, ECU: 33.5, Cores: 16, EphemeralDisks: []int{840, 840}},
+
+	// cr1 family
+	{Name: "cr1.8xlarge", MemoryGB: 244.0, ECU: 88, Cores: 32, EphemeralDisks: []int{120, 120}},
+
+	// d2 family
+	{Name: "d2.xlarge", MemoryGB: 30.5, ECU: 14, Cores: 4, EphemeralDisks: []int{2000, 2000, 2000}},
+	{Name: "d2.2xlarge", MemoryGB: 61.0, ECU: 28, Cores: 8, EphemeralDisks: []int{2000, 2000, 2000, 2000, 2000, 2000}},
+	{Name: "d2.4xlarge", MemoryGB: 122.0, ECU: 56, Cores: 16, EphemeralDisks: []int{2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000}},
+	{Name: "d2.8xlarge", MemoryGB: 244.0, ECU: 116, Cores: 36, EphemeralDisks: []int{2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000}},
+
 	// g2 family
 	{
 		Name: "g2.2xlarge",

@@ -255,6 +320,15 @@ var MachineTypes []AWSMachineTypeInfo = []AWSMachineTypeInfo{
 		EphemeralDisks: []int{120, 120},
 	},

+	// hi1 family
+	{Name: "hi1.4xlarge", MemoryGB: 60.5, ECU: 35, Cores: 16, EphemeralDisks: []int{1024, 1024}},
+
 	// i2 family
 	{
 		Name: "i2.xlarge",
@@ -0,0 +1,28 @@ (new file: CNI helper in the cloudup package)
+package cloudup
+
+import (
+	"github.com/golang/glog"
+	"k8s.io/kops/upup/pkg/api"
+)
+
+func usesCNI(c *api.Cluster) bool {
+	networkConfig := c.Spec.Networking
+	if networkConfig == nil || networkConfig.Classic != nil {
+		// classic
+		return false
+	}
+
+	if networkConfig.Kubenet != nil {
+		// kubenet
+		return true
+	}
+
+	if networkConfig.External != nil {
+		// external: assume uses CNI
+		return true
+	}
+
+	// Assume other modes also use CNI
+	glog.Warningf("Unknown networking mode configured")
+	return true
+}
@@ -6,7 +6,6 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kops/upup/pkg/api"
 	"k8s.io/kops/upup/pkg/fi"
-	"k8s.io/kops/upup/pkg/fi/hashing"
 	"k8s.io/kops/upup/pkg/fi/loader"
 	"k8s.io/kops/upup/pkg/fi/utils"
 	"k8s.io/kops/upup/pkg/fi/vfs"

@@ -87,10 +86,6 @@
 }

 func (c *populateClusterSpec) run() error {
-	// TODO: Make these configurable?
-	useMasterASG := true
-	useMasterLB := false
-
 	err := c.InputCluster.Validate(false)
 	if err != nil {
 		return err

@@ -170,8 +165,6 @@ func (c *populateClusterSpec) run() error {
 		return fmt.Errorf("ClusterRegistry is required")
 	}

-	tags := make(map[string]struct{})
-
 	keyStore := c.ClusterRegistry.KeyStore(cluster.Name)
 	// Always assume a dry run during this phase
 	keyStore.(*fi.VFSCAStore).DryRun = true

@@ -223,19 +216,6 @@ func (c *populateClusterSpec) run() error {
 		// We do support this...
 	}

-	if cluster.Spec.KubernetesVersion == "" {
-		stableURL := "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
-		b, err := vfs.Context.ReadFile(stableURL)
-		if err != nil {
-			return fmt.Errorf("--kubernetes-version not specified, and unable to download latest version from %q: %v", stableURL, err)
-		}
-		latestVersion := strings.TrimSpace(string(b))
-		glog.Infof("Using kubernetes latest stable version: %s", latestVersion)
-
-		cluster.Spec.KubernetesVersion = latestVersion
-		//return fmt.Errorf("Must either specify a KubernetesVersion (-kubernetes-version) or provide an asset with the release bundle")
-	}
-
 	// Normalize k8s version
 	versionWithoutV := strings.TrimSpace(cluster.Spec.KubernetesVersion)
 	if strings.HasPrefix(versionWithoutV, "v") {

@@ -246,47 +226,11 @@ func (c *populateClusterSpec) run() error {
 		cluster.Spec.KubernetesVersion = versionWithoutV
 	}

(The _master_asg/_master_lb/_master_dns/_isolate_masters tag block and the per-cloud-provider switch that set _gce/_aws are removed from this function; they now live in buildClusterTags in the new tags.go below.)

 	cloud, err := BuildCloud(cluster)
 	if err != nil {
 		return err
 	}

 	if cluster.Spec.DNSZone == "" {
 		dnsZone, err := cloud.FindDNSHostedZone(cluster.Name)
 		if err != nil {

@@ -296,6 +240,11 @@ func (c *populateClusterSpec) run() error {
 		cluster.Spec.DNSZone = dnsZone
 	}

+	tags, err := buildClusterTags(cluster)
+	if err != nil {
+		return err
+	}
+
 	tf := &TemplateFunctions{
 		cluster: cluster,
 		tags:    tags,

@@ -329,22 +278,6 @@ func (c *populateClusterSpec) run() error {
 	return nil
 }

(The findHash helper is deleted here; it moves, unchanged, to the apply-cluster file shown earlier in this commit.)

 func (c *populateClusterSpec) assignSubnets(cluster *api.Cluster) error {
 	if cluster.Spec.NonMasqueradeCIDR == "" {
 		glog.Warningf("NonMasqueradeCIDR not set; can't auto-assign dependent subnets")
@@ -28,6 +28,26 @@ func buildMinimalCluster() *api.Cluster {
 	return c
 }

+func addEtcdClusters(c *api.Cluster) {
+	zones := sets.NewString()
+	for _, z := range c.Spec.Zones {
+		zones.Insert(z.Name)
+	}
+	etcdZones := zones.List()
+
+	for _, etcdCluster := range EtcdClusters {
+		etcd := &api.EtcdClusterSpec{}
+		etcd.Name = etcdCluster
+		for _, zone := range etcdZones {
+			m := &api.EtcdMemberSpec{}
+			m.Name = zone
+			m.Zone = zone
+			etcd.Members = append(etcd.Members, m)
+		}
+		c.Spec.EtcdClusters = append(c.Spec.EtcdClusters, etcd)
+	}
+}
+
 func TestPopulateCluster_Default_NoError(t *testing.T) {
 	c := buildMinimalCluster()

@@ -36,25 +56,7 @@ func TestPopulateCluster_Default_NoError(t *testing.T) {
 		t.Fatalf("error from PerformAssignments: %v", err)
 	}

-	(the same zones/etcd loop, previously inlined under `if len(c.Spec.EtcdClusters) == 0 {`)
+	addEtcdClusters(c)

 	registry := buildInmemoryClusterRegistry()
 	_, err = PopulateClusterSpec(c, registry)

@@ -63,6 +65,76 @@ func TestPopulateCluster_Default_NoError(t *testing.T) {
 	}
 }

+func TestPopulateCluster_Docker_Spec(t *testing.T) {
+	c := buildMinimalCluster()
+	c.Spec.Docker = &api.DockerConfig{
+		MTU:              5678,
+		InsecureRegistry: "myregistry.com:1234",
+	}
+
+	err := c.PerformAssignments()
+	if err != nil {
+		t.Fatalf("error from PerformAssignments: %v", err)
+	}
+
+	addEtcdClusters(c)
+
+	registry := buildInmemoryClusterRegistry()
+	full, err := PopulateClusterSpec(c, registry)
+	if err != nil {
+		t.Fatalf("Unexpected error from PopulateCluster: %v", err)
+	}
+
+	if full.Spec.Docker.MTU != 5678 {
+		t.Fatalf("Unexpected Docker MTU: %v", full.Spec.Docker.MTU)
+	}
+	if full.Spec.Docker.InsecureRegistry != "myregistry.com:1234" {
+		t.Fatalf("Unexpected Docker InsecureRegistry: %v", full.Spec.Docker.InsecureRegistry)
+	}
+
+	// Check default values not changed
+	if full.Spec.Docker.Bridge != "cbr0" {
+		t.Fatalf("Unexpected Docker Bridge: %v", full.Spec.Docker.Bridge)
+	}
+}
+
+func build(c *api.Cluster) (*api.Cluster, error) {
+	err := c.PerformAssignments()
+	if err != nil {
+		return nil, fmt.Errorf("error from PerformAssignments: %v", err)
+	}
+
+	addEtcdClusters(c)
+	registry := buildInmemoryClusterRegistry()
+	full, err := PopulateClusterSpec(c, registry)
+	if err != nil {
+		return nil, fmt.Errorf("Unexpected error from PopulateCluster: %v", err)
+	}
+	return full, nil
+}
+
+func TestPopulateCluster_Kubenet(t *testing.T) {
+	c := buildMinimalCluster()
+
+	full, err := build(c)
+	if err != nil {
+		t.Fatal("error during build: %v", err)
+	}
+
+	if full.Spec.Kubelet.NetworkPluginName != "kubenet" {
+		t.Fatalf("Unexpected NetworkPluginName: %v", full.Spec.Kubelet.NetworkPluginName)
+	}
+	if fi.BoolValue(full.Spec.Kubelet.ReconcileCIDR) != true {
+		t.Fatalf("Unexpected ReconcileCIDR: %v", full.Spec.Kubelet.ReconcileCIDR)
+	}
+	if fi.BoolValue(full.Spec.KubeControllerManager.ConfigureCloudRoutes) != true {
+		t.Fatalf("Unexpected ConfigureCloudRoutes: %v", full.Spec.KubeControllerManager.ConfigureCloudRoutes)
+	}
+}
+
 func TestPopulateCluster_Custom_CIDR(t *testing.T) {
 	c := buildMinimalCluster()
 	c.Spec.NetworkCIDR = "172.20.2.0/24"

In TestPopulateCluster_Custom_CIDR (@@ -77,25 +149,7 @@), TestPopulateCluster_IsolateMasters (@@ -116,25 +170,7 @@) and TestPopulateCluster_IsolateMastersFalse (@@ -158,25 +194,7 @@), the same inline etcd-cluster setup block is likewise replaced by a single addEtcdClusters(c) call before building the in-memory registry and calling PopulateClusterSpec.
@@ -0,0 +1,106 @@ (new file: cluster and NodeUp tag computation in the cloudup package)
+package cloudup
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/kops/upup/pkg/api"
+	"k8s.io/kops/upup/pkg/fi"
+)
+
+func buildClusterTags(cluster *api.Cluster) (map[string]struct{}, error) {
+	// TODO: Make these configurable?
+	useMasterASG := true
+	useMasterLB := false
+
+	tags := make(map[string]struct{})
+
+	networking := cluster.Spec.Networking
+	if networking == nil || networking.Classic != nil {
+		tags["_networking_classic"] = struct{}{}
+	} else if networking.Kubenet != nil {
+		tags["_networking_kubenet"] = struct{}{}
+	} else if networking.External != nil {
+		// external is based on kubenet
+		tags["_networking_kubenet"] = struct{}{}
+		tags["_networking_external"] = struct{}{}
+	} else {
+		return nil, fmt.Errorf("No networking mode set")
+	}
+
+	if useMasterASG {
+		tags["_master_asg"] = struct{}{}
+	} else {
+		tags["_master_single"] = struct{}{}
+	}
+
+	if useMasterLB {
+		tags["_master_lb"] = struct{}{}
+	} else {
+		tags["_not_master_lb"] = struct{}{}
+	}
+
+	if cluster.Spec.MasterPublicName != "" {
+		tags["_master_dns"] = struct{}{}
+	}
+
+	if fi.BoolValue(cluster.Spec.IsolateMasters) {
+		tags["_isolate_masters"] = struct{}{}
+	}
+
+	switch cluster.Spec.CloudProvider {
+	case "gce":
+		{
+			glog.Fatalf("GCE is (probably) not working currently - please ping @justinsb for cleanup")
+			tags["_gce"] = struct{}{}
+		}
+
+	case "aws":
+		{
+			tags["_aws"] = struct{}{}
+		}
+
+	default:
+		return nil, fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)
+	}
+
+	return tags, nil
+}
+
+func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTags map[string]struct{}) ([]string, error) {
+	var tags []string
+
+	switch role {
+	case api.InstanceGroupRoleNode:
+		// No special tags
+
+		// TODO: Should we run _protokube on the nodes?
+		tags = append(tags, "_protokube")
+
+	case api.InstanceGroupRoleMaster:
+		if !fi.BoolValue(cluster.Spec.IsolateMasters) {
+			// Run this master as a pool node also (start kube-proxy etc)
+			tags = append(tags, "_kubernetes_pool")
+		}
+		tags = append(tags, "_protokube")
+	default:
+		return nil, fmt.Errorf("Unrecognized role: %v", role)
+	}
+
+	// TODO: Replace with list of CNI plugins ?
+	if usesCNI(cluster) {
+		tags = append(tags, "_cni_bridge")
+		tags = append(tags, "_cni_host_local")
+		tags = append(tags, "_cni_loopback")
+		tags = append(tags, "_cni_ptp")
+		//tags = append(tags, "_cni_tuning")
+	}
+
+	if _, found := clusterTags["_gce"]; found {
+		tags = append(tags, "_gce")
+	}
+	if _, found := clusterTags["_aws"]; found {
+		tags = append(tags, "_aws")
+	}
+
+	return tags, nil
+}
@@ -0,0 +1,41 @@ (new file: tests for the tag helpers)
+package cloudup
+
+import (
+	"k8s.io/kops/upup/pkg/api"
+	"testing"
+)
+
+func TestBuildTags_CloudProvider_AWS(t *testing.T) {
+	c := &api.Cluster{
+		Spec: api.ClusterSpec{
+			CloudProvider: "aws",
+		},
+	}
+
+	tags, err := buildClusterTags(c)
+	if err != nil {
+		t.Fatalf("buildTags error: %v", err)
+	}
+
+	if _, found := tags["_aws"]; !found {
+		t.Fatalf("tag _aws not found")
+	}
+
+	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
+	if err != nil {
+		t.Fatalf("buildNodeupTags error: %v", err)
+	}
+
+	if !stringSliceContains(nodeUpTags, "_aws") {
+		t.Fatalf("nodeUpTag _aws not found")
+	}
+}
+
+func stringSliceContains(haystack []string, needle string) bool {
+	for _, s := range haystack {
+		if needle == s {
+			return true
+		}
+	}
+	return false
+}
@@ -4,7 +4,9 @@ import (
 	"encoding/base64"
 	"encoding/binary"
 	"fmt"
+	"github.com/golang/glog"
 	"k8s.io/kops/upup/pkg/api"
+	"k8s.io/kops/upup/pkg/fi/vfs"
 	"math/big"
 	"net"
 	"sort"

@@ -78,6 +80,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap) {
 	dest["IAMPrefix"] = tf.IAMPrefix
 	dest["IAMServiceEC2"] = tf.IAMServiceEC2

+	dest["Image"] = tf.Image
 }

 func (tf *TemplateFunctions) EtcdClusterMemberTags(etcd *api.EtcdClusterSpec, m *api.EtcdMemberSpec) map[string]string {

@@ -137,3 +140,30 @@ func (tf *TemplateFunctions) IAMPrefix() string {
 		return "arn:aws"
 	}
 }
+
+// Image returns the docker image name for the specified component
+func (tf *TemplateFunctions) Image(component string) (string, error) {
+	if component == "kube-dns" {
+		// TODO: Once we are shipping different versions, start to use them
+		return "gcr.io/google_containers/kubedns-amd64:1.3", nil
+	}
+
+	if !isBaseURL(tf.cluster.Spec.KubernetesVersion) {
+		return "gcr.io/google_containers/" + component + ":" + "v" + tf.cluster.Spec.KubernetesVersion, nil
+	}
+
+	baseURL := tf.cluster.Spec.KubernetesVersion
+	baseURL = strings.TrimSuffix(baseURL, "/")
+
+	tagURL := baseURL + "/bin/linux/amd64/" + component + ".docker_tag"
+	glog.V(2).Infof("Downloading docker tag for %s from: %s", component, tagURL)
+
+	b, err := vfs.Context.ReadFile(tagURL)
+	if err != nil {
+		return "", fmt.Errorf("error reading tag file %q: %v", tagURL, err)
+	}
+	tag := strings.TrimSpace(string(b))
+	glog.V(2).Infof("Found tag %q for %q", tag, component)
+
+	return "gcr.io/google_containers/" + component + ":" + tag, nil
+}
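In short, templates can now ask for the image of "kube-apiserver" and get either a released gcr.io tag or a tag read from a CI build's .docker_tag sidecar. A condensed standalone sketch of that rule, using plain net/http instead of vfs.Context purely for illustration:

package imagesketch

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func imageForComponent(component, kubernetesVersion string) (string, error) {
	if !strings.HasPrefix(kubernetesVersion, "http:") && !strings.HasPrefix(kubernetesVersion, "https:") {
		// A released version, e.g. "1.3.5" -> gcr.io/google_containers/kube-apiserver:v1.3.5
		return "gcr.io/google_containers/" + component + ":v" + kubernetesVersion, nil
	}

	// A base-URL build: the docker tag is published next to the binaries.
	tagURL := strings.TrimSuffix(kubernetesVersion, "/") + "/bin/linux/amd64/" + component + ".docker_tag"
	resp, err := http.Get(tagURL)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("error reading tag file %q: %v", tagURL, err)
	}
	return "gcr.io/google_containers/" + component + ":" + strings.TrimSpace(string(b)), nil
}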
@@ -104,6 +104,18 @@ func TestValidateFull_Default_Validates(t *testing.T) {
 	}
 }

+func TestValidateFull_ClusterName_InvalidDNS_NoDot(t *testing.T) {
+	c := buildDefaultCluster(t)
+	c.Name = "test"
+	expectErrorFromValidate(t, c, "DNS name")
+}
+
+func TestValidateFull_ClusterName_InvalidDNS_Invalid(t *testing.T) {
+	c := buildDefaultCluster(t)
+	c.Name = "test.-"
+	expectErrorFromValidate(t, c, "DNS name")
+}
+
 func TestValidateFull_ClusterName_Required(t *testing.T) {
 	c := buildDefaultCluster(t)
 	c.Name = ""
@@ -38,11 +38,11 @@ func NewContext
 	return c, nil
 }

-func (c *Context) RunTasks(taskMap map[string]Task) error {
+func (c *Context) RunTasks(taskMap map[string]Task, maxAttemptsWithNoProgress int) error {
 	e := &executor{
 		context: c,
 	}
-	return e.RunTasks(taskMap)
+	return e.RunTasks(taskMap, maxAttemptsWithNoProgress)
 }

 func (c *Context) Close() {

@@ -257,3 +257,8 @@ func ValueAsString(value reflect.Value) string {
 func (t *DryRunTarget) Finish(taskMap map[string]Task) error {
 	return t.PrintReport(taskMap, t.out)
 }
+
+// HasChanges returns true iff any changes would have been made
+func (t *DryRunTarget) HasChanges() bool {
+	return len(t.changes) != 0
+}
@@ -8,8 +8,6 @@ import (
 	"time"
 )

-const MaxAttemptsWithNoProgress = 3
-
 type executor struct {
 	context *Context
 }

@@ -22,8 +20,8 @@ type taskState struct {
 }

 // RunTasks executes all the tasks, considering their dependencies
-// It will perform some re-execution on error, retrying as long as progess is still being made
-func (e *executor) RunTasks(taskMap map[string]Task) error {
+// It will perform some re-execution on error, retrying as long as progress is still being made
+func (e *executor) RunTasks(taskMap map[string]Task, maxAttemptsWithNoProgress int) error {
 	dependencies := FindTaskDependencies(taskMap)

 	taskStates := make(map[string]*taskState)

@@ -94,7 +92,7 @@ func (e *executor) RunTasks
 		if !progress {
 			if len(errors) != 0 {
 				noProgressCount++
-				if noProgressCount == MaxAttemptsWithNoProgress {
+				if noProgressCount == maxAttemptsWithNoProgress {
 					return fmt.Errorf("did not make any progress executing task. Example error: %v", errors[0])
 				} else {
 					glog.Infof("No progress made, sleeping before retrying failed tasks")
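A simplified sketch of the retry policy this parameter controls: keep re-running failed tasks while at least one task still succeeds, and give up only after the given number of consecutive passes with no progress (sleeping between passes and dependency ordering are omitted here):

package taskretry

import "fmt"

func runWithProgress(tasks map[string]func() error, maxAttemptsWithNoProgress int) error {
	pending := tasks
	noProgress := 0
	for len(pending) > 0 {
		failed := map[string]func() error{}
		var lastErr error
		for name, run := range pending {
			if err := run(); err != nil {
				failed[name] = run
				lastErr = err
			}
		}
		if len(failed) == len(pending) {
			// No task succeeded this pass
			noProgress++
			if noProgress == maxAttemptsWithNoProgress {
				return fmt.Errorf("did not make any progress executing task. Example error: %v", lastErr)
			}
		} else {
			noProgress = 0
		}
		pending = failed
	}
	return nil
}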
@@ -65,7 +65,7 @@ func (ha HashAlgorithm) FromString(s string) (*Hash, error) {
 	}

 	if len(s) != l {
-		return nil, fmt.Errorf("invalid %q hash - unexpected length %s", ha, len(s))
+		return nil, fmt.Errorf("invalid %q hash - unexpected length %d", ha, len(s))
 	}

 	hashValue, err := hex.DecodeString(s)
@@ -15,12 +15,13 @@ func buildFlags(options interface{}) (string, error) {

 	walker := func(path string, field *reflect.StructField, val reflect.Value) error {
 		if field == nil {
-			glog.V(4).Infof("not writing non-field: %s", path)
+			glog.V(8).Infof("ignoring non-field: %s", path)
 			return nil
 		}
 		tag := field.Tag.Get("flag")
 		if tag == "" {
 			glog.V(4).Infof("not writing field with no flag tag: %s", path)
+			// We want to descend - it could be a structure containing flags
 			return nil
 		}
 		if tag == "-" {

@@ -29,6 +30,8 @@ func buildFlags(options interface{}) (string, error) {
 		}
 		flagName := tag

+		// We do have to do this, even though the recursive walk will do it for us
+		// because when we descend we won't have `field` set
 		if val.Kind() == reflect.Ptr {
 			if val.IsNil() {
 				return nil

@@ -54,7 +57,8 @@ func buildFlags(options interface{}) (string, error) {
 		if flag != "" {
 			flags = append(flags, flag)
 		}
-		return nil
+		// Nothing more to do here
+		return utils.SkipReflection
 	}
 	err := utils.ReflectRecursive(reflect.ValueOf(options), walker)
 	if err != nil {
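The walker turns `flag` struct tags into command-line arguments for the component being configured. A flattened sketch of the same idea (one level only, no recursion into nested structs; the struct and its field names below are hypothetical) showing how the tags map to flags:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type kubeletFlags struct {
	NetworkPluginName string `flag:"network-plugin"`
	ReconcileCIDR     *bool  `flag:"reconcile-cidr"`
	Skipped           string `flag:"-"`
}

// buildFlagsSketch walks exported fields, skipping "-" tags and nil pointers,
// and renders the rest as --name=value arguments.
func buildFlagsSketch(options interface{}) string {
	var flags []string
	v := reflect.ValueOf(options)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("flag")
		if tag == "" || tag == "-" {
			continue // no flag tag, or explicitly skipped
		}
		fv := v.Field(i)
		if fv.Kind() == reflect.Ptr {
			if fv.IsNil() {
				continue // unset optional flag
			}
			fv = fv.Elem()
		}
		flags = append(flags, fmt.Sprintf("--%s=%v", tag, fv.Interface()))
	}
	return strings.Join(flags, " ")
}

func main() {
	yes := true
	fmt.Println(buildFlagsSketch(&kubeletFlags{NetworkPluginName: "kubenet", ReconcileCIDR: &yes}))
	// Output: --network-plugin=kubenet --reconcile-cidr=true
}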
@@ -8,17 +8,22 @@ import (
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/nodeup/cloudinit"
 	"k8s.io/kops/upup/pkg/fi/nodeup/local"
+	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
 	"k8s.io/kops/upup/pkg/fi/utils"
 	"k8s.io/kops/upup/pkg/fi/vfs"
+	"strconv"
 	"strings"
 )
 
+// We should probably retry for a long time - there is not really any great fallback
+const MaxAttemptsWithNoProgress = 100
+
 type NodeUpCommand struct {
 	config *NodeUpConfig
 	cluster *api.Cluster
 	ConfigLocation string
 	ModelDir string
-	AssetDir string
+	CacheDir string
 	Target string
 	FSRoot string
 }
@@ -42,10 +47,10 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 		return fmt.Errorf("ConfigLocation is required")
 	}
 
-	if c.AssetDir == "" {
-		return fmt.Errorf("AssetDir is required")
+	if c.CacheDir == "" {
+		return fmt.Errorf("CacheDir is required")
 	}
-	assets := fi.NewAssetStore(c.AssetDir)
+	assets := fi.NewAssetStore(c.CacheDir)
 	for _, asset := range c.config.Assets {
 		err := assets.Add(asset)
 		if err != nil {
@@ -135,6 +140,13 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 		return fmt.Errorf("error building loader: %v", err)
 	}
 
+	for i, image := range c.config.Images {
+		taskMap["LoadImage."+strconv.Itoa(i)] = &nodetasks.LoadImageTask{
+			Source: image.Source,
+			Hash: image.Hash,
+		}
+	}
+
 	var cloud fi.Cloud
 	var caStore fi.CAStore
 	var secretStore fi.SecretStore
@@ -143,7 +155,9 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 
 	switch c.Target {
 	case "direct":
-		target = &local.LocalTarget{}
+		target = &local.LocalTarget{
+			CacheDir: c.CacheDir,
+		}
 	case "dryrun":
 		target = fi.NewDryRunTarget(out)
 	case "cloudinit":
@@ -159,7 +173,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 	}
 	defer context.Close()
 
-	err = context.RunTasks(taskMap)
+	err = context.RunTasks(taskMap, MaxAttemptsWithNoProgress)
 	if err != nil {
 		glog.Exitf("error running tasks: %v", err)
 	}
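The new `MaxAttemptsWithNoProgress` constant is passed to `RunTasks`, which keeps retrying failed tasks as long as each pass makes some progress. The real scheduling lives in the `fi` package; the standalone loop below is only a sketch of that retry-until-no-progress idea (with a shortened sleep for the demo):

```go
package main

import (
	"fmt"
	"time"
)

// runWithRetries keeps retrying failed tasks, resetting the counter whenever a
// pass makes progress, and gives up after maxAttemptsWithNoProgress stalled passes.
func runWithRetries(tasks []func() error, maxAttemptsWithNoProgress int) error {
	remaining := tasks
	noProgress := 0
	for len(remaining) > 0 {
		var failed []func() error
		for _, t := range remaining {
			if err := t(); err != nil {
				failed = append(failed, t)
			}
		}
		if len(failed) == len(remaining) {
			noProgress++
			if noProgress >= maxAttemptsWithNoProgress {
				return fmt.Errorf("giving up after %d attempts with no progress", noProgress)
			}
			fmt.Println("No progress made, sleeping before retrying failed tasks")
			time.Sleep(100 * time.Millisecond) // the real loop waits much longer
		} else {
			noProgress = 0
		}
		remaining = failed
	}
	return nil
}

func main() {
	calls := 0
	flaky := func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("not ready yet")
		}
		return nil
	}
	if err := runWithRetries([]func() error{flaky}, 100); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("all tasks succeeded after", calls, "calls")
}
```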
@@ -11,6 +11,9 @@ type NodeUpConfig struct {
 	// TODO: Remove once everything is in containers?
 	Assets []string `json:",omitempty"`
 
+	// Images are a list of images we should preload
+	Images []*Image `json:"images,omitempty"`
+
 	// ClusterLocation is the VFS path to the cluster spec
 	ClusterLocation string `json:",omitempty"`
 
@@ -18,40 +21,16 @@ type NodeUpConfig struct {
 	// Technically this is redundant - it is in ClusterLocation, but this can serve as a cross-check,
 	// and it allows us to more easily identify the cluster, for example when we are deleting resources.
 	ClusterName string `json:",omitempty"`
 
+	// ProtokubeImage is the docker image to load for protokube (bootstrapping)
+	ProtokubeImage *Image `json:"protokubeImage"`
 }
 
-// Our client configuration structure
-// Wherever possible, we try to use the types & names in https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
-//type NodeConfig struct {
-//	Kubelet *KubeletConfig `json:",omitempty"`
-//	KubeProxy *KubeProxyConfig `json:",omitempty"`
-//	KubeControllerManager *KubeControllerManagerConfig `json:",omitempty"`
-//	KubeScheduler *KubeSchedulerConfig `json:",omitempty"`
-//	Docker *DockerConfig `json:",omitempty"`
-//	APIServer *APIServerConfig `json:",omitempty"`
-//
-//	DNS *DNSConfig `json:",omitempty"`
-//
-//	// NodeConfig can directly access a store of secrets, keys or configuration
-//	// (for example on S3) and then configure based on that
-//	// This supports (limited) dynamic reconfiguration also
-//	SecretStore string `json:",omitempty"`
-//	KeyStore string `json:",omitempty"`
-//	ConfigStore string `json:",omitempty"`
-//
-//	KubeUser string `json:",omitempty"`
-//
-//	Tags []string `json:",omitempty"`
-//	Assets []string `json:",omitempty"`
-//
-//	MasterInternalName string `json:",omitempty"`
-//
-//	// The DNS zone to use if configuring a cloud provided DNS zone
-//	DNSZone string `json:",omitempty"`
-//
-//	// Deprecated in favor of KeyStore / SecretStore
-//	Tokens map[string]string `json:",omitempty"`
-//	Certificates map[string]*fi.Certificate `json:",omitempty"`
-//	PrivateKeys map[string]*fi.PrivateKey `json:",omitempty"`
-//}
+// Image is a docker image we should pre-load
+type Image struct {
+	// Source is the URL from which we should download the image
+	Source string `json:"source,omitempty"`
+
+	// Hash is the hash of the file, to verify image integrity (even over http)
+	Hash string `json:"hash,omitempty"`
+}
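Given the `json` tags above, a NodeUp configuration carrying preloaded images would serialize roughly as shown by this self-contained sketch; the URL and hash values are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Image mirrors the struct added above; redefined locally so the demo is self-contained.
type Image struct {
	Source string `json:"source,omitempty"`
	Hash   string `json:"hash,omitempty"`
}

func main() {
	cfg := map[string]interface{}{
		"images": []*Image{
			{
				Source: "https://example.com/images/protokube.tar",  // placeholder URL
				Hash:   "da39a3ee5e6b4b0d3255bfef95601890afd80709", // placeholder hex hash
			},
		},
	}
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```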
@@ -124,7 +124,18 @@ func (r *Loader) newTaskHandler(prefix string, builder TaskBuilder) loader.Handl
 		if err != nil {
 			return err
 		}
-		task, err := builder(i.Name, contents, i.Meta)
+		name := i.Name
+		if strings.HasSuffix(name, ".template") {
+			name = strings.TrimSuffix(name, ".template")
+			expanded, err := r.executeTemplate(name, contents)
+			if err != nil {
+				return fmt.Errorf("error executing template %q: %v", i.RelativePath, err)
+			}
+
+			contents = expanded
+		}
+
+		task, err := builder(name, contents, i.Meta)
 		if err != nil {
 			return fmt.Errorf("error building %s for %q: %v", i.Name, i.Path, err)
 		}
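The `.template` handling above expands matching files through a template engine before handing them to the task builder under the trimmed name. `executeTemplate` itself is part of the kops loader; this standalone sketch only illustrates the mechanism using Go's text/template with assumed example data:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

// expandIfTemplate trims the ".template" suffix and expands the contents;
// non-template files pass through unchanged.
func expandIfTemplate(name, contents string, data interface{}) (string, string, error) {
	if !strings.HasSuffix(name, ".template") {
		return name, contents, nil
	}
	name = strings.TrimSuffix(name, ".template")
	t, err := template.New(name).Parse(contents)
	if err != nil {
		return "", "", fmt.Errorf("error parsing template %q: %v", name, err)
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, data); err != nil {
		return "", "", fmt.Errorf("error executing template %q: %v", name, err)
	}
	return name, buf.String(), nil
}

func main() {
	// Example file name, contents, and data are illustrative only.
	name, out, err := expandIfTemplate("kubelet.service.template",
		"ExecStart={{ .Binary }}",
		map[string]string{"Binary": "/usr/local/bin/kubelet"})
	if err != nil {
		panic(err)
	}
	fmt.Println(name, "=>", out)
}
```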
@@ -3,6 +3,7 @@ package local
 import "k8s.io/kops/upup/pkg/fi"
 
 type LocalTarget struct {
+	CacheDir string
 }
 
 var _ fi.Target = &LocalTarget{}
@@ -0,0 +1,71 @@
+package nodetasks
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/kops/upup/pkg/fi"
+	"k8s.io/kops/upup/pkg/fi/hashing"
+	"k8s.io/kops/upup/pkg/fi/nodeup/cloudinit"
+	"k8s.io/kops/upup/pkg/fi/nodeup/local"
+	"k8s.io/kops/upup/pkg/fi/utils"
+	"os/exec"
+	"path"
+	"strings"
+)
+
+// LoadImageTask is responsible for downloading a docker image
+type LoadImageTask struct {
+	Source string
+	Hash string
+}
+
+var _ fi.Task = &LoadImageTask{}
+
+func (t *LoadImageTask) String() string {
+	return fmt.Sprintf("LoadImageTask: %s", t.Source)
+}
+
+func (e *LoadImageTask) Find(c *fi.Context) (*LoadImageTask, error) {
+	glog.Warningf("LoadImageTask checking if image present not yet implemented")
+	return nil, nil
+}
+
+func (e *LoadImageTask) Run(c *fi.Context) error {
+	return fi.DefaultDeltaRunMethod(e, c)
+}
+
+func (_ *LoadImageTask) CheckChanges(a, e, changes *LoadImageTask) error {
+	return nil
+}
+
+func (_ *LoadImageTask) RenderLocal(t *local.LocalTarget, a, e, changes *LoadImageTask) error {
+	hash, err := hashing.FromString(e.Hash)
+	if err != nil {
+		return err
+	}
+
+	url := e.Source
+
+	localFile := path.Join(t.CacheDir, hash.String()+"_"+utils.SanitizeString(url))
+	_, err = fi.DownloadURL(url, localFile, hash)
+	if err != nil {
+		return err
+	}
+
+	// Load the image into docker
+	args := []string{"docker", "load", "-i", localFile}
+	human := strings.Join(args, " ")
+
+	glog.Infof("running command %s", human)
+	cmd := exec.Command(args[0], args[1:]...)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("error loading docker image with '%s': %v: %s", human, err, string(output))
+	}
+
+	return nil
+}
+
+func (_ *LoadImageTask) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *LoadImageTask) error {
+	return fmt.Errorf("LoadImageTask::RenderCloudInit not implemented")
+}
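The local render above caches the downloaded tarball under a name derived from the hash and the sanitized source URL before running `docker load`. A self-contained sketch of that cache-path convention; `sanitize` here is a stand-in for the kops `utils.SanitizeString` helper, and the values are placeholders:

```go
package main

import (
	"fmt"
	"path"
	"regexp"
)

// unsafeChars matches characters that are awkward in filenames.
var unsafeChars = regexp.MustCompile(`[^0-9A-Za-z._-]`)

// sanitize replaces unsafe characters with underscores; the real
// utils.SanitizeString may differ in detail.
func sanitize(s string) string {
	return unsafeChars.ReplaceAllString(s, "_")
}

func main() {
	cacheDir := "/var/cache/nodeup"                           // placeholder cache dir
	hash := "da39a3ee5e6b4b0d3255bfef95601890afd80709"        // placeholder hex hash
	url := "https://example.com/images/protokube.tar"         // placeholder URL

	// The cached file name is <hash>_<sanitized-url> inside the cache dir.
	localFile := path.Join(cacheDir, hash+"_"+sanitize(url))
	fmt.Println(localFile)
}
```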
@@ -41,7 +41,7 @@ func (p *Service) GetDependencies(tasks map[string]fi.Task) []fi.Task {
 		// We assume that services depend on basically everything
 		typeName := utils.BuildTypeName(reflect.TypeOf(v))
 		switch typeName {
-		case "*CopyAssetTask", "*File", "*Package", "*Sysctl", "*UpdatePackages", "*User", "*Disk":
+		case "*CopyAssetTask", "*File", "*Package", "*Sysctl", "*UpdatePackages", "*UserTask", "*Disk":
 			deps = append(deps, v)
 		case "*Service":
 			// ignore
Some files were not shown because too many files have changed in this diff.