Merge branch 'master' into patch-1

Leandro Repolho 2017-04-20 21:28:03 +10:00
commit 77bf343a5e
2447 changed files with 441814 additions and 4012 deletions

.gitmodules

@@ -52,9 +52,6 @@
[submodule "_vendor/github.com/google/gofuzz"]
path = _vendor/github.com/google/gofuzz
url = https://github.com/google/gofuzz
[submodule "_vendor/github.com/golang/protobuf"]
path = _vendor/github.com/golang/protobuf
url = https://github.com/golang/protobuf
[submodule "_vendor/github.com/gogo/protobuf"]
path = _vendor/github.com/gogo/protobuf
url = https://github.com/gogo/protobuf
@@ -259,3 +256,33 @@
[submodule "_vendor/github.com/pelletier/go-buffruneio"]
path = _vendor/github.com/pelletier/go-buffruneio
url = https://github.com/pelletier/go-buffruneio
[submodule "_vendor/github.com/coreos/etcd"]
path = _vendor/github.com/coreos/etcd
url = https://github.com/coreos/etcd
[submodule "_vendor/google.golang.org/grpc"]
path = _vendor/google.golang.org/grpc
url = https://github.com/grpc/grpc-go.git
[submodule "_vendor/github.com/elazarl/go-bindata-assetfs"]
path = _vendor/github.com/elazarl/go-bindata-assetfs
url = https://github.com/elazarl/go-bindata-assetfs
[submodule "_vendor/github.com/grpc-ecosystem"]
path = _vendor/github.com/grpc-ecosystem
url = https://github.com/grpc-ecosystem/grpc-gateway
[submodule "_vendor/gopkg.in/natefinch/lumberjack.v2"]
path = _vendor/gopkg.in/natefinch/lumberjack.v2
url = https://gopkg.in/natefinch/lumberjack.v2
[submodule "_vendor/github.com/golang/protobuf"]
path = _vendor/github.com/golang/protobuf
url = https://github.com/golang/protobuf
[submodule "_vendor/github.com/vmware/govmomi"]
path = _vendor/github.com/vmware/govmomi
url = https://github.com/vmware/govmomi.git
[submodule "_vendor/github.com/coreos/go-semver"]
path = _vendor/github.com/coreos/go-semver
url = https://github.com/coreos/go-semver.git
[submodule "_vendor/github.com/miekg/coredns"]
path = _vendor/github.com/miekg/coredns
url = https://github.com/miekg/coredns.git
[submodule "_vendor/github.com/miekg/dns"]
path = _vendor/github.com/miekg/dns
url = https://github.com/miekg/dns.git

Makefile

@@ -23,16 +23,16 @@ GCS_URL=$(GCS_LOCATION:gs://%=https://storage.googleapis.com/%)
LATEST_FILE?=latest-ci.txt
GOPATH_1ST=$(shell echo ${GOPATH} | cut -d : -f 1)
UNIQUE:=$(shell date +%s)
GOVERSION=1.7.4
GOVERSION=1.7.5
# See http://stackoverflow.com/questions/18136918/how-to-get-current-relative-directory-of-your-makefile
MAKEDIR:=$(strip $(shell dirname "$(realpath $(lastword $(MAKEFILE_LIST)))"))
# Keep in sync with upup/models/cloudup/resources/addons/dns-controller/
DNS_CONTROLLER_TAG=1.5.2
DNS_CONTROLLER_TAG=1.6.0
KOPS_RELEASE_VERSION=1.5.3
KOPS_CI_VERSION=1.6.0-alpha.0
KOPS_RELEASE_VERSION=1.6.0-alpha.2
KOPS_CI_VERSION=1.6.0-alpha.3
GITSHA := $(shell cd ${GOPATH_1ST}/src/k8s.io/kops; git describe --always)
@@ -105,11 +105,14 @@ codegen: kops-gobindata
test:
go test k8s.io/kops/pkg/... -args -v=1 -logtostderr
go test k8s.io/kops/nodeup/pkg/... -args -v=1 -logtostderr
go test k8s.io/kops/upup/pkg/... -args -v=1 -logtostderr
go test k8s.io/kops/nodeup/pkg/... -args -v=1 -logtostderr
go test k8s.io/kops/protokube/... -args -v=1 -logtostderr
go test k8s.io/kops/dns-controller/pkg/... -args -v=1 -logtostderr
go test k8s.io/kops/cmd/... -args -v=1 -logtostderr
go test k8s.io/kops/tests/... -args -v=1 -logtostderr
go test k8s.io/kops/cmd/... -args -v=1 -logtostderr
go test k8s.io/kops/channels/... -args -v=1 -logtostderr
go test k8s.io/kops/util/... -args -v=1 -logtostderr
crossbuild-nodeup:
@@ -126,6 +129,8 @@ crossbuild:
GOOS=darwin GOARCH=amd64 go build -a ${EXTRA_BUILDFLAGS} -o .build/dist/darwin/amd64/kops -ldflags "${EXTRA_LDFLAGS} -X k8s.io/kops.Version=${VERSION} -X k8s.io/kops.GitVersion=${GITSHA}" k8s.io/kops/cmd/kops
GOOS=linux GOARCH=amd64 go build -a ${EXTRA_BUILDFLAGS} -o .build/dist/linux/amd64/kops -ldflags "${EXTRA_LDFLAGS} -X k8s.io/kops.Version=${VERSION} -X k8s.io/kops.GitVersion=${GITSHA}" k8s.io/kops/cmd/kops
crossbuild-in-docker:
docker pull golang:${GOVERSION} # Keep golang image up to date
docker run --name=kops-build-${UNIQUE} -e STATIC_BUILD=yes -e VERSION=${VERSION} -v ${MAKEDIR}:/go/src/k8s.io/kops golang:${GOVERSION} make -f /go/src/k8s.io/kops/Makefile crossbuild
@@ -170,7 +175,7 @@ gcs-publish-ci: gcs-upload
gsutil -h "Cache-Control:private, max-age=0, no-transform" cp .build/upload/${LATEST_FILE} ${GCS_LOCATION}
gen-cli-docs:
KOPS_STATE_STORE= kops genhelpdocs --out docs/cli
@kops genhelpdocs --out docs/cli
# Will always push a linux-based build up to the server
push: crossbuild-nodeup
@@ -183,7 +188,8 @@ push-aws-dry: push
ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --dryrun --v=8
push-gce-run: push
ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=metadata://gce/config --v=8
ssh ${TARGET} sudo cp /tmp/nodeup /home/kubernetes/bin/nodeup
ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /home/kubernetes/bin/nodeup --conf=/var/lib/toolbox/kubernetes-install/kube_env.yaml --v=8
# -t is for CentOS http://unix.stackexchange.com/questions/122616/why-do-i-need-a-tty-to-run-sudo-if-i-can-sudo-without-a-password
push-aws-run: push
@@ -321,6 +327,15 @@ channels: channels-gocode
channels-gocode:
go install ${EXTRA_BUILDFLAGS} -ldflags "-X k8s.io/kops.Version=${VERSION} ${EXTRA_LDFLAGS}" k8s.io/kops/channels/cmd/channels
# --------------------------------------------------
# release tasks
release-tag:
git tag ${KOPS_RELEASE_VERSION}
release-github:
shipbot -tag ${KOPS_RELEASE_VERSION} -config .shipbot.yaml
# --------------------------------------------------
# API / embedding examples
@@ -341,3 +356,20 @@ apimachinery:
#cd pkg/apis/kops/v1alpha2/ && ~/k8s/bin/codecgen -d 1234 -o types.generated.go instancegroup.go cluster.go federation.go
#cd pkg/apis/kops/v1alpha1/ && ~/k8s/bin/codecgen -d 1234 -o types.generated.go instancegroup.go cluster.go federation.go
#cd pkg/apis/kops/ && ~/k8s/bin/codecgen -d 1234 -o types.generated.go instancegroup.go cluster.go federation.go
# -----------------------------------------------------
# kops-server
kops-server-docker-compile:
GOOS=linux GOARCH=amd64 go build -a ${EXTRA_BUILDFLAGS} -o .build/dist/linux/amd64/kops-server -ldflags "${EXTRA_LDFLAGS} -X k8s.io/kops-server.Version=${VERSION} -X k8s.io/kops-server.GitVersion=${GITSHA}" k8s.io/kops/cmd/kops-server
kops-server-build:
# Compile the API binary in linux, and copy to local filesystem
docker pull golang:${GOVERSION}
docker run --name=kops-server-build-${UNIQUE} -e STATIC_BUILD=yes -e VERSION=${VERSION} -v ${GOPATH}/src:/go/src -v ${MAKEDIR}:/go/src/k8s.io/kops golang:${GOVERSION} make -f /go/src/k8s.io/kops/Makefile kops-server-docker-compile
docker cp kops-server-build-${UNIQUE}:/go/.build .
docker build -t ${DOCKER_REGISTRY}/kops-server:latest -f images/kops-server/Dockerfile .
kops-server-push: kops-server-build
docker push ${DOCKER_REGISTRY}/kops-server:latest
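
The crossbuild and kops-server targets above both stamp the binary at link time via `-ldflags "-X k8s.io/kops.Version=..."`. A minimal sketch of that linker mechanism (the package path and variable here are illustrative, not the actual kops wiring):

```go
package main

import "fmt"

// Version holds a development default; the linker overwrites it when
// the binary is built with, e.g.:
//
//	go build -ldflags "-X main.Version=1.6.0-alpha.2" .
var Version = "dev"

func main() {
	fmt.Println("version:", Version)
}
```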

_vendor/github.com/coreos/etcd (new submodule)

@@ -0,0 +1 @@
Subproject commit cc198e22d3b8fd7ec98304c95e68ee375be54589

_vendor/github.com/coreos/go-semver (new submodule)

@@ -0,0 +1 @@
Subproject commit 5e3acbb5668c4c3deb4842615c4098eb61fb6b1e

@@ -0,0 +1 @@
Subproject commit 30f82fa23fd844bd5bb1e5f216db87fd77b5eb43

@@ -1 +1 @@
Subproject commit 8616e8ee5e20a1704615e6c8d7afcdac06087a67
Subproject commit c9c7427a2a70d2eb3bafa0ab2dc163e45f143317

_vendor/github.com/grpc-ecosystem (new submodule)

@@ -0,0 +1 @@
Subproject commit 04870f0741c24d2bfc76a7c7db112f4e107e9ada

_vendor/github.com/miekg/coredns (new submodule)

@@ -0,0 +1 @@
Subproject commit 757f49d8ff2687d289468b00835f360614357252

_vendor/github.com/miekg/dns (new submodule)

@@ -0,0 +1 @@
Subproject commit 25ac7f171497271bc74ad3c6b5e1f86b4bab54fa

_vendor/github.com/vmware/govmomi (new submodule)

@@ -0,0 +1 @@
Subproject commit 2d7d7b3702fddc23a76ac283c3a3d56bb0375e62

_vendor/google.golang.org/grpc (new submodule)

@@ -0,0 +1 @@
Subproject commit 231b4cfea0e79843053a33f5fe90bd4d84b23cd3

@@ -0,0 +1 @@
Subproject commit dd45e6a67c53f673bb49ca8a001fd3a63ceb640e


@@ -15,6 +15,7 @@ spec:
labels:
k8s-app: cluster-autoscaler
annotations:
# For 1.6, we keep the old tolerations in case of a downgrade to 1.5
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated", "value":"master"}]'
spec:
containers:
@@ -43,4 +44,7 @@ spec:
hostPath:
path: {{SSL_CERT_PATH}}
nodeSelector:
kubernetes.io/role: master
node-role.kubernetes.io/master: ""
tolerations:
- key: "node-role.kubernetes.io/master"
effect: NoSchedule


@@ -14,4 +14,8 @@ spec:
- version: 1.5.0
selector:
k8s-addon: kubernetes-dashboard.addons.k8s.io
manifest: v1.5.0.yaml
manifest: v1.5.0.yaml
- version: 1.6.0
selector:
k8s-addon: kubernetes-dashboard.addons.k8s.io
manifest: v1.6.0.yaml


@@ -0,0 +1,62 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-addon: kubernetes-dashboard.addons.k8s.io
k8s-app: kubernetes-dashboard
version: v1.6.0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-addon: kubernetes-dashboard.addons.k8s.io
k8s-app: kubernetes-dashboard
version: v1.6.0
kubernetes.io/cluster-service: "true"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 100m
memory: 50Mi
ports:
- containerPort: 9090
livenessProbe:
httpGet:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-addon: kubernetes-dashboard.addons.k8s.io
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
selector:
k8s-app: kubernetes-dashboard
ports:
- port: 80
targetPort: 9090


@@ -14,9 +14,7 @@ The project is created by wearemolecule, and maintained at
### Deploy To Cluster
```
# Version 1.2.0
# https://github.com/wearemolecule/route53-kubernetes/tree/v1.2.0
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/route53-mapper/v1.2.0.yml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/route53-mapper/v1.3.0.yml
```
**Important:**


@@ -7,3 +7,7 @@ spec:
selector:
k8s-addon: route53-mapper.addons.k8s.io
manifest: v1.2.0.yaml
- version: 1.3.0
selector:
k8s-addon: route53-mapper.addons.k8s.io
manifest: v1.3.0.yaml


@@ -0,0 +1,26 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: route53-mapper
namespace: kube-system
labels:
app: route53-mapper
k8s-addon: route53-mapper.addons.k8s.io
spec:
replicas: 1
selector:
matchLabels:
app: route53-mapper
template:
metadata:
labels:
app: route53-mapper
annotations:
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated", "value":"master"}]'
spec:
nodeSelector:
kubernetes.io/role: master
containers:
- image: quay.io/molecule/route53-kubernetes:v1.3.0
name: route53-mapper


@@ -7,18 +7,24 @@ spec:
- name: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
providerID: aws
kubernetesVersion: ">=1.5.0"
- providerID: gce
name: "cos-cloud/cos-stable-56-9000-84-2"
cluster:
kubernetesVersion: v1.5.4
kubernetesVersion: v1.5.6
networking:
kubenet: {}
kubernetesVersions:
- range: ">=1.5.0"
recommendedVersion: 1.5.4
recommendedVersion: 1.5.6
requiredVersion: 1.5.1
- range: "<1.5.0"
recommendedVersion: 1.4.9
requiredVersion: 1.4.2
kopsVersions:
- range: ">=1.6.0-alpha.1"
#recommendedVersion: 1.6.0
#requiredVersion: 1.6.0
kubernetesVersion: 1.6.0
- range: ">=1.5.0-alpha1"
recommendedVersion: 1.5.1
#requiredVersion: 1.5.1


@@ -18,16 +18,14 @@ package main
import (
"fmt"
"k8s.io/kops/channels/pkg/cmd"
"os"
)
func main() {
Execute()
}
// exitWithError will terminate execution with an error result
// It prints the error to stderr and exits with a non-zero exit code
func exitWithError(err error) {
fmt.Fprintf(os.Stderr, "\n%v\n", err)
os.Exit(1)
f := &cmd.DefaultFactory{}
if err := cmd.Execute(f, os.Stdout); err != nil {
fmt.Fprintf(os.Stderr, "\n%v\n", err)
os.Exit(1)
}
}


@@ -45,6 +45,17 @@ type AddonSpec struct {
// Version is a semver version
Version *string `json:"version,omitempty"`
// Manifest is a strings containing the URL to the manifest that should be applied
// Manifest is the URL to the manifest that should be applied
Manifest *string `json:"manifest,omitempty"`
// KubernetesVersion is a semver version range on which this version of the addon can be applied
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
// Id is an optional value which can be used to force a refresh even if the Version matches
// This is useful for when we have two manifests expressing the same addon version for two
// different kubernetes api versions. For example, we might label the 1.5 version "k8s-1.5"
// and the 1.6 version "k8s-1.6". Both would have the same Version, determined by the
// version of the software we are packaging. But we always want to reinstall when we
// switch kubernetes versions.
Id string `json:"id,omitempty"`
}
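
The Id field deserves a concrete illustration. A hedged sketch (the manifest file names are invented, and s is a *string helper like the one the tests later in this commit define): two specs share Version 1.6.0 but carry different ids, so switching Kubernetes minors forces a reinstall even though the addon version is unchanged.

```go
package main

import "k8s.io/kops/channels/pkg/api"

func s(v string) *string { return &v }

// The same addon version packaged twice, once per Kubernetes minor;
// only Id and KubernetesVersion differ, so the id tie-break forces a
// refresh when the cluster crosses 1.6.0.
var dashboardVariants = []*api.AddonSpec{
	{
		Version:           s("1.6.0"),
		Id:                "k8s-1.5",
		KubernetesVersion: "<1.6.0",
		Manifest:          s("v1.6.0-k8s-1.5.yaml"), // hypothetical manifest name
	},
	{
		Version:           s("1.6.0"),
		Id:                "k8s-1.6",
		KubernetesVersion: ">=1.6.0",
		Manifest:          s("v1.6.0.yaml"), // hypothetical manifest name
	},
}
```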


@@ -25,6 +25,7 @@ import (
"net/url"
)
// Addon is a wrapper around a single version of an addon
type Addon struct {
Name string
ChannelName string
@@ -32,16 +33,42 @@ type Addon struct {
Spec *api.AddonSpec
}
// AddonUpdate holds data about a proposed update to an addon
type AddonUpdate struct {
Name string
ExistingVersion *ChannelVersion
NewVersion *ChannelVersion
}
// AddonMenu is a collection of addons, with helpers for computing the latest versions
type AddonMenu struct {
Addons map[string]*Addon
}
func NewAddonMenu() *AddonMenu {
return &AddonMenu{
Addons: make(map[string]*Addon),
}
}
func (m *AddonMenu) MergeAddons(o *AddonMenu) {
for k, v := range o.Addons {
existing := m.Addons[k]
if existing == nil {
m.Addons[k] = v
} else {
if v.ChannelVersion().replaces(existing.ChannelVersion()) {
m.Addons[k] = v
}
}
}
}
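
A short sketch of how the new menu type composes: each channel yields its own AddonMenu, and merging keeps, per addon name, the entry that wins under replaces.

```go
package main

import "k8s.io/kops/channels/pkg/channels"

// mergeAll folds per-channel menus into one; for each addon name the
// newest ChannelVersion survives.
func mergeAll(menus []*channels.AddonMenu) *channels.AddonMenu {
	merged := channels.NewAddonMenu()
	for _, m := range menus {
		merged.MergeAddons(m)
	}
	return merged
}
```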
func (a *Addon) ChannelVersion() *ChannelVersion {
return &ChannelVersion{
Channel: &a.ChannelName,
Version: a.Spec.Version,
Id: a.Spec.Id,
}
}
@@ -67,7 +94,7 @@ func (a *Addon) GetRequiredUpdates(k8sClient kubernetes.Interface) (*AddonUpdate
return nil, err
}
if existingVersion != nil && !newVersion.Replaces(existingVersion) {
if existingVersion != nil && !newVersion.replaces(existingVersion) {
return nil, nil
}


@@ -18,6 +18,7 @@ package channels
import (
"fmt"
"github.com/blang/semver"
"github.com/golang/glog"
"k8s.io/kops/channels/pkg/api"
"k8s.io/kops/upup/pkg/fi/utils"
@@ -58,28 +59,29 @@ func ParseAddons(name string, location *url.URL, data []byte) (*Addons, error) {
return &Addons{ChannelName: name, ChannelLocation: *location, APIObject: apiObject}, nil
}
func (a *Addons) GetCurrent() ([]*Addon, error) {
all, err := a.All()
func (a *Addons) GetCurrent(kubernetesVersion semver.Version) (*AddonMenu, error) {
all, err := a.wrapInAddons()
if err != nil {
return nil, err
}
specs := make(map[string]*Addon)
menu := NewAddonMenu()
for _, addon := range all {
if !addon.matches(kubernetesVersion) {
continue
}
name := addon.Name
existing := specs[name]
if existing == nil || addon.ChannelVersion().Replaces(existing.ChannelVersion()) {
specs[name] = addon
existing := menu.Addons[name]
if existing == nil || addon.ChannelVersion().replaces(existing.ChannelVersion()) {
menu.Addons[name] = addon
}
}
var addons []*Addon
for _, addon := range specs {
addons = append(addons, addon)
}
return addons, nil
return menu, nil
}
func (a *Addons) All() ([]*Addon, error) {
func (a *Addons) wrapInAddons() ([]*Addon, error) {
var addons []*Addon
for _, s := range a.APIObject.Spec.Addons {
name := a.APIObject.ObjectMeta.Name
@@ -98,3 +100,19 @@ func (a *Addons) All() ([]*Addon, error) {
}
return addons, nil
}
func (s *Addon) matches(kubernetesVersion semver.Version) bool {
if s.Spec.KubernetesVersion != "" {
versionRange, err := semver.ParseRange(s.Spec.KubernetesVersion)
if err != nil {
glog.Warningf("unable to parse KubernetesVersion %q; skipping", s.Spec.KubernetesVersion)
return false
}
if !versionRange(kubernetesVersion) {
glog.V(4).Infof("Skipping version range %q that does not match current version %s", s.Spec.KubernetesVersion, kubernetesVersion)
return false
}
}
return true
}


@@ -0,0 +1,154 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package channels
import (
"github.com/blang/semver"
"k8s.io/kops/channels/pkg/api"
"testing"
)
func Test_Filtering(t *testing.T) {
grid := []struct {
Input api.AddonSpec
KubernetesVersion string
Expected bool
}{
{
Input: api.AddonSpec{
KubernetesVersion: ">=1.6.0",
},
KubernetesVersion: "1.6.0",
Expected: true,
},
{
Input: api.AddonSpec{
KubernetesVersion: "<1.6.0",
},
KubernetesVersion: "1.6.0",
Expected: false,
},
{
Input: api.AddonSpec{
KubernetesVersion: ">=1.6.0",
},
KubernetesVersion: "1.5.9",
Expected: false,
},
{
Input: api.AddonSpec{
KubernetesVersion: ">=1.4.0 <1.6.0",
},
KubernetesVersion: "1.5.9",
Expected: true,
},
{
Input: api.AddonSpec{
KubernetesVersion: ">=1.4.0 <1.6.0",
},
KubernetesVersion: "1.6.0",
Expected: false,
},
}
for _, g := range grid {
k8sVersion := semver.MustParse(g.KubernetesVersion)
addon := &Addon{
Spec: &g.Input,
}
actual := addon.matches(k8sVersion)
if actual != g.Expected {
t.Errorf("unexpected result from %v, %s. got %v", g.Input.KubernetesVersion, g.KubernetesVersion, actual)
}
}
}
func Test_Replacement(t *testing.T) {
grid := []struct {
Old *ChannelVersion
New *ChannelVersion
Replaces bool
}{
// With no id, update iff newer semver
{
Old: &ChannelVersion{Version: s("1.0.0"), Id: ""},
New: &ChannelVersion{Version: s("1.0.0"), Id: ""},
Replaces: false,
},
{
Old: &ChannelVersion{Version: s("1.0.0"), Id: ""},
New: &ChannelVersion{Version: s("1.0.1"), Id: ""},
Replaces: true,
},
{
Old: &ChannelVersion{Version: s("1.0.1"), Id: ""},
New: &ChannelVersion{Version: s("1.0.0"), Id: ""},
Replaces: false,
},
{
Old: &ChannelVersion{Version: s("1.1.0"), Id: ""},
New: &ChannelVersion{Version: s("1.1.1"), Id: ""},
Replaces: true,
},
{
Old: &ChannelVersion{Version: s("1.1.1"), Id: ""},
New: &ChannelVersion{Version: s("1.1.0"), Id: ""},
Replaces: false,
},
// With id, update if different id and same version, otherwise follow semver
{
Old: &ChannelVersion{Version: s("1.0.0"), Id: "a"},
New: &ChannelVersion{Version: s("1.0.0"), Id: "a"},
Replaces: false,
},
{
Old: &ChannelVersion{Version: s("1.0.0"), Id: "a"},
New: &ChannelVersion{Version: s("1.0.0"), Id: "b"},
Replaces: true,
},
{
Old: &ChannelVersion{Version: s("1.0.0"), Id: "b"},
New: &ChannelVersion{Version: s("1.0.0"), Id: "a"},
Replaces: true,
},
{
Old: &ChannelVersion{Version: s("1.0.0"), Id: "a"},
New: &ChannelVersion{Version: s("1.0.1"), Id: "a"},
Replaces: true,
},
}
for _, g := range grid {
actual := g.New.replaces(g.Old)
if actual != g.Replaces {
t.Errorf("unexpected result from %v -> %v, expect %t. actual %v", g.Old, g.New, g.Replaces, actual)
}
}
}
func s(v string) *string {
return &v
}


@@ -38,6 +38,7 @@ type Channel struct {
type ChannelVersion struct {
Version *string `json:"version,omitempty"`
Channel *string `json:"channel,omitempty"`
Id string `json:"id,omitempty"`
}
func stringValue(s *string) string {
@@ -48,7 +49,11 @@ func stringValue(s *string) string {
}
func (c *ChannelVersion) String() string {
return "Version=" + stringValue(c.Version) + " Channel=" + stringValue(c.Channel)
s := "Version=" + stringValue(c.Version) + " Channel=" + stringValue(c.Channel)
if c.Id != "" {
s += " Id=" + c.Id
}
return s
}
func ParseChannelVersion(s string) (*ChannelVersion, error) {
@@ -91,7 +96,7 @@ func (c *Channel) AnnotationName() string {
return AnnotationPrefix + c.Name
}
func (c *ChannelVersion) Replaces(existing *ChannelVersion) bool {
func (c *ChannelVersion) replaces(existing *ChannelVersion) bool {
if existing.Version != nil {
if c.Version == nil {
return false
@@ -106,13 +111,25 @@ func (c *ChannelVersion) Replaces(existing *ChannelVersion) bool {
glog.Warningf("error parsing existing version %q", *existing.Version)
return true
}
return cVersion.GT(existingVersion)
if cVersion.LT(existingVersion) {
return false
} else if cVersion.GT(existingVersion) {
return true
} else {
// Same version; check ids
if c.Id == existing.Id {
return false
} else {
glog.V(4).Infof("Channels had same version %q but different ids (%q vs %q); will replace", *c.Version, c.Id, existing.Id)
}
}
}
glog.Warningf("ChannelVersion did not have a version; can't perform real version check")
if c.Version == nil {
return false
}
return true
}
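
A worked example of the id tie-break introduced here. It has to live in the channels package, since replaces is now unexported; s is the *string helper the tests above define.

```go
package channels

import "fmt"

func ExampleChannelVersion_replaces() {
	existing := &ChannelVersion{Version: s("1.6.0"), Id: "k8s-1.5"}
	proposed := &ChannelVersion{Version: s("1.6.0"), Id: "k8s-1.6"}

	// Equal semver but different ids: replacement goes through in
	// either direction, forcing a reinstall on a minor-version switch.
	fmt.Println(proposed.replaces(existing))
	fmt.Println(existing.replaces(proposed))
	// Output:
	// true
	// true
}
```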


@@ -14,18 +14,21 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package main
package cmd
import (
"github.com/spf13/cobra"
"io"
)
// applyCmd represents the apply command
var applyCmd = &cobra.Command{
Use: "apply",
Short: "apply resources from a channel",
}
func NewCmdApply(f Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "apply",
Short: "apply resources from a channel",
}
func init() {
rootCommand.AddCommand(applyCmd)
// create subcommands
cmd.AddCommand(NewCmdApplyChannel(f, out))
return cmd
}


@@ -14,11 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package main
package cmd
import (
"fmt"
"github.com/blang/semver"
"github.com/spf13/cobra"
"io"
"k8s.io/kops/channels/pkg/channels"
"k8s.io/kops/util/pkg/tables"
"net/url"
@@ -26,38 +28,54 @@ import (
"strings"
)
type ApplyChannelCmd struct {
type ApplyChannelOptions struct {
Yes bool
Files []string
}
var applyChannel ApplyChannelCmd
func NewCmdApplyChannel(f Factory, out io.Writer) *cobra.Command {
var options ApplyChannelOptions
func init() {
cmd := &cobra.Command{
Use: "channel",
Short: "Apply channel",
Run: func(cmd *cobra.Command, args []string) {
err := applyChannel.Run(args)
if err != nil {
exitWithError(err)
}
RunE: func(cmd *cobra.Command, args []string) error {
return RunApplyChannel(f, out, &options, args)
},
}
cmd.Flags().BoolVar(&applyChannel.Yes, "yes", false, "Apply update")
cmd.Flags().StringSliceVar(&applyChannel.Files, "f", []string{}, "Apply from a local file")
cmd.Flags().BoolVar(&options.Yes, "yes", false, "Apply update")
cmd.Flags().StringSliceVarP(&options.Files, "filename", "f", []string{}, "Apply from a local file")
applyCmd.AddCommand(cmd)
return cmd
}
func (c *ApplyChannelCmd) Run(args []string) error {
k8sClient, err := rootCommand.KubernetesClient()
func RunApplyChannel(f Factory, out io.Writer, options *ApplyChannelOptions, args []string) error {
k8sClient, err := f.KubernetesClient()
if err != nil {
return err
}
var addons []*channels.Addon
kubernetesVersionInfo, err := k8sClient.Discovery().ServerVersion()
if err != nil {
return fmt.Errorf("error querying kubernetes version: %v", err)
}
//kubernetesVersion, err := semver.Parse(kubernetesVersionInfo.Major + "." + kubernetesVersionInfo.Minor + ".0")
//if err != nil {
// return fmt.Errorf("cannot parse kubernetes version %q", kubernetesVersionInfo.Major+"."+kubernetesVersionInfo.Minor + ".0")
//}
kubernetesVersion, err := semver.ParseTolerant(kubernetesVersionInfo.GitVersion)
if err != nil {
return fmt.Errorf("cannot parse kubernetes version %q", kubernetesVersionInfo.GitVersion)
}
// Remove Pre and Patch, as they make semver comparisons impractical
kubernetesVersion.Pre = nil
kubernetesVersion.Patch = 0
menu := channels.NewAddonMenu()
for _, name := range args {
location, err := url.Parse(name)
if err != nil {
@@ -80,14 +98,14 @@ func (c *ApplyChannelCmd) Run(args []string) error {
return fmt.Errorf("error loading channel %q: %v", location, err)
}
current, err := o.GetCurrent()
current, err := o.GetCurrent(kubernetesVersion)
if err != nil {
return fmt.Errorf("error processing latest versions in %q: %v", location, err)
}
addons = append(addons, current...)
menu.MergeAddons(current)
}
for _, f := range c.Files {
for _, f := range options.Files {
location, err := url.Parse(f)
if err != nil {
return fmt.Errorf("unable to parse argument %q as url", f)
@@ -108,16 +126,16 @@ func (c *ApplyChannelCmd) Run(args []string) error {
return fmt.Errorf("error loading file %q: %v", f, err)
}
current, err := o.GetCurrent()
current, err := o.GetCurrent(kubernetesVersion)
if err != nil {
return fmt.Errorf("error processing latest versions in %q: %v", f, err)
}
addons = append(addons, current...)
menu.MergeAddons(current)
}
var updates []*channels.AddonUpdate
var needUpdates []*channels.Addon
for _, addon := range addons {
for _, addon := range menu.Addons {
// TODO: Cache lookups to prevent repeated lookups?
update, err := addon.GetRequiredUpdates(k8sClient)
if err != nil {
@@ -165,7 +183,7 @@ func (c *ApplyChannelCmd) Run(args []string) error {
}
}
if !c.Yes {
if !options.Yes {
fmt.Printf("\nMust specify --yes to update\n")
return nil
}
@@ -178,7 +196,7 @@ func (c *ApplyChannelCmd) Run(args []string) error {
// Could have been a concurrent request
if update != nil {
if update.NewVersion.Version != nil {
fmt.Printf("Updated %q to %d\n", update.Name, *update.NewVersion)
fmt.Printf("Updated %q to %s\n", update.Name, *update.NewVersion.Version)
} else {
fmt.Printf("Updated %q\n", update.Name)
}
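
The version normalization above merits a standalone sketch. semver.ParseTolerant accepts GitVersion strings with a leading "v", and clearing Pre and Patch keeps pre-release and patch builds from failing addon ranges like ">=1.6.0":

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	r, err := semver.ParseRange(">=1.6.0")
	if err != nil {
		panic(err)
	}

	v, err := semver.ParseTolerant("v1.6.0-beta.1") // tolerates the "v" prefix
	if err != nil {
		panic(err)
	}
	fmt.Println(r(v)) // false: 1.6.0-beta.1 sorts below 1.6.0

	v.Pre = nil // as RunApplyChannel does before matching addon ranges
	v.Patch = 0
	fmt.Println(r(v)) // true: now compares as exactly 1.6.0
}
```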


@@ -0,0 +1,59 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
type Factory interface {
KubernetesClient() (kubernetes.Interface, error)
}
type DefaultFactory struct {
kubernetesClient kubernetes.Interface
}
var _ Factory = &DefaultFactory{}
func (f *DefaultFactory) KubernetesClient() (kubernetes.Interface, error) {
if f.kubernetesClient == nil {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
configOverrides := &clientcmd.ConfigOverrides{
ClusterDefaults: clientcmd.ClusterDefaults,
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
config, err := kubeConfig.ClientConfig()
if err != nil {
return nil, fmt.Errorf("cannot load kubecfg settings: %v", err)
}
k8sClient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("cannot build kube client: %v", err)
}
f.kubernetesClient = k8sClient
}
return f.kubernetesClient, nil
}
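
A sketch of driving the new Factory outside the cobra wiring, using the same Discovery call RunApplyChannel makes (import path as added by this commit):

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/kops/channels/pkg/cmd"
)

func main() {
	f := &cmd.DefaultFactory{}

	// Loads kubeconfig settings on first use, then returns the cached client.
	client, err := f.KubernetesClient()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}

	info, err := client.Discovery().ServerVersion()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	fmt.Println("server version:", info.GitVersion)
}
```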


@@ -14,36 +14,22 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package main
package cmd
import (
"github.com/spf13/cobra"
"io"
)
// GetCmd represents the get command
type GetCmd struct {
output string
cobraCommand *cobra.Command
}
var getCmd = GetCmd{
cobraCommand: &cobra.Command{
func NewCmdGet(f Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "get",
SuggestFor: []string{"list"},
Short: "list or get objects",
},
}
const (
OutputYaml = "yaml"
OutputTable = "table"
)
func init() {
cmd := getCmd.cobraCommand
rootCommand.AddCommand(cmd)
cmd.PersistentFlags().StringVarP(&getCmd.output, "output", "o", OutputTable, "output format. One of: table, yaml")
}
// create subcommands
cmd.AddCommand(NewCmdGetAddons(f, out))
return cmd
}


@@ -14,11 +14,28 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package main
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/kops/channels/pkg/channels"
@@ -26,26 +43,23 @@ import (
"os"
)
type GetAddonsCmd struct {
type GetAddonsOptions struct {
}
var getAddonsCmd GetAddonsCmd
func NewCmdGetAddons(f Factory, out io.Writer) *cobra.Command {
var options GetAddonsOptions
func init() {
cmd := &cobra.Command{
Use: "addons",
Aliases: []string{"addon"},
Short: "get addons",
Long: `List or get addons.`,
Run: func(cmd *cobra.Command, args []string) {
err := getAddonsCmd.Run(args)
if err != nil {
exitWithError(err)
}
RunE: func(cmd *cobra.Command, args []string) error {
return RunGetAddons(f, out, &options)
},
}
getCmd.cobraCommand.AddCommand(cmd)
return cmd
}
type addonInfo struct {
@@ -54,8 +68,8 @@ type addonInfo struct {
Namespace *v1.Namespace
}
func (c *GetAddonsCmd) Run(args []string) error {
k8sClient, err := rootCommand.KubernetesClient()
func RunGetAddons(f Factory, out io.Writer, options *GetAddonsOptions) error {
k8sClient, err := f.KubernetesClient()
if err != nil {
return err
}


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package main
package cmd
import (
goflag "flag"
@@ -22,48 +22,44 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"io"
)
type RootCmd struct {
type CmdRootOptions struct {
configFile string
cobraCommand *cobra.Command
}
var rootCommand = RootCmd{
cobraCommand: &cobra.Command{
Use: "channels",
Short: "channels applies software from a channel",
},
}
func Execute() {
goflag.Set("logtostderr", "true")
goflag.CommandLine.Parse([]string{})
if err := rootCommand.cobraCommand.Execute(); err != nil {
exitWithError(err)
}
}
func init() {
func Execute(f Factory, out io.Writer) error {
cobra.OnInitialize(initConfig)
cmd := rootCommand.cobraCommand
cmd := NewCmdRoot(f, out)
goflag.Set("logtostderr", "true")
goflag.CommandLine.Parse([]string{})
return cmd.Execute()
}
func NewCmdRoot(f Factory, out io.Writer) *cobra.Command {
options := &CmdRootOptions{}
cmd := &cobra.Command{
Use: "channels",
Short: "channels applies software from a channel",
}
cmd.PersistentFlags().AddGoFlagSet(goflag.CommandLine)
cmd.PersistentFlags().StringVar(&rootCommand.configFile, "config", "", "config file (default is $HOME/.channels.yaml)")
cmd.PersistentFlags().StringVar(&options.configFile, "config", "", "config file (default is $HOME/.channels.yaml)")
// create subcommands
cmd.AddCommand(NewCmdApply(f, out))
cmd.AddCommand(NewCmdGet(f, out))
return cmd
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if rootCommand.configFile != "" {
// enable ability to specify config file via flag
viper.SetConfigFile(rootCommand.configFile)
}
viper.SetConfigName(".channels") // name of config file (without extension)
viper.AddConfigPath("$HOME") // adding home directory as first search path
viper.AutomaticEnv() // read in environment variables that match
@@ -73,28 +69,3 @@ func initConfig() {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
}
func (c *RootCmd) AddCommand(cmd *cobra.Command) {
c.cobraCommand.AddCommand(cmd)
}
func (c *RootCmd) KubernetesClient() (kubernetes.Interface, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
configOverrides := &clientcmd.ConfigOverrides{
ClusterDefaults: clientcmd.ClusterDefaults,
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
config, err := kubeConfig.ClientConfig()
if err != nil {
return nil, fmt.Errorf("cannot load kubecfg settings: %v", err)
}
k8sClient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("cannot build kube client: %v", err)
}
return k8sClient, err
}

charts/kops/Chart.yaml (new file)

@@ -0,0 +1,7 @@
name: kops
home: https://github.com/kubernetes/kops
version: 1.0.0
description: Kubernetes kops API server
maintainers:
- name: Kris Nova
email: kris@nivenly.com


@@ -0,0 +1,16 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kops-api-server
spec:
replicas: 1
template:
metadata:
labels:
app: kops
spec:
containers:
- name: kops
image: krisnova/kops:latest
ports:
- containerPort: 80


@@ -0,0 +1,16 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kops-etcd
spec:
replicas: 1
template:
metadata:
labels:
app: etcd
spec:
containers:
- name: etcd
image: quay.io/coreos/etcd:latest
ports:
- containerPort: 2379

charts/kops/values.yaml (new file)

@@ -0,0 +1,5 @@
# kops global configuration
global:
# This is just a dummy directive for now..
storage_backend: "etcd"


@@ -17,13 +17,15 @@ limitations under the License.
package mockautoscaling
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
)
func (m *MockAutoscaling) AttachInstances(input *autoscaling.AttachInstancesInput) (*autoscaling.AttachInstancesOutput, error) {
for _, group := range m.Groups {
if group.AutoScalingGroupName == input.AutoScalingGroupName {
if aws.StringValue(group.AutoScalingGroupName) == aws.StringValue(input.AutoScalingGroupName) {
for _, instanceID := range input.InstanceIds {
group.Instances = append(group.Instances, &autoscaling.Instance{InstanceId: instanceID})
}
@@ -44,13 +46,36 @@ func (m *MockAutoscaling) CreateAutoScalingGroup(input *autoscaling.CreateAutoSc
return nil, nil
}
func (m *MockAutoscaling) DescribeAutoScalingGroups(input *autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) {
if len(input.AutoScalingGroupNames) == 0 {
return &autoscaling.DescribeAutoScalingGroupsOutput{
AutoScalingGroups: m.Groups,
}, nil
}
groups := []*autoscaling.Group{}
for _, group := range m.Groups {
for _, inputGroupName := range input.AutoScalingGroupNames {
if aws.StringValue(group.AutoScalingGroupName) == aws.StringValue(inputGroupName) {
groups = append(groups, group)
}
}
}
return &autoscaling.DescribeAutoScalingGroupsOutput{
AutoScalingGroups: groups,
}, nil
}
func (m *MockAutoscaling) TerminateInstanceInAutoScalingGroup(input *autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) {
for _, group := range m.Groups {
for i := range group.Instances {
if aws.StringValue(group.Instances[i].InstanceId) == aws.StringValue(input.InstanceId) {
group.Instances = append(group.Instances[:i], group.Instances[i+1:]...)
return nil, nil
}
}
}
return nil, nil
return nil, fmt.Errorf("Instance not found")
}
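
A sketch of a test exercising the new filtering behavior, assuming MockAutoscaling's Groups slice can be seeded directly (the methods above read it as a plain field):

```go
package mockautoscaling

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func TestDescribeAutoScalingGroupsFilter(t *testing.T) {
	// Assumes Groups is seedable directly, as the mock methods read it.
	m := &MockAutoscaling{
		Groups: []*autoscaling.Group{
			{AutoScalingGroupName: aws.String("nodes")},
			{AutoScalingGroupName: aws.String("masters")},
		},
	}

	// No names in the input: every group comes back.
	out, err := m.DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{})
	if err != nil {
		t.Fatal(err)
	}
	if len(out.AutoScalingGroups) != 2 {
		t.Fatalf("expected 2 groups, got %d", len(out.AutoScalingGroups))
	}

	// A name filter returns only the matching group.
	out, err = m.DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{
		AutoScalingGroupNames: []*string{aws.String("nodes")},
	})
	if err != nil {
		t.Fatal(err)
	}
	if len(out.AutoScalingGroups) != 1 {
		t.Fatalf("expected 1 group, got %d", len(out.AutoScalingGroups))
	}
}
```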


@@ -23,562 +23,557 @@ import (
"github.com/aws/aws-sdk-go/service/autoscaling"
)
func AttachInstancesRequest(*autoscaling.AttachInstancesInput) (*request.Request, *autoscaling.AttachInstancesOutput) {
func (m *MockAutoscaling) AttachInstancesRequest(*autoscaling.AttachInstancesInput) (*request.Request, *autoscaling.AttachInstancesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func AttachLoadBalancerTargetGroupsRequest(*autoscaling.AttachLoadBalancerTargetGroupsInput) (*request.Request, *autoscaling.AttachLoadBalancerTargetGroupsOutput) {
func (m *MockAutoscaling) AttachLoadBalancerTargetGroupsRequest(*autoscaling.AttachLoadBalancerTargetGroupsInput) (*request.Request, *autoscaling.AttachLoadBalancerTargetGroupsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func AttachLoadBalancerTargetGroups(*autoscaling.AttachLoadBalancerTargetGroupsInput) (*autoscaling.AttachLoadBalancerTargetGroupsOutput, error) {
func (m *MockAutoscaling) AttachLoadBalancerTargetGroups(*autoscaling.AttachLoadBalancerTargetGroupsInput) (*autoscaling.AttachLoadBalancerTargetGroupsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func AttachLoadBalancersRequest(*autoscaling.AttachLoadBalancersInput) (*request.Request, *autoscaling.AttachLoadBalancersOutput) {
func (m *MockAutoscaling) AttachLoadBalancersRequest(*autoscaling.AttachLoadBalancersInput) (*request.Request, *autoscaling.AttachLoadBalancersOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func AttachLoadBalancers(*autoscaling.AttachLoadBalancersInput) (*autoscaling.AttachLoadBalancersOutput, error) {
func (m *MockAutoscaling) AttachLoadBalancers(*autoscaling.AttachLoadBalancersInput) (*autoscaling.AttachLoadBalancersOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func CompleteLifecycleActionRequest(*autoscaling.CompleteLifecycleActionInput) (*request.Request, *autoscaling.CompleteLifecycleActionOutput) {
func (m *MockAutoscaling) CompleteLifecycleActionRequest(*autoscaling.CompleteLifecycleActionInput) (*request.Request, *autoscaling.CompleteLifecycleActionOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func CompleteLifecycleAction(*autoscaling.CompleteLifecycleActionInput) (*autoscaling.CompleteLifecycleActionOutput, error) {
func (m *MockAutoscaling) CompleteLifecycleAction(*autoscaling.CompleteLifecycleActionInput) (*autoscaling.CompleteLifecycleActionOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func CreateAutoScalingGroupRequest(*autoscaling.CreateAutoScalingGroupInput) (*request.Request, *autoscaling.CreateAutoScalingGroupOutput) {
func (m *MockAutoscaling) CreateAutoScalingGroupRequest(*autoscaling.CreateAutoScalingGroupInput) (*request.Request, *autoscaling.CreateAutoScalingGroupOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func CreateLaunchConfigurationRequest(*autoscaling.CreateLaunchConfigurationInput) (*request.Request, *autoscaling.CreateLaunchConfigurationOutput) {
func (m *MockAutoscaling) CreateLaunchConfigurationRequest(*autoscaling.CreateLaunchConfigurationInput) (*request.Request, *autoscaling.CreateLaunchConfigurationOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func CreateLaunchConfiguration(*autoscaling.CreateLaunchConfigurationInput) (*autoscaling.CreateLaunchConfigurationOutput, error) {
func (m *MockAutoscaling) CreateLaunchConfiguration(*autoscaling.CreateLaunchConfigurationInput) (*autoscaling.CreateLaunchConfigurationOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func CreateOrUpdateTagsRequest(*autoscaling.CreateOrUpdateTagsInput) (*request.Request, *autoscaling.CreateOrUpdateTagsOutput) {
func (m *MockAutoscaling) CreateOrUpdateTagsRequest(*autoscaling.CreateOrUpdateTagsInput) (*request.Request, *autoscaling.CreateOrUpdateTagsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func CreateOrUpdateTags(*autoscaling.CreateOrUpdateTagsInput) (*autoscaling.CreateOrUpdateTagsOutput, error) {
func (m *MockAutoscaling) CreateOrUpdateTags(*autoscaling.CreateOrUpdateTagsInput) (*autoscaling.CreateOrUpdateTagsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteAutoScalingGroupRequest(*autoscaling.DeleteAutoScalingGroupInput) (*request.Request, *autoscaling.DeleteAutoScalingGroupOutput) {
func (m *MockAutoscaling) DeleteAutoScalingGroupRequest(*autoscaling.DeleteAutoScalingGroupInput) (*request.Request, *autoscaling.DeleteAutoScalingGroupOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteAutoScalingGroup(*autoscaling.DeleteAutoScalingGroupInput) (*autoscaling.DeleteAutoScalingGroupOutput, error) {
func (m *MockAutoscaling) DeleteAutoScalingGroup(*autoscaling.DeleteAutoScalingGroupInput) (*autoscaling.DeleteAutoScalingGroupOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteLaunchConfigurationRequest(*autoscaling.DeleteLaunchConfigurationInput) (*request.Request, *autoscaling.DeleteLaunchConfigurationOutput) {
func (m *MockAutoscaling) DeleteLaunchConfigurationRequest(*autoscaling.DeleteLaunchConfigurationInput) (*request.Request, *autoscaling.DeleteLaunchConfigurationOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteLaunchConfiguration(*autoscaling.DeleteLaunchConfigurationInput) (*autoscaling.DeleteLaunchConfigurationOutput, error) {
func (m *MockAutoscaling) DeleteLaunchConfiguration(*autoscaling.DeleteLaunchConfigurationInput) (*autoscaling.DeleteLaunchConfigurationOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteLifecycleHookRequest(*autoscaling.DeleteLifecycleHookInput) (*request.Request, *autoscaling.DeleteLifecycleHookOutput) {
func (m *MockAutoscaling) DeleteLifecycleHookRequest(*autoscaling.DeleteLifecycleHookInput) (*request.Request, *autoscaling.DeleteLifecycleHookOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteLifecycleHook(*autoscaling.DeleteLifecycleHookInput) (*autoscaling.DeleteLifecycleHookOutput, error) {
func (m *MockAutoscaling) DeleteLifecycleHook(*autoscaling.DeleteLifecycleHookInput) (*autoscaling.DeleteLifecycleHookOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteNotificationConfigurationRequest(*autoscaling.DeleteNotificationConfigurationInput) (*request.Request, *autoscaling.DeleteNotificationConfigurationOutput) {
func (m *MockAutoscaling) DeleteNotificationConfigurationRequest(*autoscaling.DeleteNotificationConfigurationInput) (*request.Request, *autoscaling.DeleteNotificationConfigurationOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteNotificationConfiguration(*autoscaling.DeleteNotificationConfigurationInput) (*autoscaling.DeleteNotificationConfigurationOutput, error) {
func (m *MockAutoscaling) DeleteNotificationConfiguration(*autoscaling.DeleteNotificationConfigurationInput) (*autoscaling.DeleteNotificationConfigurationOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DeletePolicyRequest(*autoscaling.DeletePolicyInput) (*request.Request, *autoscaling.DeletePolicyOutput) {
func (m *MockAutoscaling) DeletePolicyRequest(*autoscaling.DeletePolicyInput) (*request.Request, *autoscaling.DeletePolicyOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DeletePolicy(*autoscaling.DeletePolicyInput) (*autoscaling.DeletePolicyOutput, error) {
func (m *MockAutoscaling) DeletePolicy(*autoscaling.DeletePolicyInput) (*autoscaling.DeletePolicyOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteScheduledActionRequest(*autoscaling.DeleteScheduledActionInput) (*request.Request, *autoscaling.DeleteScheduledActionOutput) {
func (m *MockAutoscaling) DeleteScheduledActionRequest(*autoscaling.DeleteScheduledActionInput) (*request.Request, *autoscaling.DeleteScheduledActionOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteScheduledAction(*autoscaling.DeleteScheduledActionInput) (*autoscaling.DeleteScheduledActionOutput, error) {
func (m *MockAutoscaling) DeleteScheduledAction(*autoscaling.DeleteScheduledActionInput) (*autoscaling.DeleteScheduledActionOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteTagsRequest(*autoscaling.DeleteTagsInput) (*request.Request, *autoscaling.DeleteTagsOutput) {
func (m *MockAutoscaling) DeleteTagsRequest(*autoscaling.DeleteTagsInput) (*request.Request, *autoscaling.DeleteTagsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DeleteTags(*autoscaling.DeleteTagsInput) (*autoscaling.DeleteTagsOutput, error) {
func (m *MockAutoscaling) DeleteTags(*autoscaling.DeleteTagsInput) (*autoscaling.DeleteTagsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAccountLimitsRequest(*autoscaling.DescribeAccountLimitsInput) (*request.Request, *autoscaling.DescribeAccountLimitsOutput) {
func (m *MockAutoscaling) DescribeAccountLimitsRequest(*autoscaling.DescribeAccountLimitsInput) (*request.Request, *autoscaling.DescribeAccountLimitsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAccountLimits(*autoscaling.DescribeAccountLimitsInput) (*autoscaling.DescribeAccountLimitsOutput, error) {
func (m *MockAutoscaling) DescribeAccountLimits(*autoscaling.DescribeAccountLimitsInput) (*autoscaling.DescribeAccountLimitsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAdjustmentTypesRequest(*autoscaling.DescribeAdjustmentTypesInput) (*request.Request, *autoscaling.DescribeAdjustmentTypesOutput) {
func (m *MockAutoscaling) DescribeAdjustmentTypesRequest(*autoscaling.DescribeAdjustmentTypesInput) (*request.Request, *autoscaling.DescribeAdjustmentTypesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAdjustmentTypes(*autoscaling.DescribeAdjustmentTypesInput) (*autoscaling.DescribeAdjustmentTypesOutput, error) {
func (m *MockAutoscaling) DescribeAdjustmentTypes(*autoscaling.DescribeAdjustmentTypesInput) (*autoscaling.DescribeAdjustmentTypesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAutoScalingGroupsRequest(*autoscaling.DescribeAutoScalingGroupsInput) (*request.Request, *autoscaling.DescribeAutoScalingGroupsOutput) {
func (m *MockAutoscaling) DescribeAutoScalingGroupsRequest(*autoscaling.DescribeAutoScalingGroupsInput) (*request.Request, *autoscaling.DescribeAutoScalingGroupsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAutoScalingGroupsPages(*autoscaling.DescribeAutoScalingGroupsInput, func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) error {
func (m *MockAutoscaling) DescribeAutoScalingGroupsPages(*autoscaling.DescribeAutoScalingGroupsInput, func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) error {
log.Fatal("Not implemented")
return nil
}
func DescribeAutoScalingInstancesRequest(*autoscaling.DescribeAutoScalingInstancesInput) (*request.Request, *autoscaling.DescribeAutoScalingInstancesOutput) {
func (m *MockAutoscaling) DescribeAutoScalingInstancesRequest(*autoscaling.DescribeAutoScalingInstancesInput) (*request.Request, *autoscaling.DescribeAutoScalingInstancesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAutoScalingInstances(*autoscaling.DescribeAutoScalingInstancesInput) (*autoscaling.DescribeAutoScalingInstancesOutput, error) {
func (m *MockAutoscaling) DescribeAutoScalingInstances(*autoscaling.DescribeAutoScalingInstancesInput) (*autoscaling.DescribeAutoScalingInstancesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAutoScalingInstancesPages(*autoscaling.DescribeAutoScalingInstancesInput, func(*autoscaling.DescribeAutoScalingInstancesOutput, bool) bool) error {
func (m *MockAutoscaling) DescribeAutoScalingInstancesPages(*autoscaling.DescribeAutoScalingInstancesInput, func(*autoscaling.DescribeAutoScalingInstancesOutput, bool) bool) error {
log.Fatal("Not implemented")
return nil
}
func DescribeAutoScalingNotificationTypesRequest(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*request.Request, *autoscaling.DescribeAutoScalingNotificationTypesOutput) {
func (m *MockAutoscaling) DescribeAutoScalingNotificationTypesRequest(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*request.Request, *autoscaling.DescribeAutoScalingNotificationTypesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeAutoScalingNotificationTypes(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*autoscaling.DescribeAutoScalingNotificationTypesOutput, error) {
func (m *MockAutoscaling) DescribeAutoScalingNotificationTypes(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*autoscaling.DescribeAutoScalingNotificationTypesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLaunchConfigurationsRequest(*autoscaling.DescribeLaunchConfigurationsInput) (*request.Request, *autoscaling.DescribeLaunchConfigurationsOutput) {
func (m *MockAutoscaling) DescribeLaunchConfigurationsRequest(*autoscaling.DescribeLaunchConfigurationsInput) (*request.Request, *autoscaling.DescribeLaunchConfigurationsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLaunchConfigurations(*autoscaling.DescribeLaunchConfigurationsInput) (*autoscaling.DescribeLaunchConfigurationsOutput, error) {
func (m *MockAutoscaling) DescribeLaunchConfigurations(*autoscaling.DescribeLaunchConfigurationsInput) (*autoscaling.DescribeLaunchConfigurationsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLaunchConfigurationsPages(*autoscaling.DescribeLaunchConfigurationsInput, func(*autoscaling.DescribeLaunchConfigurationsOutput, bool) bool) error {
func (m *MockAutoscaling) DescribeLaunchConfigurationsPages(*autoscaling.DescribeLaunchConfigurationsInput, func(*autoscaling.DescribeLaunchConfigurationsOutput, bool) bool) error {
log.Fatal("Not implemented")
return nil
}
func DescribeLifecycleHookTypesRequest(*autoscaling.DescribeLifecycleHookTypesInput) (*request.Request, *autoscaling.DescribeLifecycleHookTypesOutput) {
func (m *MockAutoscaling) DescribeLifecycleHookTypesRequest(*autoscaling.DescribeLifecycleHookTypesInput) (*request.Request, *autoscaling.DescribeLifecycleHookTypesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLifecycleHookTypes(*autoscaling.DescribeLifecycleHookTypesInput) (*autoscaling.DescribeLifecycleHookTypesOutput, error) {
func (m *MockAutoscaling) DescribeLifecycleHookTypes(*autoscaling.DescribeLifecycleHookTypesInput) (*autoscaling.DescribeLifecycleHookTypesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLifecycleHooksRequest(*autoscaling.DescribeLifecycleHooksInput) (*request.Request, *autoscaling.DescribeLifecycleHooksOutput) {
func (m *MockAutoscaling) DescribeLifecycleHooksRequest(*autoscaling.DescribeLifecycleHooksInput) (*request.Request, *autoscaling.DescribeLifecycleHooksOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLifecycleHooks(*autoscaling.DescribeLifecycleHooksInput) (*autoscaling.DescribeLifecycleHooksOutput, error) {
func (m *MockAutoscaling) DescribeLifecycleHooks(*autoscaling.DescribeLifecycleHooksInput) (*autoscaling.DescribeLifecycleHooksOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLoadBalancerTargetGroupsRequest(*autoscaling.DescribeLoadBalancerTargetGroupsInput) (*request.Request, *autoscaling.DescribeLoadBalancerTargetGroupsOutput) {
func (m *MockAutoscaling) DescribeLoadBalancerTargetGroupsRequest(*autoscaling.DescribeLoadBalancerTargetGroupsInput) (*request.Request, *autoscaling.DescribeLoadBalancerTargetGroupsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLoadBalancerTargetGroups(*autoscaling.DescribeLoadBalancerTargetGroupsInput) (*autoscaling.DescribeLoadBalancerTargetGroupsOutput, error) {
func (m *MockAutoscaling) DescribeLoadBalancerTargetGroups(*autoscaling.DescribeLoadBalancerTargetGroupsInput) (*autoscaling.DescribeLoadBalancerTargetGroupsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLoadBalancersRequest(*autoscaling.DescribeLoadBalancersInput) (*request.Request, *autoscaling.DescribeLoadBalancersOutput) {
func (m *MockAutoscaling) DescribeLoadBalancersRequest(*autoscaling.DescribeLoadBalancersInput) (*request.Request, *autoscaling.DescribeLoadBalancersOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeLoadBalancers(*autoscaling.DescribeLoadBalancersInput) (*autoscaling.DescribeLoadBalancersOutput, error) {
func (m *MockAutoscaling) DescribeLoadBalancers(*autoscaling.DescribeLoadBalancersInput) (*autoscaling.DescribeLoadBalancersOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeMetricCollectionTypesRequest(*autoscaling.DescribeMetricCollectionTypesInput) (*request.Request, *autoscaling.DescribeMetricCollectionTypesOutput) {
func (m *MockAutoscaling) DescribeMetricCollectionTypesRequest(*autoscaling.DescribeMetricCollectionTypesInput) (*request.Request, *autoscaling.DescribeMetricCollectionTypesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeMetricCollectionTypes(*autoscaling.DescribeMetricCollectionTypesInput) (*autoscaling.DescribeMetricCollectionTypesOutput, error) {
func (m *MockAutoscaling) DescribeMetricCollectionTypes(*autoscaling.DescribeMetricCollectionTypesInput) (*autoscaling.DescribeMetricCollectionTypesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeNotificationConfigurationsRequest(*autoscaling.DescribeNotificationConfigurationsInput) (*request.Request, *autoscaling.DescribeNotificationConfigurationsOutput) {
func (m *MockAutoscaling) DescribeNotificationConfigurationsRequest(*autoscaling.DescribeNotificationConfigurationsInput) (*request.Request, *autoscaling.DescribeNotificationConfigurationsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeNotificationConfigurations(*autoscaling.DescribeNotificationConfigurationsInput) (*autoscaling.DescribeNotificationConfigurationsOutput, error) {
func (m *MockAutoscaling) DescribeNotificationConfigurations(*autoscaling.DescribeNotificationConfigurationsInput) (*autoscaling.DescribeNotificationConfigurationsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeNotificationConfigurationsPages(*autoscaling.DescribeNotificationConfigurationsInput, func(*autoscaling.DescribeNotificationConfigurationsOutput, bool) bool) error {
func (m *MockAutoscaling) DescribeNotificationConfigurationsPages(*autoscaling.DescribeNotificationConfigurationsInput, func(*autoscaling.DescribeNotificationConfigurationsOutput, bool) bool) error {
log.Fatal("Not implemented")
return nil
}
func DescribePoliciesRequest(*autoscaling.DescribePoliciesInput) (*request.Request, *autoscaling.DescribePoliciesOutput) {
func (m *MockAutoscaling) DescribePoliciesRequest(*autoscaling.DescribePoliciesInput) (*request.Request, *autoscaling.DescribePoliciesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribePolicies(*autoscaling.DescribePoliciesInput) (*autoscaling.DescribePoliciesOutput, error) {
func (m *MockAutoscaling) DescribePolicies(*autoscaling.DescribePoliciesInput) (*autoscaling.DescribePoliciesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribePoliciesPages(*autoscaling.DescribePoliciesInput, func(*autoscaling.DescribePoliciesOutput, bool) bool) error {
func (m *MockAutoscaling) DescribePoliciesPages(*autoscaling.DescribePoliciesInput, func(*autoscaling.DescribePoliciesOutput, bool) bool) error {
log.Fatal("Not implemented")
return nil
}
func DescribeScalingActivitiesRequest(*autoscaling.DescribeScalingActivitiesInput) (*request.Request, *autoscaling.DescribeScalingActivitiesOutput) {
func (m *MockAutoscaling) DescribeScalingActivitiesRequest(*autoscaling.DescribeScalingActivitiesInput) (*request.Request, *autoscaling.DescribeScalingActivitiesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeScalingActivities(*autoscaling.DescribeScalingActivitiesInput) (*autoscaling.DescribeScalingActivitiesOutput, error) {
func (m *MockAutoscaling) DescribeScalingActivities(*autoscaling.DescribeScalingActivitiesInput) (*autoscaling.DescribeScalingActivitiesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeScalingActivitiesPages(*autoscaling.DescribeScalingActivitiesInput, func(*autoscaling.DescribeScalingActivitiesOutput, bool) bool) error {
func (m *MockAutoscaling) DescribeScalingActivitiesPages(*autoscaling.DescribeScalingActivitiesInput, func(*autoscaling.DescribeScalingActivitiesOutput, bool) bool) error {
log.Fatal("Not implemented")
return nil
}
func DescribeScalingProcessTypesRequest(*autoscaling.DescribeScalingProcessTypesInput) (*request.Request, *autoscaling.DescribeScalingProcessTypesOutput) {
func (m *MockAutoscaling) DescribeScalingProcessTypesRequest(*autoscaling.DescribeScalingProcessTypesInput) (*request.Request, *autoscaling.DescribeScalingProcessTypesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeScalingProcessTypes(*autoscaling.DescribeScalingProcessTypesInput) (*autoscaling.DescribeScalingProcessTypesOutput, error) {
func (m *MockAutoscaling) DescribeScalingProcessTypes(*autoscaling.DescribeScalingProcessTypesInput) (*autoscaling.DescribeScalingProcessTypesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeScheduledActionsRequest(*autoscaling.DescribeScheduledActionsInput) (*request.Request, *autoscaling.DescribeScheduledActionsOutput) {
func (m *MockAutoscaling) DescribeScheduledActionsRequest(*autoscaling.DescribeScheduledActionsInput) (*request.Request, *autoscaling.DescribeScheduledActionsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeScheduledActions(*autoscaling.DescribeScheduledActionsInput) (*autoscaling.DescribeScheduledActionsOutput, error) {
func (m *MockAutoscaling) DescribeScheduledActions(*autoscaling.DescribeScheduledActionsInput) (*autoscaling.DescribeScheduledActionsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeScheduledActionsPages(*autoscaling.DescribeScheduledActionsInput, func(*autoscaling.DescribeScheduledActionsOutput, bool) bool) error {
func (m *MockAutoscaling) DescribeScheduledActionsPages(*autoscaling.DescribeScheduledActionsInput, func(*autoscaling.DescribeScheduledActionsOutput, bool) bool) error {
log.Fatal("Not implemented")
return nil
}
func DescribeTagsRequest(*autoscaling.DescribeTagsInput) (*request.Request, *autoscaling.DescribeTagsOutput) {
func (m *MockAutoscaling) DescribeTagsRequest(*autoscaling.DescribeTagsInput) (*request.Request, *autoscaling.DescribeTagsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeTags(*autoscaling.DescribeTagsInput) (*autoscaling.DescribeTagsOutput, error) {
func (m *MockAutoscaling) DescribeTags(*autoscaling.DescribeTagsInput) (*autoscaling.DescribeTagsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeTagsPages(*autoscaling.DescribeTagsInput, func(*autoscaling.DescribeTagsOutput, bool) bool) error {
func (m *MockAutoscaling) DescribeTagsPages(*autoscaling.DescribeTagsInput, func(*autoscaling.DescribeTagsOutput, bool) bool) error {
log.Fatal("Not implemented")
return nil
}
func DescribeTerminationPolicyTypesRequest(*autoscaling.DescribeTerminationPolicyTypesInput) (*request.Request, *autoscaling.DescribeTerminationPolicyTypesOutput) {
func (m *MockAutoscaling) DescribeTerminationPolicyTypesRequest(*autoscaling.DescribeTerminationPolicyTypesInput) (*request.Request, *autoscaling.DescribeTerminationPolicyTypesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DescribeTerminationPolicyTypes(*autoscaling.DescribeTerminationPolicyTypesInput) (*autoscaling.DescribeTerminationPolicyTypesOutput, error) {
func (m *MockAutoscaling) DescribeTerminationPolicyTypes(*autoscaling.DescribeTerminationPolicyTypesInput) (*autoscaling.DescribeTerminationPolicyTypesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DetachInstancesRequest(*autoscaling.DetachInstancesInput) (*request.Request, *autoscaling.DetachInstancesOutput) {
func (m *MockAutoscaling) DetachInstancesRequest(*autoscaling.DetachInstancesInput) (*request.Request, *autoscaling.DetachInstancesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DetachInstances(*autoscaling.DetachInstancesInput) (*autoscaling.DetachInstancesOutput, error) {
func (m *MockAutoscaling) DetachInstances(*autoscaling.DetachInstancesInput) (*autoscaling.DetachInstancesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DetachLoadBalancerTargetGroupsRequest(*autoscaling.DetachLoadBalancerTargetGroupsInput) (*request.Request, *autoscaling.DetachLoadBalancerTargetGroupsOutput) {
func (m *MockAutoscaling) DetachLoadBalancerTargetGroupsRequest(*autoscaling.DetachLoadBalancerTargetGroupsInput) (*request.Request, *autoscaling.DetachLoadBalancerTargetGroupsOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DetachLoadBalancerTargetGroups(*autoscaling.DetachLoadBalancerTargetGroupsInput) (*autoscaling.DetachLoadBalancerTargetGroupsOutput, error) {
func (m *MockAutoscaling) DetachLoadBalancerTargetGroups(*autoscaling.DetachLoadBalancerTargetGroupsInput) (*autoscaling.DetachLoadBalancerTargetGroupsOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DetachLoadBalancersRequest(*autoscaling.DetachLoadBalancersInput) (*request.Request, *autoscaling.DetachLoadBalancersOutput) {
func (m *MockAutoscaling) DetachLoadBalancersRequest(*autoscaling.DetachLoadBalancersInput) (*request.Request, *autoscaling.DetachLoadBalancersOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DetachLoadBalancers(*autoscaling.DetachLoadBalancersInput) (*autoscaling.DetachLoadBalancersOutput, error) {
func (m *MockAutoscaling) DetachLoadBalancers(*autoscaling.DetachLoadBalancersInput) (*autoscaling.DetachLoadBalancersOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func DisableMetricsCollectionRequest(*autoscaling.DisableMetricsCollectionInput) (*request.Request, *autoscaling.DisableMetricsCollectionOutput) {
func (m *MockAutoscaling) DisableMetricsCollectionRequest(*autoscaling.DisableMetricsCollectionInput) (*request.Request, *autoscaling.DisableMetricsCollectionOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func DisableMetricsCollection(*autoscaling.DisableMetricsCollectionInput) (*autoscaling.DisableMetricsCollectionOutput, error) {
func (m *MockAutoscaling) DisableMetricsCollection(*autoscaling.DisableMetricsCollectionInput) (*autoscaling.DisableMetricsCollectionOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func EnableMetricsCollectionRequest(*autoscaling.EnableMetricsCollectionInput) (*request.Request, *autoscaling.EnableMetricsCollectionOutput) {
func (m *MockAutoscaling) EnableMetricsCollectionRequest(*autoscaling.EnableMetricsCollectionInput) (*request.Request, *autoscaling.EnableMetricsCollectionOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func EnableMetricsCollection(*autoscaling.EnableMetricsCollectionInput) (*autoscaling.EnableMetricsCollectionOutput, error) {
func (m *MockAutoscaling) EnableMetricsCollection(*autoscaling.EnableMetricsCollectionInput) (*autoscaling.EnableMetricsCollectionOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func EnterStandbyRequest(*autoscaling.EnterStandbyInput) (*request.Request, *autoscaling.EnterStandbyOutput) {
func (m *MockAutoscaling) EnterStandbyRequest(*autoscaling.EnterStandbyInput) (*request.Request, *autoscaling.EnterStandbyOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func EnterStandby(*autoscaling.EnterStandbyInput) (*autoscaling.EnterStandbyOutput, error) {
func (m *MockAutoscaling) EnterStandby(*autoscaling.EnterStandbyInput) (*autoscaling.EnterStandbyOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func ExecutePolicyRequest(*autoscaling.ExecutePolicyInput) (*request.Request, *autoscaling.ExecutePolicyOutput) {
func (m *MockAutoscaling) ExecutePolicyRequest(*autoscaling.ExecutePolicyInput) (*request.Request, *autoscaling.ExecutePolicyOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func ExecutePolicy(*autoscaling.ExecutePolicyInput) (*autoscaling.ExecutePolicyOutput, error) {
func (m *MockAutoscaling) ExecutePolicy(*autoscaling.ExecutePolicyInput) (*autoscaling.ExecutePolicyOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func ExitStandbyRequest(*autoscaling.ExitStandbyInput) (*request.Request, *autoscaling.ExitStandbyOutput) {
func (m *MockAutoscaling) ExitStandbyRequest(*autoscaling.ExitStandbyInput) (*request.Request, *autoscaling.ExitStandbyOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func ExitStandby(*autoscaling.ExitStandbyInput) (*autoscaling.ExitStandbyOutput, error) {
func (m *MockAutoscaling) ExitStandby(*autoscaling.ExitStandbyInput) (*autoscaling.ExitStandbyOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func PutLifecycleHookRequest(*autoscaling.PutLifecycleHookInput) (*request.Request, *autoscaling.PutLifecycleHookOutput) {
func (m *MockAutoscaling) PutLifecycleHookRequest(*autoscaling.PutLifecycleHookInput) (*request.Request, *autoscaling.PutLifecycleHookOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func PutLifecycleHook(*autoscaling.PutLifecycleHookInput) (*autoscaling.PutLifecycleHookOutput, error) {
func (m *MockAutoscaling) PutLifecycleHook(*autoscaling.PutLifecycleHookInput) (*autoscaling.PutLifecycleHookOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func PutNotificationConfigurationRequest(*autoscaling.PutNotificationConfigurationInput) (*request.Request, *autoscaling.PutNotificationConfigurationOutput) {
func (m *MockAutoscaling) PutNotificationConfigurationRequest(*autoscaling.PutNotificationConfigurationInput) (*request.Request, *autoscaling.PutNotificationConfigurationOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func PutNotificationConfiguration(*autoscaling.PutNotificationConfigurationInput) (*autoscaling.PutNotificationConfigurationOutput, error) {
func (m *MockAutoscaling) PutNotificationConfiguration(*autoscaling.PutNotificationConfigurationInput) (*autoscaling.PutNotificationConfigurationOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func PutScalingPolicyRequest(*autoscaling.PutScalingPolicyInput) (*request.Request, *autoscaling.PutScalingPolicyOutput) {
func (m *MockAutoscaling) PutScalingPolicyRequest(*autoscaling.PutScalingPolicyInput) (*request.Request, *autoscaling.PutScalingPolicyOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func PutScalingPolicy(*autoscaling.PutScalingPolicyInput) (*autoscaling.PutScalingPolicyOutput, error) {
func (m *MockAutoscaling) PutScalingPolicy(*autoscaling.PutScalingPolicyInput) (*autoscaling.PutScalingPolicyOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func PutScheduledUpdateGroupActionRequest(*autoscaling.PutScheduledUpdateGroupActionInput) (*request.Request, *autoscaling.PutScheduledUpdateGroupActionOutput) {
func (m *MockAutoscaling) PutScheduledUpdateGroupActionRequest(*autoscaling.PutScheduledUpdateGroupActionInput) (*request.Request, *autoscaling.PutScheduledUpdateGroupActionOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func PutScheduledUpdateGroupAction(*autoscaling.PutScheduledUpdateGroupActionInput) (*autoscaling.PutScheduledUpdateGroupActionOutput, error) {
func (m *MockAutoscaling) PutScheduledUpdateGroupAction(*autoscaling.PutScheduledUpdateGroupActionInput) (*autoscaling.PutScheduledUpdateGroupActionOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func RecordLifecycleActionHeartbeatRequest(*autoscaling.RecordLifecycleActionHeartbeatInput) (*request.Request, *autoscaling.RecordLifecycleActionHeartbeatOutput) {
func (m *MockAutoscaling) RecordLifecycleActionHeartbeatRequest(*autoscaling.RecordLifecycleActionHeartbeatInput) (*request.Request, *autoscaling.RecordLifecycleActionHeartbeatOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func RecordLifecycleActionHeartbeat(*autoscaling.RecordLifecycleActionHeartbeatInput) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) {
func (m *MockAutoscaling) RecordLifecycleActionHeartbeat(*autoscaling.RecordLifecycleActionHeartbeatInput) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func ResumeProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.ResumeProcessesOutput) {
func (m *MockAutoscaling) ResumeProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.ResumeProcessesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func ResumeProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.ResumeProcessesOutput, error) {
func (m *MockAutoscaling) ResumeProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.ResumeProcessesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func SetDesiredCapacityRequest(*autoscaling.SetDesiredCapacityInput) (*request.Request, *autoscaling.SetDesiredCapacityOutput) {
func (m *MockAutoscaling) SetDesiredCapacityRequest(*autoscaling.SetDesiredCapacityInput) (*request.Request, *autoscaling.SetDesiredCapacityOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func SetDesiredCapacity(*autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) {
func (m *MockAutoscaling) SetDesiredCapacity(*autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func SetInstanceHealthRequest(*autoscaling.SetInstanceHealthInput) (*request.Request, *autoscaling.SetInstanceHealthOutput) {
func (m *MockAutoscaling) SetInstanceHealthRequest(*autoscaling.SetInstanceHealthInput) (*request.Request, *autoscaling.SetInstanceHealthOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func SetInstanceHealth(*autoscaling.SetInstanceHealthInput) (*autoscaling.SetInstanceHealthOutput, error) {
func (m *MockAutoscaling) SetInstanceHealth(*autoscaling.SetInstanceHealthInput) (*autoscaling.SetInstanceHealthOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func SetInstanceProtectionRequest(*autoscaling.SetInstanceProtectionInput) (*request.Request, *autoscaling.SetInstanceProtectionOutput) {
func (m *MockAutoscaling) SetInstanceProtectionRequest(*autoscaling.SetInstanceProtectionInput) (*request.Request, *autoscaling.SetInstanceProtectionOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func SetInstanceProtection(*autoscaling.SetInstanceProtectionInput) (*autoscaling.SetInstanceProtectionOutput, error) {
func (m *MockAutoscaling) SetInstanceProtection(*autoscaling.SetInstanceProtectionInput) (*autoscaling.SetInstanceProtectionOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func SuspendProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.SuspendProcessesOutput) {
func (m *MockAutoscaling) SuspendProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.SuspendProcessesOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func SuspendProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.SuspendProcessesOutput, error) {
func (m *MockAutoscaling) SuspendProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.SuspendProcessesOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func TerminateInstanceInAutoScalingGroupRequest(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*request.Request, *autoscaling.TerminateInstanceInAutoScalingGroupOutput) {
func (m *MockAutoscaling) TerminateInstanceInAutoScalingGroupRequest(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*request.Request, *autoscaling.TerminateInstanceInAutoScalingGroupOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func UpdateAutoScalingGroupRequest(*autoscaling.UpdateAutoScalingGroupInput) (*request.Request, *autoscaling.UpdateAutoScalingGroupOutput) {
func (m *MockAutoscaling) UpdateAutoScalingGroupRequest(*autoscaling.UpdateAutoScalingGroupInput) (*request.Request, *autoscaling.UpdateAutoScalingGroupOutput) {
log.Fatal("Not implemented")
return nil, nil
}
func UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) {
func (m *MockAutoscaling) UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) {
log.Fatal("Not implemented")
return nil, nil
}
func WaitUntilGroupExists(*autoscaling.DescribeAutoScalingGroupsInput) error {
func (m *MockAutoscaling) WaitUntilGroupExists(*autoscaling.DescribeAutoScalingGroupsInput) error {
log.Fatal("Not implemented")
return nil
}
func WaitUntilGroupInService(*autoscaling.DescribeAutoScalingGroupsInput) error {
func (m *MockAutoscaling) WaitUntilGroupInService(*autoscaling.DescribeAutoScalingGroupsInput) error {
log.Fatal("Not implemented")
return nil
}
func WaitUntilGroupNotExists(*autoscaling.DescribeAutoScalingGroupsInput) error {
func (m *MockAutoscaling) WaitUntilGroupNotExists(*autoscaling.DescribeAutoScalingGroupsInput) error {
log.Fatal("Not implemented")
return nil
}

View File

@ -690,14 +690,6 @@ func (m *MockEC2) DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.
panic("Not implemented")
return nil
}
func (m *MockEC2) DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput) {
panic("Not implemented")
return nil, nil
}
func (m *MockEC2) DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error) {
panic("Not implemented")
return nil, nil
}
func (m *MockEC2) DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput) {
panic("Not implemented")
return nil, nil

View File

@ -144,3 +144,15 @@ func (m *MockEC2) DescribeVpcAttribute(request *ec2.DescribeVpcAttributeInput) (
return response, nil
}
func (m *MockEC2) DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput) {
panic("Not implemented")
return nil, nil
}
func (m *MockEC2) DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error) {
return &ec2.DescribeInternetGatewaysOutput{
InternetGateways: []*ec2.InternetGateway{{
InternetGatewayId: aws.String("fake-ig"),
}},
}, nil
}

View File

@ -27,6 +27,7 @@ type zoneInfo struct {
ID string
hostedZone *route53.HostedZone
records []*route53.ResourceRecordSet
vpcs []*route53.VPC
}
type MockRoute53 struct {
@ -48,10 +49,11 @@ func (m *MockRoute53) findZone(hostedZoneId string) *zoneInfo {
return nil
}
func (m *MockRoute53) MockCreateZone(z *route53.HostedZone) {
func (m *MockRoute53) MockCreateZone(z *route53.HostedZone, vpcs []*route53.VPC) {
zi := &zoneInfo{
ID: aws.StringValue(z.Id),
hostedZone: z,
vpcs: vpcs,
}
m.Zones = append(m.Zones, zi)
}

View File

@ -45,7 +45,7 @@ func (m *MockRoute53) GetHostedZone(request *route53.GetHostedZoneInput) (*route
response := &route53.GetHostedZoneOutput{
// DelegationSet ???
HostedZone: &copy,
// VPCs
VPCs: zone.vpcs,
}
return response, nil
}
@ -89,6 +89,13 @@ func (m *MockRoute53) ListHostedZonesByNameRequest(*route53.ListHostedZonesByNam
}
func (m *MockRoute53) ListHostedZonesByName(*route53.ListHostedZonesByNameInput) (*route53.ListHostedZonesByNameOutput, error) {
panic("MockRoute53 ListHostedZonesByName not implemented")
return nil, nil
var zones []*route53.HostedZone
for _, z := range m.Zones {
zones = append(zones, z.hostedZone)
}
return &route53.ListHostedZonesByNameOutput{
HostedZones: zones,
}, nil
}

View File

@ -14,19 +14,33 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeup
package main
type ProtokubeFlags struct {
Master *bool `json:"master,omitempty" flag:"master"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`
import (
"flag"
"os"
"runtime"
DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`
"k8s.io/kops/pkg/apiserver/cmd/server"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/logs"
"math/rand"
"time"
)
Zone []string `json:"zone,omitempty" flag:"zone"`
func main() {
rand.Seed(time.Now().UTC().UnixNano())
Channels []string `json:"channels,omitempty" flag:"channels"`
logs.InitLogs()
defer logs.FlushLogs()
DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
cmd := server.NewCommandStartKopsServer(os.Stdout, os.Stderr)
cmd.Flags().AddGoFlagSet(flag.CommandLine)
if err := cmd.Execute(); err != nil {
cmdutil.CheckErr(err)
}
}

View File

@ -60,7 +60,7 @@ func NewCmdCompletion(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "completion",
Short: "Output shell completion code for the given shell (bash)",
Short: "Output shell completion code for the given shell (bash).",
Long: longDescription,
Example: example,
Run: func(cmd *cobra.Command, args []string) {

View File

@ -21,6 +21,7 @@ import (
"io"
"bytes"
"github.com/golang/glog"
"github.com/spf13/cobra"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -43,7 +44,7 @@ func NewCmdCreate(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "create -f FILENAME",
Short: "Create a resource by filename or stdin",
Short: "Create a resource by filename or stdin.",
Run: func(cmd *cobra.Command, args []string) {
if cmdutil.IsFilenameEmpty(options.Filenames) {
cmd.Help()
@ -83,6 +84,10 @@ func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
codec := codecs.UniversalDecoder(kopsapi.SchemeGroupVersion)
var clusterName = ""
//var cSpec = false
var sb bytes.Buffer
fmt.Fprintf(&sb, "\n")
for _, f := range c.Filenames {
contents, err := vfs.Context.ReadFile(f)
if err != nil {
@ -90,7 +95,6 @@ func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
}
sections := bytes.Split(contents, []byte("\n---\n"))
for _, section := range sections {
defaults := &schema.GroupVersionKind{
Group: v1alpha1.SchemeGroupVersion.Group,
@ -110,6 +114,7 @@ func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
}
return fmt.Errorf("error creating federation: %v", err)
}
fmt.Fprintf(&sb, "Created federation/%q\n", v.ObjectMeta.Name)
case *kopsapi.Cluster:
// Adding a PerformAssignments() call here as the user might be trying to use
@ -124,10 +129,13 @@ func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
return fmt.Errorf("cluster %q already exists", v.ObjectMeta.Name)
}
return fmt.Errorf("error creating cluster: %v", err)
} else {
fmt.Fprintf(&sb, "Created cluster/%s\n", v.ObjectMeta.Name)
//cSpec = true
}
case *kopsapi.InstanceGroup:
clusterName := v.ObjectMeta.Labels[kopsapi.LabelClusterName]
clusterName = v.ObjectMeta.Labels[kopsapi.LabelClusterName]
if clusterName == "" {
return fmt.Errorf("must specify %q label with cluster name to create instanceGroup", kopsapi.LabelClusterName)
}
@ -137,15 +145,29 @@ func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
return fmt.Errorf("instanceGroup %q already exists", v.ObjectMeta.Name)
}
return fmt.Errorf("error creating instanceGroup: %v", err)
} else {
fmt.Fprintf(&sb, "Created instancegroup/%s\n", v.ObjectMeta.Name)
}
default:
glog.V(2).Infof("Type of object was %T", v)
return fmt.Errorf("Unhandled kind %q in %q", gvk, f)
return fmt.Errorf("Unhandled kind %q in %s", gvk, f)
}
}
}
{
// If there is a value in this sb, this should mean that we have something to deploy
// so let's advise the user how to engage the cloud provider and deploy
if sb.String() != "" {
fmt.Fprintf(&sb, "\n")
fmt.Fprintf(&sb, "To deploy these resources, run: kops update cluster %s --yes\n", clusterName)
fmt.Fprintf(&sb, "\n")
}
_, err := out.Write(sb.Bytes())
if err != nil {
return fmt.Errorf("error writing to output: %v", err)
}
}
return nil
}

View File

@ -39,6 +39,11 @@ import (
"k8s.io/kops/upup/pkg/fi/utils"
)
const (
AuthorizationFlagAlwaysAllow = "AlwaysAllow"
AuthorizationFlagRBAC = "RBAC"
)
type CreateClusterOptions struct {
ClusterName string
Yes bool
@ -51,6 +56,7 @@ type CreateClusterOptions struct {
MasterSize string
MasterCount int32
NodeCount int32
EncryptEtcdStorage bool
Project string
KubernetesVersion string
OutDir string
@ -71,6 +77,9 @@ type CreateClusterOptions struct {
// The network topology to use
Topology string
// The authorization approach to use (RBAC, AlwaysAllow)
Authorization string
// The DNS type to use (public/private)
DNSType string
@ -82,6 +91,10 @@ type CreateClusterOptions struct {
// Egress configuration - FOR TESTING ONLY
Egress string
// Specify tenancy (default or dedicated) for masters and nodes
MasterTenancy string
NodeTenancy string
}
func (o *CreateClusterOptions) InitDefaults() {
@ -97,6 +110,8 @@ func (o *CreateClusterOptions) InitDefaults() {
// Default to open API & SSH access
o.AdminAccess = []string{"0.0.0.0/0"}
o.Authorization = AuthorizationFlagAlwaysAllow
}
func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
@ -152,6 +167,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().Int32Var(&options.MasterCount, "master-count", options.MasterCount, "Set the number of masters. Defaults to one master per master-zone")
cmd.Flags().Int32Var(&options.NodeCount, "node-count", options.NodeCount, "Set the number of nodes")
cmd.Flags().BoolVar(&options.EncryptEtcdStorage, "encrypt-etcd-storage", options.EncryptEtcdStorage, "Generate key in aws kms and use it for encrypt etcd volumes")
cmd.Flags().StringVar(&options.Image, "image", options.Image, "Image to use")
@ -172,6 +188,9 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
// Network topology
cmd.Flags().StringVarP(&options.Topology, "topology", "t", options.Topology, "Controls network topology for the cluster. public|private. Default is 'public'.")
// Authorization
cmd.Flags().StringVar(&options.Authorization, "authorization", options.Authorization, "Authorization mode to use: "+AuthorizationFlagAlwaysAllow+" or "+AuthorizationFlagRBAC)
// DNS
cmd.Flags().StringVar(&options.DNSType, "dns", options.DNSType, "DNS hosted zone to use: public|private. Default is 'public'.")
@ -181,6 +200,10 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
// Allow custom tags from the CLI
cmd.Flags().StringVar(&options.CloudLabels, "cloud-labels", options.CloudLabels, "A list of KV pairs used to tag all instance groups in AWS (eg \"Owner=John Doe,Team=Some Team\").")
// Master and Node Tenancy
cmd.Flags().StringVar(&options.MasterTenancy, "master-tenancy", options.MasterTenancy, "The tenancy of the master group on AWS. Can either be default or dedicated.")
cmd.Flags().StringVar(&options.NodeTenancy, "node-tenancy", options.NodeTenancy, "The tenancy of the node group on AWS. Can be either default or dedicated.")
return cmd
}
@ -278,6 +301,16 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
glog.V(4).Infof("networking mode=%s => %s", c.Networking, fi.DebugAsJsonString(cluster.Spec.Networking))
// In future we could change the default if the flag is not specified, e.g. in 1.7 maybe the default is RBAC?
cluster.Spec.Authorization = &api.AuthorizationSpec{}
if strings.EqualFold(c.Authorization, AuthorizationFlagAlwaysAllow) {
cluster.Spec.Authorization.AlwaysAllow = &api.AlwaysAllowAuthorizationSpec{}
} else if strings.EqualFold(c.Authorization, AuthorizationFlagRBAC) {
cluster.Spec.Authorization.RBAC = &api.RBACAuthorizationSpec{}
} else {
return fmt.Errorf("unknown authorization mode %q", c.Authorization)
}
if len(c.Zones) != 0 {
existingSubnets := make(map[string]*api.ClusterSubnetSpec)
for i := range cluster.Spec.Subnets {
@ -405,6 +438,9 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
for i, ig := range masters {
m := &api.EtcdMemberSpec{}
if c.EncryptEtcdStorage {
m.EncryptedVolume = &c.EncryptEtcdStorage
}
m.Name = names[i]
m.InstanceGroup = fi.String(ig.ObjectMeta.Name)
@ -448,6 +484,18 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
}
}
if c.MasterTenancy != "" {
for _, group := range masters {
group.Spec.Tenancy = c.MasterTenancy
}
}
if c.NodeTenancy != "" {
for _, group := range nodes {
group.Spec.Tenancy = c.NodeTenancy
}
}
if len(c.NodeSecurityGroups) > 0 {
for _, group := range nodes {
group.Spec.AdditionalSecurityGroups = c.NodeSecurityGroups

View File

@ -42,6 +42,8 @@ func TestCreateClusterMinimal(t *testing.T) {
func TestCreateClusterHA(t *testing.T) {
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/ha", "v1alpha1")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/ha", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/ha_encrypt", "v1alpha1")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/ha_encrypt", "v1alpha2")
}
// TestCreateClusterHASharedZones tests kops create cluster when the master count is bigger than the number of zones

View File

@ -23,6 +23,7 @@ import (
"io"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kubernetes/pkg/kubectl/cmd/util/editor"
"os"
@ -148,7 +149,7 @@ func RunCreateInstanceGroup(f *util.Factory, cmd *cobra.Command, args []string,
return fmt.Errorf("unexpected object type: %T", obj)
}
err = group.Validate()
err = validation.ValidateInstanceGroup(group)
if err != nil {
return err
}

View File

@ -17,24 +17,48 @@ limitations under the License.
package main
import (
"github.com/spf13/cobra"
"fmt"
"io"
"bytes"
"github.com/golang/glog"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kops/cmd/kops/util"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/v1alpha1"
"k8s.io/kops/util/pkg/vfs"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
)
type DeleteOptions struct {
resource.FilenameOptions
Yes bool
}
func NewCmdDelete(f *util.Factory, out io.Writer) *cobra.Command {
//options := &DeleteOptions{}
options := &DeleteOptions{}
cmd := &cobra.Command{
Use: "delete",
Short: "delete clusters",
Long: `Delete clusters`,
Use: "delete -f FILENAME [--yes]",
Short: "Delete clusters and instancegroups",
Long: `Delete clusters and instancegroups`,
SuggestFor: []string{"rm"},
Run: func(cmd *cobra.Command, args []string) {
if cmdutil.IsFilenameEmpty(options.Filenames) {
cmd.Help()
return
}
cmdutil.CheckErr(RunDelete(f, out, options))
},
}
cmd.Flags().StringSliceVarP(&options.Filenames, "filename", "f", options.Filenames, "Filename to use to delete the resource")
cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Specify --yes to delete the resource")
cmd.MarkFlagRequired("filename")
// create subcommands
cmd.AddCommand(NewCmdDeleteCluster(f, out))
cmd.AddCommand(NewCmdDeleteInstanceGroup(f, out))
@ -42,3 +66,68 @@ func NewCmdDelete(f *util.Factory, out io.Writer) *cobra.Command {
return cmd
}
func RunDelete(factory *util.Factory, out io.Writer, d *DeleteOptions) error {
// Codecs provides access to encoding and decoding for the scheme
codecs := kopsapi.Codecs //serializer.NewCodecFactory(scheme)
codec := codecs.UniversalDecoder(kopsapi.SchemeGroupVersion)
var sb bytes.Buffer
fmt.Fprintf(&sb, "\n")
for _, f := range d.Filenames {
contents, err := vfs.Context.ReadFile(f)
if err != nil {
return fmt.Errorf("error reading file %q: %v", f, err)
}
sections := bytes.Split(contents, []byte("\n---\n"))
for _, section := range sections {
defaults := &schema.GroupVersionKind{
Group: v1alpha1.SchemeGroupVersion.Group,
Version: v1alpha1.SchemeGroupVersion.Version,
}
o, gvk, err := codec.Decode(section, defaults, nil)
if err != nil {
return fmt.Errorf("error parsing file %q: %v", f, err)
}
switch v := o.(type) {
case *kopsapi.Cluster:
options := &DeleteClusterOptions{}
options.ClusterName = v.ObjectMeta.Name
options.Yes = d.Yes
err = RunDeleteCluster(factory, out, options)
if err != nil {
exitWithError(err)
}
if d.Yes {
fmt.Fprintf(&sb, "Deleted cluster/%s\n", v.ObjectMeta.Name)
}
case *kopsapi.InstanceGroup:
options := &DeleteInstanceGroupOptions{}
options.GroupName = v.ObjectMeta.Name
options.ClusterName = v.ObjectMeta.Labels[kopsapi.LabelClusterName]
options.Yes = d.Yes
err := RunDeleteInstanceGroup(factory, out, options)
if err != nil {
exitWithError(err)
}
if d.Yes {
fmt.Fprintf(&sb, "Deleted instancegroup/%s\n", v.ObjectMeta.Name)
}
default:
glog.V(2).Infof("Type of object was %T", v)
return fmt.Errorf("Unhandled kind %q in %s", gvk, f)
}
}
}
{
_, err := out.Write(sb.Bytes())
if err != nil {
return fmt.Errorf("error writing to output: %v", err)
}
}
return nil
}

View File

@ -26,10 +26,11 @@ import (
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/pkg/resources"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/kutil"
"k8s.io/kops/util/pkg/tables"
"k8s.io/kops/util/pkg/vfs"
)
@ -121,32 +122,32 @@ func RunDeleteCluster(f *util.Factory, out io.Writer, options *DeleteClusterOpti
}
// Todo lets make this smart enough to detect the cloud and switch on the ClusterResources interface
d := &kutil.AwsCluster{}
d := &resources.AwsCluster{}
d.ClusterName = clusterName
d.Cloud = cloud
resources, err := d.ListResources()
clusterResources, err := d.ListResources()
if err != nil {
return err
}
if len(resources) == 0 {
if len(clusterResources) == 0 {
fmt.Fprintf(out, "No cloud resources to delete\n")
} else {
wouldDeleteCloudResources = true
t := &tables.Table{}
t.AddColumn("TYPE", func(r *kutil.ResourceTracker) string {
t.AddColumn("TYPE", func(r *resources.ResourceTracker) string {
return r.Type
})
t.AddColumn("ID", func(r *kutil.ResourceTracker) string {
t.AddColumn("ID", func(r *resources.ResourceTracker) string {
return r.ID
})
t.AddColumn("NAME", func(r *kutil.ResourceTracker) string {
t.AddColumn("NAME", func(r *resources.ResourceTracker) string {
return r.Name
})
var l []*kutil.ResourceTracker
for _, v := range resources {
var l []*resources.ResourceTracker
for _, v := range clusterResources {
l = append(l, v)
}
@ -161,7 +162,7 @@ func RunDeleteCluster(f *util.Factory, out io.Writer, options *DeleteClusterOpti
fmt.Fprintf(out, "\n")
err = d.DeleteResources(resources)
err = d.DeleteResources(clusterResources)
if err != nil {
return err
}
@ -183,7 +184,7 @@ func RunDeleteCluster(f *util.Factory, out io.Writer, options *DeleteClusterOpti
}
}
b := kutil.NewKubeconfigBuilder()
b := kubeconfig.NewKubeconfigBuilder()
b.Context = clusterName
err = b.DeleteKubeConfig()
if err != nil {

View File

@ -28,7 +28,7 @@ type DescribeCmd struct {
var describeCmd = DescribeCmd{
cobraCommand: &cobra.Command{
Use: "describe",
Short: "describe objects",
Short: "Get additional information about cloud resources.",
},
}

View File

@ -26,9 +26,9 @@ import (
func NewCmdEdit(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "edit",
Short: "Edit resource",
Short: "Edit clusters and other resrouces.",
Long: `Edit a resource configuration.
This command changes the cloud specification in the registry.
It does not update the cloud resources, to apply the changes use "kops update cluster".`,

View File

@ -26,6 +26,7 @@ import (
"io"
"k8s.io/kops/cmd/kops/util"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kubernetes/pkg/kubectl/cmd/util/editor"
)
@ -115,7 +116,7 @@ func RunEditFederation(f *util.Factory, cmd *cobra.Command, args []string, out i
return fmt.Errorf("object was not of expected type: %T", newObj)
}
err = newFed.Validate()
err = validation.ValidateFederation(newFed)
if err != nil {
return err
}

View File

@ -27,6 +27,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kubernetes/pkg/kubectl/cmd/util/editor"
)
@ -131,7 +132,7 @@ func RunEditInstanceGroup(f *util.Factory, cmd *cobra.Command, args []string, ou
return fmt.Errorf("object was not of expected type: %T", newObj)
}
err = newGroup.Validate()
err = validation.ValidateInstanceGroup(newGroup)
if err != nil {
return err
}
@ -153,7 +154,7 @@ func RunEditInstanceGroup(f *util.Factory, cmd *cobra.Command, args []string, ou
return err
}
err = fullGroup.CrossValidate(fullCluster, true)
err = validation.CrossValidateInstanceGroup(fullGroup, fullCluster, true)
if err != nil {
return err
}

View File

@ -23,7 +23,7 @@ import (
// exportCmd represents the export command
var exportCmd = &cobra.Command{
Use: "export",
Short: "export clusters/kubecfg",
Short: "Exports a kubecfg for target cluster.",
Long: `export clusters/kubecfg`,
}

View File

@ -19,8 +19,8 @@ package main
import (
"github.com/spf13/cobra"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/kutil"
)
type ExportKubecfgCommand struct {
@ -67,19 +67,10 @@ func (c *ExportKubecfgCommand) Run(args []string) error {
return err
}
clusterName := cluster.ObjectMeta.Name
master := cluster.Spec.MasterPublicName
if master == "" {
master = "api." + clusterName
conf, err := kubeconfig.BuildKubecfg(cluster, keyStore, secretStore)
if err != nil {
return err
}
x := &kutil.CreateKubecfg{
ContextName: clusterName,
KeyStore: keyStore,
SecretStore: secretStore,
KubeMasterIP: master,
}
return x.WriteKubecfg()
return conf.WriteKubecfg()
}

View File

@ -37,7 +37,7 @@ var getCmd = GetCmd{
cobraCommand: &cobra.Command{
Use: "get",
SuggestFor: []string{"list"},
Short: "list or get objects",
Short: "List all instances of a resource.",
Long: `list or get objects`,
},
}

View File

@ -23,7 +23,7 @@ import (
// importCmd represents the import command
var importCmd = &cobra.Command{
Use: "import",
Short: "import clusters",
Short: "Import existing resources into the state store.",
Long: `import clusters`,
}

View File

@ -102,6 +102,16 @@ func TestPrivateKopeio(t *testing.T) {
runTest(t, "privatekopeio.example.com", "../../tests/integration/privatekopeio", "v1alpha2", true, 1)
}
// TestPrivateDns1 runs the test on a configuration with private topology, private dns
func TestPrivateDns1(t *testing.T) {
runTest(t, "privatedns1.example.com", "../../tests/integration/privatedns1", "v1alpha2", true, 1)
}
// TestPrivateDns2 runs the test on a configuration with private topology, private dns, extant vpc
func TestPrivateDns2(t *testing.T) {
runTest(t, "privatedns2.example.com", "../../tests/integration/privatedns2", "v1alpha2", true, 1)
}
func runTest(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
var stdout bytes.Buffer
@ -369,7 +379,28 @@ func (h *IntegrationTestHarness) SetupMockAWS() {
mockRoute53.MockCreateZone(&route53.HostedZone{
Id: aws.String("/hostedzone/Z1AFAKE1ZON3YO"),
Name: aws.String("example.com."),
})
Config: &route53.HostedZoneConfig{
PrivateZone: aws.Bool(false),
},
}, nil)
mockRoute53.MockCreateZone(&route53.HostedZone{
Id: aws.String("/hostedzone/Z2AFAKE1ZON3NO"),
Name: aws.String("internal.example.com."),
Config: &route53.HostedZoneConfig{
PrivateZone: aws.Bool(true),
},
}, []*route53.VPC{{
VPCId: aws.String("vpc-234"),
}})
mockRoute53.MockCreateZone(&route53.HostedZone{
Id: aws.String("/hostedzone/Z3AFAKE1ZOMORE"),
Name: aws.String("private.example.com."),
Config: &route53.HostedZoneConfig{
PrivateZone: aws.Bool(true),
},
}, []*route53.VPC{{
VPCId: aws.String("vpc-123"),
}})
mockEC2.Images = append(mockEC2.Images, &ec2.Image{
ImageId: aws.String("ami-12345678"),

View File

@ -25,6 +25,7 @@ import (
"k8s.io/kops/cmd/kops/util"
"k8s.io/kops/util/pkg/vfs"
"bytes"
kopsapi "k8s.io/kops/pkg/apis/kops"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
@ -39,7 +40,7 @@ func NewCmdReplace(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "replace -f FILENAME",
Short: "Replace a resource by filename or stdin",
Short: "Replace a resource by filename or stdin.",
Run: func(cmd *cobra.Command, args []string) {
if cmdutil.IsFilenameEmpty(options.Filenames) {
cmd.Help()
@ -72,40 +73,44 @@ func RunReplace(f *util.Factory, cmd *cobra.Command, out io.Writer, c *ReplaceOp
if err != nil {
return fmt.Errorf("error reading file %q: %v", f, err)
}
sections := bytes.Split(contents, []byte("\n---\n"))
for _, section := range sections {
o, gvk, err := codec.Decode(section, nil, nil)
if err != nil {
return fmt.Errorf("error parsing file %q: %v", f, err)
}
switch v := o.(type) {
case *kopsapi.Federation:
_, err = clientset.Federations().Update(v)
if err != nil {
return fmt.Errorf("error replacing federation: %v", err)
}
case *kopsapi.Cluster:
_, err = clientset.Clusters().Update(v)
if err != nil {
return fmt.Errorf("error replacing cluster: %v", err)
}
case *kopsapi.InstanceGroup:
clusterName := v.ObjectMeta.Labels[kopsapi.LabelClusterName]
if clusterName == "" {
return fmt.Errorf("must specify %q label with cluster name to replace instanceGroup", kopsapi.LabelClusterName)
}
_, err = clientset.InstanceGroups(clusterName).Update(v)
if err != nil {
return fmt.Errorf("error replacing instanceGroup: %v", err)
}
default:
glog.V(2).Infof("Type of object was %T", v)
return fmt.Errorf("Unhandled kind %q in %q", gvk, f)
}
o, gvk, err := codec.Decode(contents, nil, nil)
if err != nil {
return fmt.Errorf("error parsing file %q: %v", f, err)
}
switch v := o.(type) {
case *kopsapi.Federation:
_, err = clientset.Federations().Update(v)
if err != nil {
return fmt.Errorf("error replacing federation: %v", err)
}
case *kopsapi.Cluster:
_, err = clientset.Clusters().Update(v)
if err != nil {
return fmt.Errorf("error replacing cluster: %v", err)
}
case *kopsapi.InstanceGroup:
clusterName := v.ObjectMeta.Labels[kopsapi.LabelClusterName]
if clusterName == "" {
return fmt.Errorf("must specify %q label with cluster name to replace instanceGroup", kopsapi.LabelClusterName)
}
_, err = clientset.InstanceGroups(clusterName).Update(v)
if err != nil {
return fmt.Errorf("error replacing instanceGroup: %v", err)
}
default:
glog.V(2).Infof("Type of object was %T", v)
return fmt.Errorf("Unhandled kind %q in %q", gvk, f)
}
}
return nil

View File

@ -17,15 +17,16 @@ limitations under the License.
package main
import (
"github.com/spf13/cobra"
"io"
"github.com/spf13/cobra"
"k8s.io/kops/cmd/kops/util"
)
func NewCmdRollingUpdate(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "rolling-update",
Short: "rolling update clusters",
Short: "Initiate rolling updates on clusters.",
Long: `rolling update clusters`,
}

View File

@ -95,7 +95,7 @@ func NewCmdRollingUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
Short: "Rolling update a cluster",
Long: `Rolling update a cluster instance groups.
This command updates a kubernetes cluseter to match the cloud, and kops specifications.
This command updates a kubernetes cluster to match the cloud, and kops specifications.
To perform rolling update, you need to update the cloud resources first with "kops update cluster"

View File

@ -23,7 +23,7 @@ import (
// secretsCmd represents the secrets command
var secretsCmd = &cobra.Command{
Use: "secrets",
Short: "Manage secrets & keys",
Short: "Manage secrets & keys.",
Long: `Manage secrets & keys`,
}

View File

@ -17,15 +17,16 @@ limitations under the License.
package main
import (
"github.com/spf13/cobra"
"io"
"github.com/spf13/cobra"
"k8s.io/kops/cmd/kops/util"
)
func NewCmdToolbox(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "toolbox",
Short: "Misc infrequently used commands",
Short: "Misc infrequently used commands.",
}
cmd.AddCommand(NewCmdToolboxConvertImported(f, out))

View File

@ -24,8 +24,8 @@ import (
"io"
"k8s.io/kops/cmd/kops/util"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/resources"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/upup/pkg/kutil"
)
type ToolboxDumpOptions struct {
@ -91,7 +91,7 @@ func RunToolboxDump(f *util.Factory, out io.Writer, options *ToolboxDumpOptions)
}
// Todo lets make this smart enough to detect the cloud and switch on the ClusterResources interface
d := &kutil.AwsCluster{}
d := &resources.AwsCluster{}
d.ClusterName = options.ClusterName
d.Cloud = cloud

View File

@ -17,15 +17,16 @@ limitations under the License.
package main
import (
"github.com/spf13/cobra"
"io"
"github.com/spf13/cobra"
"k8s.io/kops/cmd/kops/util"
)
func NewCmdUpdate(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "update",
Short: "update clusters",
Short: "Creates or updates cloud resources to match cluster spec.",
Long: `Update clusters`,
}

View File

@ -31,6 +31,7 @@ import (
"k8s.io/kops/cmd/kops/util"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/upup/pkg/fi/utils"
@ -85,7 +86,7 @@ func NewCmdUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().StringVar(&options.Models, "model", options.Models, "Models to apply (separate multiple models with commas)")
cmd.Flags().StringVar(&options.SSHPublicKey, "ssh-public-key", options.SSHPublicKey, "SSH public key to use (deprecated: use kops create secret instead)")
cmd.Flags().StringVar(&options.OutDir, "out", options.OutDir, "Path to write any local output")
cmd.Flags().BoolVar(&options.CreateKubecfg, "create-kube-config", options.CreateKubecfg, "Will control automatically creating the kube config file on your local filesystem")
return cmd
}
@ -206,14 +207,11 @@ func RunUpdateCluster(f *util.Factory, clusterName string, out io.Writer, c *Upd
}
if kubecfgCert != nil {
glog.Infof("Exporting kubecfg for cluster")
x := &kutil.CreateKubecfg{
ContextName: cluster.ObjectMeta.Name,
KeyStore: keyStore,
SecretStore: secretStore,
KubeMasterIP: cluster.Spec.MasterPublicName,
conf, err := kubeconfig.BuildKubecfg(cluster, keyStore, secretStore)
if err != nil {
return err
}
err = x.WriteKubecfg()
err = conf.WriteKubecfg()
if err != nil {
return err
}

View File

@ -23,7 +23,7 @@ import (
// upgradeCmd represents the upgrade command
var upgradeCmd = &cobra.Command{
Use: "upgrade",
Short: "upgrade clusters",
Short: "Automates checking for and applying Kubernetes updates.",
Long: `upgrade clusters`,
}

View File

@ -27,7 +27,7 @@ func NewCmdValidate(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "validate",
//SuggestFor: []string{"val"},
Short: "Validate Cluster",
Short: "Run validation check on Kubernetes cluster.",
Long: `Validate a Kubernetes Cluster`,
}

View File

@ -30,6 +30,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
apiutil "k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/validation"
"k8s.io/kops/util/pkg/tables"
)
@ -151,9 +152,9 @@ func RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out
nodeTable.AddColumn("ROLE", func(n v1.Node) string {
// TODO: Maybe print the instance group role instead?
// TODO: Maybe include the instance group name?
role := "node"
if val, ok := n.ObjectMeta.Labels[api.RoleLabelName]; ok {
role = val
role := apiutil.GetNodeRole(&n)
if role == "" {
role = "node"
}
return role
})

View File

@ -30,7 +30,7 @@ type VersionCmd struct {
var versionCmd = VersionCmd{
cobraCommand: &cobra.Command{
Use: "version",
Short: "Print the client version information",
Short: "Print the client version information.",
},
}

View File

@ -89,6 +89,9 @@ func main() {
}
fi, err := os.Lstat(procSelfExe)
if err != nil {
glog.Fatalf("error doing lstat on %q: %v", procSelfExe, err)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
glog.Fatalf("file %v is not a symlink", procSelfExe)
}

View File

@ -43,6 +43,9 @@ var (
func main() {
fmt.Printf("dns-controller version %s\n", BuildVersion)
// Be sure to get the glog flags
glog.Flush()
dnsProviderId := "aws-route53"
flags.StringVar(&dnsProviderId, "dns", dnsProviderId, "DNS provider we should use (aws-route53, google-clouddns)")
@ -57,6 +60,8 @@ func main() {
flag.Set("logtostderr", "true")
flags.AddGoFlagSet(flag.CommandLine)
flags.Parse(os.Args)
zoneRules, err := dns.ParseZoneRules(zones)

View File

@ -256,7 +256,24 @@ func (c *DNSController) runOnce() error {
glog.V(4).Infof("updating records for %s: %v -> %v", k, oldValues, newValues)
err := op.updateRecords(k, newValues, int64(ttl.Seconds()))
// Duplicate records are a hard-error on e.g. Route53
var dedup []string
for _, s := range newValues {
alreadyExists := false
for _, e := range dedup {
if e == s {
alreadyExists = true
break
}
}
if alreadyExists {
glog.V(2).Infof("skipping duplicate record %s", s)
continue
}
dedup = append(dedup, s)
}
err := op.updateRecords(k, dedup, int64(ttl.Seconds()))
if err != nil {
glog.Infof("error updating records for %s: %v", k, err)
errors = append(errors, err)
@ -282,7 +299,7 @@ func (c *DNSController) runOnce() error {
for key, changeset := range op.changesets {
glog.V(2).Infof("applying DNS changeset for zone %s", key)
if err := changeset.Apply(); err != nil {
glog.Warningf("error applying DNS changset for zone %s: %v", key, err)
glog.Warningf("error applying DNS changeset for zone %s: %v", key, err)
errors = append(errors, fmt.Errorf("error applying DNS changeset for zone %s: %v", key, err))
}
}

View File

@ -27,6 +27,7 @@ import (
"k8s.io/client-go/pkg/api/v1"
"k8s.io/kops/dns-controller/pkg/dns"
"k8s.io/kops/dns-controller/pkg/util"
kopsutil "k8s.io/kops/pkg/apis/kops/util"
)
// NodeController watches for nodes
@ -215,7 +216,8 @@ func (c *NodeController) updateNodeRecords(node *v1.Node) {
// node/role=<role>/external -> ExternalIP
// node/role=<role>/internal -> InternalIP
{
role := node.Labels["kubernetes.io/role"]
role := kopsutil.GetNodeRole(node)
// Default to node
if role == "" {
role = "node"
}

137
docs/addon_manager.md Normal file
View File

@ -0,0 +1,137 @@
## Addons Management
kops incorporates management of some addons; we _have_ to manage the addons that are needed
before the kubernetes API is functional.
In addition, kops offers end-user management of addons via the `channels` tool (which is still experimental,
but we are working on making it a recommended part of kubernetes addon management). We ship some
curated addons in the [addons directory](/addons), more information in the [addons document](addons.md).
kops also uses the `channels` tool for system addon management. Because kops uses the same tool
for *system* addon management as it does for *user* addon management, addons installed
by kops as part of cluster bringup can be managed alongside additional addons.
(Though note that bootstrap addons are much more likely to be replaced during a kops upgrade.)
The general kops philosophy is to try to make the set of bootstrap addons minimal, and
to make installation of subsequent addons easy.
Thus, `kube-dns` and the networking overlay (if any) are the canonical bootstrap addons.
But addons such as the dashboard or the EFK stack are easily installed after kops bootstrap,
with a `kubectl apply -f https://...` or with the channels tool.
In the future we may, as a convenience, make it easy to add optional addons to the kops manifest,
though this will just be a convenience wrapper around doing it manually.
## Versioning
The channels tool adds a manifest-of-manifests file, of `Kind: Addons`, which allows for a description
of the various manifest versions that are available. In this way kops can manage updates
as new versions of the addon are released. For example,
the [dashboard addon](https://github.com/kubernetes/kops/blob/master/addons/kubernetes-dashboard/addon.yaml)
lists multiple versions.
For example, a typical addons declaration might look like this:
```
- version: 1.4.0
selector:
k8s-addon: kubernetes-dashboard.addons.k8s.io
manifest: v1.4.0.yaml
- version: 1.5.0
selector:
k8s-addon: kubernetes-dashboard.addons.k8s.io
manifest: v1.5.0.yaml
```
That declares two versions of an addon, with manifests at `v1.4.0.yaml` and at `v1.5.0.yaml`.
These are evaluated as relative paths to the Addons file itself. (The channels tool supports
a few more protocols than `kubectl` - for example `s3://...` for S3 hosted manifests).
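For instance, a minimal sketch of that resolution using Go's standard `net/url` package (illustrative only, not the actual channels code):
```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// The Addons file itself; manifest paths are resolved relative to it.
	base, err := url.Parse("s3://my-bucket/addons/kubernetes-dashboard/addon.yaml")
	if err != nil {
		panic(err)
	}
	manifest, err := url.Parse("v1.5.0.yaml")
	if err != nil {
		panic(err)
	}
	// Prints: s3://my-bucket/addons/kubernetes-dashboard/v1.5.0.yaml
	fmt.Println(base.ResolveReference(manifest))
}
```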
The `version` field gives meaning to the alternative manifests. This is interpreted as a
semver. The channels tool keeps track of the current version installed (currently by means
of an annotation on the `kube-system` namespace), and it will not reapply the same version
of the manifest. This means that a user can edit a deployed addon, and the changes will not
be replaced until a new version of the addon is installed.
The long-term direction here is that addons will mostly be configured through a ConfigMap or Secret object,
and that the addon manager will (TODO) not replace the ConfigMap.
The `selector` determines the objects which make up the addon. This will be used
to construct a `--prune` argument (TODO), so that objects that existed in the
previous version but not in the new one will be removed as part of an upgrade.
## Kubernetes Version Selection
The addon manager now supports a `kubernetesVersion` field, which is a semver range specifier
on the kubernetes version. If the targeted version of kubernetes does not match the semver
specified, the addon version will be ignored.
This allows you to have different versions of the manifest for significant changes to the
kubernetes API. For example, 1.6 changed the taints & tolerations to a field, and RBAC moved
to beta. As such it is easier to have two separate manifests.
For example:
```
- version: 1.5.0
selector:
k8s-addon: kube-dashboard.addons.k8s.io
manifest: v1.5.0.yaml
kubernetesVersion: "<1.6.0"
id: "pre-k8s-16"
- version: 1.6.0
selector:
k8s-addon: kube-dashboard.addons.k8s.io
manifest: v1.6.0.yaml
kubernetesVersion: ">=1.6.0"
id: "k8s-16"
```
On kubernetes versions before 1.6, we will install `v1.5.0.yaml`, whereas from kubernetes
versions 1.6 on we will install `v1.6.0.yaml`.
Note that we remove the `pre-release` field of the kubernetes semver, so that `1.6.0-beta.1`
will match `>=1.6.0`. This matches the way kubernetes does pre-releases.
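As a rough sketch, that matching could be implemented as follows, assuming the `github.com/blang/semver` package (the function name is hypothetical, not the actual channels implementation):
```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

// matchesKubernetesVersion reports whether a cluster's kubernetes version
// satisfies an addon's kubernetesVersion range, ignoring any pre-release
// component (so "1.6.0-beta.1" is treated as "1.6.0").
func matchesKubernetesVersion(rangeSpec, clusterVersion string) (bool, error) {
	r, err := semver.ParseRange(rangeSpec)
	if err != nil {
		return false, fmt.Errorf("bad range %q: %v", rangeSpec, err)
	}
	v, err := semver.Parse(clusterVersion)
	if err != nil {
		return false, fmt.Errorf("bad version %q: %v", clusterVersion, err)
	}
	v.Pre = nil // strip the pre-release field before matching
	return r(v), nil
}

func main() {
	ok, _ := matchesKubernetesVersion(">=1.6.0", "1.6.0-beta.1")
	fmt.Println(ok) // true: the pre-release tag is ignored
}
```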
## Semver is not enough: `id`
However, semver alone is insufficient when combined with the kubernetes version selection. The problem
arises in the following scenario:
* Install k8s 1.5, 1.5 version of manifest is installed
* Upgrade to k8s 1.6, 1.6 version of manifest is installed
* Downgrade to k8s 1.5; we want the 1.5 version of the manifest to be installed but the 1.6 version
will have a semver that is greater than or equal to the 1.5 semver.
We need a way to break the ties between the semvers, and thus we introduce the `id` field.
Thus a manifest will actually look like this:
```
- version: 1.6.0
selector:
k8s-addon: kube-dns.addons.k8s.io
manifest: pre-k8s-16.yaml
kubernetesVersion: "<1.6.0"
id: "pre-k8s-16"
- version: 1.6.0
selector:
k8s-addon: kube-dns.addons.k8s.io
manifest: k8s-16.yaml
kubernetesVersion: ">=1.6.0"
id: "k8s-16"
```
Note that the two addons have the same version, but a different `kubernetesVersion` selector.
But they have different `id` values; addons with matching semvers but different `id`s will
be upgraded. (We will never downgrade to an older semver though, regardless of `id`.)
So now in the above scenario, after the downgrade to 1.5, although the semver is the same,
the id will not match, and the `pre-k8s-16` manifest will be installed. (And when we upgrade back
to 1.6, the `k8s-16` version will be installed.)
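A sketch of the resulting update rule, again assuming `github.com/blang/semver` (hypothetical names, not the actual channels code):
```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

// needsUpdate decides whether a candidate addon should replace the installed
// one: upgrade on a newer semver, or on an equal semver with a different id;
// never downgrade to an older semver, regardless of id.
func needsUpdate(installed, candidate semver.Version, installedID, candidateID string) bool {
	if candidate.GT(installed) {
		return true
	}
	return candidate.EQ(installed) && candidateID != installedID
}

func main() {
	v := semver.MustParse("1.6.0")
	// Downgrade scenario: both manifests carry version 1.6.0, but the ids
	// differ, so switching back to "pre-k8s-16" still triggers an install.
	fmt.Println(needsUpdate(v, v, "k8s-16", "pre-k8s-16")) // true
}
```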
A few tips:
* The `version` can now more closely mirror the upstream version.
* The manifest names should probably incorporate the `id`, for maintainability.

View File

@ -2,6 +2,8 @@
With kops you manage addons by using kubectl.
(For a description of the addon-manager, please see [addon_manager.md](addon_manager.md).)
Addons in kubernetes are traditionally done by copying files to `/etc/kubernetes/addons` on the master. But this
doesn't really make sense in HA master configurations. We also have kubectl available, and addons is just a thin
wrapper over calling kubectl.

View File

@ -1,6 +1,23 @@
# Kops HTTP API Server
### Notes:
# Building the kops API server
Set your docker registry
- https://github.com/kubernetes/kubernetes/pull/40803
```bash
export DOCKER_REGISTRY=$registry
```
Build the kops API server container, and push the image up to your registry.
```bash
make kops-server-push
```
# Deploy the kops API server to a cluster
From the kops directory, run the following `helm` command. More information on `helm` can be found [here](https://github.com/kubernetes/helm).
```bash
helm install charts/kops --namespace kops
```

View File

@ -121,7 +121,7 @@ ID=$(uuidgen) && aws route53 create-hosted-zone --name subdomain.example.com --c
```bash
# Note: This example assumes you have jq installed locally.
aws route53 list-hosted-zones | jq '.HostedZones[] | select(.Name=="subdomain.example.com.") | .Id'
aws route53 list-hosted-zones | jq '.HostedZones[] | select(.Name=="example.com.") | .Id'
```
* Create a new JSON file with your values (`subdomain.json`)

View File

@ -1,11 +1,21 @@
## kops delete
delete clusters
Deletes a resource by filename or stdin
### Synopsis
Delete clusters or instancegroups by filename or stdin
Delete clusters
```
kops delete -f FILENAME [--yes]
```
### Options
```
-f, --filename stringSlice Filename to use to delete the resource
-y, --yes Specify --yes to delete the resource
```
### Options inherited from parent commands
@ -27,4 +37,3 @@ Delete clusters
* [kops delete cluster](kops_delete_cluster.md) - Delete cluster
* [kops delete instancegroup](kops_delete_instancegroup.md) - Delete instancegroup
* [kops delete secret](kops_delete_secret.md) - Delete secret
View File
@ -7,7 +7,7 @@ Rolling update a cluster
Rolling update a cluster instance groups.
This command updates a kubernetes cluseter to match the cloud, and kops specifications.
This command updates a kubernetes cluster to match the cloud, and kops specifications.
To perform rolling update, you need to update the cloud resources first with "kops update cluster"
View File
@ -14,6 +14,7 @@ kops update cluster
### Options
```
--create-kube-config Will control automatically creating the kube config file on your local filesystem (default true)
--model string Models to apply (separate multiple models with commas) (default "config,proto,cloudup")
--out string Path to write any local output
--ssh-public-key string SSH public key to use (deprecated: use kops create secret instead)
View File
@ -118,6 +118,23 @@ spec:
Will result in the flag `--runtime-config=batch/v2alpha1=true,apps/v1alpha1=true`. Note that `kube-apiserver` accepts `true` as a value for switch-like flags.
### kubelet
This block contains configurations for `kubelet`. See https://kubernetes.io/docs/admin/kubelet/
#### Feature Gates
```yaml
spec:
  kubelet:
    featureGates:
      ExperimentalCriticalPodAnnotation: "true"
      AllowExtTrafficLocalEndpoints: "false"
```
Will result in the flag `--feature-gates=ExperimentalCriticalPodAnnotation=true,AllowExtTrafficLocalEndpoints=false`
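Rendering such a map into the flag is simple string assembly; here is a rough sketch of the idea, not the kops implementation (sorting keys for deterministic output is my choice here):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// buildFeatureGatesFlag renders a featureGates map the way kubelet expects,
// e.g. --feature-gates=A=true,B=false. Keys are sorted so output is stable.
func buildFeatureGatesFlag(gates map[string]string) string {
	keys := make([]string, 0, len(gates))
	for k := range gates {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, k+"="+gates[k])
	}
	return "--feature-gates=" + strings.Join(pairs, ",")
}

func main() {
	fmt.Println(buildFeatureGatesFlag(map[string]string{
		"ExperimentalCriticalPodAnnotation": "true",
		"AllowExtTrafficLocalEndpoints":     "false",
	}))
	// --feature-gates=AllowExtTrafficLocalEndpoints=false,ExperimentalCriticalPodAnnotation=true
}
```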
### networkID
On AWS, this is the id of the VPC the cluster is created in. If creating a cluster from scratch, this field doesn't need to be specified at create time; `kops` will create a `VPC` for you.
View File
@ -48,7 +48,7 @@ make ci
## Push new dns-controller image if needed
```
make dns-controller-push DNS_CONTROLLER_TAG=1.5.1 DOCKER_REGISTRY=kope
make dns-controller-push DOCKER_REGISTRY=kope
```
## Upload new version
@ -63,8 +63,7 @@ make upload S3_BUCKET=s3://kubeupv2
Make sure you are on the release branch `git checkout release-1.X`
```
export TAG=1.5.0-alpha4
git tag ${TAG}
make release-tag
git push --tags
```
@ -77,10 +76,10 @@ versions to that.
## Upload to github
Manually create a release on github & upload, but soon we'll publish shipbot which automates this...
Use [shipbot](https://github.com/kopeio/shipbot) to upload the release:
```
bazel run //cmd/shipbot -- -tag ${TAG}
make release-github
```
74
docs/gpu.md Normal file
View File
@ -0,0 +1,74 @@
# GPU support
```
kops create cluster gpu.example.com --zones us-east-1c --node-size p2.xlarge --node-count 1 --kubernetes-version 1.6.1
```
(Note that the p2.xlarge instance type is not cheap, but no GPU instances are)
You can use the experimental hooks feature to install the nvidia drivers:
`> kops edit cluster gpu.example.com`
```
spec:
  ...
  hooks:
  - execContainer:
      image: kopeio/nvidia-bootstrap:1.6
```
(TODO: Only on instance groups, or have nvidia-bootstrap detect if GPUs are present..)
In addition, you will likely want to set the `Accelerators=true` feature-flag to kubelet:
`> kops edit cluster gpu.example.com`
```
spec:
  ...
  kubelet:
    featureGates:
      Accelerators: "true"
```
`> kops update cluster gpu.example.com --yes`
Here is an example pod that runs tensorflow; note that it mounts libcuda from the host:
(TODO: Is there some way to have a well-known volume or similar?)
```
apiVersion: v1
kind: Pod
metadata:
  name: tf
spec:
  containers:
  - image: gcr.io/tensorflow/tensorflow:1.0.1-gpu
    imagePullPolicy: IfNotPresent
    name: gpu
    command:
    - /bin/bash
    - -c
    - "cp -d /rootfs/usr/lib/x86_64-linux-gnu/libcuda.* /usr/lib/x86_64-linux-gnu/ && cp -d /rootfs/usr/lib/x86_64-linux-gnu/libnvidia* /usr/lib/x86_64-linux-gnu/ && /run_jupyter.sh"
    resources:
      limits:
        cpu: 2000m
        alpha.kubernetes.io/nvidia-gpu: 1
    volumeMounts:
    - name: rootfs-usr-lib
      mountPath: /rootfs/usr/lib
  volumes:
  - name: rootfs-usr-lib
    hostPath:
      path: /usr/lib
```
To use this particular tensorflow image, you should port-forward and get the URL from the log:
```
kubectl port-forward tf 8888 &
kubectl logs tf
```
And browse to the URL printed.
View File
@ -99,7 +99,7 @@ CoreOS support is highly experimental. Please report any issues.
The following steps are known:
* CoreOS AMIs can be found using `aws ec2 describe-images --region=us-east-1 --owner=595879546273 --filters Name=virtualization-type,Values=hvm`
* You can specify the name using the 'coreos.com` owner alias, for example `coreos.com/CoreOS-stable-1235.9.0-hvm`
* You can specify the name using the `coreos.com` owner alias, for example `coreos.com/CoreOS-stable-1235.9.0-hvm`
View File
@ -15,7 +15,7 @@ By default, a cluster has:
## Listing instance groups
`kops get instancegroups`
> ```
```
NAME                 ROLE      MACHINETYPE    MIN    MAX    ZONES
master-us-east-1c    Master                   1      1      us-east-1c
nodes                Node      t2.medium      2      2
@ -32,7 +32,7 @@ have not yet been applied (this may change soon though!).
To preview the change:
`kops update cluster <clustername>`
> ```
```
...
Will modify resources:
*awstasks.LaunchConfiguration launchConfiguration/mycluster.mydomain.com
@ -131,6 +131,27 @@ So the procedure is:
* Rolling-update, only if you want to apply changes immediately: `kops rolling-update cluster`
## Adding Taints to an Instance Group
If you're running Kubernetes 1.6.0 or later, you can also control taints in the InstanceGroup.
The taints property takes a list of strings. The following example would add two taints to an IG,
using the same `edit` -> `update` -> `rolling-update` process as above.
```
metadata:
  creationTimestamp: "2016-07-10T15:47:14Z"
  name: nodes
spec:
  machineType: m3.medium
  maxSize: 3
  minSize: 3
  role: Node
  taints:
  - dedicated=gpu:NoSchedule
  - team=search:PreferNoSchedule
```
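Each taint string uses the `key=value:effect` shorthand. A hypothetical parser for that format, just to make the structure explicit (not the actual kops code):

```go
package main

import (
	"fmt"
	"strings"
)

// taint holds the parts of a key=value:effect string such as
// "dedicated=gpu:NoSchedule".
type taint struct {
	Key, Value, Effect string
}

// parseTaint splits a key=value:effect taint string into its parts.
func parseTaint(s string) (taint, error) {
	parts := strings.SplitN(s, ":", 2)
	if len(parts) != 2 {
		return taint{}, fmt.Errorf("taint %q is not in key=value:effect form", s)
	}
	kv := strings.SplitN(parts[0], "=", 2)
	if len(kv) != 2 {
		return taint{}, fmt.Errorf("taint %q is missing key=value", s)
	}
	return taint{Key: kv[0], Value: kv[1], Effect: parts[1]}, nil
}

func main() {
	t, err := parseTaint("dedicated=gpu:NoSchedule")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", t) // {Key:dedicated Value:gpu Effect:NoSchedule}
}
```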
## Resizing the master
(This procedure should be pretty familiar by now!)
View File
@ -7,7 +7,10 @@ There are two main types of labels that kops can create:
Both are specified at the InstanceGroup level.
A nice use for CloudLabels is to specify [AWS cost allocation tags](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
A nice use for cloudLabels is to specify [AWS cost allocation tags](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html).
A good use for nodeLabels is to implement [nodeSelector labels](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#step-two-add-a-nodeselector-field-to-your-pod-configuration).
cloudLabels and nodeLabels are automatically applied to new nodes created by [AWS EC2 auto scaling groups](https://aws.amazon.com/autoscaling/).
An example:
@ -28,3 +31,10 @@ Note that keys and values are strings, so you need quotes around values that YAM
would otherwise treat as numbers or booleans.
To apply changes, you'll need to do a `kops update cluster` and then likely a `kops rolling-update cluster`.
On AWS, if `kops rolling-update cluster --instance-group nodes` reports "No rolling-update required.", the
[kops rolling-update cluster](https://github.com/kubernetes/kops/blob/8bc48ef10a44a3e481b604f5dbb663420c68dcab/docs/cli/kops_rolling-update_cluster.md) `--force` flag can be used to force a rolling update even when no changes are identified.
Example:
`kops rolling-update cluster --instance-group nodes --force`
View File
@ -0,0 +1,52 @@
## 1.6.0-alpha.1
1.6.0-alpha.1 is an early-access prerelease of kops 1.6, the release with full support for kubernetes 1.6.
This version of kops & kubernetes has not yet undergone extensive validation, and there will be improvements
made before the release of kops 1.6.0.
This is not a full set of release notes, but rather a summary of the highest impact changes in the 1.6 release:
* RBAC can be enabled by passing the `--authorization=rbac` parameter to `kops create cluster`,
or via `kops edit cluster`, changing `authorization` from `alwaysAllow: {}` to `rbac: {}`.
* The standard RBAC policy for 1.6 means that all access to the Kubernetes API using the default
service account method will be denied.
* The taints & tolerations have changed as part of their graduation from alpha. The taint is now a field on the node:
```yaml
spec:
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
```
An example toleration (as used in dns-controller) is:
```yaml
spec:
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
```
Note that the annotation form is ignored. To schedule a pod on the master, the toleration must be updated
and moved from an annotation to the field.
* A new label for nodes, mirroring the toleration, is added and is now preferred: `node-role.kubernetes.io/master=`
(`node-role.kubernetes.io/master` with an empty value). `kubernetes.io/role=master` is still present, but
the `node-role.kubernetes.io/<role>=` form is preferred. `kubernetes.io/role=node` and `node-role.kubernetes.io/node=`
are also present.
## Known Issues
##### Rolling updates
Rolling update to 1.6 does not succeed because new kube-dns pods mount a configmap with an optional volume map,
but that is enforced by the kubelets, which are upgraded after the master.
Workaround: create the configmap with `kubectl create configmap -n kube-system kube-dns` before updating.
##### etcd3
`kops` is not yet recommending etcd3. We do however support a **run at your own risk** option. Right now we are working on resolving issues such as HA upgrade support.
View File
@ -1,6 +1,6 @@
## Building Kubernetes clusters with Terraform
Kops can generate Terraform configurations, and then you can then apply them using the `terraform plan` and `terraform apply` tools. This is very handy if you are already using Terraform, or if you want to check in the Terraform output into version control.
Kops can generate Terraform configurations, and then you can apply them using the `terraform plan` and `terraform apply` tools. This is very handy if you are already using Terraform, or if you want to check in the Terraform output into version control.
The gist of it is that, instead of letting kops apply the changes, you tell kops what you want, and then kops spits out what it wants done into a `.tf` file. **_You_** are then responsible for turning those plans into reality.
View File
@ -21,9 +21,12 @@ import (
crypto_rand "crypto/rand"
"crypto/rsa"
"fmt"
"strings"
"text/template"
"github.com/golang/glog"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/kops/federation/model"
@ -32,14 +35,12 @@ import (
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/upup/pkg/fi/k8sapi"
"k8s.io/kops/upup/pkg/kutil"
federation_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
k8sapiv1 "k8s.io/kubernetes/pkg/api/v1"
"strings"
"text/template"
)
type ApplyFederationOperation struct {
@ -56,7 +57,7 @@ type ApplyFederationOperation struct {
apiserverSecretName string
}
func (o *ApplyFederationOperation) FindKubecfg() (*kutil.KubeconfigBuilder, error) {
func (o *ApplyFederationOperation) FindKubecfg() (*kubeconfig.KubeconfigBuilder, error) {
// TODO: Only if not yet set?
// hasKubecfg, err := hasKubecfg(f.Name)
// if err != nil {
@ -146,10 +147,6 @@ func (o *ApplyFederationOperation) Run() error {
if err != nil {
return err
}
//k8sControllerClient, err := release_1_5.NewForConfig(federationRestConfig)
//if err != nil {
// return err
//}
for _, member := range o.Federation.Spec.Members {
glog.V(2).Infof("configuring member cluster %q", member)
@ -158,7 +155,7 @@ func (o *ApplyFederationOperation) Run() error {
return fmt.Errorf("error reading cluster %q: %v", member, err)
}
clusterName := strings.Replace(cluster.ObjectMeta.Name, ".", "-", -1)
clusterName := strings.Replace(cluster.Name, ".", "-", -1)
a := &FederationCluster{
FederationNamespace: o.namespace,
@ -166,7 +163,7 @@ func (o *ApplyFederationOperation) Run() error {
ControllerKubernetesClients: controllerKubernetesClients,
FederationClient: federationControllerClient,
ClusterSecretName: "secret-" + cluster.ObjectMeta.Name,
ClusterSecretName: "secret-" + cluster.Name,
ClusterName: clusterName,
ApiserverHostname: cluster.Spec.MasterPublicName,
}
@ -212,7 +209,7 @@ func (o *ApplyFederationOperation) buildApiserverKeypair() *fitasks.Keypair {
keypairName := "secret-" + o.apiserverHostName
keypair := &fitasks.Keypair{
Name: fi.String(keypairName),
Subject: "cn=" + o.Federation.ObjectMeta.Name,
Subject: "cn=" + o.Federation.Name,
Type: "server",
}
@ -362,7 +359,7 @@ func (o *ApplyFederationOperation) executeTemplate(key string, templateDefinitio
func (o *ApplyFederationOperation) EnsureNamespace(c *fi.Context) error {
k8s := c.Target.(*kubernetestarget.KubernetesTarget).KubernetesClient
ns, err := k8s.Core().Namespaces().Get(o.namespace, meta_v1.GetOptions{})
ns, err := k8s.CoreV1().Namespaces().Get(o.namespace, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
ns = nil
View File
@ -26,7 +26,6 @@ import (
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/kutil"
"k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
k8sapiv1 "k8s.io/kubernetes/pkg/api/v1"
@ -54,14 +53,7 @@ func (o *FederationCluster) Run(cluster *kopsapi.Cluster) error {
return err
}
k := kutil.CreateKubecfg{
ContextName: cluster.ObjectMeta.Name,
KeyStore: keyStore,
SecretStore: secretStore,
KubeMasterIP: cluster.Spec.MasterPublicName,
}
conf, err := k.ExtractKubeconfig()
conf, err := kubeconfig.BuildKubecfg(cluster, keyStore, secretStore)
if err != nil {
return fmt.Errorf("error building connection information for cluster %q: %v", cluster.ObjectMeta.Name, err)
}
View File
@ -28,7 +28,6 @@ import (
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/upup/pkg/kutil"
)
const UserAdmin = "admin"
@ -43,12 +42,12 @@ type FederationConfiguration struct {
KubeconfigSecretName string
}
func (o *FederationConfiguration) extractKubecfg(c *fi.Context, f *kopsapi.Federation) (*kutil.KubeconfigBuilder, error) {
func (o *FederationConfiguration) extractKubecfg(c *fi.Context, f *kopsapi.Federation) (*kubeconfig.KubeconfigBuilder, error) {
// TODO: move this
masterName := "api." + f.Spec.DNSName
k := kutil.NewKubeconfigBuilder()
k.KubeMasterIP = masterName
k := kubeconfig.NewKubeconfigBuilder()
k.Server = "https://" + masterName
k.Context = "federation-" + f.ObjectMeta.Name
// CA Cert
View File
@ -60,7 +60,7 @@ func mutateNamespace(k8s federation_clientset.Interface, name string, fn func(s
return created, nil
} else {
glog.V(2).Infof("updating federation Namespace %s", name)
created, err := k8s.Core().Namespaces().Update(updated)
created, err := k8s.CoreV1().Namespaces().Update(updated)
if err != nil {
return nil, fmt.Errorf("error updating federation Namespace %s: %v", name, err)
}
View File
@ -21,26 +21,19 @@ import (
"k8s.io/client-go/kubernetes"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/kutil"
)
type KubernetesTarget struct {
//kubectlContext string
//keystore *k8sapi.KubernetesKeystore
KubernetesClient kubernetes.Interface
cluster *kopsapi.Cluster
}
func NewKubernetesTarget(clientset simple.Clientset, keystore fi.Keystore, cluster *kopsapi.Cluster) (*KubernetesTarget, error) {
b := &kutil.CreateKubecfg{
ContextName: cluster.ObjectMeta.Name,
KeyStore: keystore,
SecretStore: nil,
KubeMasterIP: cluster.Spec.MasterPublicName,
}
kubeconfig, err := b.ExtractKubeconfig()
func NewKubernetesTarget(clientset simple.Clientset, keyStore fi.Keystore, cluster *kopsapi.Cluster) (*KubernetesTarget, error) {
var secretStore fi.SecretStore
kubeconfig, err := kubeconfig.BuildKubecfg(cluster, keyStore, secretStore)
if err != nil {
return nil, fmt.Errorf("error building credentials for cluster %q: %v", cluster.ObjectMeta.Name, err)
}
View File
@ -2,11 +2,13 @@ k8s.io/kops
k8s.io/kops/channels/cmd/channels
k8s.io/kops/channels/pkg/api
k8s.io/kops/channels/pkg/channels
k8s.io/kops/channels/pkg/cmd
k8s.io/kops/cloudmock/aws/mockautoscaling
k8s.io/kops/cloudmock/aws/mockec2
k8s.io/kops/cloudmock/aws/mockroute53
k8s.io/kops/cmd/kops
k8s.io/kops/cmd/kops/util
k8s.io/kops/cmd/kops-server
k8s.io/kops/cmd/nodeup
k8s.io/kops/dns-controller/cmd/dns-controller
k8s.io/kops/dns-controller/pkg/dns
@ -28,6 +30,10 @@ k8s.io/kops/pkg/apis/kops/util
k8s.io/kops/pkg/apis/kops/v1alpha1
k8s.io/kops/pkg/apis/kops/v1alpha2
k8s.io/kops/pkg/apis/kops/validation
k8s.io/kops/pkg/apis/nodeup
k8s.io/kops/pkg/apiserver
k8s.io/kops/pkg/apiserver/cmd/server
k8s.io/kops/pkg/apiserver/registry/cluster
k8s.io/kops/pkg/client/simple
k8s.io/kops/pkg/client/simple/vfsclientset
k8s.io/kops/pkg/diff
@ -41,6 +47,7 @@ k8s.io/kops/pkg/model/components
k8s.io/kops/pkg/model/gcemodel
k8s.io/kops/pkg/model/iam
k8s.io/kops/pkg/model/resources
k8s.io/kops/pkg/resources
k8s.io/kops/pkg/systemd
k8s.io/kops/pkg/util/stringorslice
k8s.io/kops/pkg/validation
61
hack/deps.py Executable file
View File
@ -0,0 +1,61 @@
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This python script helps sync godeps from the k8s repos into our git submodules
# It generates bash commands where changes are needed
# We can probably also use it for deps when the time comes!
import json
import sys
import subprocess
from pprint import pprint
from os.path import expanduser, join
kops_dir = expanduser('~/k8s/src/k8s.io/kops')
k8s_dir = expanduser('~/k8s/src/k8s.io/kubernetes')
with open(join(k8s_dir, 'Godeps/Godeps.json')) as data_file:
    godeps = json.load(data_file)

#pprint(godeps)

godep_map = {}
for godep in godeps['Deps']:
    #print("%s %s" % (godep['ImportPath'], godep['Rev']))
    godep_map[godep['ImportPath']] = godep['Rev']

process = subprocess.Popen(['git', 'submodule', 'status'], stdout=subprocess.PIPE, cwd=kops_dir)
submodule_status, err = process.communicate()

for submodule_line in submodule_status.splitlines():
    tokens = submodule_line.split()
    dep = tokens[1]
    dep = dep.replace('_vendor/', '')
    sha = tokens[0]
    sha = sha.replace('+', '')

    godep_sha = godep_map.get(dep)
    if not godep_sha:
        for k in godep_map:
            if k.startswith(dep):
                godep_sha = godep_map[k]
                break

    if godep_sha:
        if godep_sha != sha:
            print("# update needed: %s vs %s" % (godep_sha, sha))
            print("pushd _vendor/{dep}; git fetch; git checkout {sha}; popd".format(dep=dep, sha=godep_sha))
    else:
        print("# UNKNOWN dep %s" % dep)
View File
@ -76,6 +76,7 @@ NODE_ZONES=${NODE_ZONES:-"us-west-2a,us-west-2b,us-west-2c"}
NODE_SIZE=${NODE_SIZE:-m4.xlarge}
MASTER_ZONES=${MASTER_ZONES:-"us-west-2a,us-west-2b,us-west-2c"}
MASTER_SIZE=${MASTER_SIZE:-m4.large}
KOPS_CREATE=${KOPS_CREATE:-yes}
# NETWORK
@ -90,15 +91,18 @@ cd $KOPS_DIRECTORY/..
GIT_VER=git-$(git describe --always)
[ -z "$GIT_VER" ] && echo "we do not have GIT_VER something is very wrong" && exit 1;
echo ==========
echo "Starting build"
make ci && S3_BUCKET=s3://${NODEUP_BUCKET} make upload
export CI=1
make && make test && S3_BUCKET=s3://${NODEUP_BUCKET} make upload
KOPS_CHANNEL=$(kops version | awk '{ print $2 }')
KOPS_CHANNEL=$(kops version | awk '{ print $2 }' |sed 's/\+/%2B/')
KOPS_BASE_URL="http://${NODEUP_BUCKET}.s3.amazonaws.com/kops/${KOPS_CHANNEL}/"
echo "KOPS_BASE_URL=${KOPS_BASE_URL}"
echo "NODEUP_URL=${KOPS_BASE_URL}linux/amd64/nodeup"
echo ==========
echo "Deleting cluster ${CLUSTER_NAME}. Elle est finie."
@ -111,25 +115,21 @@ kops delete cluster \
echo ==========
echo "Creating cluster ${CLUSTER_NAME}"
NODEUP_URL=${KOPS_BASE_URL}linux/amd64/nodeup \
KOPS_BASE_URL=${KOPS_BASE_URL} \
kops create cluster \
--name $CLUSTER_NAME \
--state $KOPS_STATE_STORE \
--node-count $NODE_COUNT \
--zones $NODE_ZONES \
--master-zones $MASTER_ZONES \
--cloud aws \
--node-size $NODE_SIZE \
--master-size $MASTER_SIZE \
-v $VERBOSITY \
--image $IMAGE \
--kubernetes-version "1.5.2" \
--topology $TOPOLOGY \
--networking $NETWORKING \
--bastion="true" \
--yes
kops_command="NODEUP_URL=${KOPS_BASE_URL}linux/amd64/nodeup KOPS_BASE_URL=${KOPS_BASE_URL} kops create cluster --name $CLUSTER_NAME --state $KOPS_STATE_STORE --node-count $NODE_COUNT --zones $NODE_ZONES --master-zones $MASTER_ZONES --node-size $NODE_SIZE --master-size $MASTER_SIZE -v $VERBOSITY --image $IMAGE --channel alpha --topology $TOPOLOGY --networking $NETWORKING"
if [[ $TOPOLOGY == "private" ]]; then
kops_command+=" --bastion='true'"
fi
if [ -n "${KOPS_FEATURE_FLAGS+x}" ]; then
kops_command=KOPS_FEATURE_FLAGS="${KOPS_FEATURE_FLAGS}" $kops_command
fi
if [[ $KOPS_CREATE == "yes" ]]; then
kops_command="$kops_command --yes"
fi
eval $kops_command
echo ==========
echo "Your k8s cluster ${CLUSTER_NAME}, awaits your bidding."
View File
@ -16,7 +16,7 @@
. $(dirname "${BASH_SOURCE}")/common.sh
BAD_HEADERS=$(${KUBE_ROOT}/hack/verify-boilerplate.sh | awk '{ print $6}')
BAD_HEADERS=$((${KUBE_ROOT}/hack/verify-boilerplate.sh || true) | awk '{ print $6}')
FORMATS="sh go Makefile Dockerfile"
for i in ${FORMATS}
View File
@ -0,0 +1,23 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
all: image

.PHONY: image push

image:
	docker build -t kopeio/nvidia-bootstrap:1.6.0 -f image/Dockerfile image/

push: image
	docker push kopeio/nvidia-bootstrap:1.6.0
View File
@ -0,0 +1,7 @@
## NVIDIA Driver Installation
Using this hook indicates that you agree to the [license](http://www.nvidia.com/content/DriverDownload-March2009/licence.php?lang=us).
This is an experimental hook for installing the nvidia drivers as part of the kops boot process.
Please see the [GPU docs](/docs/gpu.md) for more details on how to use this.
Some files were not shown because too many files have changed in this diff