mirror of https://github.com/kubernetes/kops.git

Merge branch 'master' into surge
commit 9f9b98e9f4

Makefile (67 lines changed)
Makefile

@@ -13,6 +13,8 @@
 # limitations under the License.
 
+# kops source root directory (without trailing /)
+KOPS_ROOT?=$(patsubst %/,%,$(abspath $(dir $(lastword $(MAKEFILE_LIST)))))
 DOCKER_REGISTRY?=gcr.io/must-override
 S3_BUCKET?=s3://must-override/
 UPLOAD_DEST?=$(S3_BUCKET)

@@ -21,8 +23,8 @@ GCS_URL=$(GCS_LOCATION:gs://%=https://storage.googleapis.com/%)
 LATEST_FILE?=latest-ci.txt
 GOPATH_1ST:=$(shell go env | grep GOPATH | cut -f 2 -d \")
 UNIQUE:=$(shell date +%s)
-GOVERSION=1.13.4
-BUILD=$(GOPATH_1ST)/src/k8s.io/kops/.build
+GOVERSION=1.13.8
+BUILD=$(KOPS_ROOT)/.build
 LOCAL=$(BUILD)/local
 BINDATA_TARGETS=upup/models/bindata.go
 ARTIFACTS=$(BUILD)/artifacts

@@ -33,7 +35,7 @@ CHANNELS=$(LOCAL)/channels
 NODEUP=$(LOCAL)/nodeup
 PROTOKUBE=$(LOCAL)/protokube
 UPLOAD=$(BUILD)/upload
-BAZELBUILD=$(GOPATH_1ST)/src/k8s.io/kops/.bazelbuild
+BAZELBUILD=$(KOPS_ROOT)/.bazelbuild
 BAZELDIST=$(BAZELBUILD)/dist
 BAZELIMAGES=$(BAZELDIST)/images
 BAZELUPLOAD=$(BAZELBUILD)/upload

@@ -44,11 +46,7 @@ BAZEL_CONFIG?=
 API_OPTIONS?=
 GCFLAGS?=
 
-# See http://stackoverflow.com/questions/18136918/how-to-get-current-relative-directory-of-your-makefile
-MAKEDIR:=$(strip $(shell dirname "$(realpath $(lastword $(MAKEFILE_LIST)))"))
-
-UPLOAD_CMD=$(MAKEDIR)/hack/upload
+UPLOAD_CMD=$(KOPS_ROOT)/hack/upload
 
 # Unexport environment variables that can affect tests and are not used in builds
 unexport AWS_ACCESS_KEY_ID AWS_REGION AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN CNI_VERSION_URL DNS_IGNORE_NS_CHECK DNSCONTROLLER_IMAGE DO_ACCESS_TOKEN GOOGLE_APPLICATION_CREDENTIALS

@@ -56,9 +54,9 @@ unexport KOPS_BASE_URL KOPS_CLUSTER_NAME KOPS_RUN_OBSOLETE_VERSION KOPS_STATE_ST
 unexport SKIP_REGION_CHECK S3_ACCESS_KEY_ID S3_ENDPOINT S3_REGION S3_SECRET_ACCESS_KEY VSPHERE_USERNAME VSPHERE_PASSWORD
 
 # Keep in sync with upup/models/cloudup/resources/addons/dns-controller/
-DNS_CONTROLLER_TAG=1.17.0-alpha.1
+DNS_CONTROLLER_TAG=1.18.0-alpha.2
 # Keep in sync with upup/models/cloudup/resources/addons/kops-controller.addons.k8s.io/
-KOPS_CONTROLLER_TAG=1.17.0-alpha.1
+KOPS_CONTROLLER_TAG=1.18.0-alpha.2
 
 # Keep in sync with logic in get_workspace_status
 # TODO: just invoke tools/get_workspace_status.sh?

@@ -68,10 +66,7 @@ KOPS_CI_VERSION:=$(shell grep 'KOPS_CI_VERSION\s*=' version.go | awk '{print $$3
 # kops local location
 KOPS = ${LOCAL}/kops
 
-# kops source root directory (without trailing /)
-KOPS_ROOT ?= $(patsubst %/,%,$(abspath $(dir $(firstword $(MAKEFILE_LIST)))))
-
-GITSHA := $(shell cd ${GOPATH_1ST}/src/k8s.io/kops; git describe --always)
+GITSHA := $(shell cd ${KOPS_ROOT}; git describe --always)
 
 # Keep in sync with logic in get_workspace_status
 ifndef VERSION

@@ -95,7 +90,6 @@ endif
 # + is valid in semver, but not in docker tags. Fixup CI versions.
 # Note that this mirrors the logic in DefaultProtokubeImageName
 PROTOKUBE_TAG := $(subst +,-,${VERSION})
 KOPS_SERVER_TAG := $(subst +,-,${VERSION})
-
 # Go exports:
 LDFLAGS := -ldflags=all=

@@ -170,7 +164,7 @@ ${KOPS}: ${BINDATA_TARGETS}
 
 ${GOBINDATA}:
     mkdir -p ${LOCAL}
-    go build ${GCFLAGS} ${EXTRA_BUILDFLAGS} ${LDFLAGS}"${EXTRA_LDFLAGS}" -o $@ k8s.io/kops/vendor/github.com/jteeuwen/go-bindata/go-bindata
+    go build ${GCFLAGS} ${EXTRA_BUILDFLAGS} ${LDFLAGS}"${EXTRA_LDFLAGS}" -o $@ k8s.io/kops/vendor/github.com/go-bindata/go-bindata/go-bindata
 
 .PHONY: gobindata-tool
 gobindata-tool: ${GOBINDATA}

@@ -180,21 +174,17 @@ kops-gobindata: gobindata-tool ${BINDATA_TARGETS}
 
 UPUP_MODELS_BINDATA_SOURCES:=$(shell find upup/models/ | egrep -v "upup/models/bindata.go")
 upup/models/bindata.go: ${GOBINDATA} ${UPUP_MODELS_BINDATA_SOURCES}
-    cd ${GOPATH_1ST}/src/k8s.io/kops; ${GOBINDATA} -o $@ -pkg models -ignore="\\.DS_Store" -ignore="bindata\\.go" -ignore="vfs\\.go" -prefix upup/models/ upup/models/... && GO111MODULE=on go run golang.org/x/tools/cmd/goimports -w -v upup/models/bindata.go
+    cd ${KOPS_ROOT}; ${GOBINDATA} -o $@ -pkg models -nometadata -ignore="\\.DS_Store" -ignore="bindata\\.go" -ignore="vfs\\.go" -prefix upup/models/ upup/models/... && GO111MODULE=on go run golang.org/x/tools/cmd/goimports -w -v upup/models/bindata.go
 
 # Build in a docker container with golang 1.X
 # Used to test we have not broken 1.X
-.PHONY: check-builds-in-go111
-check-builds-in-go111:
-    docker run -e GO111MODULE=on -e EXTRA_BUILDFLAGS=-mod=vendor -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.11 make -C /go/src/k8s.io/kops all
-
 .PHONY: check-builds-in-go112
 check-builds-in-go112:
-    docker run -e GO111MODULE=on -e EXTRA_BUILDFLAGS=-mod=vendor -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.12 make -C /go/src/k8s.io/kops all
+    docker run -e GO111MODULE=on -e EXTRA_BUILDFLAGS=-mod=vendor -v ${KOPS_ROOT}:/go/src/k8s.io/kops golang:1.12 make -C /go/src/k8s.io/kops all
 
 .PHONY: check-builds-in-go113
 check-builds-in-go113:
-    docker run -e EXTRA_BUILDFLAGS=-mod=vendor -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.13 make -C /go/src/k8s.io/kops all
+    docker run -e EXTRA_BUILDFLAGS=-mod=vendor -v ${KOPS_ROOT}:/go/src/k8s.io/kops golang:1.13 make -C /go/src/k8s.io/kops all
 
 .PHONY: codegen
 codegen: kops-gobindata

@@ -231,7 +221,7 @@ crossbuild-nodeup: ${DIST}/linux/amd64/nodeup
 .PHONY: crossbuild-nodeup-in-docker
 crossbuild-nodeup-in-docker:
     docker pull golang:${GOVERSION} # Keep golang image up to date
-    docker run --name=nodeup-build-${UNIQUE} -e STATIC_BUILD=yes -e VERSION=${VERSION} -v ${MAKEDIR}:/go/src/k8s.io/kops golang:${GOVERSION} make -C /go/src/k8s.io/kops/ crossbuild-nodeup
+    docker run --name=nodeup-build-${UNIQUE} -e STATIC_BUILD=yes -e VERSION=${VERSION} -v ${KOPS_ROOT}:/go/src/k8s.io/kops golang:${GOVERSION} make -C /go/src/k8s.io/kops/ crossbuild-nodeup
     docker start nodeup-build-${UNIQUE}
     docker exec nodeup-build-${UNIQUE} chown -R ${UID}:${GID} /go/src/k8s.io/kops/.build
     docker cp nodeup-build-${UNIQUE}:/go/src/k8s.io/kops/.build .

@@ -260,7 +250,7 @@ crossbuild: ${DIST}/windows/amd64/kops.exe ${DIST}/darwin/amd64/kops ${DIST}/lin
 .PHONY: crossbuild-in-docker
 crossbuild-in-docker:
     docker pull golang:${GOVERSION} # Keep golang image up to date
-    docker run --name=kops-build-${UNIQUE} -e STATIC_BUILD=yes -e VERSION=${VERSION} -v ${MAKEDIR}:/go/src/k8s.io/kops golang:${GOVERSION} make -C /go/src/k8s.io/kops/ crossbuild
+    docker run --name=kops-build-${UNIQUE} -e STATIC_BUILD=yes -e VERSION=${VERSION} -v ${KOPS_ROOT}:/go/src/k8s.io/kops golang:${GOVERSION} make -C /go/src/k8s.io/kops/ crossbuild
     docker start kops-build-${UNIQUE}
     docker exec kops-build-${UNIQUE} chown -R ${UID}:${GID} /go/src/k8s.io/kops/.build
     docker cp kops-build-${UNIQUE}:/go/src/k8s.io/kops/.build .

@@ -405,7 +395,7 @@ protokube-image: protokube-build-in-docker
 .PHONY: protokube-export
 protokube-export: protokube-image
     docker save protokube:${PROTOKUBE_TAG} > ${IMAGES}/protokube.tar
-    gzip --force --best ${IMAGES}/protokube.tar
+    gzip --no-name --force --best ${IMAGES}/protokube.tar
     tools/sha1 ${IMAGES}/protokube.tar.gz ${IMAGES}/protokube.tar.gz.sha1
     tools/sha256 ${IMAGES}/protokube.tar.gz ${IMAGES}/protokube.tar.gz.sha256

@@ -427,7 +417,7 @@ ${NODEUP}: ${BINDATA_TARGETS}
 nodeup-dist:
     mkdir -p ${DIST}
     docker pull golang:${GOVERSION} # Keep golang image up to date
-    docker run --name=nodeup-build-${UNIQUE} -e STATIC_BUILD=yes -e VERSION=${VERSION} -v ${MAKEDIR}:/go/src/k8s.io/kops golang:${GOVERSION} make -C /go/src/k8s.io/kops/ nodeup
+    docker run --name=nodeup-build-${UNIQUE} -e STATIC_BUILD=yes -e VERSION=${VERSION} -v ${KOPS_ROOT}:/go/src/k8s.io/kops golang:${GOVERSION} make -C /go/src/k8s.io/kops/ nodeup
     docker start nodeup-build-${UNIQUE}
     docker exec nodeup-build-${UNIQUE} chown -R ${UID}:${GID} /go/src/k8s.io/kops/.build
    docker cp nodeup-build-${UNIQUE}:/go/src/k8s.io/kops/.build/local/nodeup .build/dist/

@@ -482,7 +472,7 @@ gomod: gomod-prereqs
 
 .PHONY: gofmt
 gofmt:
-    find $(MAKEDIR) -name "*.go" | grep -v vendor | xargs bazel run //:gofmt -- -w -s
+    find $(KOPS_ROOT) -name "*.go" | grep -v vendor | xargs bazel run //:gofmt -- -w -s
 
 .PHONY: goimports
 goimports:

@@ -584,7 +574,6 @@ ${CHANNELS}:
 
 .PHONY: release-tag
 release-tag:
-    git tag ${KOPS_RELEASE_VERSION}
+    git tag v${KOPS_RELEASE_VERSION}
 
 .PHONY: release-github

@@ -738,20 +727,18 @@ bazel-protokube-export:
 .PHONY: bazel-kops-controller-export
 bazel-kops-controller-export:
     mkdir -p ${BAZELIMAGES}
-    DOCKER_REGISTRY="" DOCKER_IMAGE_PREFIX="kope/" KOPS_CONTROLLER_TAG=${KOPS_CONTROLLER_TAG} bazel build ${BAZEL_CONFIG} --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/kops-controller:image-bundle.tar
-    cp -fp bazel-bin/cmd/kops-controller/image-bundle.tar ${BAZELIMAGES}/kops-controller.tar
-    gzip --force --fast ${BAZELIMAGES}/kops-controller.tar
-    tools/sha1 ${BAZELIMAGES}/kops-controller.tar.gz ${BAZELIMAGES}/kops-controller.tar.gz.sha1
-    tools/sha256 ${BAZELIMAGES}/kops-controller.tar.gz ${BAZELIMAGES}/kops-controller.tar.gz.sha256
+    DOCKER_REGISTRY="" DOCKER_IMAGE_PREFIX="kope/" KOPS_CONTROLLER_TAG=${KOPS_CONTROLLER_TAG} bazel build ${BAZEL_CONFIG} --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/kops-controller:image-bundle.tar.gz //cmd/kops-controller:image-bundle.tar.gz.sha1 //cmd/kops-controller:image-bundle.tar.gz.sha256
+    cp -fp bazel-bin/cmd/kops-controller/image-bundle.tar.gz ${BAZELIMAGES}/kops-controller.tar.gz
+    cp -fp bazel-bin/cmd/kops-controller/image-bundle.tar.gz.sha1 ${BAZELIMAGES}/kops-controller.tar.gz.sha1
+    cp -fp bazel-bin/cmd/kops-controller/image-bundle.tar.gz.sha256 ${BAZELIMAGES}/kops-controller.tar.gz.sha256
 
 .PHONY: bazel-dns-controller-export
 bazel-dns-controller-export:
     mkdir -p ${BAZELIMAGES}
-    DOCKER_REGISTRY="" DOCKER_IMAGE_PREFIX="kope/" DNS_CONTROLLER_TAG=${DNS_CONTROLLER_TAG} bazel build ${BAZEL_CONFIG} --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //dns-controller/cmd/dns-controller:image-bundle.tar
-    cp -fp bazel-bin/dns-controller/cmd/dns-controller/image-bundle.tar ${BAZELIMAGES}/dns-controller.tar
-    gzip --force --fast ${BAZELIMAGES}/dns-controller.tar
-    tools/sha1 ${BAZELIMAGES}/dns-controller.tar.gz ${BAZELIMAGES}/dns-controller.tar.gz.sha1
-    tools/sha256 ${BAZELIMAGES}/dns-controller.tar.gz ${BAZELIMAGES}/dns-controller.tar.gz.sha256
+    DOCKER_REGISTRY="" DOCKER_IMAGE_PREFIX="kope/" DNS_CONTROLLER_TAG=${DNS_CONTROLLER_TAG} bazel build ${BAZEL_CONFIG} --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //dns-controller/cmd/dns-controller:image-bundle.tar.gz //dns-controller/cmd/dns-controller:image-bundle.tar.gz.sha1 //dns-controller/cmd/dns-controller:image-bundle.tar.gz.sha256
+    cp -fp bazel-bin/dns-controller/cmd/dns-controller/image-bundle.tar.gz ${BAZELIMAGES}/dns-controller.tar.gz
+    cp -fp bazel-bin/dns-controller/cmd/dns-controller/image-bundle.tar.gz.sha1 ${BAZELIMAGES}/dns-controller.tar.gz.sha1
+    cp -fp bazel-bin/dns-controller/cmd/dns-controller/image-bundle.tar.gz.sha256 ${BAZELIMAGES}/dns-controller.tar.gz.sha256
 
 .PHONY: bazel-version-dist
 bazel-version-dist: bazel-crossbuild-nodeup bazel-crossbuild-kops bazel-kops-controller-export bazel-dns-controller-export bazel-protokube-export bazel-utils-dist

@@ -812,7 +799,7 @@ build-docs:
 
 .PHONY: build-docs-netlify
 build-docs-netlify:
-    pip install -r ${MAKEDIR}/images/mkdocs/requirements.txt
+    pip install -r ${KOPS_ROOT}/images/mkdocs/requirements.txt
     mkdocs build
 
 # Update machine_types.go
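The new KOPS_ROOT definition replaces both GOPATH_1ST and MAKEDIR by deriving the source root from the Makefile's own location. A minimal sketch of the idiom (not from the kops tree, assuming GNU make):

```make
# Resolve the directory containing this Makefile, absolute and without a
# trailing slash, the same way KOPS_ROOT is computed above.
ROOT := $(patsubst %/,%,$(abspath $(dir $(lastword $(MAKEFILE_LIST)))))

.PHONY: show-root
show-root:
	@echo $(ROOT)
```

Because the root no longer depends on GOPATH, the docker-based build targets can bind-mount ${KOPS_ROOT} regardless of where the repository is checked out.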
@@ -2,7 +2,7 @@
 [](https://travis-ci.org/kubernetes/kops) [](https://goreportcard.com/report/k8s.io/kops) [![GoDoc Widget]][GoDoc]
 
-[GoDoc]: https://godoc.org/k8s.io/kops
+[GoDoc]: https://pkg.go.dev/k8s.io/kops
 [GoDoc Widget]: https://godoc.org/k8s.io/kops?status.svg
@@ -4,7 +4,7 @@
 [](https://travis-ci.org/kubernetes/kops) [](https://goreportcard.com/report/k8s.io/kops) [![GoDoc Widget]][GoDoc]
 
-[GoDoc]: https://godoc.org/k8s.io/kops
+[GoDoc]: https://pkg.go.dev/k8s.io/kops
 [GoDoc Widget]: https://godoc.org/k8s.io/kops?status.svg
@@ -7,10 +7,10 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
 http_archive(
     name = "io_bazel_rules_go",
     urls = [
-        "https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.20.2/rules_go-v0.20.2.tar.gz",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.20.2/rules_go-v0.20.2.tar.gz",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.20.7/rules_go-v0.20.7.tar.gz",
+        "https://github.com/bazelbuild/rules_go/releases/download/v0.20.7/rules_go-v0.20.7.tar.gz",
     ],
-    sha256 = "b9aa86ec08a292b97ec4591cf578e020b35f98e12173bbd4a921f84f583aebd9",
+    sha256 = "62bedd372f125fe62c16c0cc2ad9d7a2b6a1171d639933a5651a729fdce497fc",
 )
 
 http_archive(

@@ -27,7 +27,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()
 
 go_register_toolchains(
-    go_version = "1.13.4",
+    go_version = "1.13.8",
 )
 
 load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
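One way to sanity-check a pin bump like the rules_go update above is to recompute the digest locally before committing. A hypothetical check (not part of the repo):

```sh
# Download the pinned archive and compare its digest with the sha256 in WORKSPACE.
curl -sSL https://github.com/bazelbuild/rules_go/releases/download/v0.20.7/rules_go-v0.20.7.tar.gz | sha256sum
# expected: 62bedd372f125fe62c16c0cc2ad9d7a2b6a1171d639933a5651a729fdce497fc
```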
@@ -56,13 +56,13 @@ spec:
       kubenet: {}
   kubernetesVersions:
   - range: ">=1.17.0"
-    recommendedVersion: 1.17.2
+    recommendedVersion: 1.17.3
     requiredVersion: 1.17.0
   - range: ">=1.16.0"
-    recommendedVersion: 1.16.6
+    recommendedVersion: 1.16.7
     requiredVersion: 1.16.0
   - range: ">=1.15.0"
-    recommendedVersion: 1.15.9
+    recommendedVersion: 1.15.10
     requiredVersion: 1.15.0
   - range: ">=1.14.0"
     recommendedVersion: 1.14.10

@@ -83,15 +83,15 @@ spec:
   - range: ">=1.17.0-alpha.1"
     #recommendedVersion: "1.17.0"
     #requiredVersion: 1.17.0
-    kubernetesVersion: 1.17.2
+    kubernetesVersion: 1.17.3
   - range: ">=1.16.0-alpha.1"
     #recommendedVersion: "1.16.0"
     #requiredVersion: 1.16.0
-    kubernetesVersion: 1.16.6
+    kubernetesVersion: 1.16.7
   - range: ">=1.15.0-alpha.1"
     #recommendedVersion: "1.15.0"
     #requiredVersion: 1.15.0
-    kubernetesVersion: 1.15.9
+    kubernetesVersion: 1.15.10
   - range: ">=1.14.0-alpha.1"
     #recommendedVersion: "1.14.0"
     #requiredVersion: 1.14.0
@@ -110,13 +110,13 @@ func (a *Addon) GetRequiredUpdates(k8sClient kubernetes.Interface) (*AddonUpdate
 
 func (a *Addon) GetManifestFullUrl() (*url.URL, error) {
     if a.Spec.Manifest == nil || *a.Spec.Manifest == "" {
-        return nil, field.Required(field.NewPath("Spec", "Manifest"), "")
+        return nil, field.Required(field.NewPath("spec", "manifest"), "")
     }
 
     manifest := *a.Spec.Manifest
     manifestURL, err := url.Parse(manifest)
     if err != nil {
-        return nil, field.Invalid(field.NewPath("Spec", "Manifest"), manifest, "Not a valid URL")
+        return nil, field.Invalid(field.NewPath("spec", "manifest"), manifest, "Not a valid URL")
     }
     if !manifestURL.IsAbs() {
         manifestURL = a.ChannelLocation.ResolveReference(manifestURL)
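The change above standardizes validation error paths on the lower-case JSON field names rather than the Go struct field names. A small self-contained sketch (not from the kops tree) of how `field.Required` renders such a path:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// With the lower-case path elements used above, the message reads
	// "spec.manifest: Required value" instead of "Spec.Manifest: Required value".
	err := field.Required(field.NewPath("spec", "manifest"), "")
	fmt.Println(err.Error())
}
```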
@@ -23,36 +23,46 @@ spec:
     providerID: aws
     kubernetesVersion: ">=1.10.0 <1.11.0"
   # Stretch is the default for 1.11 (for nvme)
-  - name: kope.io/k8s-1.11-debian-stretch-amd64-hvm-ebs-2019-09-26
+  - name: kope.io/k8s-1.11-debian-stretch-amd64-hvm-ebs-2020-01-17
     providerID: aws
     kubernetesVersion: ">=1.11.0 <1.12.0"
-  - name: kope.io/k8s-1.12-debian-stretch-amd64-hvm-ebs-2019-09-26
+  - name: kope.io/k8s-1.12-debian-stretch-amd64-hvm-ebs-2020-01-17
     providerID: aws
     kubernetesVersion: ">=1.12.0 <1.13.0"
-  - name: kope.io/k8s-1.13-debian-stretch-amd64-hvm-ebs-2019-09-26
+  - name: kope.io/k8s-1.13-debian-stretch-amd64-hvm-ebs-2020-01-17
     providerID: aws
     kubernetesVersion: ">=1.13.0 <1.14.0"
-  - name: kope.io/k8s-1.14-debian-stretch-amd64-hvm-ebs-2019-09-26
+  - name: kope.io/k8s-1.14-debian-stretch-amd64-hvm-ebs-2020-01-17
     providerID: aws
     kubernetesVersion: ">=1.14.0 <1.15.0"
-  - name: kope.io/k8s-1.15-debian-stretch-amd64-hvm-ebs-2019-09-26
+  - name: kope.io/k8s-1.15-debian-stretch-amd64-hvm-ebs-2020-01-17
     providerID: aws
-    kubernetesVersion: ">=1.15.0"
+    kubernetesVersion: ">=1.15.0 <1.16.0"
+  - name: kope.io/k8s-1.16-debian-stretch-amd64-hvm-ebs-2020-01-17
+    providerID: aws
+    kubernetesVersion: ">=1.16.0 <1.17.0"
+  - name: kope.io/k8s-1.17-debian-stretch-amd64-hvm-ebs-2020-01-17
+    providerID: aws
+    kubernetesVersion: ">=1.17.0"
   - providerID: gce
+    kubernetesVersion: "<1.16.0-alpha.1"
     name: "cos-cloud/cos-stable-65-10323-99-0"
+  - providerID: gce
+    kubernetesVersion: ">=1.16.0-alpha.1"
+    name: "cos-cloud/cos-stable-77-12371-114-0"
   cluster:
     kubernetesVersion: v1.5.8
     networking:
       kubenet: {}
   kubernetesVersions:
   - range: ">=1.17.0"
-    recommendedVersion: 1.17.0
+    recommendedVersion: 1.17.2
     requiredVersion: 1.17.0
   - range: ">=1.16.0"
-    recommendedVersion: 1.16.4
+    recommendedVersion: 1.16.6
     requiredVersion: 1.16.0
   - range: ">=1.15.0"
-    recommendedVersion: 1.15.7
+    recommendedVersion: 1.15.9
     requiredVersion: 1.15.0
   - range: ">=1.14.0"
     recommendedVersion: 1.14.10

@@ -73,15 +83,15 @@ spec:
   - range: ">=1.17.0-alpha.1"
     #recommendedVersion: "1.17.0"
     #requiredVersion: 1.17.0
-    kubernetesVersion: 1.17.0
+    kubernetesVersion: 1.17.2
   - range: ">=1.16.0-alpha.1"
     #recommendedVersion: "1.16.0"
     #requiredVersion: 1.16.0
-    kubernetesVersion: 1.16.4
+    kubernetesVersion: 1.16.6
   - range: ">=1.15.0-alpha.1"
     #recommendedVersion: "1.15.0"
     #requiredVersion: 1.15.0
-    kubernetesVersion: 1.15.7
+    kubernetesVersion: 1.15.9
   - range: ">=1.14.0-alpha.1"
     #recommendedVersion: "1.14.0"
     #requiredVersion: 1.14.0
@@ -2,6 +2,7 @@
 timeout: 1200s
 options:
   substitution_option: ALLOW_LOOSE
+  machineType: 'N1_HIGHCPU_8'
 steps:
 # Start by just pushing the image
 - name: 'gcr.io/k8s-testimages/bazelbuild:v20190916-ec59af8-0.29.1'

@@ -13,6 +14,16 @@ steps:
   - DOCKER_IMAGE_PREFIX=$_DOCKER_IMAGE_PREFIX
   args:
   - kops-controller-push
+- name: 'gcr.io/k8s-testimages/bazelbuild:v20190916-ec59af8-0.29.1'
+  entrypoint: make
+  env:
+  - VERSION=$_GIT_TAG
+  - PULL_BASE_REF=$_PULL_BASE_REF
+  - DOCKER_REGISTRY=$_DOCKER_REGISTRY
+  - DOCKER_IMAGE_PREFIX=$_DOCKER_IMAGE_PREFIX
+  - GCS_LOCATION=$_GCS_LOCATION
+  args:
+  - gcs-upload
 substitutions:
 # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
 # can be used as a substitution

@@ -20,3 +31,4 @@ substitutions:
   _PULL_BASE_REF: 'dev'
   _DOCKER_REGISTRY: 'gcr.io'
   _DOCKER_IMAGE_PREFIX: 'k8s-staging-kops/'
+  _GCS_LOCATION: 'gs://k8s-staging-kops/kops/releases/'
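The new step reuses the same bazelbuild image to run `make gcs-upload` after the push, with the upload destination supplied through the `_GCS_LOCATION` substitution. A hypothetical manual invocation of this config (substitution values are placeholders):

```sh
gcloud builds submit --config=cloudbuild.yaml \
  --substitutions=_GIT_TAG=v20200220-abcdef0,_PULL_BASE_REF=master \
  .
```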
@@ -33,6 +33,7 @@ type MockIAM struct {
     InstanceProfiles map[string]*iam.InstanceProfile
     Roles            map[string]*iam.Role
     RolePolicies     []*rolePolicy
+    AttachedPolicies map[string][]*iam.AttachedPolicy
 }
 
 var _ iamiface.IAMAPI = &MockIAM{}
@@ -141,9 +141,47 @@ func (m *MockIAM) DeleteRole(request *iam.DeleteRoleInput) (*iam.DeleteRoleOutpu
 
     return &iam.DeleteRoleOutput{}, nil
 }
 
 func (m *MockIAM) DeleteRoleWithContext(aws.Context, *iam.DeleteRoleInput, ...request.Option) (*iam.DeleteRoleOutput, error) {
     panic("Not implemented")
 }
 
 func (m *MockIAM) DeleteRoleRequest(*iam.DeleteRoleInput) (*request.Request, *iam.DeleteRoleOutput) {
     panic("Not implemented")
 }
 
+func (m *MockIAM) ListAttachedRolePolicies(input *iam.ListAttachedRolePoliciesInput) (*iam.ListAttachedRolePoliciesOutput, error) {
+    m.mutex.Lock()
+    defer m.mutex.Unlock()
+
+    klog.Infof("ListAttachedRolePolicies: %s", aws.StringValue(input.RoleName))
+
+    for _, r := range m.Roles {
+        if r.RoleName == input.RoleName {
+            role := aws.StringValue(r.RoleName)
+
+            return &iam.ListAttachedRolePoliciesOutput{
+                AttachedPolicies: m.AttachedPolicies[role],
+            }, nil
+        }
+    }
+
+    return &iam.ListAttachedRolePoliciesOutput{}, nil
+}
+
+func (m *MockIAM) ListAttachedRolePoliciesPages(input *iam.ListAttachedRolePoliciesInput, pager func(*iam.ListAttachedRolePoliciesOutput, bool) bool) error {
+    m.mutex.Lock()
+    defer m.mutex.Unlock()
+
+    klog.Infof("ListAttachedRolePolicies: %s", aws.StringValue(input.RoleName))
+
+    role := aws.StringValue(input.RoleName)
+
+    if pager(&iam.ListAttachedRolePoliciesOutput{
+        AttachedPolicies: m.AttachedPolicies[role],
+    }, true) {
+        return nil
+    }
+
+    return nil
+}
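A sketch of how a test might exercise the new mock methods. The import path is assumed to be `k8s.io/kops/cloudmock/aws/mockiam`; adjust it if the package lives elsewhere.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"

	"k8s.io/kops/cloudmock/aws/mockiam"
)

// listAttached returns the policies recorded for a role in the mock,
// using the paged API added above.
func listAttached(roleName string) ([]*iam.AttachedPolicy, error) {
	c := &mockiam.MockIAM{
		AttachedPolicies: map[string][]*iam.AttachedPolicy{
			roleName: {
				{PolicyArn: aws.String("arn:aws:iam::123456789012:policy/test-policy")},
			},
		},
	}

	var policies []*iam.AttachedPolicy
	err := c.ListAttachedRolePoliciesPages(
		&iam.ListAttachedRolePoliciesInput{RoleName: aws.String(roleName)},
		func(page *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool {
			policies = append(policies, page.AttachedPolicies...)
			return true
		},
	)
	return policies, err
}
```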
@@ -41,7 +41,7 @@ container_image(
     name = "image",
     base = "@distroless_base//image",
     cmd = ["/usr/bin/kops-controller"],
-    user = "1000",
+    user = "10001",
     directory = "/usr/bin/",
     files = [
         "//cmd/kops-controller",

@@ -63,3 +63,17 @@ container_bundle(
         "{STABLE_DOCKER_IMAGE_PREFIX}kops-controller:{STABLE_KOPS_CONTROLLER_TAG}": "image",
     },
 )
+
+load("//tools:gzip.bzl", "gzip")
+
+gzip(
+    name = "image-bundle.tar.gz",
+    src = "image-bundle.tar",
+)
+
+load("//tools:hashes.bzl", "hashes")
+
+hashes(
+    name = "image-bundle.tar.gz.hashes",
+    src = "image-bundle.tar.gz",
+)
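With the gzip and hashes rules in place, the compressed bundle and its checksum files can be built directly as Bazel targets; the Makefile changes earlier in this commit invoke exactly these target names:

```sh
bazel build //cmd/kops-controller:image-bundle.tar.gz \
  //cmd/kops-controller:image-bundle.tar.gz.sha1 \
  //cmd/kops-controller:image-bundle.tar.gz.sha256
```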
@@ -1033,7 +1033,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
         MajorVersion: "v3",
     }
     // Validate to check if etcd clusters have an acceptable version
-    if errList := validation.ValidateEtcdVersionForCalicoV3(cluster.Spec.EtcdClusters[0], cluster.Spec.Networking.Calico.MajorVersion, field.NewPath("Calico")); len(errList) != 0 {
+    if errList := validation.ValidateEtcdVersionForCalicoV3(cluster.Spec.EtcdClusters[0], cluster.Spec.Networking.Calico.MajorVersion, field.NewPath("spec", "networking", "calico")); len(errList) != 0 {
 
     // This is not a special version but simply of the 3 series
     for _, etcd := range cluster.Spec.EtcdClusters {
@@ -221,7 +221,7 @@ func RunCreateInstanceGroup(f *util.Factory, cmd *cobra.Command, args []string,
         return fmt.Errorf("unexpected object type: %T", obj)
     }
 
-    err = validation.ValidateInstanceGroup(group)
+    err = validation.ValidateInstanceGroup(group).ToAggregate()
     if err != nil {
         return err
     }
@@ -152,7 +152,7 @@ func RunEditInstanceGroup(f *util.Factory, cmd *cobra.Command, args []string, ou
         return fmt.Errorf("object was not of expected type: %T", newObj)
     }
 
-    err = validation.ValidateInstanceGroup(newGroup)
+    err = validation.ValidateInstanceGroup(newGroup).ToAggregate()
     if err != nil {
         return err
     }

@@ -175,7 +175,7 @@ func RunEditInstanceGroup(f *util.Factory, cmd *cobra.Command, args []string, ou
         return err
     }
 
-    err = validation.CrossValidateInstanceGroup(fullGroup, fullCluster, true)
+    err = validation.CrossValidateInstanceGroup(fullGroup, fullCluster, true).ToAggregate()
     if err != nil {
         return err
     }
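These call sites suggest that `ValidateInstanceGroup` and `CrossValidateInstanceGroup` now return a `field.ErrorList` (or a similar list type) rather than a plain `error`, so callers collapse the list with `ToAggregate()`, which yields nil when the list is empty. A minimal illustration of that behavior (not from the kops tree):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	var errs field.ErrorList
	fmt.Println(errs.ToAggregate() == nil) // true: an empty list aggregates to nil

	errs = append(errs, field.Required(field.NewPath("spec", "role"), ""))
	fmt.Println(errs.ToAggregate()) // spec.role: Required value
}
```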
@@ -82,6 +82,11 @@ func TestComplex(t *testing.T) {
     runTestCloudformation(t, "complex.example.com", "complex", "v1alpha2", false, nil, true)
 }
 
+// TestExternalPolicies tests external policies output
+func TestExternalPolicies(t *testing.T) {
+    runTestAWS(t, "externalpolicies.example.com", "externalpolicies", "v1alpha2", false, 1, true, false, nil, true, false)
+}
+
 func TestNoSSHKey(t *testing.T) {
     runTestAWS(t, "nosshkey.example.com", "nosshkey", "v1alpha2", false, 1, true, false, nil, false, false)
 }
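Assuming the integration tests live in the cmd/kops package (as the surrounding test names suggest), the new case can be run on its own with:

```sh
go test ./cmd/kops/ -run TestExternalPolicies -v
```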
@@ -285,7 +285,7 @@ func (c *RootCmd) Cluster() (*kopsapi.Cluster, error) {
 
 func GetCluster(factory Factory, clusterName string) (*kopsapi.Cluster, error) {
     if clusterName == "" {
-        return nil, field.Required(field.NewPath("ClusterName"), "Cluster name is required")
+        return nil, field.Required(field.NewPath("clusterName"), "Cluster name is required")
     }
 
     clientset, err := factory.Clientset()
@@ -111,7 +111,14 @@ func runToolBoxTemplate(f *util.Factory, out io.Writer, options *toolboxTemplate
     if err != nil {
         return err
     }
-    context["clusterName"] = options.clusterName
+
+    // @step: set clusterName from template's values or cli flag
+    value, ok := context["clusterName"].(string)
+    if ok {
+        options.clusterName = value
+    } else {
+        context["clusterName"] = options.clusterName
+    }
 
     // @check if we are just rendering the config value
     if options.configValue != "" {
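With this change, `kops toolbox template` can take the cluster name from the template values instead of requiring it on the command line; when the values define it, that value wins and is copied back into the options. A hypothetical values file (passed via the command's values flag) illustrating the key the code reads:

```yaml
# Hypothetical values file: the clusterName key is what the code above reads
# back into options.clusterName.
clusterName: demo.k8s.example.com
```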
@@ -47,7 +47,7 @@ container_image(
     name = "image",
     base = "@distroless_base//image",
     cmd = ["/usr/bin/dns-controller"],
-    user = "1000",
+    user = "10001",
     directory = "/usr/bin/",
     files = [
         "dns-controller",

@@ -69,3 +69,17 @@ container_bundle(
         "{STABLE_DOCKER_IMAGE_PREFIX}dns-controller:{STABLE_DNS_CONTROLLER_TAG}": "image",
     },
 )
+
+load("//tools:gzip.bzl", "gzip")
+
+gzip(
+    name = "image-bundle.tar.gz",
+    src = "image-bundle.tar",
+)
+
+load("//tools:hashes.bzl", "hashes")
+
+hashes(
+    name = "image-bundle.tar.gz.hashes",
+    src = "image-bundle.tar.gz",
+)
@@ -17,7 +17,7 @@ limitations under the License.
 package internal
 
 // Implementation of internal/interfaces/* on top of Google Cloud DNS API.
-// See https://godoc.org/google.golang.org/api/dns/v1 for details
+// See https://pkg.go.dev/google.golang.org/api/dns/v1 for details
 // This facilitates stubbing out Google Cloud DNS for unit testing.
 // Only the parts of the API that we use are included.
 // Others can be added as needed.
@@ -24,7 +24,7 @@ import (
 )
 
 // Interfaces to directly mirror the Google Cloud DNS API structures.
-// See https://godoc.org/google.golang.org/api/dns/v1 for details
+// See https://pkg.go.dev/google.golang.org/api/dns/v1 for details
 // This facilitates stubbing out Google Cloud DNS for unit testing.
 // Only the parts of the API that we use are included.
 // Others can be added as needed.
@@ -17,7 +17,7 @@ limitations under the License.
 package stubs
 
 // Implementation of internal/interfaces/* on top of Google Cloud DNS API.
-// See https://godoc.org/google.golang.org/api/dns/v1 for details
+// See https://pkg.go.dev/google.golang.org/api/dns/v1 for details
 // This facilitates stubbing out Google Cloud DNS for unit testing.
 // Only the parts of the API that we use are included.
 // Others can be added as needed.
@@ -14,7 +14,7 @@
 
 FROM alpine:3.8
 
-ARG GO_VERSION=1.13.4
+ARG GO_VERSION=1.13.8
 
 # KOPS_GITISH: Modify to build at an explicit tag/gitish
 ARG KOPS_GITISH=release
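This ARG keeps the builder image's Go toolchain in step with the GOVERSION bump in the Makefile, and it can also be overridden at build time. A hypothetical invocation (the image tag is a placeholder):

```sh
docker build --build-arg GO_VERSION=1.13.8 --build-arg KOPS_GITISH=release -t kops-builder:local .
```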
@@ -37,8 +37,8 @@
 ## Advanced / Detailed List of Configurations
 
 ### API / Configuration References
-* [Godocs for Cluster - `ClusterSpec`](https://godoc.org/k8s.io/kops/pkg/apis/kops#ClusterSpec).
-* [Godocs for Instance Group - `InstanceGroupSpec`](https://godoc.org/k8s.io/kops/pkg/apis/kops#InstanceGroupSpec).
+* [Godocs for Cluster - `ClusterSpec`](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#ClusterSpec).
+* [Godocs for Instance Group - `InstanceGroupSpec`](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#InstanceGroupSpec).
 
 ### API Usage Guides
 * [`kops` cluster API definitions](cluster_spec.md)
@@ -1,6 +1,6 @@
 # Description of Keys in `config` and `cluster.spec`
 
-This list is not complete but aims to document any keys that are less than self-explanatory. Our [godoc](https://godoc.org/k8s.io/kops/pkg/apis/kops) reference provides a more detailed list of API values. [ClusterSpec](https://godoc.org/k8s.io/kops/pkg/apis/kops#ClusterSpec), defined as `kind: Cluster` in YAML, and [InstanceGroup](https://godoc.org/k8s.io/kops/pkg/apis/kops#InstanceGroup), defined as `kind: InstanceGroup` in YAML, are the two top-level API values used to describe a cluster.
+This list is not complete but aims to document any keys that are less than self-explanatory. Our [go.dev](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops) reference provides a more detailed list of API values. [ClusterSpec](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#ClusterSpec), defined as `kind: Cluster` in YAML, and [InstanceGroupSpec](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#InstanceGroupSpec), defined as `kind: InstanceGroup` in YAML, are the two top-level API values used to describe a cluster.
 
 ## spec
@@ -488,7 +488,7 @@ This will install [CoreDNS](https://coredns.io/) instead of kube-dns.
 
 If you are using CoreDNS and want to use an entirely custom CoreFile you can do this by specifying the file. This will not work with any other options which interact with the default CoreFile. You can also override the version of the CoreDNS image used to use a different registry or version by specifying `CoreDNSImage`.
 
-**Note:** If you are using this functionality you will need to be extra vigiliant on version changes of CoreDNS for changes in functionality of the plugins being used etc.
+**Note:** If you are using this functionality you will need to be extra vigilant on version changes of CoreDNS for changes in functionality of the plugins being used etc.
 
 ```yaml
 spec:
@@ -745,7 +745,7 @@ spec:
 
 ### docker
 
-It is possible to override Docker daemon options for all masters and nodes in the cluster. See the [API docs](https://godoc.org/k8s.io/kops/pkg/apis/kops#DockerConfig) for the full list of options.
+It is possible to override Docker daemon options for all masters and nodes in the cluster. See the [API docs](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#DockerConfig) for the full list of options.
 
 #### registryMirrors
@@ -846,7 +846,7 @@ spec:
 
 ### assets
 
-Assets define alernative locations from where to retrieve static files and containers
+Assets define alternative locations from where to retrieve static files and containers
 
 #### containerRegistry
|||
|
|
@ -57,6 +57,26 @@ The additional permissions are:
|
|||
}
|
||||
```
|
||||
|
||||
## Adding External Policies
|
||||
|
||||
At times you may want to attach policies shared to you by another AWS account or that are maintained by an outside application. You can specify managed policies through the `policyOverrides` spec field.
|
||||
|
||||
Policy Overrides are specified by their ARN on AWS and are grouped by their role type. See the example below:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
externalPolicies:
|
||||
node:
|
||||
- aws:arn:iam:123456789000:policy:test-policy
|
||||
master:
|
||||
- aws:arn:iam:123456789000:policy:test-policy
|
||||
bastion:
|
||||
- aws:arn:iam:123456789000:policy:test-policy
|
||||
```
|
||||
|
||||
External Policy attachments are treated declaritively. Any policies declared will be attached to the role, any policies not specified will be detached _after_ new policies are attached. This does not replace or affect built in Kops policies in any way.
|
||||
|
||||
It's important to note that externalPolicies will only handle the attachment and detachment of policies, not creation, modification, or deletion.
|
||||
|
||||
## Adding Additional Policies
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,6 @@
 # kops - Kubernetes Operations
 
-[GoDoc]: https://godoc.org/k8s.io/kops
+[GoDoc]: https://pkg.go.dev/k8s.io/kops
 [GoDoc Widget]: https://godoc.org/k8s.io/kops?status.svg
 
 The easiest way to get a production grade Kubernetes cluster up and running.
@@ -19,7 +19,7 @@ This document also applies to using the `kops` API to customize a Kubernetes clu
 
 Because of the above statement `kops` includes an API which provides a feature for users to utilize YAML or JSON manifests for managing their `kops` created Kubernetes installations. In the same way that you can use a YAML manifest to deploy a Job, you can deploy and manage a `kops` Kubernetes instance with a manifest. All of these values are also usable via the interactive editor with `kops edit`.
 
-> You can see all the options that are currently supported in Kops [here](https://github.com/kubernetes/kops/blob/master/pkg/apis/kops/componentconfig.go) or [more prettily here](https://godoc.org/k8s.io/kops/pkg/apis/kops#ClusterSpec)
+> You can see all the options that are currently supported in Kops [here](https://github.com/kubernetes/kops/blob/master/pkg/apis/kops/componentconfig.go) or [more prettily here](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#ClusterSpec)
 
 The following is a list of the benefits of using a file to manage instances.
@@ -298,7 +298,7 @@ spec:
   api:
 ```
 
-Full documentation is accessible via [godoc](https://godoc.org/k8s.io/kops/pkg/apis/kops#ClusterSpec).
+Full documentation is accessible via [godoc](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#ClusterSpec).
 
 The `ClusterSpec` allows a user to set configurations for such values as Docker log driver, Kubernetes API server log level, VPC for reusing a VPC (`NetworkID`), and the Kubernetes version.
@@ -330,7 +330,7 @@ metadata:
 spec:
 ```
 
-Full documentation is accessible via [godocs](https://godoc.org/k8s.io/kops/pkg/apis/kops#InstanceGroupSpec).
+Full documentation is accessible via [godocs](https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#InstanceGroupSpec).
 
 Instance Groups map to Auto Scaling Groups in AWS, and Instance Groups in GCE. They are an API level description of a group of compute instances used as Masters or Nodes.
|||
|
|
@ -9,7 +9,7 @@ The [node authorization service] is an experimental service which in the absence
|
|||
- the client certificate by default is added into the system:nodes rbac group _(note, if you are using PSP this is automatically bound by kops on your behalf)_.
|
||||
- the kubelet at this point has a server certificate and the client api certificate and good to go.
|
||||
|
||||
#### **Integretion with Kops**
|
||||
#### **Integration with Kops**
|
||||
|
||||
The node authorization service is run on the master as a daemonset, by default dns is _node-authorizer-internal.dns_zone_:10443 and added via same mechanism at the internal kube-apiserver i.e. annotations on the kube-apiserver pods which is picked up the dns-controller and added to the dns zone.
|
||||
|
||||
|
|
|
|||
|
|
@ -146,7 +146,7 @@ Be aware of the following limitations:
|
|||
|
||||
## Flatcar
|
||||
|
||||
Flatcar is a friendly fork of CoreOS and as such, compatible with it. If some issues occurs with it, it is likely that also CoreOS miight be affected. If you encounter any problem please report it to us.
|
||||
Flatcar is a friendly fork of CoreOS and as such, compatible with it. If some issues occurs with it, it is likely that also CoreOS might be affected. If you encounter any problem please report it to us.
|
||||
|
||||
The following steps are known:
|
||||
|
||||
|
|
|
|||
|
|
@ -356,3 +356,31 @@ is safer.
|
|||
* Create PodDisruptionBudget for kube-dns in kube-system namespace [@hakman](https://github.com/hakman),[@justinsb](https://github.com/justinsb) [#7856](https://github.com/kubernetes/kops/pull/7856)
|
||||
* Machine types updates [@mikesplain](https://github.com/mikesplain) [#7947](https://github.com/kubernetes/kops/pull/7947)
|
||||
* Add support for newer Docker versions [@hakman](https://github.com/hakman) [#7860](https://github.com/kubernetes/kops/pull/7860)
|
||||
|
||||
## 1.15.0 to 1.15.1
|
||||
|
||||
* Add indent template function and use it to fix KubeDNS.ExternalCoreFile rendering [@rochacon](https://github.com/rochacon) [#7979](https://github.com/kubernetes/kops/pull/7979)
|
||||
* fix(openstack): fix additional security groups on instance groups [@mitch000001](https://github.com/mitch000001) [#8004](https://github.com/kubernetes/kops/pull/8004)
|
||||
* Fix Handling of LaunchTemplate Versions for MixedInstancePolicy [@granular-ryanbonham](https://github.com/granular-ryanbonham) [#8038](https://github.com/kubernetes/kops/pull/8038)
|
||||
* Fix mounting Calico "flexvol-driver-host" in CoreOS [@hakman](https://github.com/hakman) [#8062](https://github.com/kubernetes/kops/pull/8062)
|
||||
* Complete support for Flatcar [@mazzy89](https://github.com/mazzy89) [#7545](https://github.com/kubernetes/kops/pull/7545)
|
||||
* Openstack: Fix cluster floating ips [@mitch000001](https://github.com/mitch000001) [#8115](https://github.com/kubernetes/kops/pull/8115)
|
||||
* Bump cilium version to 1.6.4 [@olemarkus](https://github.com/olemarkus) [#8022](https://github.com/kubernetes/kops/pull/8022)
|
||||
* mark weavenet-pod as system-critical [@jochen42](https://github.com/jochen42) [#7874](https://github.com/kubernetes/kops/pull/7874)
|
||||
* cilium: don't try to mount sys/fs/bpf if already mounted [@justinsb](https://github.com/justinsb) [#7832](https://github.com/kubernetes/kops/pull/7832)
|
||||
* Update copyrights for 2020 [@hakman](https://github.com/hakman) [#8241](https://github.com/kubernetes/kops/pull/8241)
|
||||
* Fix rendering of the Node Authorizer template [@KashifSaadat](https://github.com/KashifSaadat) [#7916](https://github.com/kubernetes/kops/pull/7916)
|
||||
* Cherry pick #7874 onto 1.15 [@k8s-ci-robot](https://github.com/k8s-ci-robot) [#8090](https://github.com/kubernetes/kops/pull/8090)
|
||||
* Backport the k8s 1.9 required action release note [@johngmyers](https://github.com/johngmyers) [#8378](https://github.com/kubernetes/kops/pull/8378)
|
||||
* Don't output empty sections in the manifests [@justinsb](https://github.com/justinsb),[@rifelpet](https://github.com/rifelpet) [#8317](https://github.com/kubernetes/kops/pull/8317)
|
||||
* Fix issues with older versions of k8s for basic clusters [@hakman](https://github.com/hakman),[@rifelpet](https://github.com/rifelpet) [#8248](https://github.com/kubernetes/kops/pull/8248)
|
||||
* CoreDNS default image bump to 1.6.6 to resolve CVE [@gjtempleton](https://github.com/gjtempleton) [#8333](https://github.com/kubernetes/kops/pull/8333)
|
||||
* Don't load nonexistent calico-client cert when CNI is Cilium [@johngmyers](https://github.com/johngmyers) [#8338](https://github.com/kubernetes/kops/pull/8338)
|
||||
* Kops releases - prefix git tags with v [@rifelpet](https://github.com/rifelpet) [#8373](https://github.com/kubernetes/kops/pull/8373)
|
||||
|
||||
## 1.15.1 to 1.15.2
|
||||
|
||||
* Fix Github download url for nodeup [@adri](https://github.com/adri),[@justinsb](https://github.com/justinsb) [#8468](https://github.com/kubernetes/kops/pull/8468)
|
||||
* GCS: Don't try to set ACLs if bucket-policy only is set [@justinsb](https://github.com/justinsb) [#8493](https://github.com/kubernetes/kops/pull/8493)
|
||||
* Cilium - Add missing Identity Allocation Mode to Operator Template [@daviddyball](https://github.com/daviddyball) [#8445](https://github.com/kubernetes/kops/pull/8445)
|
||||
* Make it possible to enable Prometheus metrics for Cilium [@olemarkus](https://github.com/olemarkus) [#8433](https://github.com/kubernetes/kops/pull/8433)
|
||||
|
|
|
|||
|
|
@@ -39,6 +39,12 @@ the notes prior to the release).
     PodPriority: "true"
 ```
 
+# Deprecations
+
+* Support for Kubernetes releases prior to 1.9 is deprecated and will be removed in kops 1.18.
+
+* The `kops/v1alpha1` API is deprecated and will be removed in kops 1.18. Users of `kops replace` will need to supply v1alpha2 resources.
+
 # Full change list since 1.15.0 release
 
 ## 1.15.0-alpha.1 to 1.16.0-alpha.1
@@ -226,3 +232,47 @@ the notes prior to the release).
 * Fix netlify mixed content [@mikesplain](https://github.com/mikesplain) [#7953](https://github.com/kubernetes/kops/pull/7953)
 * Fix goimports errors [@rifelpet](https://github.com/rifelpet) [#7955](https://github.com/kubernetes/kops/pull/7955)
 * Upate Lyft CNI to v0.5.1 [@maruina](https://github.com/maruina) [#7402](https://github.com/kubernetes/kops/pull/7402)
+
+## 1.16.0-alpha.2 to 1.16.0-beta.1
+
+* Complete support for Flatcar [@mazzy89](https://github.com/mazzy89) [#7545](https://github.com/kubernetes/kops/pull/7545)
+* Fix mounting Calico "flexvol-driver-host" in CoreOS [@hakman](https://github.com/hakman) [#8062](https://github.com/kubernetes/kops/pull/8062)
+* fix(openstack): fix additional security groups on instance groups [@mitch000001](https://github.com/mitch000001) [#8004](https://github.com/kubernetes/kops/pull/8004)
+* Cloud controller template function [@DavidSie](https://github.com/DavidSie) [#7992](https://github.com/kubernetes/kops/pull/7992)
+* Add CapacityOptimized to list of supported spot allocation strategies [@gjtempleton](https://github.com/gjtempleton) [#7406](https://github.com/kubernetes/kops/pull/7406)
+* Add inf1 isntances [@mikesplain](https://github.com/mikesplain) [#8128](https://github.com/kubernetes/kops/pull/8128)
+* Openstack: Fix cluster floating ips [@mitch000001](https://github.com/mitch000001) [#8115](https://github.com/kubernetes/kops/pull/8115)
+* [Issue-7870] kops controller support for digital ocean [@srikiz](https://github.com/srikiz) [#7961](https://github.com/kubernetes/kops/pull/7961)
+* Fix Handling of LaunchTemplate Versions for MixedInstancePolicy [@granular-ryanbonham](https://github.com/granular-ryanbonham) [#8038](https://github.com/kubernetes/kops/pull/8038)
+* Bump cilium version to 1.6.4 [@olemarkus](https://github.com/olemarkus) [#8022](https://github.com/kubernetes/kops/pull/8022)
+* Update copyrights for 2020 [@johngmyers](https://github.com/johngmyers) [#8241](https://github.com/kubernetes/kops/pull/8241)
+* cilium: don't try to mount sys/fs/bpf if already mounted [@justinsb](https://github.com/justinsb) [#7832](https://github.com/kubernetes/kops/pull/7832)
+* Fix protokube osx build [@mikesplain](https://github.com/mikesplain) [#8263](https://github.com/kubernetes/kops/pull/8263)
+* Add deprecation warning for older k8s versions [@rifelpet](https://github.com/rifelpet) [#8176](https://github.com/kubernetes/kops/pull/8176)
+* Remove kops-controller deployment [@rifelpet](https://github.com/rifelpet) [#8273](https://github.com/kubernetes/kops/pull/8273)
+* Promote peter & ryan & zetaab to approvers [@justinsb](https://github.com/justinsb) [#7983](https://github.com/kubernetes/kops/pull/7983)
+* Fix crossbuild-nodeup-in-docker [@johngmyers](https://github.com/johngmyers) [#8343](https://github.com/kubernetes/kops/pull/8343)
+* Add release notes for deleting the kops-controller deployment [@rifelpet](https://github.com/rifelpet) [#8321](https://github.com/kubernetes/kops/pull/8321)
+* Configuration to specify no SSH key [@austinmoore-](https://github.com/austinmoore-) [#7096](https://github.com/kubernetes/kops/pull/7096)
+* Set CLUSTER_NAME env var on amazon-vpc-cni pods [@rifelpet](https://github.com/rifelpet) [#8274](https://github.com/kubernetes/kops/pull/8274)
+* Don't output empty sections in the manifests [@justinsb](https://github.com/justinsb) [#8317](https://github.com/kubernetes/kops/pull/8317)
+* Fix issues with older versions of k8s for basic clusters [@hakman](https://github.com/hakman),[@rifelpet](https://github.com/rifelpet) [#8248](https://github.com/kubernetes/kops/pull/8248)
+* Backport the k8s 1.9 required action release note [@johngmyers](https://github.com/johngmyers) [#8378](https://github.com/kubernetes/kops/pull/8378)
+* Fix scheduler policy configmap args [@vvbogdanov87](https://github.com/vvbogdanov87) [#8386](https://github.com/kubernetes/kops/pull/8386)
+* Use IAMPrefix() for hostedzone [@lazzarello](https://github.com/lazzarello) [#8366](https://github.com/kubernetes/kops/pull/8366)
+* Add Cilium.EnablePolicy back into templates [@olemarkus](https://github.com/olemarkus) [#8379](https://github.com/kubernetes/kops/pull/8379)
+* CoreDNS default image bump to 1.6.6 to resolve CVE [@gjtempleton](https://github.com/gjtempleton) [#8333](https://github.com/kubernetes/kops/pull/8333)
+* Don't load nonexistent calico-client cert when CNI is Cilium [@johngmyers](https://github.com/johngmyers) [#8338](https://github.com/kubernetes/kops/pull/8338)
+* Kops releases - prefix git tags with v [@rifelpet](https://github.com/rifelpet) [#8373](https://github.com/kubernetes/kops/pull/8373)
+* EBS Root Volume Termination [@tioxy](https://github.com/tioxy) [#7865](https://github.com/kubernetes/kops/pull/7865)
+* Announce impending removal of v1alpha1 API [@johngmyers](https://github.com/johngmyers) [#8064](https://github.com/kubernetes/kops/pull/8064)
+* Add missing priorityClassName for critical pods [@johngmyers](https://github.com/johngmyers) [#8200](https://github.com/kubernetes/kops/pull/8200)
+
+## 1.16.0-beta.1 to 1.16.0-beta.2
+
+* Fix Github download url for nodeup [@adri](https://github.com/adri),[@justinsb](https://github.com/justinsb) [#8468](https://github.com/kubernetes/kops/pull/8468)
+* GCS: Don't try to set ACLs if bucket-policy only is set [@justinsb](https://github.com/justinsb) [#8493](https://github.com/kubernetes/kops/pull/8493)
+* Alicloud: allow use RAM role for OSS client [@bittopaz](https://github.com/bittopaz) [#8025](https://github.com/kubernetes/kops/pull/8025)
+* Cilium - Add missing Identity Allocation Mode to Operator Template [@daviddyball](https://github.com/daviddyball) [#8445](https://github.com/kubernetes/kops/pull/8445)
+* Make it possible to enable Prometheus metrics for Cilium [@olemarkus](https://github.com/olemarkus) [#8433](https://github.com/kubernetes/kops/pull/8433)
+* Update cilium to 1.6.6 [@olemarkus](https://github.com/olemarkus) [#8484](https://github.com/kubernetes/kops/pull/8484)
@@ -55,6 +55,12 @@ the notes prior to the release).
   a kops-controller Deployment may have been created that should get deleted because it has been replaced with a DaemonSet.
   Run `kubectl -n kube-system delete deployment kops-controller` after upgrading to Kops 1.17.0-alpha.2 or later.
 
+# Deprecations
+
+* Support for Kubernetes releases prior to 1.9 is deprecated and will be removed in kops 1.18.
+
+* The `kops/v1alpha1` API is deprecated and will be removed in kops 1.18. Users of `kops replace` will need to supply v1alpha2 resources.
+
 # Full change list since 1.16.0 release
 
 ## 1.16.0-alpha.1 to 1.17.0-alpha.1
@@ -112,3 +118,56 @@ the notes prior to the release).
 * DOCS: fix simple typo in readme [@lpmi-13](https://github.com/lpmi-13) [#8005](https://github.com/kubernetes/kops/pull/8005)
 * Spotinst: Upgrade the Spotinst SDK to version 1.36 [@liranp](https://github.com/liranp) [#8003](https://github.com/kubernetes/kops/pull/8003)
 * Release 1.17.0-alpha.1 [@justinsb](https://github.com/justinsb) [#7985](https://github.com/kubernetes/kops/pull/7985)
+
+## 1.17.0-alpha.1 to 1.17.0-alpha.2
+
+* Fix mounting Calico "flexvol-driver-host" in CoreOS [@hakman](https://github.com/hakman) [#8062](https://github.com/kubernetes/kops/pull/8062)
+* Cherry-pick #8074 to release-1.17 [@johngmyers](https://github.com/johngmyers) [#8084](https://github.com/kubernetes/kops/pull/8084)
+* Bump cilium version to 1.6.4 [@olemarkus](https://github.com/olemarkus) [#8022](https://github.com/kubernetes/kops/pull/8022)
+* Complete support for Flatcar [@mazzy89](https://github.com/mazzy89) [#7545](https://github.com/kubernetes/kops/pull/7545)
+* Canal v3.10 manifest for k8s v1.15+ [@KashifSaadat](https://github.com/KashifSaadat),[@hakman](https://github.com/hakman) [#7917](https://github.com/kubernetes/kops/pull/7917)
+* Cherry pick #8095 [@zetaab](https://github.com/zetaab) [#8096](https://github.com/kubernetes/kops/pull/8096)
+* test validateCluster twice to make sure it does not flap [@zetaab](https://github.com/zetaab),[@johngmyers](https://github.com/johngmyers) [#8088](https://github.com/kubernetes/kops/pull/8088)
+* Add inf1 isntances [@mikesplain](https://github.com/mikesplain) [#8128](https://github.com/kubernetes/kops/pull/8128)
+* Add CapacityOptimized to list of supported spot allocation strategies [@gjtempleton](https://github.com/gjtempleton) [#7406](https://github.com/kubernetes/kops/pull/7406)
+* Update Calico to v3.10.2 [@hakman](https://github.com/hakman) [#8104](https://github.com/kubernetes/kops/pull/8104)
+* Openstack: Fix cluster floating ips [@mitch000001](https://github.com/mitch000001) [#8115](https://github.com/kubernetes/kops/pull/8115)
+* cilium: don't try to mount sys/fs/bpf if already mounted [@justinsb](https://github.com/justinsb) [#7832](https://github.com/kubernetes/kops/pull/7832)
+* Update copyrights for 2020 [@johngmyers](https://github.com/johngmyers) [#8241](https://github.com/kubernetes/kops/pull/8241)
+* Fix protokube osx build [@mikesplain](https://github.com/mikesplain) [#8263](https://github.com/kubernetes/kops/pull/8263)
+* Set CLUSTER_NAME env var on amazon-vpc-cni pods [@rifelpet](https://github.com/rifelpet) [#8274](https://github.com/kubernetes/kops/pull/8274)
+* Add deprecation warning for older k8s versions [@rifelpet](https://github.com/rifelpet) [#8176](https://github.com/kubernetes/kops/pull/8176)
+* Remove kops-controller deployment [@rifelpet](https://github.com/rifelpet) [#8273](https://github.com/kubernetes/kops/pull/8273)
+* Don't output empty sections in the manifests [@justinsb](https://github.com/justinsb) [#8317](https://github.com/kubernetes/kops/pull/8317)
+* Cloud controller template function [@DavidSie](https://github.com/DavidSie) [#7992](https://github.com/kubernetes/kops/pull/7992)
+* Configuration to specify no SSH key [@austinmoore-](https://github.com/austinmoore-) [#7096](https://github.com/kubernetes/kops/pull/7096)
+* tests: increase timeout in rolling update tests [@justinsb](https://github.com/justinsb) [#8139](https://github.com/kubernetes/kops/pull/8139)
+* Fix crossbuild-nodeup-in-docker [@johngmyers](https://github.com/johngmyers) [#8343](https://github.com/kubernetes/kops/pull/8343)
+* update gophercloud dependency [@zetaab](https://github.com/zetaab) [#8347](https://github.com/kubernetes/kops/pull/8347)
+* Update Terraform resource names to be 0.12 compatible. [@rifelpet](https://github.com/rifelpet) [#7957](https://github.com/kubernetes/kops/pull/7957)
+* Allow local filesystem state stores (to aid CI pull-request workflows) [@ari-becker](https://github.com/ari-becker),[@rifelpet](https://github.com/rifelpet) [#6465](https://github.com/kubernetes/kops/pull/6465)
+* Fix issues with older versions of k8s for basic clusters [@hakman](https://github.com/hakman) [#8248](https://github.com/kubernetes/kops/pull/8248)
+* Use IAMPrefix() for hostedzone [@lazzarello](https://github.com/lazzarello) [#8366](https://github.com/kubernetes/kops/pull/8366)
+* Fix scheduler policy configmap args [@vvbogdanov87](https://github.com/vvbogdanov87) [#8386](https://github.com/kubernetes/kops/pull/8386)
+* Add Cilium.EnablePolicy back into templates [@olemarkus](https://github.com/olemarkus) [#8379](https://github.com/kubernetes/kops/pull/8379)
+* Bump etcd-manager to 3.0.20200116 (#8310) [@mmerrill3](https://github.com/mmerrill3) [#8399](https://github.com/kubernetes/kops/pull/8399)
+* CoreDNS default image bump to 1.6.6 to resolve CVE [@gjtempleton](https://github.com/gjtempleton) [#8333](https://github.com/kubernetes/kops/pull/8333)
+* Don't load nonexistent calico-client cert when CNI is Cilium [@johngmyers](https://github.com/johngmyers) [#8338](https://github.com/kubernetes/kops/pull/8338)
+* Kops releases - prefix git tags with v [@rifelpet](https://github.com/rifelpet) [#8373](https://github.com/kubernetes/kops/pull/8373)
+* EBS Root Volume Termination [@tioxy](https://github.com/tioxy) [#7865](https://github.com/kubernetes/kops/pull/7865)
+* Alicloud: etcd-manager support [@bittopaz](https://github.com/bittopaz) [#8016](https://github.com/kubernetes/kops/pull/8016)
+
+## 1.17.0-alpha.2 to 1.17.0-alpha.3
+
+* Add missing priorityClassName for critical pods [@johngmyers](https://github.com/johngmyers) [#8200](https://github.com/kubernetes/kops/pull/8200)
+* Alicloud: allow use RAM role for OSS client [@bittopaz](https://github.com/bittopaz) [#8025](https://github.com/kubernetes/kops/pull/8025)
+* Update coredns to 1.6.7 [@maruina](https://github.com/maruina) [#8452](https://github.com/kubernetes/kops/pull/8452)
+* Fix Github download url for nodeup [@adri](https://github.com/adri),[@justinsb](https://github.com/justinsb) [#8468](https://github.com/kubernetes/kops/pull/8468)
+
+## 1.17.0-alpha.3 to 1.17.0-alpha.4
+
+* Cilium - Add missing Identity Allocation Mode to Operator Template [@daviddyball](https://github.com/daviddyball) [#8445](https://github.com/kubernetes/kops/pull/8445)
+* Revert "Update coredns to 1.6.7" [@gjtempleton](https://github.com/gjtempleton) [#8502](https://github.com/kubernetes/kops/pull/8502)
+* GCS: Don't try to set ACLs if bucket-policy only is set [@justinsb](https://github.com/justinsb) [#8493](https://github.com/kubernetes/kops/pull/8493)
+* Make it possible to enable Prometheus metrics for Cilium [@olemarkus](https://github.com/olemarkus) [#8433](https://github.com/kubernetes/kops/pull/8433)
+* Update cilium to 1.6.6 [@olemarkus](https://github.com/olemarkus) [#8484](https://github.com/kubernetes/kops/pull/8484)
@ -6,6 +6,8 @@
|
|||
|
||||
* Terraform users on AWS may need to rename some resources in their state file in order to prepare for Terraform 0.12 support. See Required Actions below.
|
||||
|
||||
* Support for Kubernetes versions prior to 1.9 has been removed.
|
||||
|
||||
* Kubernetes 1.9 users will need to enable the PodPriority feature gate. See Required Actions below.
|
||||
|
||||
* A controller is now used to apply labels to nodes. If you are not using AWS, GCE, or OpenStack, your (non-master) nodes may not have labels applied correctly.
|
||||
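For the Terraform 0.12 resource renaming called out above, here is a hypothetical sketch only: the old and new resource addresses are made up, and the actual renames depend on the cluster (the Required Actions section and `terraform plan` output show the real ones).

```bash
# Hypothetical example: move one resource from its old address (dots in the
# name) to a 0.12-compatible address. Repeat for each resource that the
# regenerated kubernetes.tf renames; run `terraform plan` afterwards to verify
# no destroy/create is pending.
terraform state mv \
  'aws_autoscaling_group.master-us-east-1a.masters.example.com' \
  'aws_autoscaling_group.master-us-east-1a-masters-example-com'
```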
|
|
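For the Kubernetes 1.9 note above, a minimal sketch of enabling the PodPriority feature gate through the cluster spec; the exact set of components that need the gate is described in Required Actions, and the field names in the comments below are assumptions based on the kops cluster spec.

```bash
# Open the cluster spec and add the feature gate to the relevant components
# (shown here for kube-apiserver and kubelet only, as an illustration):
kops edit cluster "${CLUSTER_NAME}"
#   spec:
#     kubeAPIServer:
#       featureGates:
#         PodPriority: "true"
#     kubelet:
#       featureGates:
#         PodPriority: "true"
kops update cluster "${CLUSTER_NAME}" --yes
kops rolling-update cluster "${CLUSTER_NAME}" --yes
```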
@ -51,4 +53,250 @@
|
|||
* If a custom Kops build was used on a cluster, a kops-controller Deployment may have been created that should get deleted.
|
||||
Run `kubectl -n kube-system delete deployment kops-controller` after upgrading to Kops 1.16.0-beta.1 or later.
|
||||
|
||||
# Deprecations
|
||||
|
||||
* Support for Kubernetes version 1.10 is deprecated and will be removed in kops 1.19.
|
||||
|
||||
# Full change list since 1.17.0 release
|
||||
|
||||
## 1.17.0-alpha.1 to 1.18.0-alpha.1
|
||||
|
||||
* Release notes for 1.17.0-alpha.1 [@justinsb](https://github.com/justinsb) [#8006](https://github.com/kubernetes/kops/pull/8006)
|
||||
* Implementing audit dynamic configuration (#7392) [@mmerrill3](https://github.com/mmerrill3) [#7424](https://github.com/kubernetes/kops/pull/7424)
|
||||
* Set bazel version [@mikesplain](https://github.com/mikesplain) [#7996](https://github.com/kubernetes/kops/pull/7996)
|
||||
* Add verify-gomod and verify-goimports to Travis job [@rifelpet](https://github.com/rifelpet) [#7952](https://github.com/kubernetes/kops/pull/7952)
|
||||
* Alicloud: fix typo for listenerPort [@bittopaz](https://github.com/bittopaz) [#8011](https://github.com/kubernetes/kops/pull/8011)
|
||||
* Alicloud: only private subnets need SNAT rule [@bittopaz](https://github.com/bittopaz) [#8015](https://github.com/kubernetes/kops/pull/8015)
|
||||
* Alicloud: use ID of EIP to compare [@bittopaz](https://github.com/bittopaz) [#8012](https://github.com/kubernetes/kops/pull/8012)
|
||||
* Improve rolling update test coverage [@johngmyers](https://github.com/johngmyers) [#7904](https://github.com/kubernetes/kops/pull/7904)
|
||||
* Add download link for etcd-manager-ctl, clarify restarting etcd [@dzoeteman](https://github.com/dzoeteman) [#7506](https://github.com/kubernetes/kops/pull/7506)
|
||||
* Run goimports in make ci [@tioxy](https://github.com/tioxy) [#8023](https://github.com/kubernetes/kops/pull/8023)
|
||||
* Alicloud: fix comparison failure for scalinggroup [@bittopaz](https://github.com/bittopaz) [#8029](https://github.com/kubernetes/kops/pull/8029)
|
||||
* Alicloud: support internal api loadbalancer [@bittopaz](https://github.com/bittopaz) [#8014](https://github.com/kubernetes/kops/pull/8014)
|
||||
* Alicloud: fix comparison failures for SecurityGroupRule and SSHKey [@bittopaz](https://github.com/bittopaz) [#8028](https://github.com/kubernetes/kops/pull/8028)
|
||||
* Alicloud: add Encrypted field in Disk Find func [@bittopaz](https://github.com/bittopaz) [#8026](https://github.com/kubernetes/kops/pull/8026)
|
||||
* Add joshbranham to OWNERs as reviewer [@joshbranham](https://github.com/joshbranham) [#8010](https://github.com/kubernetes/kops/pull/8010)
|
||||
* Add CapacityOptimized to list of supported spot allocation strategies [@gjtempleton](https://github.com/gjtempleton) [#7406](https://github.com/kubernetes/kops/pull/7406)
|
||||
* Alicloud: fix comparison failures for VPC and NATGateway [@bittopaz](https://github.com/bittopaz) [#8027](https://github.com/kubernetes/kops/pull/8027)
|
||||
* Alicloud: use Balance as MultiAZPolicy [@bittopaz](https://github.com/bittopaz) [#8030](https://github.com/kubernetes/kops/pull/8030)
|
||||
* Allow users to set kube controller manager's concurrent sync flags. [@uruddarraju](https://github.com/uruddarraju) [#7600](https://github.com/kubernetes/kops/pull/7600)
|
||||
* Pass the cloud object to validator from caller [@johngmyers](https://github.com/johngmyers) [#7925](https://github.com/kubernetes/kops/pull/7925)
|
||||
* Rename to kops [@justinsb](https://github.com/justinsb) [#1](https://github.com/kubernetes/kops/pull/1)
|
||||
* Alicloud: allow use RAM role for OSS client [@bittopaz](https://github.com/bittopaz) [#8025](https://github.com/kubernetes/kops/pull/8025)
|
||||
* Fix Handling of LaunchTemplate Versions for MixedInstancePolicy [@granular-ryanbonham](https://github.com/granular-ryanbonham) [#8038](https://github.com/kubernetes/kops/pull/8038)
|
||||
* Additional leader election options [@DerekHeldtWerle](https://github.com/DerekHeldtWerle) [#8036](https://github.com/kubernetes/kops/pull/8036)
|
||||
* Fix truncation of admission control plugins list [@johngmyers](https://github.com/johngmyers) [#8033](https://github.com/kubernetes/kops/pull/8033)
|
||||
* Add more verifications to Travis [@johngmyers](https://github.com/johngmyers) [#8024](https://github.com/kubernetes/kops/pull/8024)
|
||||
* Alicloud: fix comparison failures for LaunchConfiguration and LoadBalancerWhiteList [@bittopaz](https://github.com/bittopaz) [#8042](https://github.com/kubernetes/kops/pull/8042)
|
||||
* [Issue-7956] - [Digital Ocean] Minor fix to have proper indexing for digital ocean regions [@srikiz](https://github.com/srikiz) [#8002](https://github.com/kubernetes/kops/pull/8002)
|
||||
* EBS Root Volume Termination [@tioxy](https://github.com/tioxy) [#7865](https://github.com/kubernetes/kops/pull/7865)
|
||||
* Canal v3.10 manifest for k8s v1.15+ [@KashifSaadat](https://github.com/KashifSaadat) [#7917](https://github.com/kubernetes/kops/pull/7917)
|
||||
* Fix misleading message in "make ci" when imports formatted incorrectly [@johngmyers](https://github.com/johngmyers) [#8045](https://github.com/kubernetes/kops/pull/8045)
|
||||
* Increase validation test coverage [@johngmyers](https://github.com/johngmyers) [#8039](https://github.com/kubernetes/kops/pull/8039)
|
||||
* Update integration test to cover CloudFormation output of NAT gateways [@rifelpet](https://github.com/rifelpet) [#8053](https://github.com/kubernetes/kops/pull/8053)
|
||||
* Fix CloudFormation template tags for NatGateway [@hakman](https://github.com/hakman) [#8051](https://github.com/kubernetes/kops/pull/8051)
|
||||
* Alicloud: fix comparison failures for VSwitch and VSwitchSNAT [@bittopaz](https://github.com/bittopaz) [#8044](https://github.com/kubernetes/kops/pull/8044)
|
||||
* Alicloud: fix comparison failures for RAMRole and RAMPolicy [@bittopaz](https://github.com/bittopaz) [#8043](https://github.com/kubernetes/kops/pull/8043)
|
||||
* Bump cilium version to 1.6.4 [@olemarkus](https://github.com/olemarkus) [#8022](https://github.com/kubernetes/kops/pull/8022)
|
||||
* Add verify-generate to Travis and 'make ci' [@johngmyers](https://github.com/johngmyers) [#8054](https://github.com/kubernetes/kops/pull/8054)
|
||||
* Fix mounting Calico "flexvol-driver-host" in CoreOS [@hakman](https://github.com/hakman) [#8062](https://github.com/kubernetes/kops/pull/8062)
|
||||
* Complete support for Flatcar [@mazzy89](https://github.com/mazzy89) [#7545](https://github.com/kubernetes/kops/pull/7545)
|
||||
* Cloud controller template function [@DavidSie](https://github.com/DavidSie) [#7992](https://github.com/kubernetes/kops/pull/7992)
|
||||
* put kubernetes 1.17.0 to channels [@zetaab](https://github.com/zetaab) [#8072](https://github.com/kubernetes/kops/pull/8072)
|
||||
* remove unused functions and simplify return str [@tanjunchen](https://github.com/tanjunchen) [#7792](https://github.com/kubernetes/kops/pull/7792)
|
||||
* Validate cluster after updating bastions [@johngmyers](https://github.com/johngmyers) [#8074](https://github.com/kubernetes/kops/pull/8074)
|
||||
* Add support for NFT with Calico and Canal [@hakman](https://github.com/hakman) [#8076](https://github.com/kubernetes/kops/pull/8076)
|
||||
* Docs: Fix some broken development links [@gjtempleton](https://github.com/gjtempleton) [#8075](https://github.com/kubernetes/kops/pull/8075)
|
||||
* Docs: More List fixes [@gjtempleton](https://github.com/gjtempleton) [#8092](https://github.com/kubernetes/kops/pull/8092)
|
||||
* Update documentation of go version requirement [@johngmyers](https://github.com/johngmyers) [#8094](https://github.com/kubernetes/kops/pull/8094)
|
||||
* add missing rbac rules [@zetaab](https://github.com/zetaab) [#8095](https://github.com/kubernetes/kops/pull/8095)
|
||||
* test validateCluster twice to make sure it does not flap [@zetaab](https://github.com/zetaab) [#8088](https://github.com/kubernetes/kops/pull/8088)
|
||||
* update metrics server image [@nothinux](https://github.com/nothinux) [#8046](https://github.com/kubernetes/kops/pull/8046)
|
||||
* docs(advanced): fix broken links on doc [@pshanoop](https://github.com/pshanoop) [#8102](https://github.com/kubernetes/kops/pull/8102)
|
||||
* Add test for flapping cluster validation and improve its logging [@johngmyers](https://github.com/johngmyers) [#8105](https://github.com/kubernetes/kops/pull/8105)
|
||||
* some resources upgraded to google provider 3.0 [@mccare](https://github.com/mccare) [#8103](https://github.com/kubernetes/kops/pull/8103)
|
||||
* fix-pkg-staticcheck and remove the repeat code [@tanjunchen](https://github.com/tanjunchen) [#8035](https://github.com/kubernetes/kops/pull/8035)
|
||||
* dev: hack/update-expected.sh should generate missing file [@justinsb](https://github.com/justinsb) [#8111](https://github.com/kubernetes/kops/pull/8111)
|
||||
* dev: ignore kops-controller in hack/update-expected.sh [@justinsb](https://github.com/justinsb) [#8107](https://github.com/kubernetes/kops/pull/8107)
|
||||
* Terraform GCE output: setting google provider to at least 3.0.0 [@mccare](https://github.com/mccare) [#8108](https://github.com/kubernetes/kops/pull/8108)
|
||||
* Add test for protokube builder [@justinsb](https://github.com/justinsb),[@rifelpet](https://github.com/rifelpet) [#8112](https://github.com/kubernetes/kops/pull/8112)
|
||||
* channels: bump k8s versions after dec. patch releases [@idealhack](https://github.com/idealhack) [#8123](https://github.com/kubernetes/kops/pull/8123)
|
||||
* Fix doc(advanced) broken links [@pshanoop](https://github.com/pshanoop) [#8125](https://github.com/kubernetes/kops/pull/8125)
|
||||
* a little change [@zehuaiWANG](https://github.com/zehuaiWANG) [#8127](https://github.com/kubernetes/kops/pull/8127)
|
||||
* fix 404 urls [@yuxiaobo96](https://github.com/yuxiaobo96) [#8052](https://github.com/kubernetes/kops/pull/8052)
|
||||
* pkg/model pkg/diff pkg/client/:simplify code and remove unused code [@tanjunchen](https://github.com/tanjunchen) [#8120](https://github.com/kubernetes/kops/pull/8120)
|
||||
* Cleans up the create cluster CLI prompts [@geojaz](https://github.com/geojaz) [#8122](https://github.com/kubernetes/kops/pull/8122)
|
||||
* Add kubeconfig flag to validate cluster command [@vvbogdanov87](https://github.com/vvbogdanov87) [#8110](https://github.com/kubernetes/kops/pull/8110)
|
||||
* Add inf1 instances [@mikesplain](https://github.com/mikesplain) [#8128](https://github.com/kubernetes/kops/pull/8128)
|
||||
* util/pkg/ upup/tools/ : simplify code and remove unused code [@tanjunchen](https://github.com/tanjunchen) [#8121](https://github.com/kubernetes/kops/pull/8121)
|
||||
* Bump COS version in alpha channel for k8s >= 1.16 [@justinsb](https://github.com/justinsb) [#8116](https://github.com/kubernetes/kops/pull/8116)
|
||||
* Openstack: Fix cluster floating ips [@mitch000001](https://github.com/mitch000001) [#8115](https://github.com/kubernetes/kops/pull/8115)
|
||||
* Update Calico to v3.10.2 [@hakman](https://github.com/hakman) [#8104](https://github.com/kubernetes/kops/pull/8104)
|
||||
* tests: increase timeout in rolling update tests [@justinsb](https://github.com/justinsb),[@rifelpet](https://github.com/rifelpet) [#8139](https://github.com/kubernetes/kops/pull/8139)
|
||||
* Automatically install dependencies with local packages [@hakman](https://github.com/hakman) [#8020](https://github.com/kubernetes/kops/pull/8020)
|
||||
* Add support for Containerd container runtime [@hakman](https://github.com/hakman),[@justinsb](https://github.com/justinsb) [#7986](https://github.com/kubernetes/kops/pull/7986)
|
||||
* Fix periodic e2e test for Ubuntu 16.04 [@hakman](https://github.com/hakman) [#8160](https://github.com/kubernetes/kops/pull/8160)
|
||||
* Latest bazel launcher seems to require a newline at end of .bazelversion [@justinsb](https://github.com/justinsb) [#8161](https://github.com/kubernetes/kops/pull/8161)
|
||||
* Spotinst: Upgrade the Spotinst controller to version 1.0.50 [@liranp](https://github.com/liranp) [#8080](https://github.com/kubernetes/kops/pull/8080)
|
||||
* kops-change-main [@zehuaiWANG](https://github.com/zehuaiWANG) [#8132](https://github.com/kubernetes/kops/pull/8132)
|
||||
* nodeup/pkg/model - fix static check [@hakman](https://github.com/hakman) [#8155](https://github.com/kubernetes/kops/pull/8155)
|
||||
* upup/pkg/fi/ upup/pkg/kutil : simplify code and remove code [@tanjunchen](https://github.com/tanjunchen) [#8118](https://github.com/kubernetes/kops/pull/8118)
|
||||
* Update kubernetes versions used in integration tests [@rifelpet](https://github.com/rifelpet) [#8173](https://github.com/kubernetes/kops/pull/8173)
|
||||
* Run dns-controller and kops-controller as non-root user [@johngmyers](https://github.com/johngmyers) [#8169](https://github.com/kubernetes/kops/pull/8169)
|
||||
* Fix verify-staticcheck prow job [@rifelpet](https://github.com/rifelpet) [#8182](https://github.com/kubernetes/kops/pull/8182)
|
||||
* protokube/pkg - fix static check [@hakman](https://github.com/hakman) [#8165](https://github.com/kubernetes/kops/pull/8165)
|
||||
* Refactor: Add Region() method to fi.Cloud [@justinsb](https://github.com/justinsb) [#8180](https://github.com/kubernetes/kops/pull/8180)
|
||||
* Remove make command from verify-staticcheck.sh [@rifelpet](https://github.com/rifelpet) [#8189](https://github.com/kubernetes/kops/pull/8189)
|
||||
* GCE: Fix Permission for the Storage Bucket [@mccare](https://github.com/mccare) [#8157](https://github.com/kubernetes/kops/pull/8157)
|
||||
* pkg/instancegroups - fix static check [@johngmyers](https://github.com/johngmyers) [#8186](https://github.com/kubernetes/kops/pull/8186)
|
||||
* pkg/resources/aws:simplify code and remove code [@Aresforchina](https://github.com/Aresforchina) [#8188](https://github.com/kubernetes/kops/pull/8188)
|
||||
* Update links printed by Kops to use new docs site [@rifelpet](https://github.com/rifelpet) [#8190](https://github.com/kubernetes/kops/pull/8190)
|
||||
* dnsprovider/pkg/dnsprovider - fix static check [@hakman](https://github.com/hakman) [#8149](https://github.com/kubernetes/kops/pull/8149)
|
||||
* fix staticcheck failures in pkg/resources [@Aresforchina](https://github.com/Aresforchina) [#8191](https://github.com/kubernetes/kops/pull/8191)
|
||||
* Add corresponding unit test to the function in subnet.go. [@fenggw-fnst](https://github.com/fenggw-fnst) [#8195](https://github.com/kubernetes/kops/pull/8195)
|
||||
* Update gcr.io images [@justinsb](https://github.com/justinsb) [#8197](https://github.com/kubernetes/kops/pull/8197)
|
||||
* pkg/resources-fix staticcheck [@k8s-ci-robot](https://github.com/k8s-ci-robot),[@Aresforchina](https://github.com/Aresforchina) [#8192](https://github.com/kubernetes/kops/pull/8192)
|
||||
* Update Weave Net to version 2.6.0 [@bboreham](https://github.com/bboreham) [#7898](https://github.com/kubernetes/kops/pull/7898)
|
||||
* Guard External cloud controller manager with its feature flag [@mitch000001](https://github.com/mitch000001) [#7770](https://github.com/kubernetes/kops/pull/7770)
|
||||
* Always consider spot instance node readiness in cluster validation [@johngmyers](https://github.com/johngmyers) [#8159](https://github.com/kubernetes/kops/pull/8159)
|
||||
* Update support for RHEL 8 [@hakman](https://github.com/hakman) [#8164](https://github.com/kubernetes/kops/pull/8164)
|
||||
* Fix upup/tools/generators/pkg/codegen staticcheck failures [@johngmyers](https://github.com/johngmyers) [#8203](https://github.com/kubernetes/kops/pull/8203)
|
||||
* containerd: Use containerd 1.2.4 with Docker 18.09.3 [@hakman](https://github.com/hakman) [#8170](https://github.com/kubernetes/kops/pull/8170)
|
||||
* util/pkg/vfs/:staticcheck [@tanjunchen](https://github.com/tanjunchen) [#8171](https://github.com/kubernetes/kops/pull/8171)
|
||||
* containerd: Add --container-runtime cli flag [@hakman](https://github.com/hakman) [#8172](https://github.com/kubernetes/kops/pull/8172)
|
||||
* Add deprecation warning for older k8s versions [@rifelpet](https://github.com/rifelpet) [#8176](https://github.com/kubernetes/kops/pull/8176)
|
||||
* Add all flag to export cluster command [@vvbogdanov87](https://github.com/vvbogdanov87) [#8179](https://github.com/kubernetes/kops/pull/8179)
|
||||
* Alicloud: refine Alicloud RAM role policy [@bittopaz](https://github.com/bittopaz) [#8194](https://github.com/kubernetes/kops/pull/8194)
|
||||
* Fix cmd/kops staticcheck failures [@johngmyers](https://github.com/johngmyers) [#8202](https://github.com/kubernetes/kops/pull/8202)
|
||||
* /hack: improve shell script in hack [@tanjunchen](https://github.com/tanjunchen) [#8143](https://github.com/kubernetes/kops/pull/8143)
|
||||
* dns-controller: allow it to run on CNI networking mode and remove dependency on kube-proxy [@rochacon](https://github.com/rochacon) [#8131](https://github.com/kubernetes/kops/pull/8131)
|
||||
* replace TrimRight with TrimSuffix [@tanjunchen](https://github.com/tanjunchen) [#8041](https://github.com/kubernetes/kops/pull/8041)
|
||||
* Fix typo in export kubeconfig [@vvbogdanov87](https://github.com/vvbogdanov87) [#8211](https://github.com/kubernetes/kops/pull/8211)
|
||||
* Fix typo in KubeProxy model [@rifelpet](https://github.com/rifelpet) [#8210](https://github.com/kubernetes/kops/pull/8210)
|
||||
* Fix link printed in k8s version deprecation message [@rifelpet](https://github.com/rifelpet) [#8209](https://github.com/kubernetes/kops/pull/8209)
|
||||
* cilium: don't try to mount sys/fs/bpf if already mounted [@justinsb](https://github.com/justinsb) [#7832](https://github.com/kubernetes/kops/pull/7832)
|
||||
* Set shared field for volume resource on delete [@vvbogdanov87](https://github.com/vvbogdanov87) [#8079](https://github.com/kubernetes/kops/pull/8079)
|
||||
* clean up buildDiffLines [@zehuaiWANG](https://github.com/zehuaiWANG) [#8144](https://github.com/kubernetes/kops/pull/8144)
|
||||
* Fix cloudmock/aws/mockelbv2 staticcheck failures [@johngmyers](https://github.com/johngmyers) [#8218](https://github.com/kubernetes/kops/pull/8218)
|
||||
* Fix node-authorizer/pkg/authorizers/aws staticcheck failure [@johngmyers](https://github.com/johngmyers) [#8222](https://github.com/kubernetes/kops/pull/8222)
|
||||
* Fix pkg/resources/openstack staticcheck failure [@johngmyers](https://github.com/johngmyers) [#8223](https://github.com/kubernetes/kops/pull/8223)
|
||||
* Add code simplifications for staticcheck [@hakman](https://github.com/hakman) [#8232](https://github.com/kubernetes/kops/pull/8232)
|
||||
* util/pkg/slice: Add slice test [@q384566678](https://github.com/q384566678) [#8219](https://github.com/kubernetes/kops/pull/8219)
|
||||
* pkg/apis/ pkg/commands/ pkg/model/ staticcheck [@tanjunchen](https://github.com/tanjunchen) [#8229](https://github.com/kubernetes/kops/pull/8229)
|
||||
* dnsprovider staticcheck [@tanjunchen](https://github.com/tanjunchen) [#8233](https://github.com/kubernetes/kops/pull/8233)
|
||||
* upup/pkg/fi/cloudup/apply_cluster staticcheck [@tanjunchen](https://github.com/tanjunchen) [#8231](https://github.com/kubernetes/kops/pull/8231)
|
||||
* staticcheck:remove duplicate import packages [@yuxiaobo96](https://github.com/yuxiaobo96) [#8225](https://github.com/kubernetes/kops/pull/8225)
|
||||
* cmd/kops/ staticcheck and remove one mom [@tanjunchen](https://github.com/tanjunchen) [#8230](https://github.com/kubernetes/kops/pull/8230)
|
||||
* nodeup/pkg/ pkg/ staticcheck: Fix ST1005 [@tanjunchen](https://github.com/tanjunchen) [#8234](https://github.com/kubernetes/kops/pull/8234)
|
||||
* upup/pkg/fi/cloudup/ staticcheck: Fix ST1005 [@tanjunchen](https://github.com/tanjunchen) [#8236](https://github.com/kubernetes/kops/pull/8236)
|
||||
* Update copyrights for 2020 [@johngmyers](https://github.com/johngmyers) [#8241](https://github.com/kubernetes/kops/pull/8241)
|
||||
* Run Travis verifications in a separate parallel job [@johngmyers](https://github.com/johngmyers) [#8254](https://github.com/kubernetes/kops/pull/8254)
|
||||
* Adding ability to configure resources for weave (#8113) [@mmerrill3](https://github.com/mmerrill3) [#8216](https://github.com/kubernetes/kops/pull/8216)
|
||||
* containerd: Fix tiny nits [@hakman](https://github.com/hakman) [#8217](https://github.com/kubernetes/kops/pull/8217)
|
||||
* Custom sysctl Parameters [@ripta](https://github.com/ripta) [#7730](https://github.com/kubernetes/kops/pull/7730)
|
||||
* Update mock kops version in integration tests [@rifelpet](https://github.com/rifelpet) [#8258](https://github.com/kubernetes/kops/pull/8258)
|
||||
* Fix protokube osx build [@mikesplain](https://github.com/mikesplain) [#8263](https://github.com/kubernetes/kops/pull/8263)
|
||||
* Fix aws-china.md to download SHA-256 checksums for kops assets [@johngmyers](https://github.com/johngmyers) [#8243](https://github.com/kubernetes/kops/pull/8243)
|
||||
* Fix broken link [@johngmyers](https://github.com/johngmyers) [#8266](https://github.com/kubernetes/kops/pull/8266)
|
||||
* Exclude one Travis osx job [@johngmyers](https://github.com/johngmyers) [#8262](https://github.com/kubernetes/kops/pull/8262)
|
||||
* Support tainting all nodes needing update during rolling update [@johngmyers](https://github.com/johngmyers) [#8021](https://github.com/kubernetes/kops/pull/8021)
|
||||
* Fix RollingUpdate behaviour when using LaunchTemplates for both kops & terraform spec updates [@KashifSaadat](https://github.com/KashifSaadat) [#8261](https://github.com/kubernetes/kops/pull/8261)
|
||||
* Refactor rolling update tests [@johngmyers](https://github.com/johngmyers) [#8268](https://github.com/kubernetes/kops/pull/8268)
|
||||
* Simplify code for rolling updates of nodes [@johngmyers](https://github.com/johngmyers) [#8239](https://github.com/kubernetes/kops/pull/8239)
|
||||
* Move nodeup to /opt/kops/bin [@hakman](https://github.com/hakman) [#8212](https://github.com/kubernetes/kops/pull/8212)
|
||||
* For dev, don't preload docker images on nodes [@justinsb](https://github.com/justinsb) [#8196](https://github.com/kubernetes/kops/pull/8196)
|
||||
* fixed yum proxy configuration [@zadowsmash](https://github.com/zadowsmash) [#7772](https://github.com/kubernetes/kops/pull/7772)
|
||||
* Use non-experimental version of encryption provider config flag in 1.13+ [@zacblazic](https://github.com/zacblazic) [#7900](https://github.com/kubernetes/kops/pull/7900)
|
||||
* Remove DrainAndValidateRollingUpdate feature flag [@johngmyers](https://github.com/johngmyers) [#7909](https://github.com/kubernetes/kops/pull/7909)
|
||||
* staticcheck:modify the import package alias [@yuxiaobo96](https://github.com/yuxiaobo96) [#8253](https://github.com/kubernetes/kops/pull/8253)
|
||||
* Remove forcing disabled cgos Darwin [@joshbranham](https://github.com/joshbranham) [#7914](https://github.com/kubernetes/kops/pull/7914)
|
||||
* Make /opt/kops writeable on COS [@justinsb](https://github.com/justinsb) [#8269](https://github.com/kubernetes/kops/pull/8269)
|
||||
* Remove duplicated words [@longkb](https://github.com/longkb) [#8277](https://github.com/kubernetes/kops/pull/8277)
|
||||
* nodeup: Add some dependencies for Service [@justinsb](https://github.com/justinsb) [#8270](https://github.com/kubernetes/kops/pull/8270)
|
||||
* Set CLUSTER_NAME env var on amazon-vpc-cni pods [@rifelpet](https://github.com/rifelpet) [#8274](https://github.com/kubernetes/kops/pull/8274)
|
||||
* containerd: Set a default version even with Kubernetes 1.17 [@hakman](https://github.com/hakman) [#8283](https://github.com/kubernetes/kops/pull/8283)
|
||||
* small documentation typo fixes [@stratusjerry](https://github.com/stratusjerry) [#8285](https://github.com/kubernetes/kops/pull/8285)
|
||||
* Enable host logging for kops-controller [@rifelpet](https://github.com/rifelpet) [#8204](https://github.com/kubernetes/kops/pull/8204)
|
||||
* Fix unit name for memory request for weave [@hakman](https://github.com/hakman) [#8303](https://github.com/kubernetes/kops/pull/8303)
|
||||
* Add wget and nfs-utils deps needed for e2e tests [@hakman](https://github.com/hakman) [#8286](https://github.com/kubernetes/kops/pull/8286)
|
||||
* Some Calico doc updates [@tmjd](https://github.com/tmjd) [#8302](https://github.com/kubernetes/kops/pull/8302)
|
||||
* Remove "pigz" containerd dependency for RHEL/CentOS 7 [@hakman](https://github.com/hakman) [#8307](https://github.com/kubernetes/kops/pull/8307)
|
||||
* Replace kubernetes mount code with utils [@justinsb](https://github.com/justinsb) [#8056](https://github.com/kubernetes/kops/pull/8056)
|
||||
* Stabilize sequence of "export xx=xxx" statements [@bittopaz](https://github.com/bittopaz) [#8247](https://github.com/kubernetes/kops/pull/8247)
|
||||
* upup/pkg/fi-fix staticcheck [@Aresforchina](https://github.com/Aresforchina) [#8193](https://github.com/kubernetes/kops/pull/8193)
|
||||
* Build dns-controller using bazel [@justinsb](https://github.com/justinsb) [#8315](https://github.com/kubernetes/kops/pull/8315)
|
||||
* Don't output empty sections in the manifests [@justinsb](https://github.com/justinsb) [#8317](https://github.com/kubernetes/kops/pull/8317)
|
||||
* Remove support for Kubernetes 1.8 and earlier [@johngmyers](https://github.com/johngmyers) [#8208](https://github.com/kubernetes/kops/pull/8208)
|
||||
* Improve CIDR validation [@johngmyers](https://github.com/johngmyers) [#8284](https://github.com/kubernetes/kops/pull/8284)
|
||||
* Remove kops-controller deployment [@rifelpet](https://github.com/rifelpet) [#8273](https://github.com/kubernetes/kops/pull/8273)
|
||||
* Remove traces of kops-server [@rifelpet](https://github.com/rifelpet) [#7824](https://github.com/kubernetes/kops/pull/7824)
|
||||
* Add release notes for deleting the kops-controller deployment [@rifelpet](https://github.com/rifelpet) [#8321](https://github.com/kubernetes/kops/pull/8321)
|
||||
* Don't share /etc/hosts when using etcd-manager [@justinsb](https://github.com/justinsb) [#8322](https://github.com/kubernetes/kops/pull/8322)
|
||||
* Use /opt/cni/bin on all distros (including COS) [@justinsb](https://github.com/justinsb) [#7833](https://github.com/kubernetes/kops/pull/7833)
|
||||
* Add unit test for func RenderInstanceGroupSubnets in instancegroup.go [@fenggw-fnst](https://github.com/fenggw-fnst) [#8245](https://github.com/kubernetes/kops/pull/8245)
|
||||
* upup/pkg/fi/cloudup/awstasks/ staticcheck [@tanjunchen](https://github.com/tanjunchen) [#8235](https://github.com/kubernetes/kops/pull/8235)
|
||||
* Allow CoreDNS to be specified by create cluster overrides [@rifelpet](https://github.com/rifelpet) [#8334](https://github.com/kubernetes/kops/pull/8334)
|
||||
* Allow removal of additionalUserData on bastion [@nvanheuverzwijn](https://github.com/nvanheuverzwijn) [#8331](https://github.com/kubernetes/kops/pull/8331)
|
||||
* docs(docs/operations) Adding more steps to etcd-manager restore operation to work properly [@phspagiari](https://github.com/phspagiari) [#8337](https://github.com/kubernetes/kops/pull/8337)
|
||||
* Configuration to specify no SSH key [@austinmoore-](https://github.com/austinmoore-) [#7096](https://github.com/kubernetes/kops/pull/7096)
|
||||
* Remove code for unsupported Kubernetes versions [@johngmyers](https://github.com/johngmyers) [#8327](https://github.com/kubernetes/kops/pull/8327)
|
||||
* Securing http link to https link [@truongnh1992](https://github.com/truongnh1992) [#8345](https://github.com/kubernetes/kops/pull/8345)
|
||||
* fix author nickName [@LinshanYu](https://github.com/LinshanYu) [#8205](https://github.com/kubernetes/kops/pull/8205)
|
||||
* Fix crossbuild-nodeup-in-docker [@johngmyers](https://github.com/johngmyers) [#8343](https://github.com/kubernetes/kops/pull/8343)
|
||||
* Update CentOS 7 AMI [@rifelpet](https://github.com/rifelpet) [#8350](https://github.com/kubernetes/kops/pull/8350)
|
||||
* update gophercloud dependency [@zetaab](https://github.com/zetaab) [#8346](https://github.com/kubernetes/kops/pull/8346)
|
||||
* Fix DNS loop on Ubuntu 18.04 (Bionic) [@hakman](https://github.com/hakman) [#8353](https://github.com/kubernetes/kops/pull/8353)
|
||||
* Add support for weave.npcExtraArgs [@ReillyProcentive](https://github.com/ReillyProcentive) [#8265](https://github.com/kubernetes/kops/pull/8265)
|
||||
* Make /opt/cni/bin writeable on COS [@justinsb](https://github.com/justinsb) [#8354](https://github.com/kubernetes/kops/pull/8354)
|
||||
* Add shellcheck verification [@rifelpet](https://github.com/rifelpet) [#8328](https://github.com/kubernetes/kops/pull/8328)
|
||||
* Update Terraform resource names to be 0.12 compatible. [@rifelpet](https://github.com/rifelpet) [#7957](https://github.com/kubernetes/kops/pull/7957)
|
||||
* Update dependencies to kubernetes 1.16 [@justinsb](https://github.com/justinsb) [#8055](https://github.com/kubernetes/kops/pull/8055)
|
||||
* Add unit test for pkg/edit/edit.go [@tiendc](https://github.com/tiendc) [#8349](https://github.com/kubernetes/kops/pull/8349)
|
||||
* Test package versions as well as hashes [@justinsb](https://github.com/justinsb) [#8360](https://github.com/kubernetes/kops/pull/8360)
|
||||
* Publish update AMIs into the alpha channel [@justinsb](https://github.com/justinsb) [#8361](https://github.com/kubernetes/kops/pull/8361)
|
||||
* Allow local filesystem state stores (to aid CI pull-request workflows) [@ari-becker](https://github.com/ari-becker) [#6465](https://github.com/kubernetes/kops/pull/6465)
|
||||
* Add release notes for terraform resource renaming [@rifelpet](https://github.com/rifelpet) [#8364](https://github.com/kubernetes/kops/pull/8364)
|
||||
* Add a warning when using file:// state store [@rifelpet](https://github.com/rifelpet) [#8368](https://github.com/kubernetes/kops/pull/8368)
|
||||
* Update default instance types for AWS [@hakman](https://github.com/hakman) [#8282](https://github.com/kubernetes/kops/pull/8282)
|
||||
* update docs: updating kops [@nothinux](https://github.com/nothinux) [#8358](https://github.com/kubernetes/kops/pull/8358)
|
||||
* Update lyft CNI to v0.5.3 [@maruina](https://github.com/maruina) [#8367](https://github.com/kubernetes/kops/pull/8367)
|
||||
* Fix issues with older versions of k8s for basic clusters [@hakman](https://github.com/hakman) [#8248](https://github.com/kubernetes/kops/pull/8248)
|
||||
* Backport the k8s 1.9 required action release note [@johngmyers](https://github.com/johngmyers) [#8378](https://github.com/kubernetes/kops/pull/8378)
|
||||
* Change the deleted user to ghost [@LinshanYu](https://github.com/LinshanYu) [#8380](https://github.com/kubernetes/kops/pull/8380)
|
||||
* Add Cilium.EnablePolicy back into templates [@olemarkus](https://github.com/olemarkus) [#8379](https://github.com/kubernetes/kops/pull/8379)
|
||||
* Use IAMPrefix() for hostedzone [@lazzarello](https://github.com/lazzarello) [#8366](https://github.com/kubernetes/kops/pull/8366)
|
||||
* Fix scheduler policy configmap args [@vvbogdanov87](https://github.com/vvbogdanov87) [#8386](https://github.com/kubernetes/kops/pull/8386)
|
||||
* Bump k8s versions in alpha and stable channels [@olemarkus](https://github.com/olemarkus) [#8392](https://github.com/kubernetes/kops/pull/8392)
|
||||
* Enabling JSON output for Terraform instead of writing the HCL syntax … [@mccare](https://github.com/mccare) [#8145](https://github.com/kubernetes/kops/pull/8145)
|
||||
* containerd: Add support for tar.gz package [@hakman](https://github.com/hakman) [#8199](https://github.com/kubernetes/kops/pull/8199)
|
||||
* CoreDNS default image bump to 1.6.6 to resolve CVE [@gjtempleton](https://github.com/gjtempleton) [#8333](https://github.com/kubernetes/kops/pull/8333)
|
||||
* Bump etcd-manager to 3.0.20200116 (#8310) [@mmerrill3](https://github.com/mmerrill3) [#8399](https://github.com/kubernetes/kops/pull/8399)
|
||||
* Remove addons only applicable to unsupported versions of Kubernetes [@johngmyers](https://github.com/johngmyers) [#8318](https://github.com/kubernetes/kops/pull/8318)
|
||||
* Don't load nonexistent calico-client cert when CNI is Cilium [@johngmyers](https://github.com/johngmyers) [#8338](https://github.com/kubernetes/kops/pull/8338)
|
||||
* Edit author name [@LinshanYu](https://github.com/LinshanYu) [#8374](https://github.com/kubernetes/kops/pull/8374)
|
||||
* Kops releases - prefix git tags with v [@rifelpet](https://github.com/rifelpet) [#8373](https://github.com/kubernetes/kops/pull/8373)
|
||||
* Support additional kube-scheduler config parameters via config file [@rralcala](https://github.com/rralcala) [#8407](https://github.com/kubernetes/kops/pull/8407)
|
||||
* Option to increase concurrency of rolling update within instancegroup [@johngmyers](https://github.com/johngmyers) [#8271](https://github.com/kubernetes/kops/pull/8271)
|
||||
* Fix template clusterName behavior [@lcrisci](https://github.com/lcrisci) [#7319](https://github.com/kubernetes/kops/pull/7319)
|
||||
* Update support for Amazon Linux 2 [@hakman](https://github.com/hakman) [#8425](https://github.com/kubernetes/kops/pull/8425)
|
||||
* Announce impending removal of v1alpha1 API [@johngmyers](https://github.com/johngmyers),[@justinsb](https://github.com/justinsb) [#8064](https://github.com/kubernetes/kops/pull/8064)
|
||||
* Alicloud: etcd-manager support [@bittopaz](https://github.com/bittopaz) [#8016](https://github.com/kubernetes/kops/pull/8016)
|
||||
* Fixes regression in e2e tests [@rralcala](https://github.com/rralcala) [#8430](https://github.com/kubernetes/kops/pull/8430)
|
||||
* Release notes for 1.17.0-alpha.2 [@justinsb](https://github.com/justinsb) [#8438](https://github.com/kubernetes/kops/pull/8438)
|
||||
* Add missing priorityClassName for critical pods [@johngmyers](https://github.com/johngmyers) [#8200](https://github.com/kubernetes/kops/pull/8200)
|
||||
* Release notes for 1.16.0-beta.1 [@justinsb](https://github.com/justinsb) [#8441](https://github.com/kubernetes/kops/pull/8441)
|
||||
* Release notes for 1.15.1 [@justinsb](https://github.com/justinsb) [#8442](https://github.com/kubernetes/kops/pull/8442)
|
||||
* Fix typo in comment [@longkb](https://github.com/longkb) [#8444](https://github.com/kubernetes/kops/pull/8444)
|
||||
|
||||
## 1.18.0-alpha.1 to 1.18.0-alpha.2
|
||||
|
||||
* Add an EnvVar type to the API [@rifelpet](https://github.com/rifelpet) [#8455](https://github.com/kubernetes/kops/pull/8455)
|
||||
* Update coredns to 1.6.7 [@maruina](https://github.com/maruina) [#8452](https://github.com/kubernetes/kops/pull/8452)
|
||||
* Update godoc.org references to use pkg.go.dev [@rifelpet](https://github.com/rifelpet) [#8460](https://github.com/kubernetes/kops/pull/8460)
|
||||
* docs: trivial fix some typos [@truongnh1992](https://github.com/truongnh1992) [#8450](https://github.com/kubernetes/kops/pull/8450)
|
||||
* Tag EBS volumes when using launch templates with AWS API target [@johngmyers](https://github.com/johngmyers) [#8462](https://github.com/kubernetes/kops/pull/8462)
|
||||
* [DigitalOcean] Add load balancer support for master HA [@srikiz](https://github.com/srikiz) [#8237](https://github.com/kubernetes/kops/pull/8237)
|
||||
* Cilium - Add missing Identity Allocation Mode to Operator Template [@daviddyball](https://github.com/daviddyball) [#8445](https://github.com/kubernetes/kops/pull/8445)
|
||||
* Stop logging to /var/log/kops-controller.log [@justinsb](https://github.com/justinsb) [#8467](https://github.com/kubernetes/kops/pull/8467)
|
||||
* Fix typos in cluster_spec.md [@hase1128](https://github.com/hase1128) [#8474](https://github.com/kubernetes/kops/pull/8474)
|
||||
* Fix Github download url for nodeup [@adri](https://github.com/adri),[@justinsb](https://github.com/justinsb) [#8468](https://github.com/kubernetes/kops/pull/8468)
|
||||
|
|
|
|||
|
|
@ -162,7 +162,7 @@ The swift store can be configured by providing your OpenStack credentials and co
|
|||
- `OS_APPLICATION_CREDENTIAL_NAME`: application credential name
|
||||
- `OS_APPLICATION_CREDENTIAL_SECRET`: application secret
|
||||
|
||||
The mechanism used to retrieve the credentials is derived from the [gophercloud OpenStack SDK](https://godoc.org/github.com/gophercloud/gophercloud).
|
||||
The mechanism used to retrieve the credentials is derived from the [gophercloud OpenStack SDK](https://pkg.go.dev/github.com/gophercloud/gophercloud).
|
||||
|
||||
A credentials file with `OPENSTACK_CREDENTIAL_FILE` or a config derived from your personal credentials living in `$HOME/.openstack/config` can also be used to configure your store.
|
||||
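As a concrete illustration of the above, a minimal sketch assuming application-credential authentication; the endpoint, credential values, and container name are placeholders, and additional `OS_*` variables may be required depending on how the credential was issued.

```bash
# Placeholder values only; substitute your own Keystone endpoint and credential.
export OS_AUTH_URL="https://keystone.example.com:5000/v3"
export OS_APPLICATION_CREDENTIAL_NAME="kops-state"
export OS_APPLICATION_CREDENTIAL_SECRET="changeme"

# Point kops at a swift:// container for cluster state.
export KOPS_STATE_STORE="swift://kops-state-store"
kops get clusters
```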
|
||||
|
|
|
|||
4
go.mod
|
|
@ -92,6 +92,7 @@ require (
|
|||
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
|
||||
github.com/fullsailor/pkcs7 v0.0.0-20180422025557-ae226422660e
|
||||
github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4
|
||||
github.com/go-bindata/go-bindata v3.1.2+incompatible
|
||||
github.com/go-ini/ini v1.51.0
|
||||
github.com/go-logr/logr v0.1.0
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
|
||||
|
|
@ -102,7 +103,6 @@ require (
|
|||
github.com/huandu/xstrings v1.2.0 // indirect
|
||||
github.com/jacksontj/memberlistmesh v0.0.0-20190905163944-93462b9d2bb7
|
||||
github.com/jpillora/backoff v0.0.0-20170918002102-8eab2debe79d
|
||||
github.com/jteeuwen/go-bindata v0.0.0-20151023091102-a0ff2567cfb7
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/miekg/coredns v0.0.0-20161111164017-20e25559d5ea
|
||||
github.com/miekg/dns v1.1.4
|
||||
|
|
@ -125,7 +125,7 @@ require (
|
|||
golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
|
||||
golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371
|
||||
google.golang.org/api v0.6.1-0.20190607001116-5213b8090861
|
||||
google.golang.org/api v0.17.0
|
||||
gopkg.in/gcfg.v1 v1.2.0
|
||||
gopkg.in/inf.v0 v0.9.1
|
||||
gopkg.in/yaml.v2 v2.2.7
|
||||
|
|
|
|||
15
go.sum
|
|
@ -70,6 +70,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
|||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
|
||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
|
||||
github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1 h1:HD4PLRzjuCVW79mQ0/pdsalOLHJ+FaEoqJLxfltpb2U=
|
||||
github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
|
||||
|
|
@ -124,6 +125,8 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1
|
|||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
|
|
@ -141,6 +144,8 @@ github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I
|
|||
github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE=
|
||||
github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
|
||||
github.com/go-ini/ini v1.51.0 h1:VPJKXGzbKlyExUE8f41aV57yxkYx5R49yR6n7flp0M0=
|
||||
github.com/go-ini/ini v1.51.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
|
|
@ -231,6 +236,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
|||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
|
||||
|
|
@ -397,6 +404,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f
|
|||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
|
||||
|
|
@ -601,6 +610,8 @@ gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmK
|
|||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 h1:ppLucX0K/60T3t6LPZQzTOkt5PytkEbQLIaSteq+TpE=
|
||||
google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
|
||||
google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
|
|
@ -610,10 +621,14 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn
|
|||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
|
|
|
|||
|
|
@ -144,6 +144,7 @@ k8s.io/kops/pkg/util/templater
|
|||
k8s.io/kops/pkg/validation
|
||||
k8s.io/kops/pkg/values
|
||||
k8s.io/kops/pkg/wellknownports
|
||||
k8s.io/kops/pkg/wellknownusers
|
||||
k8s.io/kops/protokube/cmd/protokube
|
||||
k8s.io/kops/protokube/pkg/etcd
|
||||
k8s.io/kops/protokube/pkg/gossip
|
||||
|
|
|
|||
|
|
@ -5,17 +5,12 @@
|
|||
./hack/dev-build.sh
|
||||
./hack/make-apimachinery.sh
|
||||
./hack/new-iam-user.sh
|
||||
./hack/publish-docs.sh
|
||||
./hack/update-bazel.sh
|
||||
./hack/update-expected.sh
|
||||
./hack/update-header.sh
|
||||
./hack/verify-apimachinery.sh
|
||||
./hack/verify-bazel.sh
|
||||
./hack/verify-boilerplate.sh
|
||||
./hack/verify-gofmt.sh
|
||||
./hack/verify-packages.sh
|
||||
./hack/verify-spelling.sh
|
||||
./hack/verify-staticcheck.sh
|
||||
./hooks/nvidia-bootstrap/image/run.sh
|
||||
./hooks/nvidia-device-plugin/image/files/01-aws-nvidia-driver.sh
|
||||
./hooks/nvidia-device-plugin/image/files/02-nvidia-docker.sh
|
||||
|
|
|
|||
|
|
@ -129,7 +129,7 @@ def file_passes(filename, refs, regexs):
|
|||
def file_extension(filename):
|
||||
return os.path.splitext(filename)[1].split(".")[-1].lower()
|
||||
|
||||
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
|
||||
skipped_dirs = ['third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
|
||||
"vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
|
||||
"pkg/generated/bindata.go"]
|
||||
|
||||
|
|
|
|||
|
|
@ -19,4 +19,50 @@ set -o errexit
|
|||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
KOPS_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. >/dev/null 2>&1 && pwd )"
|
||||
|
||||
kube::util::array_contains() {
|
||||
local search="$1"
|
||||
local element
|
||||
shift
|
||||
for element; do
|
||||
if [[ "${element}" == "${search}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
kube::util::read-array() {
|
||||
local i=0
|
||||
unset -v "$1"
|
||||
while IFS= read -r "$1[i++]"; do :; done
|
||||
eval "[[ \${$1[--i]} ]]" || unset "$1[i]" # ensures last element isn't empty
|
||||
}
|
||||
|
||||
kube::util::trap_add() {
|
||||
local trap_add_cmd
|
||||
trap_add_cmd=$1
|
||||
shift
|
||||
|
||||
for trap_add_name in "$@"; do
|
||||
local existing_cmd
|
||||
local new_cmd
|
||||
|
||||
# Grab the currently defined trap commands for this trap
|
||||
existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')
|
||||
|
||||
if [[ -z "${existing_cmd}" ]]; then
|
||||
new_cmd="${trap_add_cmd}"
|
||||
else
|
||||
new_cmd="${trap_add_cmd};${existing_cmd}"
|
||||
fi
|
||||
|
||||
# Assign the test. Disable the shellcheck warning telling that trap
|
||||
# commands should be single quoted to avoid evaluating them at this
|
||||
# point instead evaluating them at run time. The logic of adding new
|
||||
# commands to a single trap requires them to be evaluated right away.
|
||||
# shellcheck disable=SC2064
|
||||
trap "${new_cmd}" "${trap_add_name}"
|
||||
done
|
||||
}
|
||||
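For reference, a hypothetical caller of the helpers added above (presumably hack/common.sh); this is not part of the diff, and the relative path in the `source` line is an assumption.

```bash
#!/usr/bin/env bash
# Assumes this script lives in hack/ next to common.sh.
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# trap_add chains cleanup commands on EXIT instead of overwriting an existing trap.
TMP_DIR=$(mktemp -d)
kube::util::trap_add "rm -rf '${TMP_DIR}'" EXIT
kube::util::trap_add "echo 'cleanup finished'" EXIT

# read-array fills the named array from stdin without a subshell;
# array_contains then performs a literal membership test.
kube::util::read-array GO_FILES < <(find . -name '*.go' -not -path './vendor/*')
if kube::util::array_contains "./main.go" "${GO_FILES[@]}"; then
  echo "main.go is in the list"
fi
```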
|
|
|
|||
72
hack/dep.py
|
|
@ -1,72 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This python script helps sync godeps from the k8s repos into our git submodules
|
||||
# It generates bash commands where changes are needed
|
||||
# We can probably also use it for deps when the time comes!
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
from os.path import join
|
||||
|
||||
if not os.environ['GOPATH']:
|
||||
raise Exception("Must set GOPATH")
|
||||
|
||||
kops_dir = join(os.environ['GOPATH'], 'src', 'k8s.io', 'kops')
|
||||
k8s_dir = join(os.environ['GOPATH'], 'src', 'k8s.io', 'kubernetes')
|
||||
|
||||
with open(join(k8s_dir, 'Godeps', 'Godeps.json')) as data_file:
|
||||
godeps = json.load(data_file)
|
||||
|
||||
# For debugging, because dep status is unbearably slow
|
||||
# dep status -json | jq .> dep-status.json
|
||||
# with open(join(kops_dir, 'dep-status.json')) as data_file:
|
||||
# dep_status = json.load(data_file)
|
||||
|
||||
process = subprocess.Popen(['dep', 'status', '-json'], stdout=subprocess.PIPE, cwd=kops_dir)
|
||||
dep_status_stdout, err = process.communicate()
|
||||
dep_status = json.loads(dep_status_stdout)
|
||||
|
||||
#pprint(godeps)
|
||||
|
||||
godep_map = {}
|
||||
for godep in godeps['Deps']:
|
||||
#print("%s %s" % (godep['ImportPath'], godep['Rev']))
|
||||
godep_map[godep['ImportPath']] = godep['Rev']
|
||||
|
||||
dep_status_map = {}
|
||||
for dep in dep_status:
|
||||
#print("%s %s" % (godep['ImportPath'], godep['Rev']))
|
||||
dep_status_map[dep['ProjectRoot']] = dep['Revision']
|
||||
|
||||
|
||||
for dep in dep_status_map:
|
||||
sha = dep_status_map.get(dep)
|
||||
godep_sha = godep_map.get(dep)
|
||||
if not godep_sha:
|
||||
for k in godep_map:
|
||||
if k.startswith(dep):
|
||||
godep_sha = godep_map[k]
|
||||
break
|
||||
if godep_sha:
|
||||
if godep_sha != sha:
|
||||
print("# update needed: %s %s vs %s" % (dep, godep_sha, sha))
|
||||
print("[[override]]")
|
||||
print(' name = "%s"' % (dep))
|
||||
print(' revision = "%s"' % (godep_sha))
|
||||
else:
|
||||
print("# UNKNOWN dep %s" % dep)
|
||||
|
|
@ -52,7 +52,7 @@
|
|||
#
|
||||
###############################################################################
|
||||
|
||||
KOPS_DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
|
||||
#
|
||||
# Check that required binaries are installed
|
||||
|
|
@ -85,7 +85,7 @@ NETWORKING=${NETWORKING:-weave}
|
|||
# How verbose go logging is
|
||||
VERBOSITY=${VERBOSITY:-10}
|
||||
|
||||
cd $KOPS_DIRECTORY/..
|
||||
cd "${KOPS_ROOT}"
|
||||
|
||||
GIT_VER=git-$(git describe --always)
|
||||
[ -z "$GIT_VER" ] && echo "we do not have GIT_VER something is very wrong" && exit 1;
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
. $(dirname "${BASH_SOURCE}")/common.sh
|
||||
|
||||
WORK_DIR=`mktemp -d`
|
||||
WORK_DIR=$(mktemp -d)
|
||||
|
||||
cleanup() {
|
||||
chmod -R +w "${WORK_DIR}"
|
||||
|
|
@ -26,19 +26,19 @@ cleanup() {
|
|||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
mkdir -p ${WORK_DIR}/go/
|
||||
ln -s ${GOPATH}/src/k8s.io/kops/vendor/ ${WORK_DIR}/go/src
|
||||
mkdir -p "${WORK_DIR}/go/"
|
||||
cp -R "${GOPATH}/src/k8s.io/kops/vendor/" "${WORK_DIR}/go/src"
|
||||
|
||||
unset GOBIN
|
||||
GOPATH=${WORK_DIR}/go/ go install -v k8s.io/code-generator/cmd/conversion-gen/
|
||||
cp ${WORK_DIR}/go/bin/conversion-gen ${GOPATH}/bin/
|
||||
|
||||
GOPATH=${WORK_DIR}/go/ go install k8s.io/code-generator/cmd/deepcopy-gen/
|
||||
cp ${WORK_DIR}/go/bin/deepcopy-gen ${GOPATH}/bin/
|
||||
env GOBIN="${WORK_DIR}/go/bin" GOPATH="${WORK_DIR}/go/" go install -v k8s.io/code-generator/cmd/conversion-gen/
|
||||
cp "${WORK_DIR}/go/bin/conversion-gen" "${GOPATH}/bin/"
|
||||
|
||||
GOPATH=${WORK_DIR}/go/ go install k8s.io/code-generator/cmd/defaulter-gen/
|
||||
cp ${WORK_DIR}/go/bin/defaulter-gen ${GOPATH}/bin/
|
||||
env GOBIN="${WORK_DIR}/go/bin" GOPATH="${WORK_DIR}/go/" go install k8s.io/code-generator/cmd/deepcopy-gen/
|
||||
cp "${WORK_DIR}/go/bin/deepcopy-gen" "${GOPATH}/bin/"
|
||||
|
||||
GOPATH=${WORK_DIR}/go/ go install k8s.io/code-generator/cmd/client-gen/
|
||||
cp ${WORK_DIR}/go/bin/client-gen ${GOPATH}/bin/
|
||||
env GOBIN="${WORK_DIR}/go/bin" GOPATH="${WORK_DIR}/go/" go install k8s.io/code-generator/cmd/defaulter-gen/
|
||||
cp "${WORK_DIR}/go/bin/defaulter-gen" "${GOPATH}/bin/"
|
||||
|
||||
env GOBIN="${WORK_DIR}/go/bin" GOPATH="${WORK_DIR}/go/" go install k8s.io/code-generator/cmd/client-gen/
|
||||
cp "${WORK_DIR}/go/bin/client-gen" "${GOPATH}/bin/"
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if ! [ -z $DEBUG ]; then
|
||||
set -x
|
||||
fi
|
||||
|
||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
|
||||
if [ "$COMPONENT" != "docs" ]; then
|
||||
echo "This task runs only to publish docs"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
make -C ${DIR}/.. build-docs
|
||||
|
||||
git config --global user.email "travis@travis-ci.com"
|
||||
git config --global user.name "Travis Bot"
|
||||
|
||||
git clone --branch=gh-pages --depth=1 https://${GH_REF} ${DIR}/gh-pages
|
||||
cd ${DIR}/gh-pages
|
||||
|
||||
git rm -r .
|
||||
|
||||
cp -r ${DIR}/../site/* .
|
||||
|
||||
git add .
|
||||
git commit -m "Deploy GitHub Pages"
|
||||
git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" gh-pages > /dev/null 2>&1
|
||||
|
|
@@ -18,21 +18,15 @@ set -o errexit
set -o nounset
set -o pipefail

KOPS_ROOT=$(git rev-parse --show-toplevel)
cd ${KOPS_ROOT}
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

export GOPATH=${KOPS_ROOT}/../../../
cd "${KOPS_ROOT}"

TMP_OUT=$(mktemp -d)
trap "{ rm -rf ${TMP_OUT}; }" EXIT

GOBIN="${TMP_OUT}" go install ./vendor/github.com/bazelbuild/bazel-gazelle/cmd/gazelle

# manually remove BUILD file for k8s.io/apimachinery/pkg/util/sets/BUILD if it
# exists; there is a specific set-gen rule that breaks importing
# ref: https://github.com/kubernetes/kubernetes/blob/4e2f5e2212b05a305435ef96f4b49dc0932e1264/staging/src/k8s.io/apimachinery/pkg/util/sets/BUILD#L23-L49
# rm -f ${KOPS_ROOT}/vendor/k8s.io/apimachinery/pkg/util/sets/{BUILD,BUILD.bazel}

"${TMP_OUT}/gazelle" fix \
-external=vendored \
-mode=fix \

@@ -18,8 +18,9 @@ set -o errexit
set -o nounset
set -o pipefail

KOPS_ROOT=$(git rev-parse --show-toplevel)
cd ${KOPS_ROOT}
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

cd "${KOPS_ROOT}"

# Update gobindata to reflect any yaml changes
make kops-gobindata

@@ -14,12 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "${BASH_SOURCE}")/common.sh
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

BAD_HEADERS=$((${KUBE_ROOT}/hack/verify-boilerplate.sh || true) | awk '{ print $7}')
BAD_HEADERS=$((${KOPS_ROOT}/hack/verify-boilerplate.sh || true) | awk '{ print $7}')
FORMATS="sh go Makefile Dockerfile"

YEAR=`date +%Y`
YEAR=`date -u +%Y`

for i in ${FORMATS}
do

@@ -27,7 +27,7 @@ do
for j in ${BAD_HEADERS}
do
:
HEADER=$(cat ${KUBE_ROOT}/hack/boilerplate/boilerplate.${i}.txt | sed "s/YEAR/${YEAR}/")
HEADER=$(cat ${KOPS_ROOT}/hack/boilerplate/boilerplate.${i}.txt | sed "s/YEAR/${YEAR}/")
value=$(<${j})
if [[ "$j" != *$i ]]
then

@@ -18,7 +18,9 @@ set -o errexit
set -o nounset
set -o pipefail

KOPS_ROOT=$(git rev-parse --show-toplevel)
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

cd "${KOPS_ROOT}"

export API_OPTIONS="--verify-only"
if make apimachinery-codegen; then

@@ -13,10 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

KOPS_ROOT=$(git rev-parse --show-toplevel)
cd ${KOPS_ROOT}
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

export GOPATH=${KOPS_ROOT}/../../../
cd "${KOPS_ROOT}"

TMP_OUT=$(mktemp -d)
trap "{ rm -rf ${TMP_OUT}; }" EXIT

@@ -14,9 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "${BASH_SOURCE}")/common.sh
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

boiler="${KUBE_ROOT}/hack/boilerplate/boilerplate.py $@"
boiler="${KOPS_ROOT}/hack/boilerplate/boilerplate.py $@"

files_need_boilerplate=( `${boiler}` )

@@ -24,7 +24,7 @@ if [[ -z ${files_need_boilerplate+x} ]]; then
exit
fi

TO_REMOVE=(${PWD}/federation/model/bindata.go ${PWD}/upup/models/bindata.go)
TO_REMOVE=(${KOPS_ROOT}/federation/model/bindata.go ${KOPS_ROOT}/upup/models/bindata.go)
TEMP_ARRAY=()

for pkg in "${files_need_boilerplate[@]}"; do

@@ -18,7 +18,8 @@ set -o errexit
set -o nounset
set -o pipefail

KOPS_ROOT=$(git rev-parse --show-toplevel)
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

cd "${KOPS_ROOT}"

make crds

@@ -14,7 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "${BASH_SOURCE}")/common.sh
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

cd "${KOPS_ROOT}"

GOFMT="bazel run //:gofmt -- -s -w"

@@ -18,8 +18,9 @@ set -o errexit
set -o nounset
set -o pipefail

export REPO_ROOT="${REPO_ROOT:-$(git rev-parse --show-toplevel)}"
cd "${REPO_ROOT}"
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

cd "${KOPS_ROOT}"

changes=$(git status --porcelain || true)
if [ -n "${changes}" ]; then

@@ -14,10 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "${BASH_SOURCE}")/common.sh
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# Check that the .packages file contains all packages
packages_file="${KUBE_ROOT}/hack/.packages"
packages_file="${KOPS_ROOT}/hack/.packages"
if ! diff -u "${packages_file}" <(go list k8s.io/kops/... | grep -v vendor); then
{
echo

@@ -18,45 +18,7 @@ set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..

kube::util::array_contains() {
local search="$1"
local element
shift
for element; do
if [[ "${element}" == "${search}" ]]; then
return 0
fi
done
return 1
}
kube::util::trap_add() {
local trap_add_cmd
trap_add_cmd=$1
shift

for trap_add_name in "$@"; do
local existing_cmd
local new_cmd

# Grab the currently defined trap commands for this trap
existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')

if [[ -z "${existing_cmd}" ]]; then
new_cmd="${trap_add_cmd}"
else
new_cmd="${trap_add_cmd};${existing_cmd}"
fi

# Assign the test. Disable the shellcheck warning telling that trap
# commands should be single quoted to avoid evaluating them at this
# point instead evaluating them at run time. The logic of adding new
# commands to a single trap requires them to be evaluated right away.
# shellcheck disable=SC2064
trap "${new_cmd}" "${trap_add_name}"
done
}
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# required version for this script, if not installed on the host we will
# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
@@ -92,7 +54,7 @@ create_container () {
# we're done.
# This is incredibly much faster than creating a container for each shellcheck
# call ...
docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${KUBE_ROOT}:${KUBE_ROOT}" -w "${KUBE_ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647
docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${KOPS_ROOT}:/go/src/k8s.io/kops" -w "/go/src/k8s.io/kops" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647
}
# removes the shellcheck container
remove_container () {

@@ -100,7 +62,7 @@ remove_container () {
}

# ensure we're linting the k8s source tree
cd "${KUBE_ROOT}"
cd "${KOPS_ROOT}"

# Find all shell scripts excluding:
# - Anything git-ignored - No need to lint untracked files.

@@ -121,7 +83,7 @@ done < <(find . -name "*.sh" \
\))

# make sure known failures are sorted
failure_file="${KUBE_ROOT}/hack/.shellcheck_failures"
failure_file="${KOPS_ROOT}/hack/.shellcheck_failures"
if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then
{
echo

@@ -178,7 +140,7 @@ fi
# common arguments we'll pass to shellcheck
SHELLCHECK_OPTIONS=(
# allow following sourced files that are not specified in the command,
# we need this because we specify one file at at time in order to trivially
# we need this because we specify one file at a time in order to trivially
# detect which files are failing
"--external-sources"
# include our disabled lints

@@ -18,10 +18,11 @@ set -o errexit
set -o nounset
set -o pipefail

REPO_ROOT=$(git rev-parse --show-toplevel)
cd "${REPO_ROOT}"
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

OUTPUT_GOBIN="${REPO_ROOT}/_output/bin"
cd "${KOPS_ROOT}"

OUTPUT_GOBIN="${KOPS_ROOT}/_output/bin"

# Install tools we need, but from vendor/
GOBIN="${OUTPUT_GOBIN}" go install ./vendor/github.com/client9/misspell/cmd/misspell

@@ -18,26 +18,7 @@ set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(git rev-parse --show-toplevel)

kube::util::array_contains() {
local search="$1"
local element
shift
for element; do
if [[ "${element}" == "${search}" ]]; then
return 0
fi
done
return 1
}

function kube::util::read-array {
local i=0
unset -v "$1"
while IFS= read -r "$1[i++]"; do :; done
eval "[[ \${$1[--i]} ]]" || unset "$1[i]" # ensures last element isn't empty
}
. "$(dirname "${BASH_SOURCE[0]}")/common.sh"

FOCUS="${1:-}"

@@ -65,7 +46,7 @@ IGNORE=(
export IFS='|'; ignore_pattern="^(${IGNORE[*]})\$"; unset IFS

# Ensure that we find the binaries we build before anything else.
export GOBIN="${KUBE_ROOT}/_output/bin"
export GOBIN="${KOPS_ROOT}/_output/bin"
PATH="${GOBIN}:${PATH}"

# Install staticcheck from vendor

@@ -73,10 +54,10 @@ echo 'installing staticcheck from vendor'

go install k8s.io/kops/vendor/honnef.co/go/tools/cmd/staticcheck

cd "${KUBE_ROOT}"
cd "${KOPS_ROOT}"

# Check that the file is in alphabetical order
failure_file="${KUBE_ROOT}/hack/.staticcheck_failures"
failure_file="${KOPS_ROOT}/hack/.staticcheck_failures"
if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then
{
echo

@@ -24,7 +24,7 @@ RUN apt-get update && apt-get install --yes --reinstall lsb-base \
&& rm -rf /var/lib/apt/lists/*

# Install golang
RUN curl -L https://storage.googleapis.com/golang/go1.13.4.linux-amd64.tar.gz | tar zx -C /usr/local
RUN curl -L https://storage.googleapis.com/golang/go1.13.8.linux-amd64.tar.gz | tar zx -C /usr/local
ENV PATH $PATH:/usr/local/go/bin

COPY onbuild.sh /onbuild.sh

@@ -1,5 +1,5 @@
mkdocs-material~=4.4.0
mkdocs~=1.0.4
# mkdocs-awesome-pages-plugin~=1.2.0
pymdown-extensions~=6.1
pymdown-extensions==6.2.1
pygments~=2.3.1

@@ -24,7 +24,7 @@ RUN apt-get update && apt-get install --yes --reinstall lsb-base \
&& rm -rf /var/lib/apt/lists/*

# Install golang
RUN curl -L https://storage.googleapis.com/golang/go1.13.4.linux-amd64.tar.gz | tar zx -C /usr/local
RUN curl -L https://storage.googleapis.com/golang/go1.13.8.linux-amd64.tar.gz | tar zx -C /usr/local
ENV PATH $PATH:/usr/local/go/bin

COPY onbuild.sh /onbuild.sh
@@ -670,6 +670,14 @@ spec:
(use to control whom can creates dns entries)
type: string
type: object
externalPolicies:
additionalProperties:
items:
type: string
type: array
description: ExternalPolicies allows the insertion of pre-existing managed
policies on IG Roles
type: object
fileAssets:
description: A collection of files assets for deployed cluster wide
items:

@@ -2408,8 +2416,7 @@ spec:
VPC CNI networking
properties:
imageName:
description: 'The container image name to use, which by default
is: 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.5'
description: The container image name to use
type: string
type: object
calico:

@@ -2547,6 +2554,8 @@ spec:
items:
type: string
type: array
agentPrometheusPort:
type: integer
allowLocalhost:
type: string
autoDirectNodeRoutes:

@@ -2593,6 +2602,8 @@ spec:
type: boolean
enablePolicy:
type: string
enablePrometheusMetrics:
type: boolean
enableTracing:
type: boolean
enableipv4:

@@ -2601,6 +2612,8 @@ spec:
type: boolean
envoyLog:
type: string
ipam:
type: string
ipv4ClusterCidrMaskSize:
type: integer
ipv4Node:

@@ -64,8 +64,8 @@ nav:
- Cluster Spec: "cluster_spec.md"
- Instance Group API: "instance_groups.md"
- Using Manifests and Customizing: "manifests_and_customizing_via_api.md"
- Godocs for Cluster - ClusterSpec: "https://godoc.org/k8s.io/kops/pkg/apis/kops#ClusterSpec"
- Godocs for Instance Group - InstanceGroupSpec: "https://godoc.org/k8s.io/kops/pkg/apis/kops#InstanceGroupSpec"
- Godocs for Cluster - ClusterSpec: "https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#ClusterSpec"
- Godocs for Instance Group - InstanceGroupSpec: "https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#InstanceGroupSpec"

- Operations:
- Updates & Upgrades: "operations/updates_and_upgrades.md"
@@ -24,18 +24,19 @@ import (
type Distribution string

var (
DistributionJessie Distribution = "jessie"
DistributionDebian9 Distribution = "debian9"
DistributionDebian10 Distribution = "buster"
DistributionXenial Distribution = "xenial"
DistributionBionic Distribution = "bionic"
DistributionRhel7 Distribution = "rhel7"
DistributionCentos7 Distribution = "centos7"
DistributionRhel8 Distribution = "rhel8"
DistributionCentos8 Distribution = "centos8"
DistributionCoreOS Distribution = "coreos"
DistributionFlatcar Distribution = "flatcar"
DistributionContainerOS Distribution = "containeros"
DistributionJessie Distribution = "jessie"
DistributionDebian9 Distribution = "debian9"
DistributionDebian10 Distribution = "buster"
DistributionXenial Distribution = "xenial"
DistributionBionic Distribution = "bionic"
DistributionAmazonLinux2 Distribution = "amazonlinux2"
DistributionRhel7 Distribution = "rhel7"
DistributionCentos7 Distribution = "centos7"
DistributionRhel8 Distribution = "rhel8"
DistributionCentos8 Distribution = "centos8"
DistributionCoreOS Distribution = "coreos"
DistributionFlatcar Distribution = "flatcar"
DistributionContainerOS Distribution = "containeros"
)

func (d Distribution) BuildTags() []string {

@@ -50,6 +51,8 @@ func (d Distribution) BuildTags() []string {
t = []string{"_xenial"}
case DistributionBionic:
t = []string{"_bionic"}
case DistributionAmazonLinux2:
t = []string{"_amazonlinux2"}
case DistributionCentos7:
t = []string{"_centos7"}
case DistributionRhel7:

@@ -88,7 +91,7 @@ func (d Distribution) IsDebianFamily() bool {
return true
case DistributionXenial, DistributionBionic:
return true
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
return false
case DistributionCoreOS, DistributionFlatcar, DistributionContainerOS:
return false

@@ -104,7 +107,7 @@ func (d Distribution) IsUbuntu() bool {
return false
case DistributionXenial, DistributionBionic:
return true
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
return false
case DistributionCoreOS, DistributionFlatcar, DistributionContainerOS:
return false

@@ -116,7 +119,7 @@ func (d Distribution) IsUbuntu() bool {

func (d Distribution) IsRHELFamily() bool {
switch d {
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
return true
case DistributionJessie, DistributionXenial, DistributionBionic, DistributionDebian9, DistributionDebian10:
return false

@@ -132,7 +135,7 @@ func (d Distribution) IsSystemd() bool {
switch d {
case DistributionJessie, DistributionXenial, DistributionBionic, DistributionDebian9, DistributionDebian10:
return true
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
return true
case DistributionCoreOS, DistributionFlatcar:
return true

@@ -113,8 +113,7 @@ func FindDistribution(rootfs string) (Distribution, error) {
return DistributionContainerOS, nil
}
if strings.HasPrefix(line, "PRETTY_NAME=\"Amazon Linux 2") {
// TODO: This is a hack. Amazon Linux is "special" and should get its own distro entry
return DistributionRhel7, nil
return DistributionAmazonLinux2, nil
}
}
klog.Warningf("unhandled /etc/os-release info %q", string(osRelease))
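As an aside, a minimal standalone sketch (not part of the diff) of the detection pattern the hunk above switches to: Amazon Linux 2 is now mapped to its own DistributionAmazonLinux2 value instead of rhel7 by matching the PRETTY_NAME prefix in /etc/os-release. The sample os-release content below is hypothetical, for illustration only.

```go
package main

import (
	"fmt"
	"strings"
)

type Distribution string

const DistributionAmazonLinux2 Distribution = "amazonlinux2"

// detect mirrors the PRETTY_NAME prefix check shown in the diff above.
func detect(osRelease string) Distribution {
	for _, line := range strings.Split(osRelease, "\n") {
		if strings.HasPrefix(line, "PRETTY_NAME=\"Amazon Linux 2") {
			return DistributionAmazonLinux2
		}
	}
	return "" // unhandled distribution
}

func main() {
	// Hypothetical /etc/os-release excerpt.
	sample := "NAME=\"Amazon Linux\"\nPRETTY_NAME=\"Amazon Linux 2\""
	fmt.Println(detect(sample)) // prints: amazonlinux2
}
```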
@@ -59,6 +59,7 @@ go_library(
"//pkg/systemd:go_default_library",
"//pkg/tokens:go_default_library",
"//pkg/try:go_default_library",
"//pkg/wellknownusers:go_default_library",
"//upup/pkg/fi:go_default_library",
"//upup/pkg/fi/cloudup/awsup:go_default_library",
"//upup/pkg/fi/nodeup/nodetasks:go_default_library",

@@ -97,7 +97,7 @@ var containerdVersions = []packageVersion{
{
PackageVersion: "1.2.10",
Name: "containerd.io",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.2.10",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.10-3.2.el7.x86_64.rpm",

@@ -135,6 +135,16 @@ var containerdVersions = []packageVersion{
Hash: "f451d46280104588f236bee277bca1da8babc0e8",
},

// 1.3.3 - Linux Generic
{
PackageVersion: "1.3.3",
PlainBinary: true,
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.3.3",
Source: "https://storage.googleapis.com/cri-containerd-release/cri-containerd-1.3.3.linux-amd64.tar.gz",
Hash: "921b74e84da366ec3eaa72ff97fa8d6ae56834c6",
},

// TIP: When adding the next version, copy the previous version, string replace the version and run:
// VERIFY_HASHES=1 go test ./nodeup/pkg/model -run TestContainerdPackageHashes
// (you might want to temporarily comment out older versions on a slower connection and then validate)

@@ -360,11 +360,7 @@ func (c *NodeupModelContext) UseNodeAuthorizer() bool {

// UsesSecondaryIP checks if the CNI in use attaches secondary interfaces to the host.
func (c *NodeupModelContext) UsesSecondaryIP() bool {
if (c.Cluster.Spec.Networking.CNI != nil && c.Cluster.Spec.Networking.CNI.UsesSecondaryIP) || c.Cluster.Spec.Networking.AmazonVPC != nil || c.Cluster.Spec.Networking.LyftVPC != nil {
return true
}

return false
return (c.Cluster.Spec.Networking.CNI != nil && c.Cluster.Spec.Networking.CNI.UsesSecondaryIP) || c.Cluster.Spec.Networking.AmazonVPC != nil || c.Cluster.Spec.Networking.LyftVPC != nil || (c.Cluster.Spec.Networking.Cilium != nil && c.Cluster.Spec.Networking.Cilium.Ipam == kops.CiliumIpamEni)
}

// UseBootstrapTokens checks if we are using bootstrap tokens

@@ -40,11 +40,12 @@ func (b *DirectoryBuilder) Build(c *fi.ModelBuilderContext) error {
Path: dirname,
Type: nodetasks.FileType_Directory,
Mode: s("0755"),
})

OnChangeExecute: [][]string{
{"/bin/mount", "--bind", dirname, dirname},
{"/bin/mount", "-o", "remount,exec", dirname},
},
c.AddTask(&nodetasks.BindMount{
Source: dirname,
Mountpoint: dirname,
Options: []string{"exec"},
})
}

@@ -74,7 +74,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.11.2",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.11.2",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.11.2-1.el7.centos.x86_64.rpm",

@@ -117,7 +117,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.12.1",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.12.1",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.1-1.el7.centos.x86_64.rpm",

@@ -176,7 +176,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.12.3",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.12.3",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.3-1.el7.centos.x86_64.rpm",

@@ -250,7 +250,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.12.6",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.12.6",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.6-1.el7.centos.x86_64.rpm",

@@ -324,7 +324,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.13.1",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.13.1",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.13.1-1.el7.centos.x86_64.rpm",

@@ -409,7 +409,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "17.03.2",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "17.03.2.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm",

@@ -508,7 +508,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "17.09.0",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "17.09.0.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-17.09.0.ce-1.el7.centos.x86_64.rpm",

@@ -598,7 +598,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "18.06.1",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "18.06.1.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.1.ce-3.el7.x86_64.rpm",

@@ -628,7 +628,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "18.06.2",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "18.06.2.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.2.ce-3.el7.x86_64.rpm",

@@ -681,7 +681,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "18.06.3",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "18.06.3.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.3.ce-3.el7.x86_64.rpm",

@@ -782,7 +782,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "18.09.9",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "18.09.9",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.09.9-3.el7.x86_64.rpm",

@@ -898,7 +898,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "19.03.4",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "19.03.4",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-19.03.4-3.el7.x86_64.rpm",

@@ -26,6 +26,7 @@ import (
"k8s.io/kops/pkg/k8scodecs"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/pkg/kubemanifest"
"k8s.io/kops/pkg/wellknownusers"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kops/util/pkg/exec"

@@ -225,7 +226,7 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte
{
c.AddTask(&nodetasks.UserTask{
Name: "aws-iam-authenticator",
UID: 10000,
UID: wellknownusers.AWSAuthenticator,
Shell: "/sbin/nologin",
Home: "/srv/kubernetes/aws-iam-authenticator",
})

@@ -33,8 +33,8 @@ import (

func TestKubeProxyBuilder_buildPod(t *testing.T) {
// kube proxy spec can be found here.
// https://godoc.org/k8s.io/kops/pkg/apis/kops#ClusterSpec
// https://godoc.org/k8s.io/kops/pkg/apis/kops#KubeProxyConfig
// https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#ClusterSpec
// https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#KubeProxyConfig

var cluster = &kops.Cluster{}
cluster.Spec.MasterInternalName = "dev-cluster"

@@ -63,7 +63,7 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
if !b.IsMaster {
return nil
}
useConfigFile := b.IsKubernetesGTE("1.11")
useConfigFile := b.IsKubernetesGTE("1.12")
{
pod, err := b.buildPod(useConfigFile)
if err != nil {

@@ -65,7 +65,6 @@ func (b *LogrotateBuilder) Build(c *fi.ModelBuilderContext) error {
b.addLogRotate(c, "kubelet", "/var/log/kubelet.log", logRotateOptions{})
b.addLogRotate(c, "etcd", "/var/log/etcd.log", logRotateOptions{})
b.addLogRotate(c, "etcd-events", "/var/log/etcd-events.log", logRotateOptions{})
b.addLogRotate(c, "kops-controller", "/var/log/kops-controller.log", logRotateOptions{})

if err := b.addLogrotateService(c); err != nil {
return err

@@ -56,17 +56,20 @@ func (b *PackagesBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(&nodetasks.Package{Name: "libseccomp"})
c.AddTask(&nodetasks.Package{Name: "socat"})
c.AddTask(&nodetasks.Package{Name: "util-linux"})

// Handle RHEL 7 and Amazon Linux 2 differently when installing "extras"
if b.Distribution != distros.DistributionRhel7 {
c.AddTask(&nodetasks.Package{Name: "container-selinux"})
c.AddTask(&nodetasks.Package{Name: "pigz"})
} else {
// Handle some packages differently for each distro
switch b.Distribution {
case distros.DistributionRhel7:
// Easier to install container-selinux from CentOS than extras
c.AddTask(&nodetasks.Package{
Name: "container-selinux",
Source: s("http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm"),
Hash: s("7de4211fa0dfd240d8827b93763e1eb5f0d56411"),
})
case distros.DistributionAmazonLinux2:
// Amazon Linux 2 doesn't have SELinux enabled by default
default:
c.AddTask(&nodetasks.Package{Name: "container-selinux"})
c.AddTask(&nodetasks.Package{Name: "pigz"})
}
} else {
// Hopefully they are already installed

@@ -12,5 +12,6 @@ go_library(
"//upup/pkg/fi/cloudup/gce:go_default_library",
"//util/pkg/vfs:go_default_library",
"//vendor/google.golang.org/api/storage/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -20,6 +20,7 @@ import (
"fmt"

storage "google.golang.org/api/storage/v1"
"k8s.io/klog"
"k8s.io/kops/pkg/acls"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi/cloudup"

@@ -52,6 +53,16 @@ func (s *gcsAclStrategy) GetACL(p vfs.Path, cluster *kops.Cluster) (vfs.ACL, err
return nil, fmt.Errorf("error querying bucket %q: %v", bucketName, err)
}

bucketPolicyOnly := false
if bucket.IamConfiguration != nil && bucket.IamConfiguration.BucketPolicyOnly != nil {
bucketPolicyOnly = bucket.IamConfiguration.BucketPolicyOnly.Enabled
}

if bucketPolicyOnly {
klog.V(2).Infof("bucket gs://%s has bucket-policy only; won't try to set ACLs", bucketName)
return nil, nil
}

// TODO: Cache?
cloud, err := cloudup.BuildCloud(cluster)
if err != nil {
@@ -136,6 +136,8 @@ type ClusterSpec struct {
// 'external' do not apply updates automatically - they are applied manually or by an external system
// missing: default policy (currently OS security upgrades that do not require a reboot)
UpdatePolicy *string `json:"updatePolicy,omitempty"`
// ExternalPolicies allows the insertion of pre-existing managed policies on IG Roles
ExternalPolicies *map[string][]string `json:"externalPolicies,omitempty"`
// Additional policies to add for roles
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
// A collection of files assets for deployed cluster wide
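A minimal sketch (not from the diff) of how the new ExternalPolicies field might be populated; the role names and policy ARN below are hypothetical examples, used only to illustrate the *map[string][]string shape:

```go
package main

import "fmt"

// clusterSpec mirrors just the new field from the hunk above, for illustration.
type clusterSpec struct {
	ExternalPolicies *map[string][]string `json:"externalPolicies,omitempty"`
}

func main() {
	// Hypothetical instance group roles mapped to pre-existing managed policy ARNs.
	policies := map[string][]string{
		"node":   {"arn:aws:iam::123456789012:policy/example-node-policy"},
		"master": {"arn:aws:iam::123456789012:policy/example-master-policy"},
	}
	// The field is a pointer to a map, so its address is taken when assigning.
	spec := clusterSpec{ExternalPolicies: &policies}
	fmt.Println((*spec.ExternalPolicies)["node"])
}
```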
@@ -416,6 +418,11 @@ const (
EtcdProviderTypeLegacy EtcdProviderType = "Legacy"
)

var SupportedEtcdProviderTypes = []string{
string(EtcdProviderTypeManager),
string(EtcdProviderTypeLegacy),
}

// EtcdClusterSpec is the etcd cluster specification
type EtcdClusterSpec struct {
// Name is the name of the etcd cluster (main, events etc)
@@ -643,6 +650,23 @@ func (c *Cluster) IsKubernetesGTE(version string) bool {
return clusterVersion.GTE(*parsedVersion)
}

// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
// Name of the environment variable. Must be a C_IDENTIFIER.
Name string `json:"name"`

// Variable references $(VAR_NAME) are expanded
// using the previous defined environment variables in the container and
// any service environment variables. If a variable cannot be resolved,
// the reference in the input string will be unchanged. The $(VAR_NAME)
// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
// references will never be expanded, regardless of whether the variable
// exists or not.
// Defaults to "".
// +optional
Value string `json:"value,omitempty"`
}

type GossipConfig struct {
Protocol *string `json:"protocol,omitempty"`
Listen *string `json:"listen,omitempty"`
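For context, a minimal sketch (not part of the change) of constructing values of the EnvVar type added above; the variable names and values are hypothetical:

```go
package main

import "fmt"

// EnvVar mirrors the type introduced in the diff above.
type EnvVar struct {
	Name  string `json:"name"`
	Value string `json:"value,omitempty"`
}

func main() {
	// Hypothetical environment variables, for illustration only.
	env := []EnvVar{
		{Name: "GODEBUG", Value: "gctrace=1"},
		// Per the doc comment above, $$(HOME) is an escaped reference and stays literal.
		{Name: "EXAMPLE_PATH", Value: "$$(HOME)/example"},
	}
	fmt.Println(env)
}
```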
@@ -187,12 +187,12 @@ type RomanaNetworkingSpec struct {

// AmazonVPCNetworkingSpec declares that we want Amazon VPC CNI networking
type AmazonVPCNetworkingSpec struct {
// The container image name to use, which by default is:
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.5
// The container image name to use
ImageName string `json:"imageName,omitempty"`
}

const CiliumDefaultVersion = "v1.6.4"
const CiliumDefaultVersion = "v1.6.6"
const CiliumIpamEni = "eni"

// CiliumNetworkingSpec declares that we want Cilium networking
type CiliumNetworkingSpec struct {

@@ -200,6 +200,7 @@ type CiliumNetworkingSpec struct {

AccessLog string `json:"accessLog,omitempty"`
AgentLabels []string `json:"agentLabels,omitempty"`
AgentPrometheusPort int `json:"agentPrometheusPort,omitempty"`
AllowLocalhost string `json:"allowLocalhost,omitempty"`
AutoIpv6NodeRoutes bool `json:"autoIpv6NodeRoutes,omitempty"`
BPFRoot string `json:"bpfRoot,omitempty"`

@@ -213,6 +214,7 @@ type CiliumNetworkingSpec struct {
DisableK8sServices bool `json:"disableK8sServices,omitempty"`
EnablePolicy string `json:"enablePolicy,omitempty"`
EnableTracing bool `json:"enableTracing,omitempty"`
EnablePrometheusMetrics bool `json:"enablePrometheusMetrics,omitempty"`
EnvoyLog string `json:"envoyLog,omitempty"`
Ipv4ClusterCIDRMaskSize int `json:"ipv4ClusterCidrMaskSize,omitempty"`
Ipv4Node string `json:"ipv4Node,omitempty"`

@@ -260,6 +262,7 @@ type CiliumNetworkingSpec struct {
IPTablesRulesNoinstall bool `json:"IPTablesRulesNoinstall"`
AutoDirectNodeRoutes bool `json:"autoDirectNodeRoutes"`
EnableNodePort bool `json:"enableNodePort"`
Ipam string `json:"ipam,omitempty"`

//node init options
RemoveCbrBridge bool `json:"removeCbrBridge"`

@@ -33,7 +33,7 @@ const (

func ConfigBase(c *api.Cluster) (vfs.Path, error) {
if c.Spec.ConfigBase == "" {
return nil, field.Required(field.NewPath("Spec", "ConfigBase"), "")
return nil, field.Required(field.NewPath("spec", "configBase"), "")
}
configBase, err := vfs.Context.BuildVfsPath(c.Spec.ConfigBase)
if err != nil {

@@ -21,6 +21,11 @@ const (
TopologyPrivate = "private"
)

var SupportedTopologies = []string{
TopologyPublic,
TopologyPrivate,
}

type TopologySpec struct {
// The environment to launch the Kubernetes masters in public|private
Masters string `json:"masters,omitempty"`

@@ -130,6 +130,8 @@ type ClusterSpec struct {
// 'external' do not apply updates automatically - they are applied manually or by an external system
// missing: default policy (currently OS security upgrades that do not require a reboot)
UpdatePolicy *string `json:"updatePolicy,omitempty"`
// ExternalPolicies allows the insertion of pre-existing managed policies on IG Roles
ExternalPolicies *map[string][]string `json:"externalPolicies,omitempty"`
// Additional policies to add for roles
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
// A collection of files assets for deployed cluster wide

@@ -527,6 +529,23 @@ func (t *TerraformSpec) IsEmpty() bool {
return t.ProviderExtraConfig == nil
}

// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
// Name of the environment variable. Must be a C_IDENTIFIER.
Name string `json:"name"`

// Variable references $(VAR_NAME) are expanded
// using the previous defined environment variables in the container and
// any service environment variables. If a variable cannot be resolved,
// the reference in the input string will be unchanged. The $(VAR_NAME)
// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
// references will never be expanded, regardless of whether the variable
// exists or not.
// Defaults to "".
// +optional
Value string `json:"value,omitempty"`
}

type GossipConfig struct {
Protocol *string `json:"protocol,omitempty"`
Listen *string `json:"listen,omitempty"`

@@ -187,8 +187,7 @@ type RomanaNetworkingSpec struct {

// AmazonVPCNetworkingSpec declares that we want Amazon VPC CNI networking
type AmazonVPCNetworkingSpec struct {
// The container image name to use, which by default is:
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.5
// The container image name to use
ImageName string `json:"imageName,omitempty"`
}

@@ -197,6 +196,7 @@ type CiliumNetworkingSpec struct {

AccessLog string `json:"accessLog,omitempty"`
AgentLabels []string `json:"agentLabels,omitempty"`
AgentPrometheusPort int `json:"agentPrometheusPort,omitempty"`
AllowLocalhost string `json:"allowLocalhost,omitempty"`
AutoIpv6NodeRoutes bool `json:"autoIpv6NodeRoutes,omitempty"`
BPFRoot string `json:"bpfRoot,omitempty"`

@@ -209,6 +209,7 @@ type CiliumNetworkingSpec struct {
DisableIpv4 bool `json:"disableIpv4,omitempty"`
DisableK8sServices bool `json:"disableK8sServices,omitempty"`
EnablePolicy string `json:"enablePolicy,omitempty"`
EnablePrometheusMetrics bool `json:"enablePrometheusMetrics,omitempty"`
EnableTracing bool `json:"enableTracing,omitempty"`
EnvoyLog string `json:"envoyLog,omitempty"`
Ipv4ClusterCIDRMaskSize int `json:"ipv4ClusterCidrMaskSize,omitempty"`

@@ -257,6 +258,7 @@ type CiliumNetworkingSpec struct {
IPTablesRulesNoinstall bool `json:"IPTablesRulesNoinstall"`
AutoDirectNodeRoutes bool `json:"autoDirectNodeRoutes"`
EnableNodePort bool `json:"enableNodePort"`
Ipam string `json:"ipam,omitempty"`

//node init options
RemoveCbrBridge bool `json:"removeCbrBridge"`

@@ -273,6 +273,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*EnvVar)(nil), (*kops.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_EnvVar_To_kops_EnvVar(a.(*EnvVar), b.(*kops.EnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kops.EnvVar)(nil), (*EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kops_EnvVar_To_v1alpha1_EnvVar(a.(*kops.EnvVar), b.(*EnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*EtcdBackupSpec)(nil), (*kops.EtcdBackupSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_EtcdBackupSpec_To_kops_EtcdBackupSpec(a.(*EtcdBackupSpec), b.(*kops.EtcdBackupSpec), scope)
}); err != nil {

@@ -1240,6 +1250,7 @@ func autoConvert_v1alpha1_CiliumNetworkingSpec_To_kops_CiliumNetworkingSpec(in *
out.Version = in.Version
out.AccessLog = in.AccessLog
out.AgentLabels = in.AgentLabels
out.AgentPrometheusPort = in.AgentPrometheusPort
out.AllowLocalhost = in.AllowLocalhost
out.AutoIpv6NodeRoutes = in.AutoIpv6NodeRoutes
out.BPFRoot = in.BPFRoot

@@ -1252,6 +1263,7 @@ func autoConvert_v1alpha1_CiliumNetworkingSpec_To_kops_CiliumNetworkingSpec(in *
out.DisableIpv4 = in.DisableIpv4
out.DisableK8sServices = in.DisableK8sServices
out.EnablePolicy = in.EnablePolicy
out.EnablePrometheusMetrics = in.EnablePrometheusMetrics
out.EnableTracing = in.EnableTracing
out.EnvoyLog = in.EnvoyLog
out.Ipv4ClusterCIDRMaskSize = in.Ipv4ClusterCIDRMaskSize

@@ -1299,6 +1311,7 @@ func autoConvert_v1alpha1_CiliumNetworkingSpec_To_kops_CiliumNetworkingSpec(in *
out.IPTablesRulesNoinstall = in.IPTablesRulesNoinstall
out.AutoDirectNodeRoutes = in.AutoDirectNodeRoutes
out.EnableNodePort = in.EnableNodePort
out.Ipam = in.Ipam
out.RemoveCbrBridge = in.RemoveCbrBridge
out.RestartPods = in.RestartPods
out.ReconfigureKubelet = in.ReconfigureKubelet

@@ -1316,6 +1329,7 @@ func autoConvert_kops_CiliumNetworkingSpec_To_v1alpha1_CiliumNetworkingSpec(in *
out.Version = in.Version
out.AccessLog = in.AccessLog
out.AgentLabels = in.AgentLabels
out.AgentPrometheusPort = in.AgentPrometheusPort
out.AllowLocalhost = in.AllowLocalhost
out.AutoIpv6NodeRoutes = in.AutoIpv6NodeRoutes
out.BPFRoot = in.BPFRoot

@@ -1329,6 +1343,7 @@ func autoConvert_kops_CiliumNetworkingSpec_To_v1alpha1_CiliumNetworkingSpec(in *
out.DisableK8sServices = in.DisableK8sServices
out.EnablePolicy = in.EnablePolicy
out.EnableTracing = in.EnableTracing
out.EnablePrometheusMetrics = in.EnablePrometheusMetrics
out.EnvoyLog = in.EnvoyLog
out.Ipv4ClusterCIDRMaskSize = in.Ipv4ClusterCIDRMaskSize
out.Ipv4Node = in.Ipv4Node

@@ -1375,6 +1390,7 @@ func autoConvert_kops_CiliumNetworkingSpec_To_v1alpha1_CiliumNetworkingSpec(in *
out.IPTablesRulesNoinstall = in.IPTablesRulesNoinstall
out.AutoDirectNodeRoutes = in.AutoDirectNodeRoutes
out.EnableNodePort = in.EnableNodePort
out.Ipam = in.Ipam
out.RemoveCbrBridge = in.RemoveCbrBridge
out.RestartPods = in.RestartPods
out.ReconfigureKubelet = in.ReconfigureKubelet

@@ -1658,6 +1674,7 @@ func autoConvert_v1alpha1_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
// WARNING: in.AdminAccess requires manual conversion: does not exist in peer-type
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
out.ExternalPolicies = in.ExternalPolicies
out.AdditionalPolicies = in.AdditionalPolicies
if in.FileAssets != nil {
in, out := &in.FileAssets, &out.FileAssets

@@ -1975,6 +1992,7 @@ func autoConvert_kops_ClusterSpec_To_v1alpha1_ClusterSpec(in *kops.ClusterSpec,
// WARNING: in.KubernetesAPIAccess requires manual conversion: does not exist in peer-type
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
out.ExternalPolicies = in.ExternalPolicies
out.AdditionalPolicies = in.AdditionalPolicies
if in.FileAssets != nil {
in, out := &in.FileAssets, &out.FileAssets

@@ -2415,6 +2433,28 @@ func Convert_kops_EgressProxySpec_To_v1alpha1_EgressProxySpec(in *kops.EgressPro
return autoConvert_kops_EgressProxySpec_To_v1alpha1_EgressProxySpec(in, out, s)
}

func autoConvert_v1alpha1_EnvVar_To_kops_EnvVar(in *EnvVar, out *kops.EnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}

// Convert_v1alpha1_EnvVar_To_kops_EnvVar is an autogenerated conversion function.
func Convert_v1alpha1_EnvVar_To_kops_EnvVar(in *EnvVar, out *kops.EnvVar, s conversion.Scope) error {
return autoConvert_v1alpha1_EnvVar_To_kops_EnvVar(in, out, s)
}

func autoConvert_kops_EnvVar_To_v1alpha1_EnvVar(in *kops.EnvVar, out *EnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}

// Convert_kops_EnvVar_To_v1alpha1_EnvVar is an autogenerated conversion function.
func Convert_kops_EnvVar_To_v1alpha1_EnvVar(in *kops.EnvVar, out *EnvVar, s conversion.Scope) error {
return autoConvert_kops_EnvVar_To_v1alpha1_EnvVar(in, out, s)
}

func autoConvert_v1alpha1_EtcdBackupSpec_To_kops_EtcdBackupSpec(in *EtcdBackupSpec, out *kops.EtcdBackupSpec, s conversion.Scope) error {
out.BackupStore = in.BackupStore
out.Image = in.Image
@ -651,6 +651,25 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
|||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.ExternalPolicies != nil {
|
||||
in, out := &in.ExternalPolicies, &out.ExternalPolicies
|
||||
*out = new(map[string][]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make(map[string][]string, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal []string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
in, out := &val, &outVal
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
}
|
||||
if in.AdditionalPolicies != nil {
|
||||
in, out := &in.AdditionalPolicies, &out.AdditionalPolicies
|
||||
*out = new(map[string]string)
|
||||
|
|
@ -1120,6 +1139,22 @@ func (in *EgressProxySpec) DeepCopy() *EgressProxySpec {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EnvVar) DeepCopyInto(out *EnvVar) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar.
|
||||
func (in *EnvVar) DeepCopy() *EnvVar {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EnvVar)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) {
|
||||
*out = *in
|
||||
|
|
|
|||
|
|
@ -135,6 +135,8 @@ type ClusterSpec struct {
|
|||
// 'external' do not apply updates automatically - they are applied manually or by an external system
|
||||
// missing: default policy (currently OS security upgrades that do not require a reboot)
|
||||
UpdatePolicy *string `json:"updatePolicy,omitempty"`
|
||||
// ExternalPolicies allows the insertion of pre-existing managed policies on IG Roles
|
||||
ExternalPolicies *map[string][]string `json:"externalPolicies,omitempty"`
|
||||
// Additional policies to add for roles
|
||||
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
|
||||
// A collection of files assets for deployed cluster wide
|
||||
|
|
@ -540,6 +542,23 @@ func (t *TerraformSpec) IsEmpty() bool {
|
|||
return t.ProviderExtraConfig == nil
|
||||
}
|
||||
|
||||
// EnvVar represents an environment variable present in a Container.
|
||||
type EnvVar struct {
|
||||
// Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
Name string `json:"name"`
|
||||
|
||||
// Variable references $(VAR_NAME) are expanded
|
||||
// using the previous defined environment variables in the container and
|
||||
// any service environment variables. If a variable cannot be resolved,
|
||||
// the reference in the input string will be unchanged. The $(VAR_NAME)
|
||||
// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
|
||||
// references will never be expanded, regardless of whether the variable
|
||||
// exists or not.
|
||||
// Defaults to "".
|
||||
// +optional
|
||||
Value string `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
type GossipConfig struct {
|
||||
Protocol *string `json:"protocol,omitempty"`
|
||||
Listen *string `json:"listen,omitempty"`
|
||||
|
|
|
|||
|
|
@ -187,8 +187,7 @@ type RomanaNetworkingSpec struct {
|
|||
|
||||
// AmazonVPCNetworkingSpec declares that we want Amazon VPC CNI networking
|
||||
type AmazonVPCNetworkingSpec struct {
|
||||
// The container image name to use, which by default is:
|
||||
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.5
|
||||
// The container image name to use
|
||||
ImageName string `json:"imageName,omitempty"`
|
||||
}
|
||||
|
||||
|
|
@ -198,6 +197,7 @@ type CiliumNetworkingSpec struct {
|
|||
|
||||
AccessLog string `json:"accessLog,omitempty"`
|
||||
AgentLabels []string `json:"agentLabels,omitempty"`
|
||||
AgentPrometheusPort int `json:"agentPrometheusPort,omitempty"`
|
||||
AllowLocalhost string `json:"allowLocalhost,omitempty"`
|
||||
AutoIpv6NodeRoutes bool `json:"autoIpv6NodeRoutes,omitempty"`
|
||||
BPFRoot string `json:"bpfRoot,omitempty"`
|
||||
|
|
@ -210,6 +210,7 @@ type CiliumNetworkingSpec struct {
|
|||
DisableIpv4 bool `json:"disableIpv4,omitempty"`
|
||||
DisableK8sServices bool `json:"disableK8sServices,omitempty"`
|
||||
EnablePolicy string `json:"enablePolicy,omitempty"`
|
||||
EnablePrometheusMetrics bool `json:"enablePrometheusMetrics,omitempty"`
|
||||
EnableTracing bool `json:"enableTracing,omitempty"`
|
||||
EnvoyLog string `json:"envoyLog,omitempty"`
|
||||
Ipv4ClusterCIDRMaskSize int `json:"ipv4ClusterCidrMaskSize,omitempty"`
|
||||
|
|
@ -258,6 +259,7 @@ type CiliumNetworkingSpec struct {
|
|||
IPTablesRulesNoinstall bool `json:"IPTablesRulesNoinstall"`
|
||||
AutoDirectNodeRoutes bool `json:"autoDirectNodeRoutes"`
|
||||
EnableNodePort bool `json:"enableNodePort"`
|
||||
Ipam string `json:"ipam,omitempty"`
|
||||
|
||||
//node init options
|
||||
RemoveCbrBridge bool `json:"removeCbrBridge"`
|
||||
|
|
|
|||
|
|
@ -293,6 +293,16 @@ func RegisterConversions(s *runtime.Scheme) error {
|
|||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*EnvVar)(nil), (*kops.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_EnvVar_To_kops_EnvVar(a.(*EnvVar), b.(*kops.EnvVar), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*kops.EnvVar)(nil), (*EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_kops_EnvVar_To_v1alpha2_EnvVar(a.(*kops.EnvVar), b.(*EnvVar), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*EtcdBackupSpec)(nil), (*kops.EtcdBackupSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_EtcdBackupSpec_To_kops_EtcdBackupSpec(a.(*EtcdBackupSpec), b.(*kops.EtcdBackupSpec), scope)
|
||||
}); err != nil {
|
||||
|
|
@ -1282,6 +1292,7 @@ func autoConvert_v1alpha2_CiliumNetworkingSpec_To_kops_CiliumNetworkingSpec(in *
|
|||
out.Version = in.Version
|
||||
out.AccessLog = in.AccessLog
|
||||
out.AgentLabels = in.AgentLabels
|
||||
out.AgentPrometheusPort = in.AgentPrometheusPort
|
||||
out.AllowLocalhost = in.AllowLocalhost
|
||||
out.AutoIpv6NodeRoutes = in.AutoIpv6NodeRoutes
|
||||
out.BPFRoot = in.BPFRoot
|
||||
|
|
@ -1294,6 +1305,7 @@ func autoConvert_v1alpha2_CiliumNetworkingSpec_To_kops_CiliumNetworkingSpec(in *
|
|||
out.DisableIpv4 = in.DisableIpv4
|
||||
out.DisableK8sServices = in.DisableK8sServices
|
||||
out.EnablePolicy = in.EnablePolicy
out.EnablePrometheusMetrics = in.EnablePrometheusMetrics
out.EnableTracing = in.EnableTracing
out.EnvoyLog = in.EnvoyLog
out.Ipv4ClusterCIDRMaskSize = in.Ipv4ClusterCIDRMaskSize
@@ -1341,6 +1353,7 @@ func autoConvert_v1alpha2_CiliumNetworkingSpec_To_kops_CiliumNetworkingSpec(in *
out.IPTablesRulesNoinstall = in.IPTablesRulesNoinstall
out.AutoDirectNodeRoutes = in.AutoDirectNodeRoutes
out.EnableNodePort = in.EnableNodePort
out.Ipam = in.Ipam
out.RemoveCbrBridge = in.RemoveCbrBridge
out.RestartPods = in.RestartPods
out.ReconfigureKubelet = in.ReconfigureKubelet
@@ -1358,6 +1371,7 @@ func autoConvert_kops_CiliumNetworkingSpec_To_v1alpha2_CiliumNetworkingSpec(in *
out.Version = in.Version
out.AccessLog = in.AccessLog
out.AgentLabels = in.AgentLabels
out.AgentPrometheusPort = in.AgentPrometheusPort
out.AllowLocalhost = in.AllowLocalhost
out.AutoIpv6NodeRoutes = in.AutoIpv6NodeRoutes
out.BPFRoot = in.BPFRoot
@@ -1371,6 +1385,7 @@ func autoConvert_kops_CiliumNetworkingSpec_To_v1alpha2_CiliumNetworkingSpec(in *
out.DisableK8sServices = in.DisableK8sServices
out.EnablePolicy = in.EnablePolicy
out.EnableTracing = in.EnableTracing
out.EnablePrometheusMetrics = in.EnablePrometheusMetrics
out.EnvoyLog = in.EnvoyLog
out.Ipv4ClusterCIDRMaskSize = in.Ipv4ClusterCIDRMaskSize
out.Ipv4Node = in.Ipv4Node
@@ -1417,6 +1432,7 @@ func autoConvert_kops_CiliumNetworkingSpec_To_v1alpha2_CiliumNetworkingSpec(in *
out.IPTablesRulesNoinstall = in.IPTablesRulesNoinstall
out.AutoDirectNodeRoutes = in.AutoDirectNodeRoutes
out.EnableNodePort = in.EnableNodePort
out.Ipam = in.Ipam
out.RemoveCbrBridge = in.RemoveCbrBridge
out.RestartPods = in.RestartPods
out.ReconfigureKubelet = in.ReconfigureKubelet
@@ -1721,6 +1737,7 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.KubernetesAPIAccess = in.KubernetesAPIAccess
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
out.ExternalPolicies = in.ExternalPolicies
out.AdditionalPolicies = in.AdditionalPolicies
if in.FileAssets != nil {
in, out := &in.FileAssets, &out.FileAssets
@@ -2043,6 +2060,7 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
out.KubernetesAPIAccess = in.KubernetesAPIAccess
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
out.ExternalPolicies = in.ExternalPolicies
out.AdditionalPolicies = in.AdditionalPolicies
if in.FileAssets != nil {
in, out := &in.FileAssets, &out.FileAssets
@@ -2522,6 +2540,28 @@ func Convert_kops_EgressProxySpec_To_v1alpha2_EgressProxySpec(in *kops.EgressPro
return autoConvert_kops_EgressProxySpec_To_v1alpha2_EgressProxySpec(in, out, s)
}

func autoConvert_v1alpha2_EnvVar_To_kops_EnvVar(in *EnvVar, out *kops.EnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}

// Convert_v1alpha2_EnvVar_To_kops_EnvVar is an autogenerated conversion function.
func Convert_v1alpha2_EnvVar_To_kops_EnvVar(in *EnvVar, out *kops.EnvVar, s conversion.Scope) error {
return autoConvert_v1alpha2_EnvVar_To_kops_EnvVar(in, out, s)
}

func autoConvert_kops_EnvVar_To_v1alpha2_EnvVar(in *kops.EnvVar, out *EnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}

// Convert_kops_EnvVar_To_v1alpha2_EnvVar is an autogenerated conversion function.
func Convert_kops_EnvVar_To_v1alpha2_EnvVar(in *kops.EnvVar, out *EnvVar, s conversion.Scope) error {
return autoConvert_kops_EnvVar_To_v1alpha2_EnvVar(in, out, s)
}

func autoConvert_v1alpha2_EtcdBackupSpec_To_kops_EtcdBackupSpec(in *EtcdBackupSpec, out *kops.EtcdBackupSpec, s conversion.Scope) error {
out.BackupStore = in.BackupStore
out.Image = in.Image
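
Editor's note: the generated EnvVar conversions added above only copy Name and Value, so they can be exercised directly. The sketch below is an assumed usage example, not part of this commit; the import paths and the nil conversion.Scope are assumptions.

```go
package main

import (
	"fmt"

	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/apis/kops/v1alpha2"
)

func main() {
	in := &v1alpha2.EnvVar{Name: "GODEBUG", Value: "netdns=go"}
	out := &kops.EnvVar{}
	// The generated function only copies two string fields, so no real Scope is needed here.
	if err := v1alpha2.Convert_v1alpha2_EnvVar_To_kops_EnvVar(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Value) // GODEBUG netdns=go
}
```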
@@ -634,6 +634,25 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(string)
**out = **in
}
if in.ExternalPolicies != nil {
in, out := &in.ExternalPolicies, &out.ExternalPolicies
*out = new(map[string][]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string][]string, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
}
if in.AdditionalPolicies != nil {
in, out := &in.AdditionalPolicies, &out.AdditionalPolicies
*out = new(map[string]string)
@@ -1093,6 +1112,22 @@ func (in *EgressProxySpec) DeepCopy() *EgressProxySpec {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvVar) DeepCopyInto(out *EnvVar) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar.
func (in *EnvVar) DeepCopy() *EnvVar {
if in == nil {
return nil
}
out := new(EnvVar)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) {
*out = *in
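
Editor's note: the generated ExternalPolicies block above copies the map and each value slice instead of assigning the map directly. The self-contained sketch below (the example data is an assumption) shows why a shallow assignment would be wrong: both names would alias the same map, so writes through the copy would leak into the original.

```go
package main

import "fmt"

func main() {
	original := map[string][]string{"node": {"policy-a"}}

	shallow := original // aliases the same underlying map

	deep := make(map[string][]string, len(original))
	for k, v := range original {
		vv := make([]string, len(v))
		copy(vv, v)
		deep[k] = vv
	}

	shallow["node"] = append(shallow["node"], "policy-b") // visible through original
	deep["node"] = append(deep["node"], "policy-c")       // original unaffected

	fmt.Println(original["node"]) // [policy-a policy-b]
	fmt.Println(deep["node"])     // [policy-a policy-c]
}
```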
@@ -57,7 +57,7 @@ func awsValidateAdditionalSecurityGroups(fieldPath *field.Path, groups []string)
names := sets.NewString()
for i, s := range groups {
if names.Has(s) {
allErrs = append(allErrs, field.Invalid(fieldPath.Index(i), s, "security groups with duplicate name found"))
allErrs = append(allErrs, field.Duplicate(fieldPath.Index(i), s))
}
names.Insert(s)
if strings.TrimSpace(s) == "" {
@@ -23,11 +23,7 @@ import (
)

func ValidateClusterUpdate(obj *kops.Cluster, status *kops.ClusterStatus, old *kops.Cluster) field.ErrorList {
allErrs := field.ErrorList{}

if err := ValidateCluster(obj, false); err != nil {
allErrs = append(allErrs, err)
}
allErrs := ValidateCluster(obj, false)

// Validate etcd cluster changes
{
@@ -64,7 +60,7 @@ func validateEtcdClusterUpdate(fp *field.Path, obj *kops.EtcdClusterSpec, status
allErrs := field.ErrorList{}

if obj.Name != old.Name {
allErrs = append(allErrs, field.Forbidden(fp.Child("Name"), "Name cannot be changed"))
allErrs = append(allErrs, field.Forbidden(fp.Child("name"), "name cannot be changed"))
}

var etcdClusterStatus *kops.EtcdClusterStatus
@@ -89,7 +85,7 @@ func validateEtcdClusterUpdate(fp *field.Path, obj *kops.EtcdClusterSpec, status
}

for k, newMember := range newMembers {
fp := fp.Child("Members").Key(k)
fp := fp.Child("etcdMembers").Key(k)

oldMember := oldMembers[k]
if oldMember == nil {
@@ -101,7 +97,7 @@ func validateEtcdClusterUpdate(fp *field.Path, obj *kops.EtcdClusterSpec, status
for k := range oldMembers {
newCluster := newMembers[k]
if newCluster == nil {
fp := fp.Child("Members").Key(k)
fp := fp.Child("etcdMembers").Key(k)
allErrs = append(allErrs, field.Forbidden(fp, "EtcdCluster members cannot be removed"))
}
}
@@ -114,31 +110,31 @@ func validateEtcdMemberUpdate(fp *field.Path, obj *kops.EtcdMemberSpec, status *
allErrs := field.ErrorList{}

if obj.Name != old.Name {
allErrs = append(allErrs, field.Forbidden(fp.Child("Name"), "Name cannot be changed"))
allErrs = append(allErrs, field.Forbidden(fp.Child("name"), "name cannot be changed"))
}

if fi.StringValue(obj.InstanceGroup) != fi.StringValue(old.InstanceGroup) {
allErrs = append(allErrs, field.Forbidden(fp.Child("InstanceGroup"), "InstanceGroup cannot be changed"))
allErrs = append(allErrs, field.Forbidden(fp.Child("instanceGroup"), "instanceGroup cannot be changed"))
}

if fi.StringValue(obj.VolumeType) != fi.StringValue(old.VolumeType) {
allErrs = append(allErrs, field.Forbidden(fp.Child("VolumeType"), "VolumeType cannot be changed"))
allErrs = append(allErrs, field.Forbidden(fp.Child("volumeType"), "volumeType cannot be changed"))
}

if fi.Int32Value(obj.VolumeIops) != fi.Int32Value(old.VolumeIops) {
allErrs = append(allErrs, field.Forbidden(fp.Child("VolumeIops"), "VolumeIops cannot be changed"))
allErrs = append(allErrs, field.Forbidden(fp.Child("volumeIops"), "volumeIops cannot be changed"))
}

if fi.Int32Value(obj.VolumeSize) != fi.Int32Value(old.VolumeSize) {
allErrs = append(allErrs, field.Forbidden(fp.Child("VolumeSize"), "VolumeSize cannot be changed"))
allErrs = append(allErrs, field.Forbidden(fp.Child("volumeSize"), "volumeSize cannot be changed"))
}

if fi.StringValue(obj.KmsKeyId) != fi.StringValue(old.KmsKeyId) {
allErrs = append(allErrs, field.Forbidden(fp.Child("KmsKeyId"), "KmsKeyId cannot be changed"))
allErrs = append(allErrs, field.Forbidden(fp.Child("kmsKeyId"), "kmsKeyId cannot be changed"))
}

if fi.BoolValue(obj.EncryptedVolume) != fi.BoolValue(old.EncryptedVolume) {
allErrs = append(allErrs, field.Forbidden(fp.Child("EncryptedVolume"), "EncryptedVolume cannot be changed"))
allErrs = append(allErrs, field.Forbidden(fp.Child("encryptedVolume"), "encryptedVolume cannot be changed"))
}

return allErrs
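
Editor's note: these validators now accumulate a field.ErrorList rather than returning on the first problem. A minimal sketch of how such a list is usually collapsed into one error for the caller; the paths and messages below are illustrative assumptions, not taken from this diff.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	allErrs := field.ErrorList{}
	fp := field.NewPath("spec", "etcdClusters").Index(0)

	// Accumulate several violations instead of returning after the first one.
	allErrs = append(allErrs, field.Forbidden(fp.Child("name"), "name cannot be changed"))
	allErrs = append(allErrs, field.Forbidden(fp.Child("etcdMembers").Key("a").Child("volumeSize"), "volumeSize cannot be changed"))

	if len(allErrs) != 0 {
		// ToAggregate folds every field path and message into a single error value.
		fmt.Println(allErrs.ToAggregate())
	}
}
```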
@@ -17,9 +17,6 @@ limitations under the License.
package validation

import (
"strings"

"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops"
)
@@ -29,22 +26,22 @@ func gceValidateCluster(c *kops.Cluster) field.ErrorList {

fieldSpec := field.NewPath("spec")

regions := sets.NewString()
region := ""
for i, subnet := range c.Spec.Subnets {
f := fieldSpec.Child("Subnets").Index(i)
f := fieldSpec.Child("subnets").Index(i)
if subnet.Zone != "" {
allErrs = append(allErrs, field.Invalid(f.Child("Zone"), subnet.Zone, "zones should not be specified for GCE subnets, as GCE subnets are regional"))
allErrs = append(allErrs, field.Invalid(f.Child("zone"), subnet.Zone, "zones should not be specified for GCE subnets, as GCE subnets are regional"))
}
if subnet.Region == "" {
allErrs = append(allErrs, field.Required(f.Child("Region"), "region must be specified for GCE subnets"))
allErrs = append(allErrs, field.Required(f.Child("region"), "region must be specified for GCE subnets"))
} else {
regions.Insert(subnet.Region)
if region == "" {
region = subnet.Region
} else if region != subnet.Region {
allErrs = append(allErrs, field.Forbidden(f.Child("region"), "clusters cannot span GCE regions"))
}
}
}

if len(regions) > 1 {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Subnets"), strings.Join(regions.List(), ","), "clusters cannot span GCE regions"))
}

return allErrs
}
@@ -17,7 +17,6 @@ limitations under the License.
package validation

import (
"fmt"
"strings"

"k8s.io/kops/pkg/apis/kops"
@@ -29,84 +28,74 @@ import (
)

// ValidateInstanceGroup is responsible for validating the configuration of a instancegroup
func ValidateInstanceGroup(g *kops.InstanceGroup) error {
func ValidateInstanceGroup(g *kops.InstanceGroup) field.ErrorList {
allErrs := field.ErrorList{}

if g.ObjectMeta.Name == "" {
return field.Required(field.NewPath("Name"), "")
allErrs = append(allErrs, field.Required(field.NewPath("objectMeta", "name"), ""))
}

switch g.Spec.Role {
case "":
return field.Required(field.NewPath("Role"), "Role must be set")
allErrs = append(allErrs, field.Required(field.NewPath("spec", "role"), "Role must be set"))
case kops.InstanceGroupRoleMaster:
if len(g.Spec.Subnets) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("spec", "subnets"), "master InstanceGroup must specify at least one Subnet"))
}
case kops.InstanceGroupRoleNode:
case kops.InstanceGroupRoleBastion:
default:
return field.Invalid(field.NewPath("Role"), g.Spec.Role, "Unknown role")
var supported []string
for _, role := range kops.AllInstanceGroupRoles {
supported = append(supported, string(role))
}
allErrs = append(allErrs, field.NotSupported(field.NewPath("spec", "role"), g.Spec.Role, supported))
}

if g.Spec.Tenancy != "" {
if g.Spec.Tenancy != "default" && g.Spec.Tenancy != "dedicated" && g.Spec.Tenancy != "host" {
return field.Invalid(field.NewPath("Tenancy"), g.Spec.Tenancy, "Unknown tenancy. Must be Default, Dedicated or Host.")
allErrs = append(allErrs, field.NotSupported(field.NewPath("spec", "tenancy"), g.Spec.Tenancy, []string{"default", "dedicated", "host"}))
}
}

if g.Spec.MaxSize != nil && g.Spec.MinSize != nil {
if *g.Spec.MaxSize < *g.Spec.MinSize {
return field.Invalid(field.NewPath("MaxSize"), *g.Spec.MaxSize, "maxSize must be greater than or equal to minSize.")
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "maxSize"), "maxSize must be greater than or equal to minSize."))
}
}

if fi.Int32Value(g.Spec.RootVolumeIops) < 0 {
return field.Invalid(field.NewPath("RootVolumeIops"), g.Spec.RootVolumeIops, "RootVolumeIops must be greater than 0")
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "rootVolumeIops"), g.Spec.RootVolumeIops, "RootVolumeIops must be greater than 0"))
}

// @check all the hooks are valid in this instancegroup
for i := range g.Spec.Hooks {
if errs := validateHookSpec(&g.Spec.Hooks[i], field.NewPath("hooks").Index(i)); len(errs) > 0 {
return errs.ToAggregate()
}
allErrs = append(allErrs, validateHookSpec(&g.Spec.Hooks[i], field.NewPath("spec", "hooks").Index(i))...)
}

// @check the fileAssets for this instancegroup are valid
for i := range g.Spec.FileAssets {
if errs := validateFileAssetSpec(&g.Spec.FileAssets[i], field.NewPath("fileAssets").Index(i)); len(errs) > 0 {
return errs.ToAggregate()
}
}

if g.IsMaster() {
if len(g.Spec.Subnets) == 0 {
return fmt.Errorf("master InstanceGroup %s did not specify any Subnets", g.ObjectMeta.Name)
}
allErrs = append(allErrs, validateFileAssetSpec(&g.Spec.FileAssets[i], field.NewPath("spec", "fileAssets").Index(i))...)
}

if g.Spec.MixedInstancesPolicy != nil {
if errs := validatedMixedInstancesPolicy(field.NewPath(g.Name), g.Spec.MixedInstancesPolicy, g); len(errs) > 0 {
return errs.ToAggregate()
}
allErrs = append(allErrs, validatedMixedInstancesPolicy(field.NewPath("spec", "mixedInstancesPolicy"), g.Spec.MixedInstancesPolicy, g)...)
}

if len(g.Spec.AdditionalUserData) > 0 {
for _, UserDataInfo := range g.Spec.AdditionalUserData {
err := validateExtraUserData(&UserDataInfo)
if err != nil {
return err
}
}
for _, UserDataInfo := range g.Spec.AdditionalUserData {
allErrs = append(allErrs, validateExtraUserData(&UserDataInfo)...)
}

// @step: iterate and check the volume specs
for i, x := range g.Spec.Volumes {
devices := make(map[string]bool)
path := field.NewPath("volumes").Index(i)
path := field.NewPath("spec", "volumes").Index(i)

if err := validateVolumeSpec(path, x); err != nil {
return err
}
allErrs = append(allErrs, validateVolumeSpec(path, x)...)

// @check the device name has not been used already
if _, found := devices[x.Device]; found {
return field.Invalid(path.Child("device"), x.Device, "duplicate device name found in volumes")
allErrs = append(allErrs, field.Duplicate(path.Child("device"), x.Device))
}

devices[x.Device] = true
@@ -115,30 +104,24 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) error {
// @step: iterate and check the volume mount specs
for i, x := range g.Spec.VolumeMounts {
used := make(map[string]bool)
path := field.NewPath("volumeMounts").Index(i)
path := field.NewPath("spec", "volumeMounts").Index(i)

if err := validateVolumeMountSpec(path, x); err != nil {
return err
}
allErrs = append(allErrs, validateVolumeMountSpec(path, x)...)
if _, found := used[x.Device]; found {
return field.Invalid(path.Child("device"), x.Device, "duplicate device reference")
allErrs = append(allErrs, field.Duplicate(path.Child("device"), x.Device))
}
if _, found := used[x.Path]; found {
return field.Invalid(path.Child("path"), x.Path, "duplicate mount path specified")
allErrs = append(allErrs, field.Duplicate(path.Child("path"), x.Path))
}
}

if err := validateInstanceProfile(g.Spec.IAM, field.NewPath("iam")); err != nil {
return err
}
allErrs = append(allErrs, validateInstanceProfile(g.Spec.IAM, field.NewPath("spec", "iam"))...)

if g.Spec.RollingUpdate != nil {
if errs := validateRollingUpdate(g.Spec.RollingUpdate, field.NewPath("rollingUpdate")); len(errs) > 0 {
return errs.ToAggregate()
}
allErrs = append(allErrs, validateRollingUpdate(g.Spec.RollingUpdate, field.NewPath("spec", "rollingUpdate"))...)
}

return nil
return allErrs
}

// validatedMixedInstancesPolicy is responsible for validating the user input of a mixed instance policy
@@ -172,109 +155,110 @@ func validatedMixedInstancesPolicy(path *field.Path, spec *kops.MixedInstancesPo
}

if spec.SpotAllocationStrategy != nil && !slice.Contains(kops.SpotAllocationStrategies, fi.StringValue(spec.SpotAllocationStrategy)) {
errs = append(errs, field.Invalid(path.Child("spotAllocationStrategy"), spec.SpotAllocationStrategy, "unsupported spot allocation strategy"))
errs = append(errs, field.NotSupported(path.Child("spotAllocationStrategy"), spec.SpotAllocationStrategy, kops.SpotAllocationStrategies))
}

return errs
}

// validateVolumeSpec is responsible for checking a volume spec is ok
func validateVolumeSpec(path *field.Path, v *kops.VolumeSpec) error {
func validateVolumeSpec(path *field.Path, v *kops.VolumeSpec) field.ErrorList {
allErrs := field.ErrorList{}

if v.Device == "" {
return field.Required(path.Child("device"), "device name required")
allErrs = append(allErrs, field.Required(path.Child("device"), "device name required"))
}
if v.Size <= 0 {
return field.Invalid(path.Child("size"), v.Size, "must be greater than zero")
allErrs = append(allErrs, field.Invalid(path.Child("size"), v.Size, "must be greater than zero"))
}

return nil
return allErrs
}

// validateVolumeMountSpec is responsible for checking the volume mount is ok
func validateVolumeMountSpec(path *field.Path, spec *kops.VolumeMountSpec) error {
func validateVolumeMountSpec(path *field.Path, spec *kops.VolumeMountSpec) field.ErrorList {
allErrs := field.ErrorList{}

if spec.Device == "" {
return field.Required(path.Child("device"), "device name required")
allErrs = append(allErrs, field.Required(path.Child("device"), "device name required"))
}
if spec.Filesystem == "" {
return field.Required(path.Child("filesystem"), "filesystem type required")
allErrs = append(allErrs, field.Required(path.Child("filesystem"), "filesystem type required"))
}
if spec.Path == "" {
return field.Required(path.Child("path"), "mount path required")
allErrs = append(allErrs, field.Required(path.Child("path"), "mount path required"))
}
if !slice.Contains(kops.SupportedFilesystems, spec.Filesystem) {
return field.Invalid(path.Child("filesystem"), spec.Filesystem,
fmt.Sprintf("unsupported filesystem, available types: %s", strings.Join(kops.SupportedFilesystems, ",")))
allErrs = append(allErrs, field.NotSupported(path.Child("filesystem"), spec.Filesystem, kops.SupportedFilesystems))
}

return nil
return allErrs
}

// CrossValidateInstanceGroup performs validation of the instance group, including that it is consistent with the Cluster
// It calls ValidateInstanceGroup, so all that validation is included.
func CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, strict bool) error {
err := ValidateInstanceGroup(g)
if err != nil {
return err
}
func CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, strict bool) field.ErrorList {
allErrs := ValidateInstanceGroup(g)

// Check that instance groups are defined in subnets that are defined in the cluster
{
clusterSubnets := make(map[string]*kops.ClusterSubnetSpec)
for i := range cluster.Spec.Subnets {
s := &cluster.Spec.Subnets[i]
if clusterSubnets[s.Name] != nil {
return fmt.Errorf("subnets contained a duplicate value: %v", s.Name)
}
clusterSubnets[s.Name] = s
}

for _, z := range g.Spec.Subnets {
for i, z := range g.Spec.Subnets {
if clusterSubnets[z] == nil {
return fmt.Errorf("InstanceGroup %q is configured in %q, but this is not configured as a Subnet in the cluster", g.ObjectMeta.Name, z)
allErrs = append(allErrs, field.NotFound(field.NewPath("spec", "subnets").Index(i), z))
}
}
}

return nil
return allErrs
}

func validateExtraUserData(userData *kops.UserData) error {
fieldPath := field.NewPath("AdditionalUserData")
var validUserDataTypes = []string{
"text/x-include-once-url",
"text/x-include-url",
"text/cloud-config-archive",
"text/upstart-job",
"text/cloud-config",
"text/part-handler",
"text/x-shellscript",
"text/cloud-boothook",
}

func validateExtraUserData(userData *kops.UserData) field.ErrorList {
allErrs := field.ErrorList{}
fieldPath := field.NewPath("additionalUserData")

if userData.Name == "" {
return field.Required(fieldPath.Child("Name"), "field must be set")
allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "field must be set"))
}

if userData.Content == "" {
return field.Required(fieldPath.Child("Content"), "field must be set")
allErrs = append(allErrs, field.Required(fieldPath.Child("content"), "field must be set"))
}

switch userData.Type {
case "text/x-include-once-url":
case "text/x-include-url":
case "text/cloud-config-archive":
case "text/upstart-job":
case "text/cloud-config":
case "text/part-handler":
case "text/x-shellscript":
case "text/cloud-boothook":

default:
return field.Invalid(fieldPath.Child("Type"), userData.Type, "Invalid user-data content type")
if !slice.Contains(validUserDataTypes, userData.Type) {
allErrs = append(allErrs, field.NotSupported(fieldPath.Child("type"), userData.Type, validUserDataTypes))
}

return nil
return allErrs
}

// validateInstanceProfile checks the String values for the AuthProfile
func validateInstanceProfile(v *kops.IAMProfileSpec, fldPath *field.Path) *field.Error {
func validateInstanceProfile(v *kops.IAMProfileSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}

if v != nil && v.Profile != nil {
instanceProfileARN := *v.Profile
parsedARN, err := arn.Parse(instanceProfileARN)
if err != nil || !strings.HasPrefix(parsedARN.Resource, "instance-profile") {
return field.Invalid(fldPath.Child("Profile"), instanceProfileARN,
"Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole")
allErrs = append(allErrs, field.Invalid(fldPath.Child("profile"), instanceProfileARN,
"Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole"))
}
}
return nil
return allErrs
}
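
Editor's note: with the signature changes above, callers switch from an error check to a list check. A hedged sketch of the calling convention is below; the import paths and the instance-group literal are assumptions for illustration, not code from this commit.

```go
package main

import (
	"fmt"

	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/apis/kops/validation"
)

func main() {
	ig := &kops.InstanceGroup{}
	ig.ObjectMeta.Name = "nodes"
	ig.Spec.Role = kops.InstanceGroupRoleNode

	// ValidateInstanceGroup now returns a field.ErrorList, so inspect its length
	// and aggregate it instead of comparing a single error against nil.
	if errs := validation.ValidateInstanceGroup(ig); len(errs) != 0 {
		fmt.Println(errs.ToAggregate())
	}
}
```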
@@ -57,24 +57,20 @@ func TestValidateInstanceProfile(t *testing.T) {
Input: &kops.IAMProfileSpec{
Profile: s("42"),
},
ExpectedErrors: []string{"Invalid value::IAMProfile.Profile"},
ExpectedErrors: []string{"Invalid value::iam.profile"},
ExpectedDetail: "Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole",
},
{
Input: &kops.IAMProfileSpec{
Profile: s("arn:aws:iam::123456789012:group/division_abc/subdivision_xyz/product_A/Developers"),
},
ExpectedErrors: []string{"Invalid value::IAMProfile.Profile"},
ExpectedErrors: []string{"Invalid value::iam.profile"},
ExpectedDetail: "Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole",
},
}

for _, g := range grid {
err := validateInstanceProfile(g.Input, field.NewPath("IAMProfile"))
allErrs := field.ErrorList{}
if err != nil {
allErrs = append(allErrs, err)
}
allErrs := validateInstanceProfile(g.Input, field.NewPath("iam"))
testErrors(t, g.Input, allErrs, g.ExpectedErrors)

if g.ExpectedDetail != "" {
@@ -17,7 +17,6 @@ limitations under the License.
package validation

import (
"errors"
"fmt"
"net"
"strings"
@@ -37,67 +36,68 @@ import (
// legacy contains validation functions that don't match the apimachinery style

// ValidateCluster is responsible for checking the validity of the Cluster spec
func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
fieldSpec := field.NewPath("spec")
var err error
allErrs := field.ErrorList{}

// kubernetesRelease is the version with only major & minor fields
var kubernetesRelease semver.Version
// We initialize to an arbitrary value, preferably in the supported range,
// in case the value in c.Spec.KubernetesVersion is blank or unparseable.
kubernetesRelease := semver.Version{Major: 1, Minor: 15}

// KubernetesVersion
if c.Spec.KubernetesVersion == "" {
return field.Required(fieldSpec.Child("KubernetesVersion"), "")
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubernetesVersion"), ""))
} else {
sv, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubernetesVersion"), c.Spec.KubernetesVersion, "unable to determine kubernetes version"))
} else {
kubernetesRelease = semver.Version{Major: sv.Major, Minor: sv.Minor}
}
}

sv, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion)
if err != nil {
return field.Invalid(fieldSpec.Child("KubernetesVersion"), c.Spec.KubernetesVersion, "unable to determine kubernetes version")
}
kubernetesRelease = semver.Version{Major: sv.Major, Minor: sv.Minor}

if c.ObjectMeta.Name == "" {
return field.Required(field.NewPath("Name"), "Cluster Name is required (e.g. --name=mycluster.myzone.com)")
}

{
allErrs = append(allErrs, field.Required(field.NewPath("objectMeta", "name"), "Cluster Name is required (e.g. --name=mycluster.myzone.com)"))
} else {
// Must be a dns name
errs := validation.IsDNS1123Subdomain(c.ObjectMeta.Name)
if len(errs) != 0 {
return field.Invalid(field.NewPath("Name"), c.ObjectMeta.Name, fmt.Sprintf("Cluster Name must be a valid DNS name (e.g. --name=mycluster.myzone.com) errors: %s", strings.Join(errs, ", ")))
}

if !strings.Contains(c.ObjectMeta.Name, ".") {
allErrs = append(allErrs, field.Invalid(field.NewPath("objectMeta", "name"), c.ObjectMeta.Name, fmt.Sprintf("Cluster Name must be a valid DNS name (e.g. --name=mycluster.myzone.com) errors: %s", strings.Join(errs, ", "))))
} else if !strings.Contains(c.ObjectMeta.Name, ".") {
// Tolerate if this is a cluster we are importing for upgrade
if c.ObjectMeta.Annotations[kops.AnnotationNameManagement] != kops.AnnotationValueManagementImported {
return field.Invalid(field.NewPath("Name"), c.ObjectMeta.Name, "Cluster Name must be a fully-qualified DNS name (e.g. --name=mycluster.myzone.com)")
allErrs = append(allErrs, field.Invalid(field.NewPath("objectMeta", "name"), c.ObjectMeta.Name, "Cluster Name must be a fully-qualified DNS name (e.g. --name=mycluster.myzone.com)"))
}
}
}

if c.Spec.Assets != nil && c.Spec.Assets.ContainerProxy != nil && c.Spec.Assets.ContainerRegistry != nil {
return field.Forbidden(fieldSpec.Child("Assets", "ContainerProxy"), "ContainerProxy cannot be used in conjunction with ContainerRegistry as represent mutually exclusive concepts. Please consult the documentation for details.")
}

if c.Spec.CloudProvider == "" {
return field.Required(fieldSpec.Child("CloudProvider"), "")
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("Assets", "ContainerProxy"), "ContainerProxy cannot be used in conjunction with ContainerRegistry as represent mutually exclusive concepts. Please consult the documentation for details."))
}

requiresSubnets := true
requiresNetworkCIDR := true
requiresSubnetCIDR := true
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case "":
allErrs = append(allErrs, field.Required(fieldSpec.Child("cloudProvider"), ""))
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false

case kops.CloudProviderBareMetal:
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
return field.Invalid(fieldSpec.Child("NetworkCIDR"), c.Spec.NetworkCIDR, "NetworkCIDR should not be set on bare metal")
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on bare metal"))
}

case kops.CloudProviderGCE:
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
return field.Invalid(fieldSpec.Child("NetworkCIDR"), c.Spec.NetworkCIDR, "NetworkCIDR should not be set on GCE")
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on GCE"))
}
requiresSubnetCIDR = false

@@ -106,7 +106,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
requiresSubnetCIDR = false
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
return field.Invalid(fieldSpec.Child("NetworkCIDR"), c.Spec.NetworkCIDR, "NetworkCIDR should not be set on DigitalOcean")
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on DigitalOcean"))
}
case kops.CloudProviderALI:
requiresSubnets = false
@@ -119,50 +119,59 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
requiresSubnetCIDR = false

default:
return field.Invalid(fieldSpec.Child("CloudProvider"), c.Spec.CloudProvider, "CloudProvider not recognized")
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("cloudProvider"), c.Spec.CloudProvider, []string{
string(kops.CloudProviderBareMetal),
string(kops.CloudProviderGCE),
string(kops.CloudProviderDO),
string(kops.CloudProviderALI),
string(kops.CloudProviderAWS),
string(kops.CloudProviderVSphere),
string(kops.CloudProviderOpenstack),
}))
}

if requiresSubnets && len(c.Spec.Subnets) == 0 {
// TODO: Auto choose zones from region?
return field.Required(fieldSpec.Child("Subnets"), "must configure at least one Subnet (use --zones)")
allErrs = append(allErrs, field.Required(fieldSpec.Child("subnets"), "must configure at least one subnet (use --zones)"))
}

if strict && c.Spec.Kubelet == nil {
return field.Required(fieldSpec.Child("Kubelet"), "Kubelet not configured")
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubelet"), "kubelet not configured"))
}
if strict && c.Spec.MasterKubelet == nil {
return field.Required(fieldSpec.Child("MasterKubelet"), "MasterKubelet not configured")
allErrs = append(allErrs, field.Required(fieldSpec.Child("masterKubelet"), "masterKubelet not configured"))
}
if strict && c.Spec.KubeControllerManager == nil {
return field.Required(fieldSpec.Child("KubeControllerManager"), "KubeControllerManager not configured")
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeControllerManager"), "kubeControllerManager not configured"))
}
if strict && c.Spec.KubeDNS == nil {
return field.Required(fieldSpec.Child("KubeDNS"), "KubeDNS not configured")
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeDNS"), "kubeDNS not configured"))
}
if strict && c.Spec.KubeScheduler == nil {
return field.Required(fieldSpec.Child("KubeScheduler"), "KubeScheduler not configured")
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeScheduler"), "kubeScheduler not configured"))
}
if strict && c.Spec.KubeAPIServer == nil {
return field.Required(fieldSpec.Child("KubeAPIServer"), "KubeAPIServer not configured")
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeAPIServer"), "kubeAPIServer not configured"))
}
if strict && c.Spec.KubeProxy == nil {
return field.Required(fieldSpec.Child("KubeProxy"), "KubeProxy not configured")
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeProxy"), "kubeProxy not configured"))
}
if strict && c.Spec.Docker == nil {
return field.Required(fieldSpec.Child("Docker"), "Docker not configured")
allErrs = append(allErrs, field.Required(fieldSpec.Child("docker"), "docker not configured"))
}

// Check NetworkCIDR
var networkCIDR *net.IPNet
var err error
{
if c.Spec.NetworkCIDR == "" {
if requiresNetworkCIDR {
return field.Required(fieldSpec.Child("NetworkCIDR"), "Cluster did not have NetworkCIDR set")
allErrs = append(allErrs, field.Required(fieldSpec.Child("networkCIDR"), "Cluster did not have networkCIDR set"))
}
} else {
_, networkCIDR, err = net.ParseCIDR(c.Spec.NetworkCIDR)
if err != nil {
return field.Invalid(fieldSpec.Child("NetworkCIDR"), c.Spec.NetworkCIDR, fmt.Sprintf("Cluster had an invalid NetworkCIDR"))
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, fmt.Sprintf("Cluster had an invalid networkCIDR")))
}
}
}

@@ -174,7 +183,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
for _, AdditionalNetworkCIDR := range c.Spec.AdditionalNetworkCIDRs {
_, IPNetAdditionalNetworkCIDR, err := net.ParseCIDR(AdditionalNetworkCIDR)
if err != nil {
return field.Invalid(fieldSpec.Child("AdditionalNetworkCIDRs"), AdditionalNetworkCIDR, fmt.Sprintf("Cluster had an invalid AdditionalNetworkCIDRs"))
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("additionalNetworkCIDRs"), AdditionalNetworkCIDR, fmt.Sprintf("Cluster had an invalid additionalNetworkCIDRs")))
}
additionalNetworkCIDRs = append(additionalNetworkCIDRs, IPNetAdditionalNetworkCIDR)
}
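
Editor's note: several of the rewrites above replace hand-built field.Invalid messages with field.NotSupported, which renders the allowed values itself. A small assumed illustration follows; the value and the supported list are made up, and the printed format is paraphrased from apimachinery rather than taken from this diff.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	err := field.NotSupported(field.NewPath("spec", "cloudProvider"), "azure", []string{"aws", "gce", "openstack"})
	// Prints something like:
	// spec.cloudProvider: Unsupported value: "azure": supported values: "aws", "gce", "openstack"
	fmt.Println(err.Error())
}
```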
@@ -195,27 +204,28 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
nonMasqueradeCIDRString := c.Spec.NonMasqueradeCIDR
if nonMasqueradeCIDRString == "" {
if nonMasqueradeCIDRRequired {
return field.Required(fieldSpec.Child("NonMasqueradeCIDR"), "Cluster did not have NonMasqueradeCIDR set")
allErrs = append(allErrs, field.Required(fieldSpec.Child("nonMasqueradeCIDR"), "Cluster did not have nonMasqueradeCIDR set"))
}
} else {
_, nonMasqueradeCIDR, err = net.ParseCIDR(nonMasqueradeCIDRString)
if err != nil {
return field.Invalid(fieldSpec.Child("NonMasqueradeCIDR"), nonMasqueradeCIDRString, "Cluster had an invalid NonMasqueradeCIDR")
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("nonMasqueradeCIDR"), nonMasqueradeCIDRString, "Cluster had an invalid nonMasqueradeCIDR"))
}

if networkCIDR != nil && subnet.Overlap(nonMasqueradeCIDR, networkCIDR) && c.Spec.Networking != nil && c.Spec.Networking.AmazonVPC == nil && c.Spec.Networking.LyftVPC == nil {

return field.Invalid(fieldSpec.Child("NonMasqueradeCIDR"), nonMasqueradeCIDRString, fmt.Sprintf("NonMasqueradeCIDR %q cannot overlap with NetworkCIDR %q", nonMasqueradeCIDRString, c.Spec.NetworkCIDR))
if networkCIDR != nil && subnet.Overlap(nonMasqueradeCIDR, networkCIDR) && c.Spec.Networking != nil && c.Spec.Networking.AmazonVPC == nil && c.Spec.Networking.LyftVPC == nil && (c.Spec.Networking.Cilium == nil || c.Spec.Networking.Cilium.Ipam != kops.CiliumIpamEni) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("nonMasqueradeCIDR"), fmt.Sprintf("nonMasqueradeCIDR %q cannot overlap with networkCIDR %q", nonMasqueradeCIDRString, c.Spec.NetworkCIDR)))
}

if c.Spec.Kubelet != nil && c.Spec.Kubelet.NonMasqueradeCIDR != nonMasqueradeCIDRString {
// TODO Remove the Spec.Kubelet.NonMasqueradeCIDR field?
if strict || c.Spec.Kubelet.NonMasqueradeCIDR != "" {
return field.Invalid(fieldSpec.Child("NonMasqueradeCIDR"), nonMasqueradeCIDRString, "Kubelet NonMasqueradeCIDR did not match cluster NonMasqueradeCIDR")
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "nonMasqueradeCIDR"), "kubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
}
}
if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.NonMasqueradeCIDR != nonMasqueradeCIDRString {
// TODO remove the Spec.MasterKubelet.NonMasqueradeCIDR field?
if strict || c.Spec.MasterKubelet.NonMasqueradeCIDR != "" {
return field.Invalid(fieldSpec.Child("NonMasqueradeCIDR"), nonMasqueradeCIDRString, "MasterKubelet NonMasqueradeCIDR did not match cluster NonMasqueradeCIDR")
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("masterKubelet", "nonMasqueradeCIDR"), "masterKubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
}
}
}
@@ -227,21 +237,21 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
serviceClusterIPRangeString := c.Spec.ServiceClusterIPRange
if serviceClusterIPRangeString == "" {
if strict {
return field.Required(fieldSpec.Child("ServiceClusterIPRange"), "Cluster did not have ServiceClusterIPRange set")
allErrs = append(allErrs, field.Required(fieldSpec.Child("serviceClusterIPRange"), "Cluster did not have serviceClusterIPRange set"))
}
} else {
_, serviceClusterIPRange, err = net.ParseCIDR(serviceClusterIPRangeString)
if err != nil {
return field.Invalid(fieldSpec.Child("ServiceClusterIPRange"), serviceClusterIPRangeString, "Cluster had an invalid ServiceClusterIPRange")
}
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("serviceClusterIPRange"), serviceClusterIPRangeString, "Cluster had an invalid serviceClusterIPRange"))
} else {
if nonMasqueradeCIDR != nil && serviceClusterMustBeSubnetOfNonMasqueradeCIDR && !subnet.BelongsTo(nonMasqueradeCIDR, serviceClusterIPRange) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("serviceClusterIPRange"), fmt.Sprintf("serviceClusterIPRange %q must be a subnet of nonMasqueradeCIDR %q", serviceClusterIPRangeString, c.Spec.NonMasqueradeCIDR)))
}

if nonMasqueradeCIDR != nil && serviceClusterMustBeSubnetOfNonMasqueradeCIDR && !subnet.BelongsTo(nonMasqueradeCIDR, serviceClusterIPRange) {
return field.Invalid(fieldSpec.Child("ServiceClusterIPRange"), serviceClusterIPRangeString, fmt.Sprintf("ServiceClusterIPRange %q must be a subnet of NonMasqueradeCIDR %q", serviceClusterIPRangeString, c.Spec.NonMasqueradeCIDR))
}

if c.Spec.KubeAPIServer != nil && c.Spec.KubeAPIServer.ServiceClusterIPRange != serviceClusterIPRangeString {
if strict || c.Spec.KubeAPIServer.ServiceClusterIPRange != "" {
return field.Invalid(fieldSpec.Child("ServiceClusterIPRange"), serviceClusterIPRangeString, "KubeAPIServer ServiceClusterIPRange did not match cluster ServiceClusterIPRange")
if c.Spec.KubeAPIServer != nil && c.Spec.KubeAPIServer.ServiceClusterIPRange != serviceClusterIPRangeString {
if strict || c.Spec.KubeAPIServer.ServiceClusterIPRange != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "serviceClusterIPRange"), "kubeAPIServer serviceClusterIPRange did not match cluster serviceClusterIPRange"))
}
}
}
}
@@ -253,21 +263,21 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
switch action {
case "", "ACCEPT", "DROP", "RETURN":
default:
return field.Invalid(fieldSpec.Child("Networking", "Canal", "DefaultEndpointToHostAction"), action, fmt.Sprintf("Unsupported value: %s, supports 'ACCEPT', 'DROP' or 'RETURN'", action))
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("networking", "canal", "defaultEndpointToHostAction"), action, []string{"ACCEPT", "DROP", "RETURN"}))
}

chainInsertMode := c.Spec.Networking.Canal.ChainInsertMode
switch chainInsertMode {
case "", "insert", "append":
default:
return field.Invalid(fieldSpec.Child("Networking", "Canal", "ChainInsertMode"), chainInsertMode, fmt.Sprintf("Unsupported value: %s, supports 'insert' or 'append'", chainInsertMode))
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("networking", "canal", "chainInsertMode"), chainInsertMode, []string{"insert", "append"}))
}

logSeveritySys := c.Spec.Networking.Canal.LogSeveritySys
switch logSeveritySys {
case "", "INFO", "DEBUG", "WARNING", "ERROR", "CRITICAL", "NONE":
default:
return field.Invalid(fieldSpec.Child("Networking", "Canal", "LogSeveritySys"), logSeveritySys, fmt.Sprintf("Unsupported value: %s, supports 'INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL' or 'NONE'", logSeveritySys))
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("networking", "canal", "logSeveritySys"), logSeveritySys, []string{"INFO", "DEBUG", "WARNING", "ERROR", "CRITICAL", "NONE"}))
}
}

@@ -278,11 +288,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
if clusterCIDRString != "" {
_, clusterCIDR, err = net.ParseCIDR(clusterCIDRString)
if err != nil {
return field.Invalid(fieldSpec.Child("KubeControllerManager", "ClusterCIDR"), clusterCIDRString, "Cluster had an invalid KubeControllerManager.ClusterCIDR")
}

if nonMasqueradeCIDR != nil && !subnet.BelongsTo(nonMasqueradeCIDR, clusterCIDR) {
return field.Invalid(fieldSpec.Child("KubeControllerManager", "ClusterCIDR"), clusterCIDRString, fmt.Sprintf("KubeControllerManager.ClusterCIDR %q must be a subnet of NonMasqueradeCIDR %q", clusterCIDRString, c.Spec.NonMasqueradeCIDR))
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), clusterCIDRString, "cluster had an invalid kubeControllerManager.clusterCIDR"))
} else if nonMasqueradeCIDR != nil && !subnet.BelongsTo(nonMasqueradeCIDR, clusterCIDR) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), fmt.Sprintf("kubeControllerManager.clusterCIDR %q must be a subnet of nonMasqueradeCIDR %q", clusterCIDRString, c.Spec.NonMasqueradeCIDR)))
}
}
}
@@ -293,17 +301,18 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
address := c.Spec.KubeDNS.ServerIP
ip := net.ParseIP(address)
if ip == nil {
return field.Invalid(fieldSpec.Child("kubeDNS", "serverIP"), address, "Cluster had an invalid kubeDNS.serverIP")
}
if serviceClusterIPRange != nil && !serviceClusterIPRange.Contains(ip) {
return field.Invalid(fieldSpec.Child("kubeDNS", "serverIP"), address, fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, address))
}
if !featureflag.ExperimentalClusterDNS.Enabled() {
if c.Spec.Kubelet != nil && c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
return field.Invalid(fieldSpec.Child("kubeDNS", "serverIP"), address, "Kubelet ClusterDNS did not match cluster kubeDNS.serverIP")
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "serverIP"), address, "Cluster had an invalid kubeDNS.serverIP"))
} else {
if serviceClusterIPRange != nil && !serviceClusterIPRange.Contains(ip) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, address)))
}
if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
return field.Invalid(fieldSpec.Child("kubeDNS", "serverIP"), address, "MasterKubelet ClusterDNS did not match cluster kubeDNS.serverIP")
if !featureflag.ExperimentalClusterDNS.Enabled() {
if c.Spec.Kubelet != nil && c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), "Kubelet ClusterDNS did not match cluster kubeDNS.serverIP"))
}
if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), "MasterKubelet ClusterDNS did not match cluster kubeDNS.serverIP"))
}
}
}
}
@@ -311,20 +320,18 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
// @check the nameservers are valid
for i, x := range c.Spec.KubeDNS.UpstreamNameservers {
if ip := net.ParseIP(x); ip == nil {
return field.Invalid(fieldSpec.Child("kubeDNS", "upstreamNameservers").Index(i), x, "Invalid nameserver given, should be a valid ip address")
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "upstreamNameservers").Index(i), x, "Invalid nameserver given, should be a valid ip address"))
}
}

// @check the stubdomain if any
if c.Spec.KubeDNS.StubDomains != nil {
for domain, nameservers := range c.Spec.KubeDNS.StubDomains {
if len(nameservers) <= 0 {
return field.Invalid(fieldSpec.Child("kubeDNS", "stubDomains").Key(domain), domain, "No nameservers specified for the stub domain")
}
for i, x := range nameservers {
if ip := net.ParseIP(x); ip == nil {
return field.Invalid(fieldSpec.Child("kubeDNS", "stubDomains").Key(domain).Index(i), x, "Invalid nameserver given, should be a valid ip address")
}
for domain, nameservers := range c.Spec.KubeDNS.StubDomains {
if len(nameservers) <= 0 {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "stubDomains").Key(domain), domain, "No nameservers specified for the stub domain"))
}
for i, x := range nameservers {
if ip := net.ParseIP(x); ip == nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "stubDomains").Key(domain).Index(i), x, "Invalid nameserver given, should be a valid ip address"))
}
}
}
}
@ -350,28 +357,30 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
|
|||
case kops.CloudProviderALI:
|
||||
k8sCloudProvider = "alicloud"
|
||||
default:
|
||||
return field.Invalid(fieldSpec.Child("CloudProvider"), c.Spec.CloudProvider, "unknown cloudprovider")
|
||||
// We already added an error above
|
||||
k8sCloudProvider = "ignore"
|
||||
}
|
||||
|
||||
if c.Spec.Kubelet != nil && (strict || c.Spec.Kubelet.CloudProvider != "") {
|
||||
if c.Spec.Kubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.Kubelet.CloudProvider {
|
||||
return field.Invalid(fieldSpec.Child("Kubelet", "CloudProvider"), c.Spec.Kubelet.CloudProvider, "Did not match cluster CloudProvider")
|
||||
if k8sCloudProvider != "ignore" {
|
||||
if c.Spec.Kubelet != nil && (strict || c.Spec.Kubelet.CloudProvider != "") {
|
||||
if c.Spec.Kubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.Kubelet.CloudProvider {
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "cloudProvider"), "Did not match cluster cloudProvider"))
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Spec.MasterKubelet != nil && (strict || c.Spec.MasterKubelet.CloudProvider != "") {
|
||||
if c.Spec.MasterKubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.MasterKubelet.CloudProvider {
|
||||
return field.Invalid(fieldSpec.Child("MasterKubelet", "CloudProvider"), c.Spec.MasterKubelet.CloudProvider, "Did not match cluster CloudProvider")
|
||||
|
||||
if c.Spec.MasterKubelet != nil && (strict || c.Spec.MasterKubelet.CloudProvider != "") {
|
||||
if c.Spec.MasterKubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.MasterKubelet.CloudProvider {
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("masterKubelet", "cloudProvider"), "Did not match cluster cloudProvider"))
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Spec.KubeAPIServer != nil && (strict || c.Spec.KubeAPIServer.CloudProvider != "") {
|
||||
if c.Spec.KubeAPIServer.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeAPIServer.CloudProvider {
|
||||
return field.Invalid(fieldSpec.Child("KubeAPIServer", "CloudProvider"), c.Spec.KubeAPIServer.CloudProvider, "Did not match cluster CloudProvider")
|
||||
if c.Spec.KubeAPIServer != nil && (strict || c.Spec.KubeAPIServer.CloudProvider != "") {
|
||||
if c.Spec.KubeAPIServer.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeAPIServer.CloudProvider {
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "cloudProvider"), "Did not match cluster cloudProvider"))
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Spec.KubeControllerManager != nil && (strict || c.Spec.KubeControllerManager.CloudProvider != "") {
|
||||
if c.Spec.KubeControllerManager.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeControllerManager.CloudProvider {
|
||||
return field.Invalid(fieldSpec.Child("KubeControllerManager", "CloudProvider"), c.Spec.KubeControllerManager.CloudProvider, "Did not match cluster CloudProvider")
|
||||
if c.Spec.KubeControllerManager != nil && (strict || c.Spec.KubeControllerManager.CloudProvider != "") {
|
||||
if c.Spec.KubeControllerManager.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeControllerManager.CloudProvider {
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeControllerManager", "cloudProvider"), "Did not match cluster cloudProvider"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -379,19 +388,17 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
|
|||
// Check that the subnet CIDRs are all consistent
|
||||
{
|
||||
for i, s := range c.Spec.Subnets {
|
||||
fieldSubnet := fieldSpec.Child("Subnets").Index(i)
|
||||
fieldSubnet := fieldSpec.Child("subnets").Index(i)
|
||||
if s.CIDR == "" {
|
||||
if requiresSubnetCIDR && strict {
|
||||
return field.Required(fieldSubnet.Child("CIDR"), "Subnet did not have a CIDR set")
|
||||
allErrs = append(allErrs, field.Required(fieldSubnet.Child("cidr"), "subnet did not have a cidr set"))
|
||||
}
|
||||
} else {
|
||||
_, subnetCIDR, err := net.ParseCIDR(s.CIDR)
|
||||
if err != nil {
|
||||
return field.Invalid(fieldSubnet.Child("CIDR"), s.CIDR, "Subnet had an invalid CIDR")
|
||||
}
|
||||
|
||||
if networkCIDR != nil && !validateSubnetCIDR(networkCIDR, additionalNetworkCIDRs, subnetCIDR) {
|
||||
return field.Invalid(fieldSubnet.Child("CIDR"), s.CIDR, fmt.Sprintf("Subnet %q had a CIDR %q that was not a subnet of the NetworkCIDR %q", s.Name, s.CIDR, c.Spec.NetworkCIDR))
|
||||
allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("cidr"), s.CIDR, "subnet had an invalid cidr"))
|
||||
} else if networkCIDR != nil && !validateSubnetCIDR(networkCIDR, additionalNetworkCIDRs, subnetCIDR) {
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSubnet.Child("cidr"), fmt.Sprintf("subnet %q had a cidr %q that was not a subnet of the networkCIDR %q", s.Name, s.CIDR, c.Spec.NetworkCIDR)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -401,34 +408,30 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
|
|||
if c.Spec.NodeAuthorization != nil {
|
||||
// @check the feature gate is enabled for this
|
||||
if !featureflag.EnableNodeAuthorization.Enabled() {
|
||||
return field.Invalid(field.NewPath("nodeAuthorization"), nil, "node authorization is experimental feature; set `export KOPS_FEATURE_FLAGS=EnableNodeAuthorization`")
|
||||
}
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer == nil {
|
||||
return field.Invalid(field.NewPath("nodeAuthorization"), nil, "no node authorization policy has been set")
|
||||
}
|
||||
// NodeAuthorizer
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer != nil {
|
||||
path := field.NewPath("nodeAuthorization").Child("nodeAuthorizer")
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer.Port < 0 || c.Spec.NodeAuthorization.NodeAuthorizer.Port >= 65535 {
|
||||
return field.Invalid(path.Child("port"), c.Spec.NodeAuthorization.NodeAuthorizer.Port, "invalid port")
|
||||
}
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer.Timeout != nil && c.Spec.NodeAuthorization.NodeAuthorizer.Timeout.Duration <= 0 {
|
||||
return field.Invalid(path.Child("timeout"), c.Spec.NodeAuthorization.NodeAuthorizer.Timeout, "must be greater than zero")
|
||||
}
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL != nil && c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL.Duration < 0 {
|
||||
return field.Invalid(path.Child("tokenTTL"), c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL, "must be greater than or equal to zero")
|
||||
}
|
||||
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "nodeAuthorization"), "node authorization is experimental feature; set `export KOPS_FEATURE_FLAGS=EnableNodeAuthorization`"))
|
||||
} else {
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer == nil {
|
||||
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "nodeAuthorization"), "no node authorization policy has been set"))
|
||||
} else {
|
||||
path := field.NewPath("spec", "nodeAuthorization").Child("nodeAuthorizer")
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer.Port < 0 || c.Spec.NodeAuthorization.NodeAuthorizer.Port >= 65535 {
|
||||
allErrs = append(allErrs, field.Invalid(path.Child("port"), c.Spec.NodeAuthorization.NodeAuthorizer.Port, "invalid port"))
|
||||
}
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer.Timeout != nil && c.Spec.NodeAuthorization.NodeAuthorizer.Timeout.Duration <= 0 {
|
||||
allErrs = append(allErrs, field.Invalid(path.Child("timeout"), c.Spec.NodeAuthorization.NodeAuthorizer.Timeout, "must be greater than zero"))
|
||||
}
|
||||
if c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL != nil && c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL.Duration < 0 {
|
||||
allErrs = append(allErrs, field.Invalid(path.Child("tokenTTL"), c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL, "must be greater than or equal to zero"))
|
||||
}
|
||||
|
||||
// @question: we could probably just default these settings in the model when the node-authorizer is enabled??
|
||||
if c.Spec.KubeAPIServer == nil {
|
||||
return field.Invalid(field.NewPath("kubeAPIServer"), c.Spec.KubeAPIServer, "bootstrap token authentication is not enabled in the kube-apiserver")
|
||||
}
|
||||
if c.Spec.KubeAPIServer.EnableBootstrapAuthToken == nil {
|
||||
return field.Invalid(field.NewPath("kubeAPIServer").Child("enableBootstrapAuthToken"), nil, "kube-apiserver has not been configured to use bootstrap tokens")
|
||||
}
|
||||
if !fi.BoolValue(c.Spec.KubeAPIServer.EnableBootstrapAuthToken) {
|
||||
return field.Invalid(field.NewPath("kubeAPIServer").Child("enableBootstrapAuthToken"),
|
||||
c.Spec.KubeAPIServer.EnableBootstrapAuthToken, "bootstrap tokens in the kube-apiserver has been disabled")
|
||||
// @question: we could probably just default these settings in the model when the node-authorizer is enabled??
|
||||
if c.Spec.KubeAPIServer == nil {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("spec", "kubeAPIServer"), "bootstrap token authentication is not enabled in the kube-apiserver"))
|
||||
} else if c.Spec.KubeAPIServer.EnableBootstrapAuthToken == nil {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("spec", "kubeAPIServer", "enableBootstrapAuthToken"), "kube-apiserver has not been configured to use bootstrap tokens"))
|
||||
} else if !fi.BoolValue(c.Spec.KubeAPIServer.EnableBootstrapAuthToken) {
|
||||
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "kubeAPIServer", "enableBootstrapAuthToken"), "bootstrap tokens in the kube-apiserver has been disabled"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -439,23 +442,23 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
case kops.UpdatePolicyExternal:
// Valid
default:
return field.Invalid(fieldSpec.Child("UpdatePolicy"), *c.Spec.UpdatePolicy, "unrecognized value for UpdatePolicy")
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("updatePolicy"), *c.Spec.UpdatePolicy, []string{kops.UpdatePolicyExternal}))
}
}

// KubeProxy
if c.Spec.KubeProxy != nil {
kubeProxyPath := fieldSpec.Child("KubeProxy")
kubeProxyPath := fieldSpec.Child("kubeProxy")
master := c.Spec.KubeProxy.Master

for i, x := range c.Spec.KubeProxy.IPVSExcludeCIDRS {
if _, _, err := net.ParseCIDR(x); err != nil {
return field.Invalid(kubeProxyPath.Child("ipvsExcludeCIDRS").Index(i), x, "Invalid network CIDR")
allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("ipvsExcludeCidrs").Index(i), x, "Invalid network CIDR"))
}
}

if master != "" && !isValidAPIServersURL(master) {
return field.Invalid(kubeProxyPath.Child("Master"), master, "Not a valid APIServer URL")
allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("master"), master, "Not a valid APIServer URL"))
}
}

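The `ipvsExcludeCidrs` loop above validates each CIDR entry and attributes failures to the offending index rather than the whole list. A standalone sketch of the same check, assuming only the standard library and the apimachinery `field` package:

```go
package main

import (
	"fmt"
	"net"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

// validateCIDRs mirrors the ipvsExcludeCidrs check: each entry is parsed and a
// failure is reported against its index, not against the list as a whole.
func validateCIDRs(cidrs []string, path *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	for i, c := range cidrs {
		if _, _, err := net.ParseCIDR(c); err != nil {
			allErrs = append(allErrs, field.Invalid(path.Index(i), c, "Invalid network CIDR"))
		}
	}
	return allErrs
}

func main() {
	cidrs := []string{"10.0.0.0/8", "not-a-cidr", "192.168.0.0/33"}
	errs := validateCIDRs(cidrs, field.NewPath("spec", "kubeProxy", "ipvsExcludeCidrs"))
	for _, err := range errs {
		fmt.Println(err) // e.g. spec.kubeProxy.ipvsExcludeCidrs[1]: Invalid value: "not-a-cidr": ...
	}
}
```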
@@ -464,9 +467,8 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
if kubernetesRelease.GTE(semver.MustParse("1.10.0")) {
if len(c.Spec.KubeAPIServer.AdmissionControl) > 0 {
if len(c.Spec.KubeAPIServer.DisableAdmissionPlugins) > 0 {
return field.Invalid(fieldSpec.Child("KubeAPIServer").Child("DisableAdmissionPlugins"),
strings.Join(c.Spec.KubeAPIServer.DisableAdmissionPlugins, ","),
"DisableAdmissionPlugins is mutually exclusive, you cannot use both AdmissionControl and DisableAdmissionPlugins together")
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "disableAdmissionPlugins"),
"disableAdmissionPlugins is mutually exclusive, you cannot use both admissionControl and disableAdmissionPlugins together"))
}
}
}

@ -474,65 +476,65 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
|
|||
|
||||
// Kubelet
|
||||
if c.Spec.Kubelet != nil {
|
||||
kubeletPath := fieldSpec.Child("Kubelet")
|
||||
kubeletPath := fieldSpec.Child("kubelet")
|
||||
|
||||
{
|
||||
// Flag removed in 1.6
|
||||
if c.Spec.Kubelet.APIServers != "" {
|
||||
return field.Invalid(
|
||||
kubeletPath.Child("APIServers"),
|
||||
allErrs = append(allErrs, field.Invalid(
|
||||
kubeletPath.Child("apiServers"),
|
||||
c.Spec.Kubelet.APIServers,
|
||||
"api-servers flag was removed in 1.6")
|
||||
"api-servers flag was removed in 1.6"))
|
||||
}
|
||||
}
|
||||
|
||||
if kubernetesRelease.GTE(semver.MustParse("1.10.0")) {
|
||||
// Flag removed in 1.10
|
||||
if c.Spec.Kubelet.RequireKubeconfig != nil {
|
||||
return field.Invalid(
|
||||
allErrs = append(allErrs, field.Invalid(
|
||||
kubeletPath.Child("requireKubeconfig"),
|
||||
*c.Spec.Kubelet.RequireKubeconfig,
|
||||
"require-kubeconfig flag was removed in 1.10. (Please be sure you are not using a cluster config from `kops get cluster --full`)")
|
||||
"require-kubeconfig flag was removed in 1.10. (Please be sure you are not using a cluster config from `kops get cluster --full`)"))
|
||||
}
|
||||
}
|
||||
|
||||
if c.Spec.Kubelet.BootstrapKubeconfig != "" {
|
||||
if c.Spec.KubeAPIServer == nil {
|
||||
return field.Required(fieldSpec.Child("KubeAPIServer"), "bootstrap token require the NodeRestriction admissions controller")
|
||||
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeAPIServer"), "bootstrap token require the NodeRestriction admissions controller"))
|
||||
}
|
||||
}
|
||||
|
||||
if c.Spec.Kubelet.APIServers != "" && !isValidAPIServersURL(c.Spec.Kubelet.APIServers) {
|
||||
return field.Invalid(kubeletPath.Child("APIServers"), c.Spec.Kubelet.APIServers, "Not a valid APIServer URL")
|
||||
allErrs = append(allErrs, field.Invalid(kubeletPath.Child("apiServers"), c.Spec.Kubelet.APIServers, "Not a valid apiServer URL"))
|
||||
}
|
||||
}
|
||||
|
||||
// MasterKubelet
|
||||
if c.Spec.MasterKubelet != nil {
|
||||
masterKubeletPath := fieldSpec.Child("MasterKubelet")
|
||||
masterKubeletPath := fieldSpec.Child("masterKubelet")
|
||||
|
||||
{
|
||||
// Flag removed in 1.6
|
||||
if c.Spec.MasterKubelet.APIServers != "" {
|
||||
return field.Invalid(
|
||||
masterKubeletPath.Child("APIServers"),
|
||||
allErrs = append(allErrs, field.Invalid(
|
||||
masterKubeletPath.Child("apiServers"),
|
||||
c.Spec.MasterKubelet.APIServers,
|
||||
"api-servers flag was removed in 1.6")
|
||||
"api-servers flag was removed in 1.6"))
|
||||
}
|
||||
}
|
||||
|
||||
if kubernetesRelease.GTE(semver.MustParse("1.10.0")) {
|
||||
// Flag removed in 1.10
|
||||
if c.Spec.MasterKubelet.RequireKubeconfig != nil {
|
||||
return field.Invalid(
|
||||
allErrs = append(allErrs, field.Invalid(
|
||||
masterKubeletPath.Child("requireKubeconfig"),
|
||||
*c.Spec.MasterKubelet.RequireKubeconfig,
|
||||
"require-kubeconfig flag was removed in 1.10. (Please be sure you are not using a cluster config from `kops get cluster --full`)")
|
||||
"require-kubeconfig flag was removed in 1.10. (Please be sure you are not using a cluster config from `kops get cluster --full`)"))
|
||||
}
|
||||
}
|
||||
|
||||
if c.Spec.MasterKubelet.APIServers != "" && !isValidAPIServersURL(c.Spec.MasterKubelet.APIServers) {
|
||||
return field.Invalid(masterKubeletPath.Child("APIServers"), c.Spec.MasterKubelet.APIServers, "Not a valid APIServer URL")
|
||||
allErrs = append(allErrs, field.Invalid(masterKubeletPath.Child("apiServers"), c.Spec.MasterKubelet.APIServers, "Not a valid apiServers URL"))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -540,24 +542,25 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
if c.Spec.Topology != nil {
if c.Spec.Topology.Masters != "" && c.Spec.Topology.Nodes != "" {
if c.Spec.Topology.Masters != kops.TopologyPublic && c.Spec.Topology.Masters != kops.TopologyPrivate {
return field.Invalid(fieldSpec.Child("Topology", "Masters"), c.Spec.Topology.Masters, "Invalid Masters value for Topology")
} else if c.Spec.Topology.Nodes != kops.TopologyPublic && c.Spec.Topology.Nodes != kops.TopologyPrivate {
return field.Invalid(fieldSpec.Child("Topology", "Nodes"), c.Spec.Topology.Nodes, "Invalid Nodes value for Topology")
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("topology", "masters"), c.Spec.Topology.Masters, kops.SupportedTopologies))
}
if c.Spec.Topology.Nodes != kops.TopologyPublic && c.Spec.Topology.Nodes != kops.TopologyPrivate {
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("topology", "nodes"), c.Spec.Topology.Nodes, kops.SupportedTopologies))
}

} else {
return field.Required(fieldSpec.Child("Masters"), "Topology requires non-nil values for Masters and Nodes")
allErrs = append(allErrs, field.Required(fieldSpec.Child("masters"), "topology requires non-nil values for masters and nodes"))
}
if c.Spec.Topology.Bastion != nil {
bastion := c.Spec.Topology.Bastion
if c.Spec.Topology.Masters == kops.TopologyPublic || c.Spec.Topology.Nodes == kops.TopologyPublic {
return field.Invalid(fieldSpec.Child("Topology", "Masters"), c.Spec.Topology.Masters, "Bastion supports only Private Masters and Nodes")
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("topology", "bastion"), "bastion requires masters and nodes to have private topology"))
}
if bastion.IdleTimeoutSeconds != nil && *bastion.IdleTimeoutSeconds <= 0 {
return field.Invalid(fieldSpec.Child("Topology", "Bastion", "IdleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "Bastion IdleTimeoutSeconds should be greater than zero")
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "bastion", "idleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "bastion idleTimeoutSeconds should be greater than zero"))
}
if bastion.IdleTimeoutSeconds != nil && *bastion.IdleTimeoutSeconds > 3600 {
return field.Invalid(fieldSpec.Child("Topology", "Bastion", "IdleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "Bastion IdleTimeoutSeconds cannot be greater than one hour")
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "bastion", "idleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "bastion idleTimeoutSeconds cannot be greater than one hour"))
}

}

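The topology hunk above also swaps generic `field.Invalid` errors for `field.NotSupported` and `field.Forbidden`, whose rendered messages carry more of the explanation themselves. A small sketch comparing how the three constructors read when printed; the path and values below are illustrative only:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("spec", "topology", "masters")

	// Invalid: the caller has to spell out what would have been acceptable.
	fmt.Println(field.Invalid(path, "weird", "Invalid Masters value for Topology"))

	// NotSupported: the list of supported values becomes part of the message.
	fmt.Println(field.NotSupported(path, "weird", []string{"public", "private"}))

	// Forbidden: for combinations that are never allowed, regardless of value.
	fmt.Println(field.Forbidden(field.NewPath("spec", "topology", "bastion"),
		"bastion requires masters and nodes to have private topology"))
}
```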
@ -568,52 +571,63 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
|
|||
if s.Egress == "" {
|
||||
continue
|
||||
}
|
||||
fieldSubnet := fieldSpec.Child("Subnets").Index(i)
|
||||
fieldSubnet := fieldSpec.Child("subnets").Index(i)
|
||||
if !strings.HasPrefix(s.Egress, "nat-") && !strings.HasPrefix(s.Egress, "i-") && s.Egress != kops.EgressExternal {
|
||||
return field.Invalid(fieldSubnet.Child("Egress"), s.Egress, "egress must be of type NAT Gateway or NAT EC2 Instance or 'External'")
|
||||
allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("egress"), s.Egress, "egress must be of type NAT Gateway or NAT EC2 Instance or 'External'"))
|
||||
}
|
||||
if s.Egress != kops.EgressExternal && s.Type != "Private" {
|
||||
return field.Invalid(fieldSubnet.Child("Egress"), s.Egress, "egress can only be specified for Private subnets")
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSubnet.Child("egress"), "egress can only be specified for private subnets"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Etcd
|
||||
{
|
||||
fieldEtcdClusters := fieldSpec.Child("EtcdClusters")
|
||||
fieldEtcdClusters := fieldSpec.Child("etcdClusters")
|
||||
|
||||
if len(c.Spec.EtcdClusters) == 0 {
|
||||
return field.Required(fieldEtcdClusters, "")
|
||||
}
|
||||
for i, x := range c.Spec.EtcdClusters {
|
||||
if err := validateEtcdClusterSpecLegacy(x, fieldEtcdClusters.Index(i)); err != nil {
|
||||
return err
|
||||
allErrs = append(allErrs, field.Required(fieldEtcdClusters, ""))
|
||||
} else {
|
||||
for i, x := range c.Spec.EtcdClusters {
|
||||
allErrs = append(allErrs, validateEtcdClusterSpecLegacy(x, fieldEtcdClusters.Index(i))...)
|
||||
}
|
||||
}
|
||||
if err := validateEtcdTLS(c.Spec.EtcdClusters, fieldEtcdClusters); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateEtcdStorage(c.Spec.EtcdClusters, fieldEtcdClusters); err != nil {
|
||||
return err
|
||||
allErrs = append(allErrs, validateEtcdTLS(c.Spec.EtcdClusters, fieldEtcdClusters)...)
|
||||
allErrs = append(allErrs, validateEtcdStorage(c.Spec.EtcdClusters, fieldEtcdClusters)...)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
if c.Spec.Networking != nil && c.Spec.Networking.Classic != nil {
|
||||
return field.Invalid(fieldSpec.Child("Networking"), "classic", "classic networking is not supported with kubernetes versions 1.4 and later")
|
||||
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking"), "classic", "classic networking is not supported"))
|
||||
}
|
||||
}
|
||||
|
||||
if c.Spec.Networking != nil && (c.Spec.Networking.AmazonVPC != nil || c.Spec.Networking.LyftVPC != nil) &&
|
||||
c.Spec.CloudProvider != "aws" {
|
||||
return field.Invalid(fieldSpec.Child("Networking"), "amazon-vpc-routed-eni", "amazon-vpc-routed-eni networking is supported only in AWS")
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networking"), "amazon-vpc-routed-eni networking is supported only in AWS"))
|
||||
}
|
||||
|
||||
if errs := newValidateCluster(c); len(errs) != 0 {
|
||||
return errs[0]
|
||||
allErrs = append(allErrs, newValidateCluster(c)...)
|
||||
|
||||
if c.Spec.Networking != nil && c.Spec.Networking.Cilium != nil {
|
||||
ciliumSpec := c.Spec.Networking.Cilium
|
||||
|
||||
if ciliumSpec.EnableNodePort && c.Spec.KubeProxy != nil && *c.Spec.KubeProxy.Enabled {
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeProxy", "enabled"), "When Cilium NodePort is enabled, kubeProxy must be disabled"))
|
||||
}
|
||||
|
||||
if ciliumSpec.Ipam == kops.CiliumIpamEni {
|
||||
if c.Spec.CloudProvider != string(kops.CloudProviderAWS) {
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("cilium", "ipam"), "Cilum ENI IPAM is supported only in AWS"))
|
||||
}
|
||||
if !ciliumSpec.DisableMasquerade {
|
||||
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("cilium", "disableMasquerade"), "Masquerade must be disabled when ENI IPAM is used"))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateSubnetCIDR is responsible for validating subnets are part of the CIDRs assigned to the cluster.
|
||||
|
|
@ -632,61 +646,59 @@ func validateSubnetCIDR(networkCIDR *net.IPNet, additionalNetworkCIDRs []*net.IP
|
|||
}
|
||||
|
||||
// validateEtcdClusterSpecLegacy is responsible for validating the etcd cluster spec
|
||||
func validateEtcdClusterSpecLegacy(spec *kops.EtcdClusterSpec, fieldPath *field.Path) *field.Error {
|
||||
func validateEtcdClusterSpecLegacy(spec *kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if spec.Name == "" {
|
||||
return field.Required(fieldPath.Child("Name"), "EtcdCluster did not have name")
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "etcdCluster did not have name"))
|
||||
}
|
||||
if len(spec.Members) == 0 {
|
||||
return field.Required(fieldPath.Child("Members"), "No members defined in etcd cluster")
|
||||
}
|
||||
if (len(spec.Members) % 2) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("members"), "No members defined in etcd cluster"))
|
||||
} else if (len(spec.Members) % 2) == 0 {
|
||||
// Not technically a requirement, but doesn't really make sense to allow
|
||||
return field.Invalid(fieldPath.Child("Members"), len(spec.Members), "Should be an odd number of master-zones for quorum. Use --zones and --master-zones to declare node zones and master zones separately")
|
||||
}
|
||||
if err := validateEtcdVersion(spec, fieldPath, nil); err != nil {
|
||||
return err
|
||||
allErrs = append(allErrs, field.Invalid(fieldPath.Child("members"), len(spec.Members), "Should be an odd number of master-zones for quorum. Use --zones and --master-zones to declare node zones and master zones separately"))
|
||||
}
|
||||
allErrs = append(allErrs, validateEtcdVersion(spec, fieldPath, nil)...)
|
||||
for _, m := range spec.Members {
|
||||
if err := validateEtcdMemberSpec(m, fieldPath); err != nil {
|
||||
return err
|
||||
}
|
||||
allErrs = append(allErrs, validateEtcdMemberSpec(m, fieldPath)...)
|
||||
}
|
||||
|
||||
return nil
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateEtcdTLS checks the TLS settings for etcd are valid
|
||||
func validateEtcdTLS(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) *field.Error {
|
||||
func validateEtcdTLS(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
var usingTLS int
|
||||
for _, x := range specs {
|
||||
if x.EnableEtcdTLS {
|
||||
usingTLS++
|
||||
}
|
||||
}
|
||||
// check both clusters are using tls if one us enabled
|
||||
// check both clusters are using tls if one is enabled
|
||||
if usingTLS > 0 && usingTLS != len(specs) {
|
||||
return field.Invalid(fieldPath.Index(0).Child("EnableEtcdTLS"), false, "Both etcd clusters must have TLS enabled or none at all")
|
||||
allErrs = append(allErrs, field.Forbidden(fieldPath.Index(0).Child("enableEtcdTLS"), "both etcd clusters must have TLS enabled or none at all"))
|
||||
}
|
||||
|
||||
return nil
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateEtcdStorage is responsible for checks version are identical
|
||||
func validateEtcdStorage(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) *field.Error {
|
||||
// validateEtcdStorage is responsible for checking versions are identical.
|
||||
func validateEtcdStorage(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
version := specs[0].Version
|
||||
for i, x := range specs {
|
||||
if x.Version != "" && x.Version != version {
|
||||
return field.Invalid(fieldPath.Index(i).Child("Version"), x.Version, fmt.Sprintf("cluster: %q, has a different storage versions: %q, both must be the same", x.Name, x.Version))
|
||||
allErrs = append(allErrs, field.Forbidden(fieldPath.Index(i).Child("version"), fmt.Sprintf("cluster: %q, has a different storage version: %q, both must be the same", x.Name, x.Version)))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateEtcdVersion is responsible for validating the storage version of etcd
|
||||
// @TODO semvar package doesn't appear to ignore a 'v' in v1.1.1 should could be a problem later down the line
|
||||
func validateEtcdVersion(spec *kops.EtcdClusterSpec, fieldPath *field.Path, minimalVersion *semver.Version) *field.Error {
|
||||
// @check if the storage is specified, that's is valid
|
||||
// @TODO semvar package doesn't appear to ignore a 'v' in v1.1.1; could be a problem later down the line
|
||||
func validateEtcdVersion(spec *kops.EtcdClusterSpec, fieldPath *field.Path, minimalVersion *semver.Version) field.ErrorList {
|
||||
// @check if the storage is specified that it's valid
|
||||
|
||||
if minimalVersion == nil {
|
||||
v := semver.MustParse("0.0.0")
|
||||
|
|
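The `@TODO` note above about the semver package not ignoring a leading `v` is why `validateEtcdVersion` trims the prefix before parsing. A hedged sketch of that behavior; it assumes the pre-v4 `github.com/blang/semver` import path that kops vendored at the time:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/blang/semver"
)

func main() {
	// blang/semver rejects a leading "v"...
	if _, err := semver.Parse("v3.3.10"); err != nil {
		fmt.Println("with prefix:", err)
	}

	// ...so the version string is trimmed before parsing, as in validateEtcdVersion.
	sem, err := semver.Parse(strings.TrimPrefix("v3.3.10", "v"))
	if err != nil {
		panic(err)
	}
	fmt.Println("major version:", sem.Major)                          // 3
	fmt.Println("older than 3.4.0?", sem.LT(semver.MustParse("3.4.0"))) // true
}
```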
@ -700,37 +712,38 @@ func validateEtcdVersion(spec *kops.EtcdClusterSpec, fieldPath *field.Path, mini
|
|||
|
||||
sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
|
||||
if err != nil {
|
||||
return field.Invalid(fieldPath.Child("Version"), version, "the storage version is invalid")
|
||||
return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "the storage version is invalid")}
|
||||
}
|
||||
|
||||
// we only support v3 and v2 for now
|
||||
if sem.Major == 3 || sem.Major == 2 {
|
||||
if sem.LT(*minimalVersion) {
|
||||
return field.Invalid(fieldPath.Child("Version"), version, fmt.Sprintf("minimal version required is %s", minimalVersion.String()))
|
||||
return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, fmt.Sprintf("minimum version required is %s", minimalVersion.String()))}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return field.Invalid(fieldPath.Child("Version"), version, "unsupported storage version, we only support major versions 2 and 3")
|
||||
return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "unsupported storage version, we only support major versions 2 and 3")}
|
||||
}
|
||||
|
||||
// validateEtcdMemberSpec is responsible for validate the cluster member
|
||||
func validateEtcdMemberSpec(spec *kops.EtcdMemberSpec, fieldPath *field.Path) *field.Error {
|
||||
func validateEtcdMemberSpec(spec *kops.EtcdMemberSpec, fieldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if spec.Name == "" {
|
||||
return field.Required(fieldPath.Child("Name"), "EtcdMember did not have Name")
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "etcdMember did not have name"))
|
||||
}
|
||||
|
||||
if fi.StringValue(spec.InstanceGroup) == "" {
|
||||
return field.Required(fieldPath.Child("InstanceGroup"), "EtcdMember did not have InstanceGroup")
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("instanceGroup"), "etcdMember did not have instanceGroup"))
|
||||
}
|
||||
|
||||
return nil
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// DeepValidate is responsible for validating the instancegroups within the cluster spec
|
||||
func DeepValidate(c *kops.Cluster, groups []*kops.InstanceGroup, strict bool) error {
|
||||
if err := ValidateCluster(c, strict); err != nil {
|
||||
return err
|
||||
if errs := ValidateCluster(c, strict); len(errs) != 0 {
|
||||
return errs.ToAggregate()
|
||||
}
|
||||
|
||||
if len(groups) == 0 {
|
||||
|
|
@@ -756,24 +769,22 @@ func DeepValidate(c *kops.Cluster, groups []*kops.InstanceGroup, strict bool) er
}

for _, g := range groups {
err := CrossValidateInstanceGroup(g, c, strict)
if err != nil {
return err
}
errs := CrossValidateInstanceGroup(g, c, strict)

// Additional cloud-specific validation rules,
// such as making sure that identifiers match the expected formats for the given cloud
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case kops.CloudProviderAWS:
errs := awsValidateInstanceGroup(g)
if len(errs) != 0 {
return errs[0]
}
errs = append(errs, awsValidateInstanceGroup(g)...)
default:
if len(g.Spec.Volumes) > 0 {
return errors.New("instancegroup volumes are only available with aws at present")
errs = append(errs, field.Forbidden(field.NewPath("spec", "volumes"), "instancegroup volumes are only available with aws at present"))
}
}

if len(errs) != 0 {
return errs.ToAggregate()
}
}

return nil
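`DeepValidate` now converts the accumulated `field.ErrorList` into a single `error` with `ToAggregate()` at the boundary where callers only speak the plain `error` interface. A sketch of what that conversion looks like from the caller's side; the example errors are illustrative:

```go
package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	allErrs := field.ErrorList{
		field.Required(field.NewPath("spec", "kubeAPIServer"), "bootstrap token authentication is not enabled in the kube-apiserver"),
		field.Forbidden(field.NewPath("spec", "volumes"), "instancegroup volumes are only available with aws at present"),
	}

	// ToAggregate flattens the list into one error (nil if the list is empty).
	err := allErrs.ToAggregate()
	fmt.Println(err)

	// Callers that want the individual failures back can unwrap the aggregate.
	if agg, ok := err.(utilerrors.Aggregate); ok {
		for _, e := range agg.Errors() {
			fmt.Println(" -", e)
		}
	}
}
```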
@@ -99,7 +99,7 @@ func validateClusterSpec(spec *kops.ClusterSpec, fieldPath *field.Path) field.Er
if spec.Networking != nil {
allErrs = append(allErrs, validateNetworking(spec, spec.Networking, fieldPath.Child("networking"))...)
if spec.Networking.Calico != nil {
allErrs = append(allErrs, validateNetworkingCalico(spec.Networking.Calico, spec.EtcdClusters[0], fieldPath.Child("networking").Child("Calico"))...)
allErrs = append(allErrs, validateNetworkingCalico(spec.Networking.Calico, spec.EtcdClusters[0], fieldPath.Child("networking", "calico"))...)
}
}

@@ -169,23 +169,20 @@ func validateSubnets(subnets []kops.ClusterSubnetSpec, fieldPath *field.Path) fi
for i := range subnets {
name := subnets[i].Name
if names.Has(name) {
allErrs = append(allErrs, field.Invalid(fieldPath, subnets, fmt.Sprintf("subnets with duplicate name %q found", name)))
allErrs = append(allErrs, field.Duplicate(fieldPath.Index(i).Child("name"), name))
}
names.Insert(name)
}
}

// cannot mix subnets with specified ID and without specified id
{
hasID := 0
if len(subnets) > 0 {
hasID := subnets[0].ProviderID != ""
for i := range subnets {
if subnets[i].ProviderID != "" {
hasID++
if (subnets[i].ProviderID != "") != hasID {
allErrs = append(allErrs, field.Forbidden(fieldPath.Index(i).Child("id"), "cannot mix subnets with specified ID and unspecified ID"))
}
}
if hasID != 0 && hasID != len(subnets) {
allErrs = append(allErrs, field.Invalid(fieldPath, subnets, "cannot mix subnets with specified ID and unspecified ID"))
}
}

return allErrs
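The `validateSubnets` hunk replaces list-level `Invalid` errors with per-index `Duplicate` and `Forbidden` errors. A sketch of the same two checks against a simplified subnet type; the `subnet` struct below is a stand-in, not the kops API type:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// subnet is a simplified stand-in for kops.ClusterSubnetSpec.
type subnet struct {
	Name       string
	ProviderID string
}

func validateSubnets(subnets []subnet, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	// Duplicate names are reported against the offending index.
	names := sets.NewString()
	for i := range subnets {
		if names.Has(subnets[i].Name) {
			allErrs = append(allErrs, field.Duplicate(fieldPath.Index(i).Child("name"), subnets[i].Name))
		}
		names.Insert(subnets[i].Name)
	}

	// Either every subnet has a provider ID or none do: the first subnet sets
	// the expectation and each mismatch is flagged individually.
	if len(subnets) > 0 {
		hasID := subnets[0].ProviderID != ""
		for i := range subnets {
			if (subnets[i].ProviderID != "") != hasID {
				allErrs = append(allErrs, field.Forbidden(fieldPath.Index(i).Child("id"), "cannot mix subnets with specified ID and unspecified ID"))
			}
		}
	}
	return allErrs
}

func main() {
	errs := validateSubnets([]subnet{{Name: "a", ProviderID: "subnet-1"}, {Name: "a"}}, field.NewPath("subnets"))
	for _, err := range errs {
		fmt.Println(err)
	}
}
```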
@ -196,12 +193,12 @@ func validateSubnet(subnet *kops.ClusterSubnetSpec, fieldPath *field.Path) field
|
|||
|
||||
// name is required
|
||||
if subnet.Name == "" {
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("Name"), ""))
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("name"), ""))
|
||||
}
|
||||
|
||||
// CIDR
|
||||
if subnet.CIDR != "" {
|
||||
allErrs = append(allErrs, validateCIDR(subnet.CIDR, fieldPath.Child("CIDR"))...)
|
||||
allErrs = append(allErrs, validateCIDR(subnet.CIDR, fieldPath.Child("cidr"))...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
|
|
@ -212,10 +209,10 @@ func validateFileAssetSpec(v *kops.FileAssetSpec, fieldPath *field.Path) field.E
|
|||
allErrs := field.ErrorList{}
|
||||
|
||||
if v.Name == "" {
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("Name"), ""))
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("name"), ""))
|
||||
}
|
||||
if v.Content == "" {
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("Content"), ""))
|
||||
allErrs = append(allErrs, field.Required(fieldPath.Child("content"), ""))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
|
|
@ -250,7 +247,7 @@ func validateHookSpec(v *kops.HookSpec, fieldPath *field.Path) field.ErrorList {
|
|||
}
|
||||
|
||||
if v.ExecContainer != nil {
|
||||
allErrs = append(allErrs, validateExecContainerAction(v.ExecContainer, fieldPath.Child("ExecContainer"))...)
|
||||
allErrs = append(allErrs, validateExecContainerAction(v.ExecContainer, fieldPath.Child("execContainer"))...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
|
|
@ -260,7 +257,7 @@ func validateExecContainerAction(v *kops.ExecContainerAction, fldPath *field.Pat
|
|||
allErrs := field.ErrorList{}
|
||||
|
||||
if v.Image == "" {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("Image"), "Image must be specified"))
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("image"), "image must be specified"))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
|
|
@@ -273,22 +270,20 @@ func validateKubeAPIServer(v *kops.KubeAPIServerConfig, fldPath *field.Path) fie
proxyClientKeyIsNil := v.ProxyClientKeyFile == nil

if (proxyClientCertIsNil && !proxyClientKeyIsNil) || (!proxyClientCertIsNil && proxyClientKeyIsNil) {
flds := [2]*string{v.ProxyClientCertFile, v.ProxyClientKeyFile}
allErrs = append(allErrs, field.Invalid(fldPath, flds, "ProxyClientCertFile and ProxyClientKeyFile must both be specified (or not all)"))
allErrs = append(allErrs, field.Forbidden(fldPath, "proxyClientCertFile and proxyClientKeyFile must both be specified (or neither)"))
}

if v.ServiceNodePortRange != "" {
pr := &utilnet.PortRange{}
err := pr.Set(v.ServiceNodePortRange)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, v.ServiceNodePortRange, err.Error()))
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceNodePortRange"), v.ServiceNodePortRange, err.Error()))
}
}

if v.AuthorizationMode != nil && strings.Contains(*v.AuthorizationMode, "Webhook") {
if v.AuthorizationWebhookConfigFile == nil {
flds := [2]*string{v.AuthorizationMode, v.AuthorizationWebhookConfigFile}
allErrs = append(allErrs, field.Invalid(fldPath, flds, "Authorization mode Webhook requires AuthorizationWebhookConfigFile to be specified"))
allErrs = append(allErrs, field.Required(fldPath.Child("authorizationWebhookConfigFile"), "Authorization mode Webhook requires authorizationWebhookConfigFile to be specified"))
}
}

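The node-port-range check above now attributes the parse error to the `serviceNodePortRange` child path instead of the whole kube-apiserver config. A sketch of that validation built around the same `utilnet.PortRange` parser used in the hunk; the wrapper function name is illustrative:

```go
package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func validateServiceNodePortRange(value string, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if value != "" {
		pr := &utilnet.PortRange{}
		if err := pr.Set(value); err != nil {
			// The error points at the specific child field, not the whole config.
			allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceNodePortRange"), value, err.Error()))
		}
	}
	return allErrs
}

func main() {
	fldPath := field.NewPath("spec", "kubeAPIServer")
	fmt.Println(validateServiceNodePortRange("30000-32767", fldPath)) // no errors
	fmt.Println(validateServiceNodePortRange("not-a-range", fldPath))
}
```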
@ -299,7 +294,7 @@ func validateNetworking(c *kops.ClusterSpec, v *kops.NetworkingSpec, fldPath *fi
|
|||
allErrs := field.ErrorList{}
|
||||
|
||||
if v.Flannel != nil {
|
||||
allErrs = append(allErrs, validateNetworkingFlannel(v.Flannel, fldPath.Child("Flannel"))...)
|
||||
allErrs = append(allErrs, validateNetworkingFlannel(v.Flannel, fldPath.Child("flannel"))...)
|
||||
}
|
||||
|
||||
if v.GCE != nil {
|
||||
|
|
@ -314,11 +309,11 @@ func validateNetworkingFlannel(v *kops.FlannelNetworkingSpec, fldPath *field.Pat
|
|||
|
||||
switch v.Backend {
|
||||
case "":
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("Backend"), "Flannel backend must be specified"))
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("backend"), "Flannel backend must be specified"))
|
||||
case "udp", "vxlan":
|
||||
// OK
|
||||
default:
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("Backend"), v.Backend, []string{"udp", "vxlan"}))
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("backend"), v.Backend, []string{"udp", "vxlan"}))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
|
|
@ -328,7 +323,7 @@ func validateNetworkingGCE(c *kops.ClusterSpec, v *kops.GCENetworkingSpec, fldPa
|
|||
allErrs := field.ErrorList{}
|
||||
|
||||
if c.CloudProvider != "gce" {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, "gce", "gce networking is supported only when on GCP"))
|
||||
allErrs = append(allErrs, field.Forbidden(fldPath, "gce networking is supported only when on GCP"))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
|
|
@ -343,8 +338,7 @@ func validateAdditionalPolicy(role string, policy string, fldPath *field.Path) f
|
|||
valid.Insert(k)
|
||||
}
|
||||
if !valid.Has(role) {
|
||||
message := fmt.Sprintf("role is not known (valid values: %s)", strings.Join(valid.List(), ","))
|
||||
errs = append(errs, field.Invalid(fldPath, role, message))
|
||||
errs = append(errs, field.NotSupported(fldPath, role, valid.List()))
|
||||
}
|
||||
|
||||
statements, err := iam.ParseStatements(policy)
|
||||
|
|
@ -363,7 +357,7 @@ func validateAdditionalPolicy(role string, policy string, fldPath *field.Path) f
|
|||
errs = append(errs, field.Required(fldEffect, "Effect must be specified for IAM policy"))
|
||||
|
||||
default:
|
||||
errs = append(errs, field.Invalid(fldEffect, statement.Effect, "Effect must be 'Allow' or 'Deny'"))
|
||||
errs = append(errs, field.NotSupported(fldEffect, statement.Effect, []string{"Allow", "Deny"}))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -383,7 +377,7 @@ func validateEtcdClusterSpec(spec *kops.EtcdClusterSpec, fieldPath *field.Path)
|
|||
// blank means that the user accepts the recommendation
|
||||
|
||||
default:
|
||||
errs = append(errs, field.Invalid(fieldPath.Child("provider"), spec.Provider, "Provider must be Manager or Legacy"))
|
||||
errs = append(errs, field.NotSupported(fieldPath.Child("provider"), spec.Provider, kops.SupportedEtcdProviderTypes))
|
||||
}
|
||||
|
||||
return errs
|
||||
|
|
@ -398,18 +392,18 @@ func ValidateEtcdVersionForCalicoV3(e *kops.EtcdClusterSpec, majorVersion string
|
|||
}
|
||||
sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, field.InternalError(fldPath.Child("MajorVersion"), fmt.Errorf("failed to parse Etcd version to check compatibility: %s", err)))
|
||||
allErrs = append(allErrs, field.InternalError(fldPath.Child("majorVersion"), fmt.Errorf("failed to parse Etcd version to check compatibility: %s", err)))
|
||||
}
|
||||
|
||||
if sem.Major != 3 {
|
||||
if e.Version == "" {
|
||||
allErrs = append(allErrs,
|
||||
field.Invalid(fldPath.Child("MajorVersion"), majorVersion,
|
||||
field.Forbidden(fldPath.Child("majorVersion"),
|
||||
fmt.Sprintf("Unable to use v3 when ETCD version for %s cluster is default(%s)",
|
||||
e.Name, components.DefaultEtcd2Version)))
|
||||
} else {
|
||||
allErrs = append(allErrs,
|
||||
field.Invalid(fldPath.Child("MajorVersion"), majorVersion,
|
||||
field.Forbidden(fldPath.Child("majorVersion"),
|
||||
fmt.Sprintf("Unable to use v3 when ETCD version for %s cluster is %s", e.Name, e.Version)))
|
||||
}
|
||||
}
|
||||
|
|
@ -418,11 +412,9 @@ func ValidateEtcdVersionForCalicoV3(e *kops.EtcdClusterSpec, majorVersion string
|
|||
|
||||
func validateNetworkingCalico(v *kops.CalicoNetworkingSpec, e *kops.EtcdClusterSpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if v.TyphaReplicas >= 0 {
|
||||
|
||||
} else {
|
||||
if v.TyphaReplicas < 0 {
|
||||
allErrs = append(allErrs,
|
||||
field.Invalid(fldPath.Child("TyphaReplicas"), v.TyphaReplicas,
|
||||
field.Invalid(fldPath.Child("typhaReplicas"), v.TyphaReplicas,
|
||||
fmt.Sprintf("Unable to set number of Typha replicas to less than 0, you've specified %d", v.TyphaReplicas)))
|
||||
}
|
||||
switch v.MajorVersion {
|
||||
|
|
@ -431,7 +423,7 @@ func validateNetworkingCalico(v *kops.CalicoNetworkingSpec, e *kops.EtcdClusterS
|
|||
case "v3":
|
||||
allErrs = append(allErrs, ValidateEtcdVersionForCalicoV3(e, v.MajorVersion, fldPath)...)
|
||||
default:
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("MajorVersion"), v.MajorVersion, []string{"v3"}))
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("majorVersion"), v.MajorVersion, []string{"v3"}))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
|
|
@@ -451,11 +443,11 @@ func validateRollingUpdate(rollingUpdate *kops.RollingUpdate, fldpath *field.Pat
if rollingUpdate.MaxUnavailable != nil {
unavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, 1, false)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldpath.Child("MaxUnavailable"), rollingUpdate.MaxUnavailable,
allErrs = append(allErrs, field.Invalid(fldpath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable,
fmt.Sprintf("Unable to parse: %v", err)))
}
if unavailable < 0 {
allErrs = append(allErrs, field.Invalid(fldpath.Child("MaxUnavailable"), rollingUpdate.MaxUnavailable, "Cannot be negative"))
allErrs = append(allErrs, field.Invalid(fldpath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "Cannot be negative"))
}
}
if rollingUpdate.MaxSurge != nil {
@ -125,14 +125,14 @@ func TestValidateSubnets(t *testing.T) {
|
|||
Input: []kops.ClusterSubnetSpec{
|
||||
{Name: ""},
|
||||
},
|
||||
ExpectedErrors: []string{"Required value::Subnets[0].Name"},
|
||||
ExpectedErrors: []string{"Required value::subnets[0].name"},
|
||||
},
|
||||
{
|
||||
Input: []kops.ClusterSubnetSpec{
|
||||
{Name: "a"},
|
||||
{Name: "a"},
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::Subnets"},
|
||||
ExpectedErrors: []string{"Duplicate value::subnets[1].name"},
|
||||
},
|
||||
{
|
||||
Input: []kops.ClusterSubnetSpec{
|
||||
|
|
@ -145,17 +145,17 @@ func TestValidateSubnets(t *testing.T) {
|
|||
{Name: "a", ProviderID: "a"},
|
||||
{Name: "b", ProviderID: ""},
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::Subnets"},
|
||||
ExpectedErrors: []string{"Forbidden::subnets[1].id"},
|
||||
},
|
||||
{
|
||||
Input: []kops.ClusterSubnetSpec{
|
||||
{Name: "a", CIDR: "10.128.0.0/8"},
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::Subnets[0].CIDR"},
|
||||
ExpectedErrors: []string{"Invalid value::subnets[0].cidr"},
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
errs := validateSubnets(g.Input, field.NewPath("Subnets"))
|
||||
errs := validateSubnets(g.Input, field.NewPath("subnets"))
|
||||
|
||||
testErrors(t, g.Input, errs, g.ExpectedErrors)
|
||||
}
|
||||
|
|
@ -175,25 +175,25 @@ func TestValidateKubeAPIServer(t *testing.T) {
|
|||
ProxyClientCertFile: &str,
|
||||
},
|
||||
ExpectedErrors: []string{
|
||||
"Invalid value::KubeAPIServer",
|
||||
"Forbidden::KubeAPIServer",
|
||||
},
|
||||
ExpectedDetail: "ProxyClientCertFile and ProxyClientKeyFile must both be specified (or not all)",
|
||||
ExpectedDetail: "proxyClientCertFile and proxyClientKeyFile must both be specified (or neither)",
|
||||
},
|
||||
{
|
||||
Input: kops.KubeAPIServerConfig{
|
||||
ProxyClientKeyFile: &str,
|
||||
},
|
||||
ExpectedErrors: []string{
|
||||
"Invalid value::KubeAPIServer",
|
||||
"Forbidden::KubeAPIServer",
|
||||
},
|
||||
ExpectedDetail: "ProxyClientCertFile and ProxyClientKeyFile must both be specified (or not all)",
|
||||
ExpectedDetail: "proxyClientCertFile and proxyClientKeyFile must both be specified (or neither)",
|
||||
},
|
||||
{
|
||||
Input: kops.KubeAPIServerConfig{
|
||||
ServiceNodePortRange: str,
|
||||
},
|
||||
ExpectedErrors: []string{
|
||||
"Invalid value::KubeAPIServer",
|
||||
"Invalid value::KubeAPIServer.serviceNodePortRange",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -201,9 +201,9 @@ func TestValidateKubeAPIServer(t *testing.T) {
|
|||
AuthorizationMode: &authzMode,
|
||||
},
|
||||
ExpectedErrors: []string{
|
||||
"Invalid value::KubeAPIServer",
|
||||
"Required value::KubeAPIServer.authorizationWebhookConfigFile",
|
||||
},
|
||||
ExpectedDetail: "Authorization mode Webhook requires AuthorizationWebhookConfigFile to be specified",
|
||||
ExpectedDetail: "Authorization mode Webhook requires authorizationWebhookConfigFile to be specified",
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
|
|
@ -270,13 +270,13 @@ func Test_Validate_Networking_Flannel(t *testing.T) {
|
|||
Input: kops.FlannelNetworkingSpec{
|
||||
Backend: "",
|
||||
},
|
||||
ExpectedErrors: []string{"Required value::Networking.Flannel.Backend"},
|
||||
ExpectedErrors: []string{"Required value::networking.flannel.backend"},
|
||||
},
|
||||
{
|
||||
Input: kops.FlannelNetworkingSpec{
|
||||
Backend: "nope",
|
||||
},
|
||||
ExpectedErrors: []string{"Unsupported value::Networking.Flannel.Backend"},
|
||||
ExpectedErrors: []string{"Unsupported value::networking.flannel.backend"},
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
|
|
@ -286,7 +286,7 @@ func Test_Validate_Networking_Flannel(t *testing.T) {
|
|||
cluster := &kops.Cluster{}
|
||||
cluster.Spec.Networking = networking
|
||||
|
||||
errs := validateNetworking(&cluster.Spec, networking, field.NewPath("Networking"))
|
||||
errs := validateNetworking(&cluster.Spec, networking, field.NewPath("networking"))
|
||||
testErrors(t, g.Input, errs, g.ExpectedErrors)
|
||||
}
|
||||
}
|
||||
|
|
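The tests above compare results against strings like `"Required value::networking.flannel.backend"`. A hedged sketch of how such a comparison key can be derived from a `field.Error`'s `Type` and `Field`; the real kops `testErrors` helper may differ in detail:

```go
package validation_test

import (
	"fmt"
	"testing"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

// errorKeys renders each error as "<type>::<field>", matching the shape of the
// ExpectedErrors strings used in these tests (assumed, not copied from kops).
func errorKeys(errs field.ErrorList) []string {
	var keys []string
	for _, err := range errs {
		keys = append(keys, fmt.Sprintf("%s::%s", err.Type, err.Field))
	}
	return keys
}

func TestErrorKeys(t *testing.T) {
	errs := field.ErrorList{
		field.Required(field.NewPath("networking", "flannel", "backend"), "Flannel backend must be specified"),
	}
	got := errorKeys(errs)
	want := "Required value::networking.flannel.backend"
	if len(got) != 1 || got[0] != want {
		t.Errorf("got %v, want [%s]", got, want)
	}
}
```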
@ -308,7 +308,7 @@ func Test_Validate_AdditionalPolicies(t *testing.T) {
|
|||
Input: map[string]string{
|
||||
"notarole": `[ { "Action": [ "s3:GetObject" ], "Resource": [ "*" ], "Effect": "Allow" } ]`,
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::spec.additionalPolicies"},
|
||||
ExpectedErrors: []string{"Unsupported value::spec.additionalPolicies"},
|
||||
},
|
||||
{
|
||||
Input: map[string]string{
|
||||
|
|
@ -326,7 +326,7 @@ func Test_Validate_AdditionalPolicies(t *testing.T) {
|
|||
Input: map[string]string{
|
||||
"master": `[ { "Action": [ "s3:GetObject" ], "Resource": [ "*" ], "Effect": "allow" } ]`,
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::spec.additionalPolicies[master][0].Effect"},
|
||||
ExpectedErrors: []string{"Unsupported value::spec.additionalPolicies[master][0].Effect"},
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
|
|
@ -372,7 +372,7 @@ func Test_Validate_Calico(t *testing.T) {
|
|||
},
|
||||
Etcd: &kops.EtcdClusterSpec{},
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::Calico.TyphaReplicas"},
|
||||
ExpectedErrors: []string{"Invalid value::calico.typhaReplicas"},
|
||||
},
|
||||
{
|
||||
Input: caliInput{
|
||||
|
|
@ -393,11 +393,11 @@ func Test_Validate_Calico(t *testing.T) {
|
|||
Version: "2.2.18",
|
||||
},
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::Calico.MajorVersion"},
|
||||
ExpectedErrors: []string{"Forbidden::calico.majorVersion"},
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
errs := validateNetworkingCalico(g.Input.Calico, g.Input.Etcd, field.NewPath("Calico"))
|
||||
errs := validateNetworkingCalico(g.Input.Calico, g.Input.Etcd, field.NewPath("calico"))
|
||||
testErrors(t, g.Input, errs, g.ExpectedErrors)
|
||||
}
|
||||
}
|
||||
|
|
@ -424,19 +424,19 @@ func Test_Validate_RollingUpdate(t *testing.T) {
|
|||
Input: kops.RollingUpdate{
|
||||
MaxUnavailable: intStr(intstr.FromString("nope")),
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
|
||||
ExpectedErrors: []string{"Invalid value::testField.maxUnavailable"},
|
||||
},
|
||||
{
|
||||
Input: kops.RollingUpdate{
|
||||
MaxUnavailable: intStr(intstr.FromInt(-1)),
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
|
||||
ExpectedErrors: []string{"Invalid value::testField.maxUnavailable"},
|
||||
},
|
||||
{
|
||||
Input: kops.RollingUpdate{
|
||||
MaxUnavailable: intStr(intstr.FromString("-1%")),
|
||||
},
|
||||
ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
|
||||
ExpectedErrors: []string{"Invalid value::testField.maxUnavailable"},
|
||||
},
|
||||
{
|
||||
Input: kops.RollingUpdate{
|
||||
|
|
@ -468,7 +468,7 @@ func Test_Validate_RollingUpdate(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
errs := validateRollingUpdate(&g.Input, field.NewPath("TestField"))
|
||||
errs := validateRollingUpdate(&g.Input, field.NewPath("testField"))
|
||||
testErrors(t, g.Input, errs, g.ExpectedErrors)
|
||||
}
|
||||
}
|
||||
|
|
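`Test_Validate_RollingUpdate` exercises `maxUnavailable` values such as `"nope"`, `-1`, and `"-1%"`. A sketch of the `intstr.GetValueFromIntOrPercent` call that `validateRollingUpdate` relies on, showing which inputs fail at parse time and which only fail the negativity check:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	cases := []intstr.IntOrString{
		intstr.FromInt(1),
		intstr.FromString("25%"),
		intstr.FromString("nope"), // parse error
		intstr.FromInt(-1),        // parses, but validateRollingUpdate rejects negatives
	}
	for _, c := range cases {
		// total=1, roundUp=false mirrors the call in validateRollingUpdate.
		v, err := intstr.GetValueFromIntOrPercent(&c, 1, false)
		fmt.Printf("%s -> value=%d err=%v\n", c.String(), v, err)
	}
}
```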
|
|||
|
|
@ -734,6 +734,25 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
|||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.ExternalPolicies != nil {
|
||||
in, out := &in.ExternalPolicies, &out.ExternalPolicies
|
||||
*out = new(map[string][]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make(map[string][]string, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal []string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
in, out := &val, &outVal
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
}
|
||||
if in.AdditionalPolicies != nil {
|
||||
in, out := &in.AdditionalPolicies, &out.AdditionalPolicies
|
||||
*out = new(map[string]string)
|
||||
|
|
@ -1216,6 +1235,22 @@ func (in *EgressProxySpec) DeepCopy() *EgressProxySpec {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EnvVar) DeepCopyInto(out *EnvVar) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar.
|
||||
func (in *EnvVar) DeepCopy() *EnvVar {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EnvVar)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) {
|
||||
*out = *in
|
||||
|
|
|
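The generated `DeepCopyInto` above copies the nested `map[string][]string` for `ExternalPolicies` element by element so the copy cannot alias the original. A small illustration of why the generated code goes to that trouble, using a plain map rather than the kops types; the policy ARN is illustrative only:

```go
package main

import "fmt"

func main() {
	in := map[string][]string{"node": {"arn:aws:iam::123456789012:policy/example"}}

	// A shallow copy shares the underlying slices...
	shallow := make(map[string][]string, len(in))
	for k, v := range in {
		shallow[k] = v
	}
	shallow["node"][0] = "mutated"
	fmt.Println(in["node"][0]) // "mutated" – the original changed too

	// ...which is why the generated DeepCopyInto allocates a fresh slice per key.
	in["node"][0] = "arn:aws:iam::123456789012:policy/example"
	deep := make(map[string][]string, len(in))
	for k, v := range in {
		cp := make([]string, len(v))
		copy(cp, v)
		deep[k] = cp
	}
	deep["node"][0] = "mutated"
	fmt.Println(in["node"][0]) // unchanged
}
```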
|||
|
|
@@ -99,8 +99,8 @@ func (c *ClusterVFS) List(options metav1.ListOptions) (*api.ClusterList, error)
}

func (r *ClusterVFS) Create(c *api.Cluster) (*api.Cluster, error) {
if err := validation.ValidateCluster(c, false); err != nil {
return nil, err
if errs := validation.ValidateCluster(c, false); len(errs) != 0 {
return nil, errs.ToAggregate()
}

if c.ObjectMeta.CreationTimestamp.IsZero() {

@@ -125,7 +125,7 @@ func (r *ClusterVFS) Create(c *api.Cluster) (*api.Cluster, error) {
func (r *ClusterVFS) Update(c *api.Cluster, status *api.ClusterStatus) (*api.Cluster, error) {
clusterName := c.ObjectMeta.Name
if clusterName == "" {
return nil, field.Required(field.NewPath("Name"), "clusterName is required")
return nil, field.Required(field.NewPath("objectMeta", "name"), "clusterName is required")
}

old, err := r.Get(clusterName, metav1.GetOptions{})
|
|||
|
|
@ -63,7 +63,7 @@ func NewInstanceGroupMirror(cluster *kopsapi.Cluster, configBase vfs.Path) Insta
|
|||
defaultReadVersion := v1alpha1.SchemeGroupVersion.WithKind(kind)
|
||||
r.defaultReadVersion = &defaultReadVersion
|
||||
r.validate = func(o runtime.Object) error {
|
||||
return validation.ValidateInstanceGroup(o.(*kopsapi.InstanceGroup))
|
||||
return validation.ValidateInstanceGroup(o.(*kopsapi.InstanceGroup)).ToAggregate()
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
|
@ -84,7 +84,7 @@ func newInstanceGroupVFS(c *VFSClientset, cluster *kopsapi.Cluster) *InstanceGro
|
|||
defaultReadVersion := v1alpha1.SchemeGroupVersion.WithKind(kind)
|
||||
r.defaultReadVersion = &defaultReadVersion
|
||||
r.validate = func(o runtime.Object) error {
|
||||
return validation.ValidateInstanceGroup(o.(*kopsapi.InstanceGroup))
|
||||
return validation.ValidateInstanceGroup(o.(*kopsapi.InstanceGroup)).ToAggregate()
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ go_library(
|
|||
"//pkg/assets:go_default_library",
|
||||
"//pkg/client/simple:go_default_library",
|
||||
"//pkg/featureflag:go_default_library",
|
||||
"//pkg/resources/digitalocean:go_default_library",
|
||||
"//upup/pkg/fi/cloudup:go_default_library",
|
||||
"//upup/pkg/fi/cloudup/aliup:go_default_library",
|
||||
"//upup/pkg/fi/cloudup/awstasks:go_default_library",
|
||||
|
|
Some files were not shown because too many files have changed in this diff.