mirror of https://github.com/kubernetes/kops.git
Merge branch 'master' into fix-roll-validation
commit 5a0b199119
Makefile (67 changes)
@@ -98,20 +98,7 @@ PROTOKUBE_TAG := $(subst +,-,${VERSION})
KOPS_SERVER_TAG := $(subst +,-,${VERSION})

# Go exports:

GO15VENDOREXPERIMENT=1
export GO15VENDOREXPERIMENT

COMPILERVERSION := $(shell go version | cut -d' ' -f3 | sed 's/go//g' | tr -d '\n')
COMPILER_VER_MAJOR := $(shell echo $(COMPILERVERSION) | cut -f1 -d.)
COMPILER_VER_MINOR := $(shell echo $(COMPILERVERSION) | cut -f2 -d.)
COMPILER_GT_1_10 := $(shell [ $(COMPILER_VER_MAJOR) -gt 1 -o \( $(COMPILER_VER_MAJOR) -eq 1 -a $(COMPILER_VER_MINOR) -ge 10 \) ] && echo true)

ifeq ($(COMPILER_GT_1_10), true)
LDFLAGS := -ldflags=all=
else
LDFLAGS := -ldflags=
endif

ifdef STATIC_BUILD
CGO_ENABLED=0
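The COMPILER_GT_1_10 probe being removed above derives the Go major/minor version from `go version` output and compares it against 1.10. A minimal Go sketch of the same comparison, with a hypothetical helper name, purely for illustration:

```go
package main

import (
    "fmt"
    "strconv"
    "strings"
)

// atLeast110 mirrors the Makefile's COMPILER_GT_1_10 check: parse a
// version string like "go1.12.9" and report whether it is >= 1.10.
func atLeast110(goVersion string) bool {
    parts := strings.Split(strings.TrimPrefix(goVersion, "go"), ".")
    if len(parts) < 2 {
        return false
    }
    major, _ := strconv.Atoi(parts[0])
    minor, _ := strconv.Atoi(parts[1])
    return major > 1 || (major == 1 && minor >= 10)
}

func main() {
    fmt.Println(atLeast110("go1.12.9")) // true
}
```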
@@ -197,23 +184,17 @@ upup/models/bindata.go: ${GOBINDATA} ${UPUP_MODELS_BINDATA_SOURCES}

# Build in a docker container with golang 1.X
# Used to test we have not broken 1.X
# 1.10 is the default for k8s 1.11. Others are best-effort
.PHONY: check-builds-in-go18
check-builds-in-go18:
# Note we only check that kops builds; we know the tests don't compile because of type aliasing in uber zap
docker run -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.8 make -C /go/src/k8s.io/kops kops

.PHONY: check-builds-in-go19
check-builds-in-go19:
docker run -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.9 make -C /go/src/k8s.io/kops ci

.PHONY: check-builds-in-go110
check-builds-in-go110:
docker run -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.10 make -C /go/src/k8s.io/kops ci

.PHONY: check-builds-in-go111
check-builds-in-go111:
docker run -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.11 make -C /go/src/k8s.io/kops ci
docker run -e GO111MODULE=on -e EXTRA_BUILDFLAGS=-mod=vendor -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.11 make -C /go/src/k8s.io/kops all

.PHONY: check-builds-in-go112
check-builds-in-go112:
docker run -e GO111MODULE=on -e EXTRA_BUILDFLAGS=-mod=vendor -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.12 make -C /go/src/k8s.io/kops all

.PHONY: check-builds-in-go113
check-builds-in-go113:
docker run -e EXTRA_BUILDFLAGS=-mod=vendor -v ${GOPATH_1ST}/src/k8s.io/kops:/go/src/k8s.io/kops golang:1.13 make -C /go/src/k8s.io/kops all

.PHONY: codegen
codegen: kops-gobindata
@@ -408,7 +389,7 @@ push-aws-run: push

.PHONY: ${PROTOKUBE}
${PROTOKUBE}:
go build ${GCFLAGS} -o $@ -tags 'peer_name_alternative peer_name_hash' k8s.io/kops/protokube/cmd/protokube
go build ${GCFLAGS} ${EXTRA_BUILDFLAGS} -o $@ -tags 'peer_name_alternative peer_name_hash' k8s.io/kops/protokube/cmd/protokube

.PHONY: protokube
protokube: ${PROTOKUBE}
@@ -494,35 +475,17 @@ bazel-utils-dist:
# --------------------------------------------------
# development targets

.PHONY: dep-prereqs
dep-prereqs:
(which hg > /dev/null) || (echo "dep requires that mercurial is installed"; exit 1)
(which dep > /dev/null) || (echo "dep-ensure requires that dep is installed"; exit 1)
(which bazel > /dev/null) || (echo "dep-ensure requires that bazel is installed"; exit 1)
.PHONY: gomod-prereqs
gomod-prereqs:
(which bazel > /dev/null) || (echo "gomod requires that bazel is installed"; exit 1)

.PHONY: dep-ensure
dep-ensure: dep-prereqs
dep-ensure:
echo "`make dep-ensure` has been replaced by `make gomod`"
exit 1
dep ensure -v
# Switch weavemesh to use peer_name_hash - bazel rule-go doesn't support build tags yet
rm vendor/github.com/weaveworks/mesh/peer_name_mac.go
sed -i -e 's/peer_name_hash/!peer_name_mac/g' vendor/github.com/weaveworks/mesh/peer_name_hash.go
# Remove all bazel build files that were vendored and regenerate (we assume they are go-gettable)
find vendor/ -name "BUILD" -delete
find vendor/ -name "BUILD.bazel" -delete
# Remove recursive symlinks that really confuse bazel
rm -rf vendor/github.com/coreos/etcd/cmd/
rm -rf vendor/github.com/jteeuwen/go-bindata/testdata/
# Remove dependencies that dep just can't figure out
rm -rf vendor/k8s.io/code-generator/cmd/set-gen/
rm -rf vendor/k8s.io/code-generator/cmd/go-to-protobuf/
rm -rf vendor/k8s.io/code-generator/cmd/import-boss/
rm -rf vendor/github.com/docker/docker/contrib/
make gazelle

.PHONY: gomod
gomod:
gomod: gomod-prereqs
GO111MODULE=on go mod vendor
# Switch weavemesh to use peer_name_hash - bazel rule-go doesn't support build tags yet
rm vendor/github.com/weaveworks/mesh/peer_name_mac.go
OWNERS (1 change)
@@ -3,7 +3,6 @@
approvers:
- justinsb
- chrislovecnm
- andrewsykim
- geojaz
- kashifsaadat
- gambol99
@@ -42,7 +42,7 @@ To install a Kubernetes cluster on GCE please follow this [guide](/docs/get
* Deploys Highly Available (HA) Kubernetes masters
* Built on a state-sync model for **dry-runs** and automatic **idempotency**
* Ability to generate [Terraform](/docs/terraform.md)
* Supports custom Kubernetes [add-ons](/docs/addons.md)
* Supports custom Kubernetes [add-ons](/docs/operations/addons.md)
* Command line [autocompletion](/docs/cli/kops_completion.md)
* YAML manifest based API [configuration](/docs/manifests_and_customizing_via_api.md)
* [Templating](/docs/cluster_template.md) and dry-run modes for creating
@@ -37,9 +37,9 @@ launching a Kubernetes cluster hosted on AWS.

To install a Kubernetes cluster on GCE please follow this [guide](/docs/getting_started/gce.md).

To install a Kubernetes cluster on DigitalOcean, follow this [guide](/docs/tutorial/digitalocean.md).
To install a Kubernetes cluster on DigitalOcean, follow this [guide](/docs/getting_started/digitalocean.md).

To install a Kubernetes cluster on OpenStack, follow this [guide](/docs/tutorial/openstack.md).
To install a Kubernetes cluster on OpenStack, follow this [guide](/docs/getting_started/openstack.md).

**For anything beyond experimental clusters it is highly encouraged to [version control the cluster manifest files](/docs/manifests_and_customizing_via_api.md) and [run kops in a CI environment](/docs/continuous_integration.md).**

@@ -49,7 +49,7 @@ To install a Kubernetes cluster on OpenStack, follow this [guide](/docs/tutorial
* Deploys Highly Available (HA) Kubernetes Masters
* Built on a state-sync model for **dry-runs** and automatic **idempotency**
* Ability to generate [Terraform](/docs/terraform.md)
* Supports custom Kubernetes [add-ons](/docs/addons.md)
* Supports custom Kubernetes [add-ons](/docs/operations/addons.md)
* Command line [autocompletion](/docs/cli/kops_completion.md)
* YAML Manifest Based API [Configuration](/docs/manifests_and_customizing_via_api.md)
* [Templating](/docs/cluster_template.md) and dry-run modes for creating
@@ -1,3 +1,3 @@
## Addons

Read on [addons](../docs/addons.md)
Read on [addons](../docs/operations/addons.md)
@@ -19,7 +19,7 @@ addons:
- manifest: ingress-citrix

```
For more information on how to enable addon during cluster creation refer [Kops Addon guide](https://github.com/kubernetes/kops/blob/master/docs/addons.md#installing-kubernetes-addons)
For more information on how to enable addon during cluster creation refer [Kops Addon guide](https://github.com/kubernetes/kops/blob/master/docs/operations/addons.md#installing-kubernetes-addons)

**NOTE:** This method only works for Google Cloud Platform. For using this addon on AWS, please use the `kubectl` method below.

@@ -131,9 +131,8 @@ func (c *ChannelVersion) replaces(existing *ChannelVersion) bool {
if c.ManifestHash == existing.ManifestHash {
klog.V(4).Infof("Manifest Match")
return false
} else {
klog.V(4).Infof("Channels had same version and ids %q, %q but different ManifestHash (%q vs %q); will replace", *c.Version, c.Id, c.ManifestHash, existing.ManifestHash)
}
klog.V(4).Infof("Channels had same version and ids %q, %q but different ManifestHash (%q vs %q); will replace", *c.Version, c.Id, c.ManifestHash, existing.ManifestHash)
} else {
klog.V(4).Infof("Channels had same version %q but different ids (%q vs %q); will replace", *c.Version, c.Id, existing.Id)
}
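This hunk, and many below it, applies the standard Go lint cleanup of dropping an `else` when the `if` branch ends in `return`. A minimal standalone sketch of the resulting control flow, using plain string hashes rather than the real ChannelVersion type:

```go
// replaces reports whether a channel version with hash newHash should
// replace one with hash existingHash (a sketch of the logic above).
func replaces(newHash, existingHash string) bool {
    if newHash == existingHash {
        return false // manifests match; nothing to do
    }
    // No else needed: reaching here already means the hashes differ.
    return true
}
```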
@@ -0,0 +1,22 @@
# See https://cloud.google.com/cloud-build/docs/build-config
timeout: 1200s
options:
  substitution_option: ALLOW_LOOSE
steps:
  # Start by just pushing the image
  - name: 'gcr.io/k8s-testimages/bazelbuild:v20190916-ec59af8-0.29.1'
    entrypoint: make
    env:
      - VERSION=$_GIT_TAG
      - PULL_BASE_REF=$_PULL_BASE_REF
      - DOCKER_REGISTRY=$_DOCKER_REGISTRY
      - DOCKER_IMAGE_PREFIX=$_DOCKER_IMAGE_PREFIX
    args:
      - kops-controller-push
substitutions:
  # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
  # can be used as a substitution
  _GIT_TAG: '12345'
  _PULL_BASE_REF: 'dev'
  _DOCKER_REGISTRY: 'gcr.io'
  _DOCKER_IMAGE_PREFIX: 'k8s-staging-kops/'
@@ -139,6 +139,8 @@ func (m *MockAutoscaling) DescribeAutoScalingGroups(input *autoscaling.DescribeA
match = true
}
}
} else {
match = true
}

if match {
@@ -137,9 +137,8 @@ func (m *MockRoute53) ChangeResourceRecordSets(request *route53.ChangeResourceRe
if foundIndex == -1 {
// TODO: Use correct error
return nil, fmt.Errorf("record not found %s %q", changeType, changeName)
} else {
zone.records = append(zone.records[:foundIndex], zone.records[foundIndex+1:]...)
}
zone.records = append(zone.records[:foundIndex], zone.records[foundIndex+1:]...)

default:
// TODO: Use correct error
@@ -183,9 +183,8 @@ func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
return fmt.Errorf("instanceGroup %q already exists", v.ObjectMeta.Name)
}
return fmt.Errorf("error creating instanceGroup: %v", err)
} else {
fmt.Fprintf(&sb, "Created instancegroup/%s\n", v.ObjectMeta.Name)
}
fmt.Fprintf(&sb, "Created instancegroup/%s\n", v.ObjectMeta.Name)

case *kopsapi.SSHCredential:
clusterName = v.ObjectMeta.Labels[kopsapi.LabelClusterName]
@@ -357,7 +357,7 @@ func RunUpdateCluster(f *util.Factory, clusterName string, out io.Writer, c *Upd
}
}
fmt.Fprintf(sb, " * the admin user is specific to Debian. If not using Debian please use the appropriate user based on your OS.\n")
fmt.Fprintf(sb, " * read about installing addons at: https://github.com/kubernetes/kops/blob/master/docs/addons.md.\n")
fmt.Fprintf(sb, " * read about installing addons at: https://github.com/kubernetes/kops/blob/master/docs/operations/addons.md.\n")
fmt.Fprintf(sb, "\n")
}

@@ -272,27 +272,26 @@ func (c *UpgradeClusterCmd) Run(args []string) error {
if !c.Yes {
fmt.Printf("\nMust specify --yes to perform upgrade\n")
return nil
} else {
for _, action := range actions {
action.apply()
}

if err := commands.UpdateCluster(clientset, cluster, instanceGroups); err != nil {
return err
}

for _, g := range instanceGroups {
_, err := clientset.InstanceGroupsFor(cluster).Update(g)
if err != nil {
return fmt.Errorf("error writing InstanceGroup %q: %v", g.ObjectMeta.Name, err)
}
}

fmt.Printf("\nUpdates applied to configuration.\n")

// TODO: automate this step
fmt.Printf("You can now apply these changes, using `kops update cluster %s`\n", cluster.ObjectMeta.Name)
}
for _, action := range actions {
action.apply()
}

if err := commands.UpdateCluster(clientset, cluster, instanceGroups); err != nil {
return err
}

for _, g := range instanceGroups {
_, err := clientset.InstanceGroupsFor(cluster).Update(g)
if err != nil {
return fmt.Errorf("error writing InstanceGroup %q: %v", g.ObjectMeta.Name, err)
}
}

fmt.Printf("\nUpdates applied to configuration.\n")

// TODO: automate this step
fmt.Printf("You can now apply these changes, using `kops update cluster %s`\n", cluster.ObjectMeta.Name)

return nil
}
@@ -144,11 +144,10 @@ func RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out
if err != nil {
if time.Now().After(timeout) {
return nil, fmt.Errorf("unexpected error during validation: %v", err)
} else {
klog.Warningf("(will retry): unexpected error during validation: %v", err)
time.Sleep(pollInterval)
continue
}
klog.Warningf("(will retry): unexpected error during validation: %v", err)
time.Sleep(pollInterval)
continue
}

switch options.output {
@@ -48,8 +48,8 @@
* [Using Manifests and Customizing via the API](manifests_and_customizing_via_api.md)

## Operations
* [Cluster addon manager](addon_manager.md)
* [Cluster addons](addons.md)
* [Cluster addon manager](operations/addons.md#addon_management)
* [Cluster addons](operations/addons.md)
* [Cluster configuration management](changing_configuration.md)
* [Cluster desired configuration creation from template](cluster_template.md)
* [Cluster upgrades and migrations](operations/cluster_upgrades_and_migrations.md)
@@ -18,7 +18,7 @@ Please follow our [basic-requirements document](basic-requirements.md) that is c

## DNS Setup - AWS Route53

For our setup we already have a hosted DNS domain in AWS:

```bash
aws route53 list-hosted-zones --output=table
@@ -321,7 +321,7 @@ Suggestions:
* list nodes: kubectl get nodes --show-labels
* ssh to the master: ssh -i ~/.ssh/id_rsa admin@api.mycluster01.kopsclustertest.example.org
The admin user is specific to Debian. If not using Debian please use the appropriate user based on your OS.
* read about installing addons: https://github.com/kubernetes/kops/blob/master/docs/addons.md
* read about installing addons: https://github.com/kubernetes/kops/blob/master/docs/operations/addons.md
```

Note that KOPS will create a DNS record for your API: api.mycluster01.kopsclustertest.example.org. You can check this record with the following "dig" command:
@@ -731,7 +731,7 @@ Save it and review with `kops update cluster $NAME`:

```bash
kops update cluster $NAME
```

The last command will output:

@@ -747,13 +747,13 @@ Will modify resources:
MaxSize 2 -> 3

Must specify --yes to apply changes
```

Now, let's apply the change:

```bash
kops update cluster $NAME --yes
```

Go for another coffee (or maybe a tea) and after some minutes check your cluster again with "kops validate cluster"

@@ -784,7 +784,7 @@ Your cluster mycluster01.kopsclustertest.example.org is ready

```

You can see how your cluster scaled up to 3 nodes.

**SCALING RECOMMENDATIONS:**
- Always think ahead. If you want to ensure to have the capability to scale-up to all available zones in the region, ensure to add them to the "--zones=" argument when using the "kops create cluster" command. Example: --zones=us-east-1a,us-east-1b,us-east-1c,us-east-1d,us-east-1e. That will make things simpler later.
@@ -3,7 +3,7 @@
## Addons
With kops you manage addons by using kubectl.

(For a description of the addon-manager, please see [addon_manager.md](#addon-management).)
(For a description of the addon-manager, please see [addon_management](#addon-management).)

Addons in Kubernetes are traditionally done by copying files to `/etc/kubernetes/addons` on the master. But this
doesn't really make sense in HA master configurations. We also have kubectl available, and addons are just a thin
@@ -21,7 +21,7 @@ This document describes how to install some common addons and how to create your

### Custom addons

The docs about the [addon manager](#addon-management) describe in more detail how to define a addon resource with regards to versioning.
The docs about the [addon management](#addon-management) describe in more detail how to define a addon resource with regards to versioning.
Here is a minimal example of an addon manifest that would install two different addons.

```yaml
@@ -155,7 +155,7 @@ The project is created by wearemolecule, and maintained at
kubectl apply -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/route53-mapper/v1.3.0.yml
```

## Addons Management
## Addon Management

kops incorporates management of some addons; we _have_ to manage some addons which are needed before
the kubernetes API is functional.
@@ -5,7 +5,7 @@ At some point you will almost definitely want to upgrade the Kubernetes version
- Upgrade an existing `kube-up` managed cluster to one managed by `kops`
  + [The simple method with downtime](#kube-up---kops-downtime)
  + [The more complex method with zero-downtime](#kube-up---kops-sans-downtime)
- [Upgrade a `kops` cluster from one Kubernetes version to another](cluster_upgrades_and_migrations.md)
- [Upgrade a `kops` cluster from one Kubernetes version to another](updates_and_upgrades.md)

## `kube-up` -> `kops`, with downtime

@@ -149,6 +149,7 @@ k8s.io/kops/protokube/pkg/gossip/aws
k8s.io/kops/protokube/pkg/gossip/dns
k8s.io/kops/protokube/pkg/gossip/dns/hosts
k8s.io/kops/protokube/pkg/gossip/dns/provider
k8s.io/kops/protokube/pkg/gossip/do
k8s.io/kops/protokube/pkg/gossip/gce
k8s.io/kops/protokube/pkg/gossip/memberlist
k8s.io/kops/protokube/pkg/gossip/mesh
@@ -2221,7 +2221,7 @@ spec:
properties:
imageName:
description: 'The container image name to use, which by default
is: 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.4'
is: 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.0.0'
type: string
type: object
calico:
@@ -76,7 +76,7 @@ func (b *CloudConfigBuilder) Build(c *fi.ModelBuilderContext) error {
lines = append(lines, "ElbSecurityGroup = "+*cloudConfig.ElbSecurityGroup)
}
case "vsphere":
vm_uuid, err := getVMUUID(b.Cluster.Spec.KubernetesVersion)
VMUUID, err := getVMUUID(b.Cluster.Spec.KubernetesVersion)
if err != nil {
return err
}
@@ -99,8 +99,8 @@ func (b *CloudConfigBuilder) Build(c *fi.ModelBuilderContext) error {
if cloudConfig.VSphereDatastore != nil {
lines = append(lines, "datastore = "+*cloudConfig.VSphereDatastore)
}
if vm_uuid != "" {
lines = append(lines, "vm-uuid = "+strings.Trim(vm_uuid, "\n"))
if VMUUID != "" {
lines = append(lines, "vm-uuid = "+strings.Trim(VMUUID, "\n"))
}
// Disk Config for vSphere CloudProvider
// We need this to support Kubernetes vSphere CloudProvider < v1.5.3
@@ -198,11 +198,11 @@ func getVMUUID(kubernetesVersion string) (string, error) {

defer try.CloseFile(file)

vm_uuid, err := bufio.NewReader(file).ReadString('\n')
VMUUID, err := bufio.NewReader(file).ReadString('\n')
if err != nil {
return "", err
}
return vm_uuid, err
return VMUUID, err
}

return "", err
@@ -503,7 +503,7 @@ func (b *KubeAPIServerBuilder) buildAnnotations() map[string]string {
annotations := make(map[string]string)

if b.Cluster.Spec.API != nil {
if b.Cluster.Spec.API.LoadBalancer == nil || b.Cluster.Spec.API.LoadBalancer.UseForInternalApi != true {
if b.Cluster.Spec.API.LoadBalancer == nil || !b.Cluster.Spec.API.LoadBalancer.UseForInternalApi {
annotations["dns.alpha.kubernetes.io/internal"] = b.Cluster.Spec.MasterInternalName
}

@@ -44,7 +44,7 @@ var _ fi.ModelBuilder = &KubeAPIServerBuilder{}
// @TODO we should probably change this to a daemonset in the future and follow the kubeadm path
func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error {

if b.Cluster.Spec.KubeProxy.Enabled != nil && *b.Cluster.Spec.KubeProxy.Enabled == false {
if b.Cluster.Spec.KubeProxy.Enabled != nil && !*b.Cluster.Spec.KubeProxy.Enabled {
klog.V(2).Infof("Kube-proxy is disabled, will not create configuration for it.")
return nil
}
@@ -380,7 +380,6 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*Protokube
f.DNSProvider = fi.String("aws-route53")
case kops.CloudProviderDO:
f.DNSProvider = fi.String("digitalocean")
f.ClusterID = fi.String(t.Cluster.Name)
case kops.CloudProviderGCE:
f.DNSProvider = fi.String("google-clouddns")
case kops.CloudProviderVSphere:
@@ -79,9 +79,8 @@ func (s *s3PublicAclStrategy) GetACL(p vfs.Path, cluster *kops.Cluster) (vfs.ACL
return &vfs.S3Acl{
RequestACL: values.String("public-read"),
}, nil
} else {
klog.V(8).Infof("path %q is not inside the file registry %q, not setting public-read acl", u.Path, config.Path)
}
klog.V(8).Infof("path %q is not inside the file registry %q, not setting public-read acl", u.Path, config.Path)

return nil, nil
}
@@ -140,9 +140,8 @@ func (v *KubernetesVersionSpec) FindRecommendedUpgrade(version semver.Version) (
if recommendedVersion.GT(version) {
klog.V(2).Infof("RecommendedVersion=%q, Have=%q. Recommending upgrade", recommendedVersion, version)
return recommendedVersion, nil
} else {
klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
}
klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
return nil, nil
}

@@ -160,9 +159,8 @@ func (v *KopsVersionSpec) FindRecommendedUpgrade(version semver.Version) (*semve
if recommendedVersion.GT(version) {
klog.V(2).Infof("RecommendedVersion=%q, Have=%q. Recommending upgrade", recommendedVersion, version)
return &recommendedVersion, nil
} else {
klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
}
klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
return nil, nil
}

@@ -180,9 +178,8 @@ func (v *KubernetesVersionSpec) IsUpgradeRequired(version semver.Version) (bool,
if requiredVersion.GT(version) {
klog.V(2).Infof("RequiredVersion=%q, Have=%q. Requiring upgrade", requiredVersion, version)
return true, nil
} else {
klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
}
klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
return false, nil
}

@@ -200,9 +197,8 @@ func (v *KopsVersionSpec) IsUpgradeRequired(version semver.Version) (bool, error
if requiredVersion.GT(version) {
klog.V(2).Infof("RequiredVersion=%q, Have=%q. Requiring upgrade", requiredVersion, version)
return true, nil
} else {
klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
}
klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
return false, nil
}
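All four hunks above hinge on the same semver comparison. A small self-contained illustration using the blang/semver package these files already import (the version numbers here are made up):

```go
package main

import (
    "fmt"

    "github.com/blang/semver"
)

func main() {
    have := semver.MustParse("1.11.3")
    recommended := semver.MustParse("1.12.1")
    // GT is the check driving both FindRecommendedUpgrade and
    // IsUpgradeRequired: act only when the channel version is newer.
    fmt.Println(recommended.GT(have)) // true -> recommend the upgrade
}
```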
@@ -151,7 +151,7 @@ type RomanaNetworkingSpec struct {
// AmazonVPCNetworkingSpec declares that we want Amazon VPC CNI networking
type AmazonVPCNetworkingSpec struct {
// The container image name to use, which by default is:
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.4
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.0
ImageName string `json:"imageName,omitempty"`
}

@@ -151,7 +151,7 @@ type RomanaNetworkingSpec struct {
// AmazonVPCNetworkingSpec declares that we want Amazon VPC CNI networking
type AmazonVPCNetworkingSpec struct {
// The container image name to use, which by default is:
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.4
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.0.0
ImageName string `json:"imageName,omitempty"`
}

@@ -151,7 +151,7 @@ type RomanaNetworkingSpec struct {
// AmazonVPCNetworkingSpec declares that we want Amazon VPC CNI networking
type AmazonVPCNetworkingSpec struct {
// The container image name to use, which by default is:
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.4
// 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.0.0
ImageName string `json:"imageName,omitempty"`
}
@@ -79,18 +79,23 @@ func (c *CloudInstanceGroup) NewCloudInstanceGroupMember(instanceId string, newG
func (c *CloudInstanceGroup) Status() string {
if len(c.NeedUpdate) == 0 {
return "Ready"
} else {
return "NeedsUpdate"
}
return "NeedsUpdate"
}

// GetNodeMap returns a list of nodes keyed by their external id
func GetNodeMap(nodes []v1.Node, cluster *kops.Cluster) map[string]*v1.Node {
nodeMap := make(map[string]*v1.Node)
delimiter := "/"
// Alicloud CCM uses the "{region}.{instance-id}" of a instance as ProviderID.
// We need to set delimiter to "." for Alicloud.
if kops.CloudProviderID(cluster.Spec.CloudProvider) == kops.CloudProviderALI {
delimiter = "."
}

for i := range nodes {
node := &nodes[i]

providerIDs := strings.Split(node.Spec.ProviderID, "/")
providerIDs := strings.Split(node.Spec.ProviderID, delimiter)
instanceID := providerIDs[len(providerIDs)-1]
nodeMap[instanceID] = node
}
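The delimiter change is the heart of this hunk: Alicloud's CCM reports ProviderIDs as "{region}.{instance-id}" rather than a slash-separated path. A standalone sketch of the extraction (the function name and sample IDs are illustrative, not the kops API):

```go
package main

import (
    "fmt"
    "strings"
)

// instanceIDFromProviderID returns the last ProviderID segment, splitting
// on "." for Alicloud and on "/" for everything else.
func instanceIDFromProviderID(providerID string, alicloud bool) string {
    delimiter := "/"
    if alicloud {
        delimiter = "."
    }
    parts := strings.Split(providerID, delimiter)
    return parts[len(parts)-1]
}

func main() {
    fmt.Println(instanceIDFromProviderID("aws:///us-east-1a/i-0123456789", false)) // i-0123456789
    fmt.Println(instanceIDFromProviderID("cn-hangzhou.i-abcdef", true))            // i-abcdef
}
```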
@@ -131,10 +131,9 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
if err = r.validateCluster(rollingUpdateData, cluster); err != nil {
if rollingUpdateData.FailOnValidate {
return err
} else {
klog.V(2).Infof("Ignoring cluster validation error: %v", err)
klog.Info("Cluster validation failed, but proceeding since fail-on-validate-error is set to false")
}
klog.V(2).Infof("Ignoring cluster validation error: %v", err)
klog.Info("Cluster validation failed, but proceeding since fail-on-validate-error is set to false")
}
}
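This is the hunk the fix-roll-validation branch touches: a failed cluster validation aborts the rolling update only when fail-on-validate-error is set. A compilable sketch of that gate, with illustrative names standing in for the kops types:

```go
package main

import (
    "errors"
    "log"
)

// validationGate returns the validation error only when failOnValidate is
// true; otherwise it logs the error and lets the rolling update continue.
func validationGate(validate func() error, failOnValidate bool) error {
    if err := validate(); err != nil {
        if failOnValidate {
            return err
        }
        log.Printf("Ignoring cluster validation error: %v", err)
    }
    return nil
}

func main() {
    err := validationGate(func() error { return errors.New("node not ready") }, false)
    log.Println("proceeding, err =", err) // proceeding, err = <nil>
}
```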
@@ -160,9 +159,8 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
if err = r.DrainNode(u, rollingUpdateData); err != nil {
if rollingUpdateData.FailOnDrainError {
return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
} else {
klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
}
klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
}
} else {
klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)
@@ -127,9 +127,8 @@ func (b *ScalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {

if err != nil {
return err
} else {
launchConfiguration.SSHKey = b.LinkToSSHKey()
}
launchConfiguration.SSHKey = b.LinkToSSHKey()
if launchConfiguration.UserData, err = b.BootstrapScript.ResourceNodeUp(ig, b.Cluster); err != nil {
return err
}
@@ -47,6 +47,7 @@ func (d *DropletBuilder) Build(c *fi.ModelBuilderContext) error {

// replace "." with "-" since DO API does not accept "."
clusterTag := do.TagKubernetesClusterNamePrefix + ":" + strings.Replace(d.ClusterName(), ".", "-", -1)
clusterMasterTag := do.TagKubernetesClusterMasterPrefix + ":" + strings.Replace(d.ClusterName(), ".", "-", -1)

masterIndexCount := 0
// In the future, DigitalOcean will use Machine API to manage groups,

@@ -71,6 +72,7 @@ func (d *DropletBuilder) Build(c *fi.ModelBuilderContext) error {
masterIndexCount++
clusterTagIndex := do.TagKubernetesClusterIndex + ":" + strconv.Itoa(masterIndexCount)
droplet.Tags = append(droplet.Tags, clusterTagIndex)
droplet.Tags = append(droplet.Tags, clusterMasterTag)
}

userData, err := d.BootstrapScript.ResourceNodeUp(ig, d.Cluster)
@@ -329,21 +329,18 @@ func (b *InstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, igs ..
if err != nil {
return err
}
break

case InstanceGroupLabelUtilizeReservedInstances:
ocean.UtilizeReservedInstances, err = parseBool(v)
if err != nil {
return err
}
break

case InstanceGroupLabelFallbackToOnDemand:
ocean.FallbackToOnDemand, err = parseBool(v)
if err != nil {
return err
}
break
}
}

@@ -640,7 +637,6 @@ func (b *InstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *ko
return nil, err
}
opts.Enabled = fi.Bool(!fi.BoolValue(v))
break
}

case InstanceGroupLabelAutoScalerDefaultNodeLabels:

@@ -650,7 +646,6 @@ func (b *InstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *ko
return nil, err
}
defaultNodeLabels = fi.BoolValue(v)
break
}

case InstanceGroupLabelAutoScalerHeadroomCPUPerUnit:

@@ -663,7 +658,6 @@ func (b *InstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *ko
opts.Headroom = new(spotinsttasks.AutoScalerHeadroomOpts)
}
opts.Headroom.CPUPerUnit = fi.Int(int(fi.Int64Value(v)))
break
}

case InstanceGroupLabelAutoScalerHeadroomGPUPerUnit:

@@ -676,7 +670,6 @@ func (b *InstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *ko
opts.Headroom = new(spotinsttasks.AutoScalerHeadroomOpts)
}
opts.Headroom.GPUPerUnit = fi.Int(int(fi.Int64Value(v)))
break
}

case InstanceGroupLabelAutoScalerHeadroomMemPerUnit:

@@ -689,7 +682,6 @@ func (b *InstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *ko
opts.Headroom = new(spotinsttasks.AutoScalerHeadroomOpts)
}
opts.Headroom.MemPerUnit = fi.Int(int(fi.Int64Value(v)))
break
}

case InstanceGroupLabelAutoScalerHeadroomNumOfUnits:

@@ -702,7 +694,6 @@ func (b *InstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *ko
opts.Headroom = new(spotinsttasks.AutoScalerHeadroomOpts)
}
opts.Headroom.NumOfUnits = fi.Int(int(fi.Int64Value(v)))
break
}

case InstanceGroupLabelAutoScalerScaleDownMaxPercentage:

@@ -715,7 +706,6 @@ func (b *InstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *ko
opts.Down = new(spotinsttasks.AutoScalerDownOpts)
}
opts.Down.MaxPercentage = fi.Int(int(fi.Int64Value(v)))
break
}

case InstanceGroupLabelAutoScalerScaleDownEvaluationPeriods:

@@ -728,7 +718,6 @@ func (b *InstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *ko
opts.Down = new(spotinsttasks.AutoScalerDownOpts)
}
opts.Down.EvaluationPeriods = fi.Int(int(fi.Int64Value(v)))
break
}
}
}
@@ -102,9 +102,8 @@ func parsePEMCertificate(pemData []byte) (*x509.Certificate, error) {
if block.Type == "CERTIFICATE" {
klog.V(10).Infof("Parsing pem block: %q", block.Type)
return x509.ParseCertificate(block.Bytes)
} else {
klog.Infof("Ignoring unexpected PEM block: %q", block.Type)
}
klog.Infof("Ignoring unexpected PEM block: %q", block.Type)

pemData = rest
}
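For context, the surrounding loop walks a PEM bundle block by block. A self-contained sketch of that pattern using encoding/pem (logging omitted; the function name and error message are illustrative):

```go
package certs

import (
    "crypto/x509"
    "encoding/pem"
    "fmt"
)

// firstCertificate scans pemData and parses the first CERTIFICATE block,
// skipping any other block types it encounters along the way.
func firstCertificate(pemData []byte) (*x509.Certificate, error) {
    for {
        block, rest := pem.Decode(pemData)
        if block == nil {
            return nil, fmt.Errorf("no CERTIFICATE block found")
        }
        if block.Type == "CERTIFICATE" {
            return x509.ParseCertificate(block.Bytes)
        }
        pemData = rest // ignore the unexpected PEM block and keep scanning
    }
}
```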
@@ -453,11 +453,10 @@ func DeleteRoleRam(cloud fi.Cloud, r *resources.Resource) error {
response, err := c.RamClient().ListPoliciesForRole(roleQueryRequest)
if err != nil {
return fmt.Errorf("err listing Policies for role:%v", err)
} else {
if len(response.Policies.Policy) != 0 {
for _, policy := range response.Policies.Policy {
policies = append(policies, policy.PolicyName)
}
}
}
if len(response.Policies.Policy) != 0 {
for _, policy := range response.Policies.Policy {
policies = append(policies, policy.PolicyName)
}
}

@@ -26,6 +26,7 @@ import (

"github.com/digitalocean/godo"

"k8s.io/klog"
"k8s.io/kops/dns-controller/pkg/dns"
"k8s.io/kops/pkg/resources"
"k8s.io/kops/upup/pkg/fi"

@@ -195,6 +196,11 @@ func listDNS(cloud fi.Cloud, clusterName string) ([]*resources.Resource, error)
}

if domainName == "" {
if strings.HasSuffix(clusterName, ".k8s.local") {
klog.Info("Domain Name is empty. Ok to have an empty domain name since cluster is configured as gossip cluster.")
return nil, nil
}

return nil, fmt.Errorf("failed to find domain for cluster: %s", clusterName)
}

@@ -54,15 +54,14 @@ func (s *StringOrSlice) UnmarshalJSON(value []byte) error {
return nil
}
return nil
} else {
s.forceEncodeAsArray = false
var stringValue string
if err := json.Unmarshal(value, &stringValue); err != nil {
return err
}
s.values = []string{stringValue}
return nil
}
s.forceEncodeAsArray = false
var stringValue string
if err := json.Unmarshal(value, &stringValue); err != nil {
return err
}
s.values = []string{stringValue}
return nil
}

// String returns the string value, or the Itoa of the int value.
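The method above accepts either a JSON array or a bare string. A minimal self-contained version of that accept-both pattern (a simplified type; the real one also tracks forceEncodeAsArray):

```go
package main

import (
    "encoding/json"
    "fmt"
)

type StringOrSlice struct {
    values []string
}

// UnmarshalJSON tries an array first and falls back to a single string.
func (s *StringOrSlice) UnmarshalJSON(value []byte) error {
    var list []string
    if err := json.Unmarshal(value, &list); err == nil {
        s.values = list
        return nil
    }
    var one string
    if err := json.Unmarshal(value, &one); err != nil {
        return err
    }
    s.values = []string{one}
    return nil
}

func main() {
    var a, b StringOrSlice
    _ = json.Unmarshal([]byte(`["x","y"]`), &a)
    _ = json.Unmarshal([]byte(`"z"`), &b)
    fmt.Println(a.values, b.values) // [x y] [z]
}
```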
@@ -138,19 +138,21 @@ func run() error {
internalIP = awsVolumes.InternalIP()
}
} else if cloud == "digitalocean" {
if clusterID == "" {
klog.Error("digitalocean requires --cluster-id")
os.Exit(1)
}

doVolumes, err := protokube.NewDOVolumes(clusterID)
doVolumes, err := protokube.NewDOVolumes()
if err != nil {
klog.Errorf("Error initializing DigitalOcean: %q", err)
os.Exit(1)
}

volumes = doVolumes

if clusterID == "" {
clusterID, err = protokube.GetClusterID()
if err != nil {
klog.Errorf("Error getting clusterid: %s", err)
os.Exit(1)
}
}

if internalIP == nil {
internalIP, err = protokube.GetDropletInternalIP()
if err != nil {

@@ -158,7 +160,6 @@ func run() error {
os.Exit(1)
}
}

} else if cloud == "gce" {
gceVolumes, err := protokube.NewGCEVolumes()
if err != nil {

@@ -294,6 +295,12 @@ func run() error {
return err
}
gossipName = volumes.(*protokube.ALIVolumes).InstanceID()
} else if cloud == "digitalocean" {
gossipSeeds, err = volumes.(*protokube.DOVolumes).GossipSeeds()
if err != nil {
return err
}
gossipName = volumes.(*protokube.DOVolumes).InstanceName()
} else {
klog.Fatalf("seed provider for %q not yet implemented", cloud)
}
@@ -0,0 +1,13 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["seeds.go"],
    importpath = "k8s.io/kops/protokube/pkg/gossip/do",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/resources/digitalocean:go_default_library",
        "//protokube/pkg/gossip:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)
@@ -0,0 +1,74 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package do

import (
    "context"
    "fmt"
    "strings"

    "k8s.io/klog"
    "k8s.io/kops/pkg/resources/digitalocean"
    "k8s.io/kops/protokube/pkg/gossip"
)

type SeedProvider struct {
    cloud *digitalocean.Cloud
    tag   string
}

var _ gossip.SeedProvider = &SeedProvider{}

func (p *SeedProvider) GetSeeds() ([]string, error) {
    var seeds []string

    droplets, _, err := p.cloud.Droplets().List(context.TODO(), nil)

    if err != nil {
        return nil, fmt.Errorf("Droplets.ListByTag returned error: %v", err)
    }

    for _, droplet := range droplets {
        for _, dropTag := range droplet.Tags {
            klog.V(4).Infof("Get Seeds - droplet found=%s,SeedProvider Tag=%s", dropTag, p.tag)
            if strings.Contains(dropTag, strings.Replace(p.tag, ".", "-", -1)) {
                klog.V(4).Infof("Tag matched for droplet tag =%s. Getting private IP", p.tag)
                ip, err := droplet.PrivateIPv4()
                if err == nil {
                    klog.V(4).Infof("Appending a seed for cluster tag:%s, with ip=%s", p.tag, ip)
                    seeds = append(seeds, ip)
                } else {
                    klog.V(4).Infof("Ah ...Private IP failed for tag=%s, error=%v", p.tag, err)
                }
            } else {
                klog.V(4).Infof("Tag NOT matched for droplet tag =%s. and pTag=%s", dropTag, p.tag)
            }
        }
    }

    klog.V(4).Infof("Get seeds function done now")
    return seeds, nil
}

func NewSeedProvider(cloud *digitalocean.Cloud, tag string) (*SeedProvider, error) {
    klog.V(4).Infof("Trying new seed provider with cluster tag:%s", tag)

    return &SeedProvider{
        cloud: cloud,
        tag:   tag,
    }, nil
}
@@ -114,10 +114,9 @@ func (p *peer) OnGossip(buf []byte) (delta mesh.GossipData, err error) {
// per OnGossip requirements
klog.V(4).Infof("OnGossip %v => delta empty", message)
return nil, nil
} else {
klog.V(4).Infof("OnGossip %v => delta %v", message, deltas)
return deltas, nil
}
klog.V(4).Infof("OnGossip %v => delta %v", message, deltas)
return deltas, nil
}

// Merge the gossiped data represented by buf into our state.
@@ -40,6 +40,7 @@ go_library(
"//protokube/pkg/gossip/ali:go_default_library",
"//protokube/pkg/gossip/aws:go_default_library",
"//protokube/pkg/gossip/dns:go_default_library",
"//protokube/pkg/gossip/do:go_default_library",
"//protokube/pkg/gossip/gce:go_default_library",
"//protokube/pkg/gossip/openstack:go_default_library",
"//upup/pkg/fi/cloudup/aliup:go_default_library",
@@ -423,11 +423,10 @@ func (a *AWSVolumes) AttachVolume(volume *Volume) error {

volume.LocalDevice = device
return nil
} else {
a.releaseDevice(device, volumeID)

return fmt.Errorf("Unable to attach volume %q, was attached to %q", volumeID, v.AttachedTo)
}
a.releaseDevice(device, volumeID)

return fmt.Errorf("Unable to attach volume %q, was attached to %q", volumeID, v.AttachedTo)
}

switch v.Status {
@@ -31,12 +31,15 @@ import (

"k8s.io/kops/pkg/resources/digitalocean"
"k8s.io/kops/protokube/pkg/etcd"
"k8s.io/kops/protokube/pkg/gossip"
gossipdo "k8s.io/kops/protokube/pkg/gossip/do"
)

const (
dropletRegionMetadataURL = "http://169.254.169.254/metadata/v1/region"
dropletNameMetadataURL = "http://169.254.169.254/metadata/v1/hostname"
dropletIDMetadataURL = "http://169.254.169.254/metadata/v1/id"
dropletIDMetadataTags = "http://169.254.169.254/metadata/v1/tags"
dropletInternalIPMetadataURL = "http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address"
localDevicePrefix = "/dev/disk/by-id/scsi-0DO_Volume_"
)

@@ -48,11 +51,38 @@ type DOVolumes struct {
region string
dropletName string
dropletID int
dropletTags []string
}

var _ Volumes = &DOVolumes{}

func NewDOVolumes(clusterID string) (*DOVolumes, error) {
func GetClusterID() (string, error) {
var clusterID = ""

dropletTags, err := getMetadataDropletTags()
if err != nil {
return clusterID, fmt.Errorf("GetClusterID failed - unable to retrieve droplet tags: %s", err)
}

for _, dropletTag := range dropletTags {
if strings.Contains(dropletTag, "KubernetesCluster:") {
clusterID = strings.Replace(dropletTag, ".", "-", -1)

tokens := strings.Split(clusterID, ":")
if len(tokens) != 2 {
return clusterID, fmt.Errorf("invalid clusterID (expected two tokens): %q", clusterID)
}

clusterID := tokens[1]

return clusterID, nil
}
}

return clusterID, fmt.Errorf("failed to get droplet clusterID")
}
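GetClusterID above recovers the cluster name from a droplet tag of the form "KubernetesCluster:name" (dots are stored as dashes, since DO tags cannot contain "."). A compact illustration with a made-up tag value:

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    // Hypothetical droplet tag as returned by the metadata service.
    tag := "KubernetesCluster:mycluster-k8s-local"
    tokens := strings.Split(strings.Replace(tag, ".", "-", -1), ":")
    if len(tokens) == 2 && tokens[0] == "KubernetesCluster" {
        fmt.Println("clusterID:", tokens[1]) // clusterID: mycluster-k8s-local
    }
}
```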

func NewDOVolumes() (*DOVolumes, error) {
region, err := getMetadataRegion()
if err != nil {
return nil, fmt.Errorf("failed to get droplet region: %s", err)

@@ -78,12 +108,23 @@ func NewDOVolumes(clusterID string) (*DOVolumes, error) {
return nil, fmt.Errorf("failed to initialize digitalocean cloud: %s", err)
}

dropletTags, err := getMetadataDropletTags()
if err != nil {
return nil, fmt.Errorf("failed to get droplet tags: %s", err)
}

clusterID, err := GetClusterID()
if err != nil {
return nil, fmt.Errorf("failed to get clusterID: %s", err)
}

return &DOVolumes{
Cloud: cloud,
ClusterID: clusterID,
dropletID: dropletIDInt,
dropletName: dropletName,
region: region,
dropletTags: dropletTags,
}, nil
}

@@ -236,6 +277,20 @@ func getLocalDeviceName(vol *godo.Volume) string {
return localDevicePrefix + vol.Name
}

func (d *DOVolumes) GossipSeeds() (gossip.SeedProvider, error) {
for _, dropletTag := range d.dropletTags {
if strings.Contains(dropletTag, strings.Replace(d.ClusterID, ".", "-", -1)) {
return gossipdo.NewSeedProvider(d.Cloud, dropletTag)
}
}

return nil, fmt.Errorf("could not determine a matching droplet tag for gossip seeding")
}

func (d *DOVolumes) InstanceName() string {
return d.dropletName
}

// GetDropletInternalIP gets the private IP of the droplet running this program
// This function is exported so it can be called from protokube
func GetDropletInternalIP() (net.IP, error) {

@@ -259,6 +314,12 @@ func getMetadataDropletID() (string, error) {
return getMetadata(dropletIDMetadataURL)
}

func getMetadataDropletTags() ([]string, error) {

tagString, err := getMetadata(dropletIDMetadataTags)
return strings.Split(tagString, "\n"), err
}

func getMetadata(url string) (string, error) {
resp, err := http.Get(url)
if err != nil {
@@ -178,9 +178,8 @@ func (k *VolumeMountController) safeFormatAndMount(volume *Volume, mountpoint st
}

return fmt.Errorf("found multiple existing mounts of %q at %q", device, mountpoint)
} else {
klog.Infof("Found existing mount of %q at %q", device, mountpoint)
}
klog.Infof("Found existing mount of %q at %q", device, mountpoint)
}

// If we're containerized we also want to mount the device (again) into our container
@@ -1,5 +1,4 @@
# Vendored from https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.5.4/config/v1.5/aws-k8s-cni.yaml
---
# Vendored from https://github.com/aws/amazon-vpc-cni-k8s/blob/v1.5.0/config/v1.5/aws-k8s-cni.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:

@@ -79,7 +78,7 @@ spec:
tolerations:
- operator: Exists
containers:
- image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.4" }}"
- image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.0" }}"
imagePullPolicy: Always
ports:
- containerPort: 61678
@@ -817,6 +817,20 @@ spec:
value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}"
- name: FELIX_HEALTHENABLED
value: "true"

# kops additions
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
@@ -257,5 +257,6 @@ spec:
hostPath:
path: /run/xtables.lock
type: FileOrCreate
priorityClassName: system-node-critical
updateStrategy:
type: RollingUpdate
@@ -663,7 +663,7 @@ func awsBuildCloudInstanceGroup(c AWSCloud, ig *kops.InstanceGroup, g *autoscali
}
// @step: check if the instance is terminating
if aws.StringValue(i.LifecycleState) == autoscaling.LifecycleStateTerminating {
klog.Warningf("ignoring instance as it is terminating: %s in autoscaling group: %s", id, cg.HumanName)
continue
}
currentConfigName := findInstanceLaunchConfiguration(i)
@@ -85,7 +85,7 @@ func findEtcdStatus(c AWSCloud, cluster *kops.Cluster) ([]kops.EtcdClusterStatus
v := aws.StringValue(tag.Value)

if strings.HasPrefix(k, TagNameEtcdClusterPrefix) {
etcdClusterName := strings.TrimPrefix(k, TagNameEtcdClusterPrefix)
etcdClusterName = strings.TrimPrefix(k, TagNameEtcdClusterPrefix)
etcdClusterSpec, err = etcd.ParseEtcdClusterSpec(etcdClusterName, v)
if err != nil {
return nil, fmt.Errorf("error parsing etcd cluster tag %q on volume %q: %v", v, volumeID, err)
@@ -696,7 +696,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
"k8s-1.6": "2.3.0-kops.3",
"k8s-1.7": "2.5.2-kops.2",
"k8s-1.8": "2.5.2-kops.2",
"k8s-1.12": "2.5.2-kops.2",
"k8s-1.12": "2.5.2-kops.3",
}

{

@@ -828,7 +828,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
"k8s-1.6": "2.6.9-kops.1",
"k8s-1.7": "2.6.12-kops.1",
"k8s-1.7-v3": "3.8.0-kops.1",
"k8s-1.12": "3.9.1-kops.1",
"k8s-1.12": "3.9.1-kops.2",
}

{

@@ -1050,19 +1050,15 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {

if b.cluster.Spec.Networking.AmazonVPC != nil {
key := "networking.amazon-vpc-routed-eni"
versions := map[string]string{
"k8s-1.7": "1.5.0-kops.1",
"k8s-1.8": "1.5.0-kops.1",
"k8s-1.10": "1.5.0-kops.1",
"k8s-1.12": "1.5.4-kops.1",
}
version := "1.5.0-kops.1"

{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"

addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.8.0",

@@ -1076,7 +1072,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {

addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.10.0",

@@ -1090,7 +1086,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {

addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.10.0 <1.12.0",

@@ -1104,7 +1100,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {

addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.12.0",
@@ -27,6 +27,7 @@ const TagKubernetesClusterIndex = "k8s-index"
const TagNameEtcdClusterPrefix = "etcdCluster-"
const TagNameRolePrefix = "k8s.io/role/"
const TagKubernetesClusterNamePrefix = "KubernetesCluster"
const TagKubernetesClusterMasterPrefix = "KubernetesCluster-Master"

func SafeClusterName(clusterName string) string {
// DO does not support . in tags / names
@@ -42,7 +42,7 @@ var floatingBackoff = wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Jitter: 0.1,
Steps: 10,
Steps: 20,
}

func (c *openstackCloud) CreateInstance(opt servers.CreateOptsBuilder) (*servers.Server, error) {
@@ -147,8 +147,8 @@ spec:
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: networking.weave/k8s-1.12.yaml
manifestHash: 11e566a259bbb5f066cf2b06cd8832e74072a900
manifestHash: 96334bfcfa6a3ec9791b50c94674a8821cb6ad67
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
version: 2.5.2-kops.2
version: 2.5.2-kops.3
@@ -511,12 +511,7 @@ func evaluateHostnameOverride(hostnameOverride string) (string, error) {
}
instanceID := string(instanceIDBytes)

hostname := fmt.Sprintf("%s.%s", az, instanceID)
if hostname == "" {
return "", errors.New("hostname for Alicloud ECS was empty")
}

return hostname, nil
return fmt.Sprintf("%s.%s", az, instanceID), nil
}

return hostnameOverride, nil