From ef30f0ea49db62cb753ba664f670b55fe3f668ce Mon Sep 17 00:00:00 2001
From: Mike Splain
Date: Fri, 10 Nov 2017 11:32:57 -0500
Subject: [PATCH 01/27] Add homebrew devel/HEAD notes to docs and cleanup

Added to brew in https://github.com/Homebrew/homebrew-core/pull/20464
---
 docs/development/homebrew.md | 34 +++++++++++++++++++++++++++++++++-
 docs/install.md              |  2 ++
 docs/update_kops.md          | 36 ++++++++++++++++++++++++++++--------
 3 files changed, 63 insertions(+), 9 deletions(-)

diff --git a/docs/development/homebrew.md b/docs/development/homebrew.md
index d9887195ef..9b01223572 100644
--- a/docs/development/homebrew.md
+++ b/docs/development/homebrew.md
@@ -1,3 +1,26 @@
+# Installing Kops via Homebrew
+
+Homebrew makes installing kops [very simple for MacOS.](../install.md)
+```bash
+brew update && brew install kops
+```
+
+Development Releases and master can also be installed via Homebrew very easily:
+```bash
+# Development Release
+brew update && brew install kops --devel
+# HEAD of master
+brew update && brew install kops --HEAD
+```
+
+Note: if you already have kops installed, you need to substitute `upgrade` for `install`.
+
+You can switch between development and stable releases with:
+```bash
+brew switch kops 1.7.1
+brew switch kops 1.8.0-beta.1
+```
+
 # Releasing kops to Brew
 
 Submitting a new release of kops to Homebrew is very simple.
@@ -8,11 +31,20 @@ Submitting a new release of kops to Homebrew is very simple.
 
 This will automatically update the provided fields and open a PR for you.
 More details on this script are located [here.](https://github.com/Homebrew/brew/blob/master/Library/Homebrew/dev-cmd/bump-formula-pr.rb)
 
+We now include both major and development releases in homebrew. A development version can be updated by adding the `--devel` flag.
+
 Example usage:
-```
+```bash
+# Major Version
 brew bump-formula-pr \
   --url=https://github.com/kubernetes/kops/archive/1.7.1.tar.gz \
   --sha256=044c5c7a737ed3acf53517e64bb27d3da8f7517d2914df89efeeaf84bc8a722a
+
+# Development Version
+brew bump-formula-pr \
+  --devel \
+  --url=https://github.com/kubernetes/kops/archive/1.8.0-beta.1.tar.gz \
+  --sha256=81026d6c1cd7b3898a88275538a7842b4bd8387775937e0528ccb7b83948abf1
 ```
 
 * Update the URL variable to the tar.gz of the new release source code
diff --git a/docs/install.md b/docs/install.md
index 7143f78dc9..f13d382a8a 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -8,6 +8,8 @@ From Homebrew:
 brew update && brew install kops
 ```
 
+Developers can also easily install [development releases](development/homebrew.md).
+
 From Github:
 
 ```bash
diff --git a/docs/update_kops.md b/docs/update_kops.md
index 04c03ef485..d3b3831e6a 100644
--- a/docs/update_kops.md
+++ b/docs/update_kops.md
@@ -1,13 +1,33 @@
-## How to update Kops - Kubernetes Ops
+# Updating kops (Binaries)
 
-Update the latest source code from kubernetes/kops
+## MacOS
 
-```
-cd ${GOPATH}/src/k8s.io/kops/
-git pull && make
-```
+From Homebrew:
 
-Alternatively, if you installed from Homebrew
-```
+```bash
 brew update && brew upgrade kops
 ```
+
+From Github:
+
+```bash
+rm -rf /usr/local/bin/kops
+wget -O kops https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-darwin-amd64
+chmod +x ./kops
+sudo mv ./kops /usr/local/bin/
+```
+
+You can also rerun [these steps](development/building.md) if previously built from source.
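+
+To verify what the upgrade installed, `kops version` prints the version of the binary now on your PATH (a quick sanity check; the exact output format can vary between releases):
+
+```bash
+kops version
+```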
+
+## Linux
+
+From Github:
+
+```bash
+rm -rf /usr/local/bin/kops
+wget -O kops https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64
+chmod +x ./kops
+sudo mv ./kops /usr/local/bin/
+```
+
+You can also rerun [these steps](development/building.md) if previously built from source.

From 04ef7ee0a335e1b686a10838e1792d807e98adc9 Mon Sep 17 00:00:00 2001
From: Caleb Gilmour
Date: Thu, 16 Nov 2017 23:37:04 +0000
Subject: [PATCH 02/27] Version and validation updates for romana networking.

---
 pkg/apis/kops/validation/legacy.go               |  4 ++--
 ...s-1.6.yaml.template => k8s-1.7.yaml.template} | 16 +++++++++++-----
 upup/pkg/fi/cloudup/bootstrapchannelbuilder.go   |  8 ++++----
 3 files changed, 17 insertions(+), 11 deletions(-)
 rename upup/models/cloudup/resources/addons/networking.romana/{k8s-1.6.yaml.template => k8s-1.7.yaml.template} (94%)

diff --git a/pkg/apis/kops/validation/legacy.go b/pkg/apis/kops/validation/legacy.go
index c9a1b55072..2f8769c912 100644
--- a/pkg/apis/kops/validation/legacy.go
+++ b/pkg/apis/kops/validation/legacy.go
@@ -469,9 +469,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
 		}
 	}
 
-	if kubernetesRelease.LT(semver.MustParse("1.6.0")) {
+	if kubernetesRelease.LT(semver.MustParse("1.7.0")) {
 		if c.Spec.Networking != nil && c.Spec.Networking.Romana != nil {
-			return field.Invalid(fieldSpec.Child("Networking"), "romana", "romana networking is not supported with kubernetes versions 1.5 or lower")
+			return field.Invalid(fieldSpec.Child("Networking"), "romana", "romana networking is not supported with kubernetes versions 1.6 or lower")
 		}
 	}
 
diff --git a/upup/models/cloudup/resources/addons/networking.romana/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.romana/k8s-1.7.yaml.template
similarity index 94%
rename from upup/models/cloudup/resources/addons/networking.romana/k8s-1.6.yaml.template
rename to upup/models/cloudup/resources/addons/networking.romana/k8s-1.7.yaml.template
index 86a6c42b99..39ab2ea366 100644
--- a/upup/models/cloudup/resources/addons/networking.romana/k8s-1.6.yaml.template
+++ b/upup/models/cloudup/resources/addons/networking.romana/k8s-1.7.yaml.template
@@ -137,7 +137,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: romana-daemon
-        image: quay.io/romana/daemon:v2.0-preview.2
+        image: quay.io/romana/daemon:v2.0.0
         imagePullPolicy: Always
         resources:
           requests:
@@ -170,7 +170,7 @@ spec:
         effect: NoSchedule
       containers:
      - name: romana-listener
-        image: quay.io/romana/listener:v2.0-preview.2
+        image: quay.io/romana/listener:v2.0.0
         imagePullPolicy: Always
         resources:
           requests:
@@ -185,6 +185,8 @@ metadata:
   name: romana-agent
   namespace: kube-system
 spec:
+  updateStrategy:
+    type: RollingUpdate
   template:
     metadata:
       labels:
@@ -200,7 +202,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: romana-agent
-        image: quay.io/romana/agent:v2.0-preview.2
+        image: quay.io/romana/agent:v2.0.0
         imagePullPolicy: Always
         resources:
           requests:
@@ -213,6 +215,10 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODEIP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         args:
         - --service-cluster-ip-range={{ .ServiceClusterIPRange }}
         securityContext:
@@ -299,7 +305,7 @@ spec:
         effect: NoSchedule
       containers:
      - name: romana-aws
-        image: quay.io/romana/aws:v2.0-preview.2
+        image: quay.io/romana/aws:v2.0.0
         imagePullPolicy: Always
         resources:
           requests:
@@ -328,7 +334,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: 
romana-vpcrouter - image: quay.io/romana/vpcrouter-romana-plugin + image: quay.io/romana/vpcrouter-romana-plugin:1.1.12 imagePullPolicy: Always resources: requests: diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index 2255c2162e..f14e203a34 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -598,18 +598,18 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri if b.cluster.Spec.Networking.Romana != nil { key := "networking.romana" - version := "v2.0-preview.3" + version := "v2.0.0" { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" + location := key + "/k8s-1.7.yaml" + id := "k8s-1.7" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ Name: fi.String(key), Version: fi.String(version), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0", + KubernetesVersion: ">=1.7.0", Id: id, }) manifests[key+"-"+id] = "addons/" + location From df550ec2b5a44f3f73d1216cc4986f61a345001e Mon Sep 17 00:00:00 2001 From: georgebuckerfield Date: Fri, 17 Nov 2017 17:27:16 +0000 Subject: [PATCH 03/27] Set the Shared field of RouteTable resources when listing --- pkg/resources/aws.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/resources/aws.go b/pkg/resources/aws.go index 571e54dd39..45352b9dba 100644 --- a/pkg/resources/aws.go +++ b/pkg/resources/aws.go @@ -220,7 +220,7 @@ func addUntaggedRouteTables(cloud awsup.AWSCloud, clusterName string, resources continue } - t := buildTrackerForRouteTable(rt) + t := buildTrackerForRouteTable(rt, clusterName) if resources[t.Type+":"+t.ID] == nil { resources[t.Type+":"+t.ID] = t } @@ -973,19 +973,20 @@ func ListRouteTables(cloud fi.Cloud, clusterName string) ([]*Resource, error) { var resourceTrackers []*Resource for _, rt := range routeTables { - resourceTracker := buildTrackerForRouteTable(rt) + resourceTracker := buildTrackerForRouteTable(rt, clusterName) resourceTrackers = append(resourceTrackers, resourceTracker) } return resourceTrackers, nil } -func buildTrackerForRouteTable(rt *ec2.RouteTable) *Resource { +func buildTrackerForRouteTable(rt *ec2.RouteTable, clusterName string) *Resource { resourceTracker := &Resource{ Name: FindName(rt.Tags), ID: aws.StringValue(rt.RouteTableId), Type: ec2.ResourceTypeRouteTable, Deleter: DeleteRouteTable, + Shared: HasSharedTag(ec2.ResourceTypeRouteTable+":"+*rt.RouteTableId, rt.Tags, clusterName), } var blocks []string From eab351c9131abb7d5cfc548e48ee727824213a3e Mon Sep 17 00:00:00 2001 From: georgebuckerfield Date: Sat, 18 Nov 2017 12:34:39 +0000 Subject: [PATCH 04/27] Add tests for ListRouteTables function --- pkg/resources/aws_test.go | 52 +++++++++++++++++++++ upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go | 7 ++- 2 files changed, 57 insertions(+), 2 deletions(-) diff --git a/pkg/resources/aws_test.go b/pkg/resources/aws_test.go index fe5d4d0b42..6caf3203c8 100644 --- a/pkg/resources/aws_test.go +++ b/pkg/resources/aws_test.go @@ -88,3 +88,55 @@ func TestAddUntaggedRouteTables(t *testing.T) { t.Fatalf("expected=%q, actual=%q", expected, keys) } } + +func TestListRouteTables(t *testing.T) { + cloud := awsup.BuildMockAWSCloud("us-east-1", "abc") + //resources := make(map[string]*Resource) + clusterName := "me.example.com" + ownershipTagKey := "kubernetes.io/cluster/" + clusterName + + c := &mockec2.MockEC2{} + cloud.MockEC2 = c + + c.RouteTables = append(c.RouteTables, 
&ec2.RouteTable{ + VpcId: aws.String("vpc-1234"), + RouteTableId: aws.String("rt-shared"), + Tags: []*ec2.Tag{ + { + Key: aws.String("KubernetesCluster"), + Value: aws.String(clusterName), + }, + { + Key: aws.String(ownershipTagKey), + Value: aws.String("shared"), + }, + }, + }) + c.RouteTables = append(c.RouteTables, &ec2.RouteTable{ + VpcId: aws.String("vpc-1234"), + RouteTableId: aws.String("rt-owned"), + Tags: []*ec2.Tag{ + { + Key: aws.String("KubernetesCluster"), + Value: aws.String(clusterName), + }, + { + Key: aws.String(ownershipTagKey), + Value: aws.String("owned"), + }, + }, + }) + + resources, err := ListRouteTables(cloud, clusterName) + if err != nil { + t.Fatalf("error listing route tables: %v", err) + } + for _, rt := range resources { + if rt.ID == "rt-shared" && !rt.Shared { + t.Fatalf("expected Shared: true, got: %v", rt.Shared) + } + if rt.ID == "rt-owned" && rt.Shared { + t.Fatalf("expected Shared: false, got: %v", rt.Shared) + } + } +} diff --git a/upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go b/upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go index 1501206699..027880e045 100644 --- a/upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go +++ b/upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go @@ -133,8 +133,11 @@ func (c *MockAWSCloud) BuildTags(name *string) map[string]string { } func (c *MockAWSCloud) Tags() map[string]string { - glog.Fatalf("MockAWSCloud Tags not implemented") - return nil + tags := make(map[string]string) + for k, v := range c.tags { + tags[k] = v + } + return tags } func (c *MockAWSCloud) CreateTags(resourceId string, tags map[string]string) error { From f1d673f77e121ed90e31176e43501e59f24c5b5c Mon Sep 17 00:00:00 2001 From: zengchen1024 Date: Mon, 20 Nov 2017 09:42:48 +0800 Subject: [PATCH 05/27] implement volume task --- upup/pkg/fi/cloudup/openstack/cloud.go | 141 +++++++++++++++++- upup/pkg/fi/cloudup/openstacktasks/volume.go | 145 +++++++++++++++++++ 2 files changed, 284 insertions(+), 2 deletions(-) create mode 100644 upup/pkg/fi/cloudup/openstacktasks/volume.go diff --git a/upup/pkg/fi/cloudup/openstack/cloud.go b/upup/pkg/fi/cloudup/openstack/cloud.go index e5628da6e0..10ebd37150 100644 --- a/upup/pkg/fi/cloudup/openstack/cloud.go +++ b/upup/pkg/fi/cloudup/openstack/cloud.go @@ -18,25 +18,86 @@ package openstack import ( "fmt" + "time" + "github.com/golang/glog" + "github.com/gophercloud/gophercloud" + os "github.com/gophercloud/gophercloud/openstack" + cinder "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/cloudinstances" "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/util/pkg/vfs" "k8s.io/kubernetes/federation/pkg/dnsprovider" ) +// readBackoff is the backoff strategy for openstack read retries. +var readBackoff = wait.Backoff{ + Duration: time.Second, + Factor: 1.5, + Jitter: 0.1, + Steps: 4, +} + +// writeBackoff is the backoff strategy for openstack write retries. 
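+// Compared with readBackoff above, writes are given one extra retry step
+// (5 vs 4), so mutating calls get slightly longer to succeed before kops
+// gives up.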
+var writeBackoff = wait.Backoff{
+	Duration: time.Second,
+	Factor:   1.5,
+	Jitter:   0.1,
+	Steps:    5,
+}
+
 type OpenstackCloud interface {
 	fi.Cloud
+
+	// SetVolumeTags will set the tags for the Cinder volume
+	SetVolumeTags(id string, tags map[string]string) error
+
+	// GetCloudTags will return the tags attached on cloud
+	GetCloudTags() map[string]string
+
+	// ListVolumes will return the Cinder volumes which match the options
+	ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, error)
+
+	// CreateVolume will create a new Cinder Volume
+	CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, error)
 }
 
 type openstackCloud struct {
+	cinderClient *gophercloud.ServiceClient
+	tags         map[string]string
 }
 
 var _ fi.Cloud = &openstackCloud{}
 
-func NewOpenstackCloud() (OpenstackCloud, error) {
-	return &openstackCloud{}, nil
+func NewOpenstackCloud(tags map[string]string) (OpenstackCloud, error) {
+	config := vfs.OpenstackConfig{}
+
+	authOption, err := config.GetCredential()
+	if err != nil {
+		return nil, err
+	}
+	provider, err := os.AuthenticatedClient(authOption)
+	if err != nil {
+		return nil, fmt.Errorf("error building openstack authenticated client: %v", err)
+	}
+
+	endpointOpt, err := config.GetServiceConfig("Cinder")
+	if err != nil {
+		return nil, err
+	}
+	cinderClient, err := os.NewBlockStorageV2(provider, endpointOpt)
+	if err != nil {
+		return nil, fmt.Errorf("error building cinder client: %v", err)
+	}
+
+	c := &openstackCloud{
+		cinderClient: cinderClient,
+		tags:         tags,
+	}
+	return c, nil
 }
 
 func (c *openstackCloud) ProviderID() kops.CloudProviderID {
@@ -62,3 +123,79 @@ func (c *openstackCloud) DeleteGroup(g *cloudinstances.CloudInstanceGroup) error
 func (c *openstackCloud) GetCloudGroups(cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, warnUnmatched bool, nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) {
 	return nil, fmt.Errorf("openstackCloud::GetCloudGroups not implemented")
 }
+
+func (c *openstackCloud) SetVolumeTags(id string, tags map[string]string) error {
+	if len(tags) == 0 {
+		return nil
+	}
+	if id == "" {
+		return fmt.Errorf("error setting tags to unknown volume")
+	}
+	glog.V(4).Infof("setting tags to cinder volume %q: %v", id, tags)
+
+	opt := cinder.UpdateOpts{Metadata: tags}
+	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
+		_, err := cinder.Update(c.cinderClient, id, opt).Extract()
+		if err != nil {
+			return false, fmt.Errorf("error setting tags to cinder volume %q: %v", id, err)
+		}
+		return true, nil
+	})
+	if err != nil {
+		return err
+	} else if done {
+		return nil
+	} else {
+		return wait.ErrWaitTimeout
+	}
+}
+
+func (c *openstackCloud) GetCloudTags() map[string]string {
+	return c.tags
+}
+
+func (c *openstackCloud) ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, error) {
+	var volumes []cinder.Volume
+
+	done, err := vfs.RetryWithBackoff(readBackoff, func() (bool, error) {
+		allPages, err := cinder.List(c.cinderClient, opt).AllPages()
+		if err != nil {
+			return false, fmt.Errorf("error listing volumes %v: %v", opt, err)
+		}
+
+		vs, err := cinder.ExtractVolumes(allPages)
+		if err != nil {
+			return false, fmt.Errorf("error extracting volumes: %v", err)
+		}
+		volumes = vs
+		return true, nil
+	})
+	if err != nil {
+		return volumes, err
+	} else if done {
+		return volumes, nil
+	} else {
+		return volumes, wait.ErrWaitTimeout
+	}
+}
+
+func (c *openstackCloud) CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, error) {
+	var volume *cinder.Volume
+
+	done, err := vfs.RetryWithBackoff(writeBackoff, 
func() (bool, error) { + v, err := cinder.Create(c.cinderClient, opt).Extract() + if err != nil { + return false, fmt.Errorf("error creating volume %v: %v", opt, err) + } + volume = v + return true, nil + }) + if err != nil { + return volume, err + } else if done { + return volume, nil + } else { + return volume, wait.ErrWaitTimeout + } + +} diff --git a/upup/pkg/fi/cloudup/openstacktasks/volume.go b/upup/pkg/fi/cloudup/openstacktasks/volume.go new file mode 100644 index 0000000000..f3c001c2f3 --- /dev/null +++ b/upup/pkg/fi/cloudup/openstacktasks/volume.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openstacktasks + +import ( + "fmt" + + "github.com/golang/glog" + cinder "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/upup/pkg/fi/cloudup/openstack" +) + +type Volume struct { + ID *string + Name *string + AvailabilityZone *string + VolumeType *string + SizeGB *int64 + Tags map[string]string + Lifecycle *fi.Lifecycle +} + +var _ fi.CompareWithID = &Volume{} + +func (c *Volume) CompareWithID() *string { + return c.ID +} + +func (c *Volume) Find(context *fi.Context) (*Volume, error) { + cloud := context.Cloud.(openstack.OpenstackCloud) + opt := cinder.ListOpts{ + Name: fi.StringValue(c.Name), + Metadata: cloud.GetCloudTags(), + } + volumes, err := cloud.ListVolumes(opt) + if err != nil { + return nil, err + } + n := len(volumes) + if n == 0 { + return nil, nil + } else if n != 1 { + return nil, fmt.Errorf("found multiple Volumes with name: %s", fi.StringValue(c.Name)) + } + v := volumes[0] + actual := &Volume{ + ID: fi.String(v.ID), + Name: fi.String(v.Name), + AvailabilityZone: fi.String(v.AvailabilityZone), + VolumeType: fi.String(v.VolumeType), + SizeGB: fi.Int64(int64(v.Size)), + Tags: v.Metadata, + Lifecycle: c.Lifecycle, + } + return actual, nil +} + +func (c *Volume) Run(context *fi.Context) error { + cloud := context.Cloud.(openstack.OpenstackCloud) + for k, v := range cloud.GetCloudTags() { + c.Tags[k] = v + } + + return fi.DefaultDeltaRunMethod(c, context) +} + +func (_ *Volume) CheckChanges(a, e, changes *Volume) error { + if a == nil { + if e.Name == nil { + return fi.RequiredField("Name") + } + if e.AvailabilityZone == nil { + return fi.RequiredField("AvailabilityZone") + } + if e.VolumeType == nil { + return fi.RequiredField("VolumeType") + } + if e.SizeGB == nil { + return fi.RequiredField("SizeGB") + } + } else { + if changes.ID != nil { + return fi.CannotChangeField("ID") + } + if changes.AvailabilityZone != nil { + return fi.CannotChangeField("AvailabilityZone") + } + if changes.VolumeType != nil { + return fi.CannotChangeField("VolumeType") + } + if changes.SizeGB != nil { + return fi.CannotChangeField("SizeGB") + } + } + return nil +} + +func (_ *Volume) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Volume) error { + if a == nil { + glog.V(2).Infof("Creating PersistentVolume with Name:%q", fi.StringValue(e.Name)) + + opt := 
cinder.CreateOpts{ + Size: int(*e.SizeGB), + AvailabilityZone: fi.StringValue(e.AvailabilityZone), + Metadata: e.Tags, + Name: fi.StringValue(e.Name), + VolumeType: fi.StringValue(e.VolumeType), + } + + v, err := t.Cloud.CreateVolume(opt) + if err != nil { + return fmt.Errorf("error creating PersistentVolume: %v", err) + } + + e.ID = fi.String(v.ID) + return nil + } + + if changes != nil && changes.Tags != nil { + glog.V(2).Infof("Update the tags on volume %q: %v, the differences are %v", fi.StringValue(e.ID), e.Tags, changes.Tags) + + err := t.Cloud.SetVolumeTags(fi.StringValue(e.ID), e.Tags) + if err != nil { + return fmt.Errorf("error updating the tags on volume %q: %v", fi.StringValue(e.ID), err) + } + } + + glog.V(2).Infof("Openstack task Volume::RenderOpenstack did nothing") + return nil +} From 0949d597b74ba58f53cf7b92ec9665dac669163b Mon Sep 17 00:00:00 2001 From: zengchen1024 Date: Mon, 20 Nov 2017 11:27:29 +0800 Subject: [PATCH 06/27] build volume task --- hack/.packages | 1 + pkg/model/master_volumes.go | 37 ++++++++++++++++++++++++++ upup/pkg/fi/cloudup/openstack/cloud.go | 7 +++-- upup/pkg/fi/cloudup/utils.go | 3 ++- 4 files changed, 45 insertions(+), 3 deletions(-) diff --git a/hack/.packages b/hack/.packages index 15cda1be58..9620d25dff 100644 --- a/hack/.packages +++ b/hack/.packages @@ -123,6 +123,7 @@ k8s.io/kops/upup/pkg/fi/cloudup/dotasks k8s.io/kops/upup/pkg/fi/cloudup/gce k8s.io/kops/upup/pkg/fi/cloudup/gcetasks k8s.io/kops/upup/pkg/fi/cloudup/openstack +k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks k8s.io/kops/upup/pkg/fi/cloudup/terraform k8s.io/kops/upup/pkg/fi/cloudup/vsphere k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks diff --git a/pkg/model/master_volumes.go b/pkg/model/master_volumes.go index cebd12d8c1..cd3545fff9 100644 --- a/pkg/model/master_volumes.go +++ b/pkg/model/master_volumes.go @@ -30,6 +30,8 @@ import ( "k8s.io/kops/upup/pkg/fi/cloudup/dotasks" "k8s.io/kops/upup/pkg/fi/cloudup/gce" "k8s.io/kops/upup/pkg/fi/cloudup/gcetasks" + "k8s.io/kops/upup/pkg/fi/cloudup/openstack" + "k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks" ) const ( @@ -95,6 +97,11 @@ func (b *MasterVolumeBuilder) Build(c *fi.ModelBuilderContext) error { b.addVSphereVolume(c, name, volumeSize, zone, etcd, m, allMembers) case kops.CloudProviderBareMetal: glog.Fatalf("BareMetal not implemented") + case kops.CloudProviderOpenstack: + err = b.addOpenstackVolume(c, name, volumeSize, zone, etcd, m, allMembers) + if err != nil { + return err + } default: return fmt.Errorf("unknown cloudprovider %q", b.Cluster.Spec.CloudProvider) } @@ -205,3 +212,33 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, name strin func (b *MasterVolumeBuilder) addVSphereVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) { fmt.Print("addVSphereVolume to be implemented") } + +func (b *MasterVolumeBuilder) addOpenstackVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) error { + volumeType := fi.StringValue(m.VolumeType) + if volumeType == "" { + return fmt.Errorf("must set ETCDMemberSpec.VolumeType on Openstack platform") + } + + // The tags are how protokube knows to mount the volume and use it for etcd + tags := make(map[string]string) + // Apply all user defined labels on the volumes + for k, v := range b.Cluster.Spec.CloudLabels { + tags[k] = v + } + // This is the configuration of the etcd 
cluster
+	tags[openstack.TagNameEtcdClusterPrefix+etcd.Name] = m.Name + "/" + strings.Join(allMembers, ",")
+	// This says "only mount on a master"
+	tags[openstack.TagNameRolePrefix+"master"] = "1"
+
+	t := &openstacktasks.Volume{
+		Name:             s(name),
+		AvailabilityZone: s(zone),
+		VolumeType:       s(volumeType),
+		SizeGB:           fi.Int64(int64(volumeSize)),
+		Tags:             tags,
+		Lifecycle:        b.Lifecycle,
+	}
+	c.AddTask(t)
+
+	return nil
+}
diff --git a/upup/pkg/fi/cloudup/openstack/cloud.go b/upup/pkg/fi/cloudup/openstack/cloud.go
index 10ebd37150..bee8abf856 100644
--- a/upup/pkg/fi/cloudup/openstack/cloud.go
+++ b/upup/pkg/fi/cloudup/openstack/cloud.go
@@ -33,6 +33,10 @@ import (
 	"k8s.io/kubernetes/federation/pkg/dnsprovider"
 )
 
+const TagNameEtcdClusterPrefix = "k8s.io/etcd/"
+const TagNameRolePrefix = "k8s.io/role/"
+const TagClusterName = "KubernetesCluster"
+
 // readBackoff is the backoff strategy for openstack read retries.
 var readBackoff = wait.Backoff{
 	Duration: time.Second,
@@ -165,7 +169,7 @@ func (c *openstackCloud) ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, erro
 
 		vs, err := cinder.ExtractVolumes(allPages)
 		if err != nil {
-			return false, fmt.Errorf("error extracting volumes: %v", err)
+			return false, fmt.Errorf("error extracting volumes from pages: %v", err)
 		}
 		volumes = vs
 		return true, nil
@@ -197,5 +201,4 @@ func (c *openstackCloud) CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, er
 	} else {
 		return volume, wait.ErrWaitTimeout
 	}
-
 }
diff --git a/upup/pkg/fi/cloudup/utils.go b/upup/pkg/fi/cloudup/utils.go
index 2b207902d6..5d203bf212 100644
--- a/upup/pkg/fi/cloudup/utils.go
+++ b/upup/pkg/fi/cloudup/utils.go
@@ -133,7 +133,8 @@ func BuildCloud(cluster *kops.Cluster) (fi.Cloud, error) {
 		}
 	case kops.CloudProviderOpenstack:
 		{
-			osc, err := openstack.NewOpenstackCloud()
+			cloudTags := map[string]string{openstack.TagClusterName: cluster.ObjectMeta.Name}
+			osc, err := openstack.NewOpenstackCloud(cloudTags)
 			if err != nil {
 				return nil, err
 			}

From 4816ed5e360d05643ddedb035383981635b83aa0 Mon Sep 17 00:00:00 2001
From: Rohith
Date: Fri, 10 Nov 2017 12:07:01 +0000
Subject: [PATCH 07/27] DNS Controller Optional

The current implementation enforces that a dns-controller is running; given the user can make the kube-apiserver ELB Internal and then reuse its dns for the masterInternalName, this effectively removes the need to run the service (assuming you're not using it for pod, node and service dns)

- adding a disableDnsController to the ExternalDNS spec provides a toggle on the addon (name is definitely up for debate)
- the default behaviour remains, the dns-controller is always pushed as an addon
---
 pkg/apis/kops/cluster.go                         |  7 ++-
 pkg/apis/kops/v1alpha1/cluster.go                |  7 ++-
 .../kops/v1alpha1/zz_generated.conversion.go     |  2 +
 .../kops/v1alpha1/zz_generated.deepcopy.go       |  3 +-
 pkg/apis/kops/v1alpha2/cluster.go                |  7 ++-
 .../kops/v1alpha2/zz_generated.conversion.go     |  4 ++
 .../kops/v1alpha2/zz_generated.deepcopy.go       | 21 ++++++-
 pkg/apis/kops/zz_generated.deepcopy.go           |  3 +-
 .../clientset/scheme/register.go                 |  3 +-
 .../internalclientset/scheme/register.go         |  3 +-
 .../pkg/fi/cloudup/bootstrapchannelbuilder.go    | 62 ++++++++++---------
 11 files changed, 81 insertions(+), 41 deletions(-)

diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go
index a182961634..81686cb9e3 100644
--- a/pkg/apis/kops/cluster.go
+++ b/pkg/apis/kops/cluster.go
@@ -240,8 +240,11 @@ type RBACAuthorizationSpec struct {
 type AlwaysAllowAuthorizationSpec struct {
 }
 
+// AccessSpec provides configuration details related to kubeapi dns and ELB access
 type AccessSpec struct {
-	DNS *DNSAccessSpec `json:"dns,omitempty"`
+	// DNS will be used to provide config on kube-apiserver elb dns
+	DNS *DNSAccessSpec `json:"dns,omitempty"`
+	// LoadBalancer is the configuration for the kube-apiserver ELB
 	LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
 }
 
@@ -281,6 +284,8 @@ type KubeDNSConfig struct {
 
 // ExternalDNSConfig are options of the dns-controller
 type ExternalDNSConfig struct {
+	// Disable indicates we do not wish to run the dns-controller addon
+	Disable bool `json:"disable,omitempty"`
 	// WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
 	WatchIngress *bool `json:"watchIngress,omitempty"`
 	// WatchNamespace is namespace to watch, detaults to all (use to control whom can creates dns entries)
diff --git a/pkg/apis/kops/v1alpha1/cluster.go b/pkg/apis/kops/v1alpha1/cluster.go
index 10f6682926..f9dd195a83 100644
--- a/pkg/apis/kops/v1alpha1/cluster.go
+++ b/pkg/apis/kops/v1alpha1/cluster.go
@@ -239,8 +239,11 @@ type RBACAuthorizationSpec struct {
 type AlwaysAllowAuthorizationSpec struct {
 }
 
+// AccessSpec provides configuration details related to kubeapi dns and ELB access
 type AccessSpec struct {
-	DNS *DNSAccessSpec `json:"dns,omitempty"`
+	// DNS will be used to provide config on kube-apiserver elb dns
+	DNS *DNSAccessSpec `json:"dns,omitempty"`
+	// LoadBalancer is the configuration for the kube-apiserver ELB
 	LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
 }
 
@@ -280,6 +283,8 @@ type KubeDNSConfig struct {
 
 // ExternalDNSConfig are options of the dns-controller
 type ExternalDNSConfig struct {
+	// Disable indicates we do not wish to run the dns-controller addon
+	Disable bool `json:"disable,omitempty"`
 	// WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
 	WatchIngress *bool `json:"watchIngress,omitempty"`
 	// WatchNamespace is namespace to watch, detaults to all (use to control whom can creates dns entries)
diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
index 763296e624..f1f6e123db 100644
--- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
@@ -1264,6 +1264,7 @@ func Convert_kops_ExecContainerAction_To_v1alpha1_ExecContainerAction(in *kops.E
 }
 
 func autoConvert_v1alpha1_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDNSConfig, out *kops.ExternalDNSConfig, s conversion.Scope) error {
+	out.Disable = in.Disable
 	out.WatchIngress = in.WatchIngress
 	out.WatchNamespace = in.WatchNamespace
 	return nil
@@ -1275,6 +1276,7 @@ func Convert_v1alpha1_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDN
 }
 
 func autoConvert_kops_ExternalDNSConfig_To_v1alpha1_ExternalDNSConfig(in *kops.ExternalDNSConfig, out *ExternalDNSConfig, s conversion.Scope) error {
+	out.Disable = in.Disable
 	out.WatchIngress = in.WatchIngress
 	out.WatchNamespace = in.WatchNamespace
 	return nil
diff --git a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
index 31e4c7d558..60dc6022a7 100644
--- a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
@@ -21,11 +21,10 @@ limitations under the License.
 package v1alpha1
 
 import (
-	reflect "reflect"
-
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	conversion "k8s.io/apimachinery/pkg/conversion"
 	runtime "k8s.io/apimachinery/pkg/runtime"
+	reflect "reflect"
 )
 
 func init() {
diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go
index 475248d8e2..06c4c51d83 100644
--- a/pkg/apis/kops/v1alpha2/cluster.go
+++ b/pkg/apis/kops/v1alpha2/cluster.go
@@ -240,8 +240,11 @@ type RBACAuthorizationSpec struct {
 type AlwaysAllowAuthorizationSpec struct {
 }
 
+// AccessSpec provides configuration details related to kubeapi dns and ELB access
 type AccessSpec struct {
-	DNS *DNSAccessSpec `json:"dns,omitempty"`
+	// DNS will be used to provide config on kube-apiserver elb dns
+	DNS *DNSAccessSpec `json:"dns,omitempty"`
+	// LoadBalancer is the configuration for the kube-apiserver ELB
 	LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
 }
 
@@ -278,6 +281,8 @@ type KubeDNSConfig struct {
 
 // ExternalDNSConfig are options of the dns-controller
 type ExternalDNSConfig struct {
+	// Disable indicates we do not wish to run the dns-controller addon
+	Disable bool `json:"disable,omitempty"`
 	// WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
 	WatchIngress *bool `json:"watchIngress,omitempty"`
 	// WatchNamespace is namespace to watch, detaults to all (use to control whom can creates dns entries)
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
index e5d12cd7c8..c86c197cca 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
@@ -1373,6 +1373,7 @@ func Convert_kops_ExecContainerAction_To_v1alpha2_ExecContainerAction(in *kops.E
 }
 
 func autoConvert_v1alpha2_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDNSConfig, out *kops.ExternalDNSConfig, s conversion.Scope) error {
+	out.Disable = in.Disable
 	out.WatchIngress = in.WatchIngress
 	out.WatchNamespace = in.WatchNamespace
 	return nil
@@ -1384,6 +1385,7 @@ func Convert_v1alpha2_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDN
 }
 
 func autoConvert_kops_ExternalDNSConfig_To_v1alpha2_ExternalDNSConfig(in *kops.ExternalDNSConfig, out *ExternalDNSConfig, s conversion.Scope) error {
+	out.Disable = in.Disable
 	out.WatchIngress = in.WatchIngress
 	out.WatchNamespace = in.WatchNamespace
 	return nil
@@ -2202,6 +2204,8 @@ func autoConvert_v1alpha2_KubeControllerManagerConfig_To_kops_KubeControllerMana
 	out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold
 	out.UseServiceAccountCredentials = in.UseServiceAccountCredentials
 	out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
+	out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
+	out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
 	out.FeatureGates = in.FeatureGates
 	return nil
 }
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
index 48f447ae07..d6a4c85b01 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
@@ -21,11 +21,10 @@ limitations under the License.
package v1alpha2 import ( - reflect "reflect" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" ) func init() { @@ -2282,6 +2281,24 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo **out = **in } } + if in.HorizontalPodAutoscalerDownscaleDelay != nil { + in, out := &in.HorizontalPodAutoscalerDownscaleDelay, &out.HorizontalPodAutoscalerDownscaleDelay + if *in == nil { + *out = nil + } else { + *out = new(v1.Duration) + **out = **in + } + } + if in.HorizontalPodAutoscalerUpscaleDelay != nil { + in, out := &in.HorizontalPodAutoscalerUpscaleDelay, &out.HorizontalPodAutoscalerUpscaleDelay + if *in == nil { + *out = nil + } else { + *out = new(v1.Duration) + **out = **in + } + } if in.FeatureGates != nil { in, out := &in.FeatureGates, &out.FeatureGates *out = make(map[string]string, len(*in)) diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index baadd0833e..e464bcf38b 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -21,11 +21,10 @@ limitations under the License. package kops import ( - reflect "reflect" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" ) func init() { diff --git a/pkg/client/clientset_generated/clientset/scheme/register.go b/pkg/client/clientset_generated/clientset/scheme/register.go index 3284ad953b..0c46a3329c 100644 --- a/pkg/client/clientset_generated/clientset/scheme/register.go +++ b/pkg/client/clientset_generated/clientset/scheme/register.go @@ -17,8 +17,6 @@ limitations under the License. package scheme import ( - os "os" - announced "k8s.io/apimachinery/pkg/apimachinery/announced" registered "k8s.io/apimachinery/pkg/apimachinery/registered" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,6 +24,7 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" kops "k8s.io/kops/pkg/apis/kops/install" + os "os" ) var Scheme = runtime.NewScheme() diff --git a/pkg/client/clientset_generated/internalclientset/scheme/register.go b/pkg/client/clientset_generated/internalclientset/scheme/register.go index 3284ad953b..0c46a3329c 100644 --- a/pkg/client/clientset_generated/internalclientset/scheme/register.go +++ b/pkg/client/clientset_generated/internalclientset/scheme/register.go @@ -17,8 +17,6 @@ limitations under the License. 
package scheme import ( - os "os" - announced "k8s.io/apimachinery/pkg/apimachinery/announced" registered "k8s.io/apimachinery/pkg/apimachinery/registered" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,6 +24,7 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" kops "k8s.io/kops/pkg/apis/kops/install" + os "os" ) var Scheme = runtime.NewScheme() diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index 2255c2162e..d15b8b42ec 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -29,6 +29,7 @@ import ( "k8s.io/kops/upup/pkg/fi/utils" ) +// BootstrapChannelBuilder is responsible for handling the addons in channels type BootstrapChannelBuilder struct { cluster *kops.Cluster Lifecycle *fi.Lifecycle @@ -38,6 +39,7 @@ type BootstrapChannelBuilder struct { var _ fi.ModelBuilder = &BootstrapChannelBuilder{} +// Build is responsible for adding the addons to the channel func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { addons, manifests, err := b.buildManifest() @@ -184,38 +186,42 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri manifests[key] = "addons/" + location } - { - key := "dns-controller.addons.k8s.io" - version := "1.8.0-beta.1" - + // @check the dns-controller has not been disabled + externalDNS := b.cluster.Spec.ExternalDNS + if externalDNS == nil || !externalDNS.Disable { { - location := key + "/pre-k8s-1.6.yaml" - id := "pre-k8s-1.6" + key := "dns-controller.addons.k8s.io" + version := "1.8.0-beta.1" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) - manifests[key+"-"+id] = "addons/" + location - } + { + location := key + "/pre-k8s-1.6.yaml" + id := "pre-k8s-1.6" - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" + addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + KubernetesVersion: "<1.6.0", + Id: id, + }) + manifests[key+"-"+id] = "addons/" + location + } - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0", - Id: id, - }) - manifests[key+"-"+id] = "addons/" + location + { + location := key + "/k8s-1.6.yaml" + id := "k8s-1.6" + + addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + KubernetesVersion: ">=1.6.0", + Id: id, + }) + manifests[key+"-"+id] = "addons/" + location + } } } From a245e88c01be9163e379005ae683ae7f759df5cb Mon Sep 17 00:00:00 2001 From: Moshe-Immerman Date: Tue, 21 Nov 2017 06:25:54 +0200 Subject: [PATCH 08/27] fix: paths in cluster_template.md --- docs/cluster_template.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/cluster_template.md b/docs/cluster_template.md index d3db9a79f5..be811014ef 100644 --- a/docs/cluster_template.md +++ b/docs/cluster_template.md @@ -1,6 +1,6 @@ # Cluster 
Templating -The command `kops replace` can replace a cluster desired configuration from the config in a yaml file (see [/cli/kops_replace.md](/cli/kops_replace.md)). +The command `kops replace` can replace a cluster desired configuration from the config in a yaml file (see [cli/kops_replace.md](cli/kops_replace.md)). It is possible to generate that yaml file from a template, using the command `kops toolbox template` (see [cli/kops_toolbox_template.md](cli/kops_toolbox_template.md)). @@ -45,7 +45,7 @@ Running `kops toolbox template` replaces the placeholders in the template by val Note: when creating a cluster desired configuration template, you can -- use `kops get k8s-cluster.example.com -o yaml > cluster-desired-config.yaml` to create the cluster desired configuration file (see [cli/kops_get.md](cli/kops_get.md)). The values in this file are defined in [cli/cluster_spec.md](cli/cluster_spec.md). +- use `kops get k8s-cluster.example.com -o yaml > cluster-desired-config.yaml` to create the cluster desired configuration file (see [cli/kops_get.md](cli/kops_get.md)). The values in this file are defined in [cluster_spec.md](cluster_spec.md). - replace values by placeholders in that file to create the template. ### Templates From 79eef3cc6cca635f082fd05bd2b017d8a6717a38 Mon Sep 17 00:00:00 2001 From: Kashif Saadat Date: Tue, 21 Nov 2017 11:47:03 +0000 Subject: [PATCH 09/27] Support replacing kops secrets via force flag --- cmd/kops/create_secret_dockerconfig.go | 23 +++++++++++--- cmd/kops/create_secret_encryptionconfig.go | 21 +++++++++++-- docs/cli/kops_create_secret_dockerconfig.md | 4 +++ .../kops_create_secret_encryptionconfig.md | 4 +++ upup/pkg/fi/secrets.go | 4 ++- upup/pkg/fi/secrets/clientset_secretstore.go | 24 ++++++++++++-- upup/pkg/fi/secrets/vfs_secretstore.go | 31 +++++++++++++++++-- 7 files changed, 97 insertions(+), 14 deletions(-) diff --git a/cmd/kops/create_secret_dockerconfig.go b/cmd/kops/create_secret_dockerconfig.go index ae54588e58..708b3ef6bf 100644 --- a/cmd/kops/create_secret_dockerconfig.go +++ b/cmd/kops/create_secret_dockerconfig.go @@ -32,7 +32,7 @@ import ( var ( create_secret_dockerconfig_long = templates.LongDesc(i18n.T(` - Create a new docker config, and store it in the state store. + Create a new docker config, and store it in the state store. Used to configure docker on each master or node (ie. for auth) Use update to modify it, this command will only create a new entry.`)) @@ -40,6 +40,9 @@ var ( # Create an new docker config. kops create secret dockerconfig -f /path/to/docker/config.json \ --name k8s-cluster.example.com --state s3://example.com + # Replace an existing docker config secret. 
+ kops create secret dockerconfig -f /path/to/docker/config.json --force \ + --name k8s-cluster.example.com --state s3://example.com `)) create_secret_dockerconfig_short = i18n.T(`Create a docker config.`) @@ -48,6 +51,7 @@ var ( type CreateSecretDockerConfigOptions struct { ClusterName string DockerConfigPath string + Force bool } func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Command { @@ -78,6 +82,7 @@ func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Comma } cmd.Flags().StringVarP(&options.DockerConfigPath, "", "f", "", "Path to docker config JSON file") + cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force replace the kops secret if it already exists") return cmd } @@ -119,9 +124,19 @@ func RunCreateSecretDockerConfig(f *util.Factory, out io.Writer, options *Create secret.Data = data - _, _, err = secretStore.GetOrCreateSecret("dockerconfig", secret) - if err != nil { - return fmt.Errorf("error adding docker config secret: %v", err) + if !options.Force { + _, created, err := secretStore.GetOrCreateSecret("dockerconfig", secret) + if err != nil { + return fmt.Errorf("error adding dockerconfig secret: %v", err) + } + if !created { + return fmt.Errorf("failed to create the dockerconfig secret as it already exists. The `--force` flag can be passed to replace an existing secret.") + } + } else { + _, err := secretStore.ReplaceSecret("dockerconfig", secret) + if err != nil { + return fmt.Errorf("error updating dockerconfig secret: %v", err) + } } return nil diff --git a/cmd/kops/create_secret_encryptionconfig.go b/cmd/kops/create_secret_encryptionconfig.go index a2df122272..6c8bd56b80 100644 --- a/cmd/kops/create_secret_encryptionconfig.go +++ b/cmd/kops/create_secret_encryptionconfig.go @@ -40,6 +40,9 @@ var ( # Create a new encryption config. kops create secret encryptionconfig -f config.yaml \ --name k8s-cluster.example.com --state s3://example.com + # Replace an existing encryption config secret. + kops create secret encryptionconfig -f config.yaml --force \ + --name k8s-cluster.example.com --state s3://example.com `)) create_secret_encryptionconfig_short = i18n.T(`Create an encryption config.`) @@ -48,6 +51,7 @@ var ( type CreateSecretEncryptionConfigOptions struct { ClusterName string EncryptionConfigPath string + Force bool } func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.Command { @@ -78,6 +82,7 @@ func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.C } cmd.Flags().StringVarP(&options.EncryptionConfigPath, "", "f", "", "Path to encryption config yaml file") + cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force replace the kops secret if it already exists") return cmd } @@ -120,9 +125,19 @@ func RunCreateSecretEncryptionConfig(f *util.Factory, out io.Writer, options *Cr secret.Data = data - _, _, err = secretStore.GetOrCreateSecret("encryptionconfig", secret) - if err != nil { - return fmt.Errorf("error adding encryption config secret: %v", err) + if !options.Force { + _, created, err := secretStore.GetOrCreateSecret("encryptionconfig", secret) + if err != nil { + return fmt.Errorf("error adding encryptionconfig secret: %v", err) + } + if !created { + return fmt.Errorf("failed to create the encryptionconfig secret as it already exists. 
The `--force` flag can be passed to replace an existing secret.") + } + } else { + _, err := secretStore.ReplaceSecret("encryptionconfig", secret) + if err != nil { + return fmt.Errorf("error updating encryptionconfig secret: %v", err) + } } return nil diff --git a/docs/cli/kops_create_secret_dockerconfig.md b/docs/cli/kops_create_secret_dockerconfig.md index 1db22b2ac4..0e58080341 100644 --- a/docs/cli/kops_create_secret_dockerconfig.md +++ b/docs/cli/kops_create_secret_dockerconfig.md @@ -20,12 +20,16 @@ kops create secret dockerconfig # Create an new docker config. kops create secret dockerconfig -f /path/to/docker/config.json \ --name k8s-cluster.example.com --state s3://example.com + # Replace an existing docker config secret. + kops create secret dockerconfig -f /path/to/docker/config.json --force \ + --name k8s-cluster.example.com --state s3://example.com ``` ### Options ``` -f, -- string Path to docker config JSON file + --force Force replace the kops secret if it already exists ``` ### Options inherited from parent commands diff --git a/docs/cli/kops_create_secret_encryptionconfig.md b/docs/cli/kops_create_secret_encryptionconfig.md index 57366603b2..7d8da03817 100644 --- a/docs/cli/kops_create_secret_encryptionconfig.md +++ b/docs/cli/kops_create_secret_encryptionconfig.md @@ -20,12 +20,16 @@ kops create secret encryptionconfig # Create a new encryption config. kops create secret encryptionconfig -f config.yaml \ --name k8s-cluster.example.com --state s3://example.com + # Replace an existing encryption config secret. + kops create secret encryptionconfig -f config.yaml --force \ + --name k8s-cluster.example.com --state s3://example.com ``` ### Options ``` -f, -- string Path to encryption config yaml file + --force Force replace the kops secret if it already exists ``` ### Options inherited from parent commands diff --git a/upup/pkg/fi/secrets.go b/upup/pkg/fi/secrets.go index 5d16144723..2a6f4558a3 100644 --- a/upup/pkg/fi/secrets.go +++ b/upup/pkg/fi/secrets.go @@ -32,8 +32,10 @@ type SecretStore interface { DeleteSecret(item *KeystoreItem) error // FindSecret finds a secret, if exists. Returns nil,nil if not found FindSecret(id string) (*Secret, error) - // GetOrCreateSecret creates or replace a secret + // GetOrCreateSecret creates a secret GetOrCreateSecret(id string, secret *Secret) (current *Secret, created bool, err error) + // ReplaceSecret will forcefully update an existing secret if it exists + ReplaceSecret(id string, secret *Secret) (current *Secret, err error) // ListSecrets lists the ids of all known secrets ListSecrets() ([]string, error) diff --git a/upup/pkg/fi/secrets/clientset_secretstore.go b/upup/pkg/fi/secrets/clientset_secretstore.go index 5ea69eabb6..14f46973b6 100644 --- a/upup/pkg/fi/secrets/clientset_secretstore.go +++ b/upup/pkg/fi/secrets/clientset_secretstore.go @@ -157,7 +157,7 @@ func (c *ClientsetSecretStore) GetOrCreateSecret(name string, secret *fi.Secret) return s, false, nil } - _, err = c.createSecret(secret, name) + _, err = c.createSecret(secret, name, false) if err != nil { if errors.IsAlreadyExists(err) && i == 0 { glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation. 
Will retry") @@ -181,6 +181,21 @@ func (c *ClientsetSecretStore) GetOrCreateSecret(name string, secret *fi.Secret) return s, true, nil } +// ReplaceSecret implements fi.SecretStore::ReplaceSecret +func (c *ClientsetSecretStore) ReplaceSecret(name string, secret *fi.Secret) (*fi.Secret, error) { + _, err := c.createSecret(secret, name, true) + if err != nil { + return nil, fmt.Errorf("unable to write secret: %v", err) + } + + // Confirm the secret exists + s, err := c.loadSecret(name) + if err != nil { + return nil, fmt.Errorf("unable to load secret immmediately after creation: %v", err) + } + return s, nil +} + // loadSecret returns the named secret, if it exists, otherwise returns nil func (c *ClientsetSecretStore) loadSecret(name string) (*fi.Secret, error) { name = NamePrefix + name @@ -207,8 +222,8 @@ func parseSecret(keyset *kops.Keyset) (*fi.Secret, error) { return s, nil } -// createSecret writes the secret, but only if it does not exist -func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string) (*kops.Keyset, error) { +// createSecret will create the Secret, overwriting an existing secret if replace is true +func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string, replace bool) (*kops.Keyset, error) { keyset := &kops.Keyset{} keyset.Name = NamePrefix + name keyset.Spec.Type = kops.SecretTypeSecret @@ -221,5 +236,8 @@ func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string) (*kops.Ke PrivateMaterial: s.Data, }) + if replace { + return c.clientset.Keysets(c.namespace).Update(keyset) + } return c.clientset.Keysets(c.namespace).Create(keyset) } diff --git a/upup/pkg/fi/secrets/vfs_secretstore.go b/upup/pkg/fi/secrets/vfs_secretstore.go index f24979b833..afa08bb6da 100644 --- a/upup/pkg/fi/secrets/vfs_secretstore.go +++ b/upup/pkg/fi/secrets/vfs_secretstore.go @@ -127,7 +127,7 @@ func (c *VFSSecretStore) GetOrCreateSecret(id string, secret *fi.Secret) (*fi.Se return nil, false, err } - err = c.createSecret(secret, p, acl) + err = c.createSecret(secret, p, acl, false) if err != nil { if os.IsExist(err) && i == 0 { glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation. 
Will retry") @@ -151,6 +151,27 @@ func (c *VFSSecretStore) GetOrCreateSecret(id string, secret *fi.Secret) (*fi.Se return s, true, nil } +func (c *VFSSecretStore) ReplaceSecret(id string, secret *fi.Secret) (*fi.Secret, error) { + p := c.buildSecretPath(id) + + acl, err := acls.GetACL(p, c.cluster) + if err != nil { + return nil, err + } + + err = c.createSecret(secret, p, acl, true) + if err != nil { + return nil, fmt.Errorf("unable to write secret: %v", err) + } + + // Confirm the secret exists + s, err := c.loadSecret(p) + if err != nil { + return nil, fmt.Errorf("unable to load secret immmediately after creation %v: %v", p, err) + } + return s, nil +} + func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error) { data, err := p.ReadFile() if err != nil { @@ -166,11 +187,15 @@ func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error) { return s, nil } -// createSecret writes the secret, but only if it does not exists -func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL) error { +// createSecret will create the Secret, overwriting an existing secret if replace is true +func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL, replace bool) error { data, err := json.Marshal(s) if err != nil { return fmt.Errorf("error serializing secret: %v", err) } + + if replace { + return p.WriteFile(data, acl) + } return p.CreateFile(data, acl) } From 769a9e9dbbab9a779928b2938f021b9fda3cc54a Mon Sep 17 00:00:00 2001 From: zengchen1024 Date: Wed, 22 Nov 2017 14:29:14 +0800 Subject: [PATCH 10/27] update gazelle --- pkg/model/BUILD.bazel | 2 ++ upup/pkg/fi/cloudup/openstack/BUILD.bazel | 6 ++++++ upup/pkg/fi/cloudup/openstacktasks/BUILD.bazel | 14 ++++++++++++++ 3 files changed, 22 insertions(+) create mode 100644 upup/pkg/fi/cloudup/openstacktasks/BUILD.bazel diff --git a/pkg/model/BUILD.bazel b/pkg/model/BUILD.bazel index f5e7c00bbc..c5a81e6373 100644 --- a/pkg/model/BUILD.bazel +++ b/pkg/model/BUILD.bazel @@ -36,6 +36,8 @@ go_library( "//upup/pkg/fi/cloudup/dotasks:go_default_library", "//upup/pkg/fi/cloudup/gce:go_default_library", "//upup/pkg/fi/cloudup/gcetasks:go_default_library", + "//upup/pkg/fi/cloudup/openstack:go_default_library", + "//upup/pkg/fi/cloudup/openstacktasks:go_default_library", "//upup/pkg/fi/fitasks:go_default_library", "//util/pkg/vfs:go_default_library", "//vendor/github.com/blang/semver:go_default_library", diff --git a/upup/pkg/fi/cloudup/openstack/BUILD.bazel b/upup/pkg/fi/cloudup/openstack/BUILD.bazel index 78b8212a1d..dcb6e4b65e 100644 --- a/upup/pkg/fi/cloudup/openstack/BUILD.bazel +++ b/upup/pkg/fi/cloudup/openstack/BUILD.bazel @@ -12,7 +12,13 @@ go_library( "//pkg/apis/kops:go_default_library", "//pkg/cloudinstances:go_default_library", "//upup/pkg/fi:go_default_library", + "//util/pkg/vfs:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/gophercloud/gophercloud:go_default_library", + "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", + "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/kubernetes/federation/pkg/dnsprovider:go_default_library", ], ) diff --git a/upup/pkg/fi/cloudup/openstacktasks/BUILD.bazel b/upup/pkg/fi/cloudup/openstacktasks/BUILD.bazel new file mode 100644 index 0000000000..114fb13beb --- /dev/null +++ 
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["volume.go"],
+    importpath = "k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//upup/pkg/fi:go_default_library",
+        "//upup/pkg/fi/cloudup/openstack:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library",
+    ],
+)

From fb675ac8968ed579a918c510619fb88fbc029f42 Mon Sep 17 00:00:00 2001
From: Rohith
Date: Wed, 22 Nov 2017 12:12:14 +0000
Subject: [PATCH 11/27] Owners

- adding myself and kashifsaadat to the owners file .. proud moment :-)
---
 OWNERS | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/OWNERS b/OWNERS
index f9bbdc6263..7e8d40807a 100644
--- a/OWNERS
+++ b/OWNERS
@@ -5,3 +5,5 @@ approvers:
   - zmerlynn
   - andrewsykim
   - geojaz
+  - kashifsaadat
+  - gambol99

From 5ebde989be552c5ea99ec428b7daf6e0ee61004b Mon Sep 17 00:00:00 2001
From: Kashif Saadat
Date: Wed, 22 Nov 2017 15:25:34 +0000
Subject: [PATCH 12/27] Updated 1.8 release notes to cover Canal manual upgrade steps.

---
 docs/releases/1.8-NOTES.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/docs/releases/1.8-NOTES.md b/docs/releases/1.8-NOTES.md
index 6427921546..5429a0c5f0 100644
--- a/docs/releases/1.8-NOTES.md
+++ b/docs/releases/1.8-NOTES.md
@@ -14,6 +14,13 @@ or `--networking flannel-udp` can be specified to explicitly choose a backend mo
 
 See the *Changes to k8s-policy* section in the [Calico release notes](https://github.com/projectcalico/calico/releases/tag/v2.4.0) for help.
 
+* Due to `ThirdPartyResources` becoming fully deprecated in Kubernetes v1.8 (replaced by `CustomResourceDefinitions`), existing Canal users upgrading their Clusters to Kubernetes v1.8 must follow the below TPR->CRD migration steps:
+  1. Run: `kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v2.6.2/upgrade/v2.5/manifests/upgrade-job.yaml`
+  2. Retrieve the pod name from describing the job: `kubectl describe job/calico-upgrade-v2.5`
+  3. Validate the last log line from the pod reports that it completed successfully: `kubectl logs calico-upgrade-v2.5-`
+  4. Update the `KubernetesVersion` within your ClusterSpec to v1.8 (or above), performing an update & rolling-update to all nodes (will involve downtime)
+  5. Confirm cluster is back up and all canal pods are running successfully: `kops validate cluster` (this may take a few minutes for the cluster to fully validate)
+  6. Delete the upgrade job as it is no longer required: `kubectl delete job calico-upgrade-v2.5` (you can also safely delete the `clusterrole`, `clusterrolebinding` and `serviceaccount` resources that were created by the above manifest file)
 
 # Full changelist

From 3067a21341e34965ba3d8d84c9b12f68b3833d81 Mon Sep 17 00:00:00 2001
From: chrislovecnm
Date: Wed, 15 Nov 2017 19:17:55 -0700
Subject: [PATCH 13/27] Updating Calico manifests to Calico release 2.6.2.

Renamed the k8s-1.8 manifest to k8s-1.7. This is required because of a config change that occurs between k8s 1.6 and k8s 1.7. This refactor will also be re-used when Calico Kubernetes data source support is added to kops.

Updated bootstrapchannelbuilder with the new Calico version numbers.
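For reference, after the upgraded manifests roll out, the Calico version a
cluster is actually running can be spot-checked (a sketch; it assumes the
DaemonSet keeps the calico-node name and container ordering used in this
template):

    kubectl -n kube-system get daemonset calico-node \
      -o jsonpath='{.spec.template.spec.containers[0].image}'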
--- .../k8s-1.6.yaml.template | 140 +++++++++++------- ....8.yaml.template => k8s-1.7.yaml.template} | 3 +- .../pkg/fi/cloudup/bootstrapchannelbuilder.go | 11 +- 3 files changed, 90 insertions(+), 64 deletions(-) rename upup/models/cloudup/resources/addons/networking.projectcalico.org/{k8s-1.8.yaml.template => k8s-1.7.yaml.template} (99%) diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template index cb2abb6844..ded7ceb3c3 100644 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template @@ -5,7 +5,7 @@ metadata: name: calico-config namespace: kube-system data: - # The calico-etcd PetSet service IP:port + # etcd servers etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}} {{- range $j, $member := $cluster.Members -}} {{- if $j }},{{ end -}} @@ -18,33 +18,22 @@ data: # The CNI network configuration to install on each node. cni_network_config: |- { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "log_level": "info", - "ipam": { + "name": "k8s-pod-network", + "type": "calico", + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "log_level": "info", + "ipam": { "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" } - ] } - --- kind: ClusterRole @@ -133,12 +122,15 @@ spec: operator: Exists - effect: NoSchedule operator: Exists + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 containers: # Runs calico/node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node - image: quay.io/calico/node:v2.4.1 + image: quay.io/calico/node:v2.6.2 resources: requests: cpu: 10m @@ -169,6 +161,14 @@ spec: # Auto-detect the BGP IP address. - name: IP value: "" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" securityContext: privileged: true volumeMounts: @@ -185,7 +185,7 @@ spec: # This container installs the Calico CNI binaries # and CNI network config file on each node. - name: install-cni - image: quay.io/calico/cni:v1.10.0 + image: quay.io/calico/cni:v1.11.0 resources: requests: cpu: 10m @@ -194,7 +194,7 @@ spec: env: # The name of calico config file - name: CNI_CONF_NAME - value: 10-calico.conflist + value: 10-calico.conf # The location of the Calico etcd cluster. - name: ETCD_ENDPOINTS valueFrom: @@ -237,8 +237,8 @@ spec: --- -# This manifest deploys the Calico policy controller on Kubernetes. 
-# See https://github.com/projectcalico/k8s-policy +# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then +# be removed entirely once the new kube-controllers deployment has been deployed above. apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -246,35 +246,23 @@ metadata: namespace: kube-system labels: k8s-app: calico-policy - role.kubernetes.io/networking: "1" spec: - # The policy controller can only have a single active instance. - replicas: 1 + # Turn this deployment off in favor of the kube-controllers deployment above. + replicas: 0 + strategy: + type: Recreate template: metadata: name: calico-policy-controller namespace: kube-system labels: - k8s-app: calico-policy-controller - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' + k8s-app: calico-policy spec: - # The policy controller must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. hostNetwork: true serviceAccountName: calico - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists containers: - name: calico-policy-controller - image: quay.io/calico/kube-policy-controller:v0.7.0 - resources: - requests: - cpu: 10m + image: quay.io/calico/kube-controllers:v1.0.0 env: # The location of the Calico etcd cluster. - name: ETCD_ENDPOINTS @@ -282,15 +270,6 @@ spec: configMapKeyRef: name: calico-config key: etcd_endpoints - # The location of the Kubernetes API. Use the default Kubernetes - # service for API access. - - name: K8S_API - value: "https://kubernetes.default:443" - # Since we're running in the host namespace and might not have KubeDNS - # access, configure the container's /etc/hosts to resolve - # kubernetes.default to the correct service clusterIP. - - name: CONFIGURE_ETC_HOSTS - value: "true" volumeMounts: # Necessary for gossip based DNS @@ -301,6 +280,55 @@ spec: - name: etc-hosts hostPath: path: /etc/hosts +--- + +# This manifest deploys the Calico Kubernetes controllers. +# See https://github.com/projectcalico/kube-controllers +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" +spec: + # The controllers can only have a single active instance. + replicas: 1 + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + # The controllers must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + serviceAccountName: calico + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: calico-kube-controllers + image: quay.io/calico/kube-controllers:v1.0.0 + resources: + requests: + cpu: 10m + env: + # The location of the Calico etcd cluster. 
+ - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + + {{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} # This manifest installs the k8s-ec2-srcdst container, which disables diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.8.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template similarity index 99% rename from upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.8.yaml.template rename to upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template index 048eb13616..5990da897c 100644 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.8.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template @@ -16,11 +16,10 @@ data: calico_backend: "bird" # The CNI network configuration to install on each node. - # cniVersion should be 0.1.0 on k8s: https://github.com/projectcalico/calico/issues/742 cni_network_config: |- { "name": "k8s-pod-network", - "cniVersion": "0.1.0", + "cniVersion": "0.3.0", "plugins": [ { "type": "calico", diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index 2255c2162e..3cc56a996f 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -468,11 +468,10 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri if b.cluster.Spec.Networking.Calico != nil { key := "networking.projectcalico.org" - // 2.6.3-kops.1 = 2.6.2 with kops manifest tweaks. This should go away with the next version bump. versions := map[string]string{ "pre-k8s-1.6": "2.4.1", - "k8s-1.6": "2.4.2-kops.1", - "k8s-1.8": "2.6.3-kops.1", + "k8s-1.6": "2.6.2", + "k8s-1.7": "2.6.2", } { @@ -499,14 +498,14 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.8.0", + KubernetesVersion: ">=1.6.0 <1.7.0", Id: id, }) manifests[key+"-"+id] = "addons/" + location } { - id := "k8s-1.8" + id := "k8s-1.7" location := key + "/" + id + ".yaml" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ @@ -514,7 +513,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.8.0", + KubernetesVersion: ">=1.7.0", Id: id, }) manifests[key+"-"+id] = "addons/" + location From b05faa0068d3728f47bcbeca117c53257d6e777b Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 22 Nov 2017 16:48:46 -0500 Subject: [PATCH 14/27] Set SleepDelay function in AWS Works around nil SleepDelay problem: latest aws-sdk-go (in k8s 1.9 and kops 1.8) has updated SleepDelay logic; fix is in https://github.com/kubernetes/kubernetes/pull/55307 but that is only in 1.9. Set the SleepDelay to work around the problem. 
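Outside of kops, any consumer of the affected SDK versions can apply the same workaround by setting an explicit `SleepDelay` on the `aws.Config` before building a session. A minimal sketch (the region value is illustrative):

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func newSession() (*session.Session, error) {
	cfg := aws.NewConfig().WithRegion("us-east-1") // region value is illustrative
	// The retry logic on the affected SDK versions calls SleepDelay directly,
	// so it must not be left nil; time.Sleep is the simplest valid implementation.
	cfg.SleepDelay = time.Sleep
	return session.NewSession(cfg)
}

func main() {
	sess, err := newSession()
	if err != nil {
		panic(err)
	}
	_ = sess
}
```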
--- upup/pkg/fi/cloudup/awsup/aws_cloud.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/upup/pkg/fi/cloudup/awsup/aws_cloud.go b/upup/pkg/fi/cloudup/awsup/aws_cloud.go index 11bace6607..29b0d84b54 100644 --- a/upup/pkg/fi/cloudup/awsup/aws_cloud.go +++ b/upup/pkg/fi/cloudup/awsup/aws_cloud.go @@ -185,6 +185,14 @@ func NewAWSCloud(region string, tags map[string]string) (AWSCloud, error) { config = config.WithCredentialsChainVerboseErrors(true) config = request.WithRetryer(config, newLoggingRetryer(ClientMaxRetries)) + // We have the updated aws sdk from 1.9, but don't have https://github.com/kubernetes/kubernetes/pull/55307 + // Set the SleepDelay function to work around this + // TODO: Remove once we update to k8s >= 1.9 (or a version of the retry delayer than includes this) + config.SleepDelay = func(d time.Duration) { + glog.V(6).Infof("aws request sleeping for %v", d) + time.Sleep(d) + } + requestLogger := newRequestLogger(2) sess, err := session.NewSession(config) From c324b01b7a090fe855337c76be250e789d3927a5 Mon Sep 17 00:00:00 2001 From: Ali Rizwan Date: Fri, 24 Nov 2017 17:07:58 +0100 Subject: [PATCH 15/27] Added .service to hooks unit files Recent versions of systemd (version 229 at least) included in Ubuntu 16.04 and Debian 9 require the systemd unit files to have a .service extension. Signed-off-by: Ali Rizwan --- nodeup/pkg/model/hooks.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nodeup/pkg/model/hooks.go b/nodeup/pkg/model/hooks.go index 6d320d3b81..3fdbb5ed48 100644 --- a/nodeup/pkg/model/hooks.go +++ b/nodeup/pkg/model/hooks.go @@ -53,12 +53,12 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error { var name string switch hook.Name { case "": - name = fmt.Sprintf("kops-hook-%d", j) + name = fmt.Sprintf("kops-hook-%d.service", j) if isInstanceGroup { - name = fmt.Sprintf("%s-ig", name) + name = fmt.Sprintf("%s-ig.service", name) } default: - name = hook.Name + name = fmt.Sprintf("%s.service", hook.Name) } if _, found := hookNames[name]; found { From b9ced1957bd7712e6612525a95ddba418d4b7dc7 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 25 Nov 2017 16:03:27 -0500 Subject: [PATCH 16/27] bazel: fix tests/ directory --- Makefile | 2 +- tests/integration/channel/BUILD.bazel | 10 ++++++++++ tests/integration/conversion/BUILD.bazel | 9 +++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4d83ddbcb6..6749dafe2a 100644 --- a/Makefile +++ b/Makefile @@ -587,7 +587,7 @@ kops-server-push: kops-server-build .PHONY: bazel-test bazel-test: - bazel ${BAZEL_OPTIONS} test //cmd/... //pkg/... //channels/... //nodeup/... //channels/... //protokube/... //dns-controller/... //upup/... //util/... //hack:verify-all --test_output=errors + bazel ${BAZEL_OPTIONS} test //cmd/... //pkg/... //channels/... //nodeup/... //channels/... //protokube/... //dns-controller/... //tests/... //upup/... //util/... 
//hack:verify-all --test_output=errors .PHONY: bazel-build bazel-build: diff --git a/tests/integration/channel/BUILD.bazel b/tests/integration/channel/BUILD.bazel index 24c59ff066..06109aedc7 100644 --- a/tests/integration/channel/BUILD.bazel +++ b/tests/integration/channel/BUILD.bazel @@ -3,9 +3,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test") go_test( name = "go_default_test", srcs = ["integration_test.go"], + data = [ + "exported_testdata", # keep + "//channels:channeldata", # keep + ], importpath = "k8s.io/kops/tests/integration/channel", deps = [ "//pkg/apis/kops:go_default_library", "//vendor/github.com/blang/semver:go_default_library", ], ) + +filegroup( + name = "exported_testdata", + srcs = glob(["simple/**"]), + visibility = ["//visibility:public"], +) diff --git a/tests/integration/conversion/BUILD.bazel b/tests/integration/conversion/BUILD.bazel index 28f766b7ee..3253eeac99 100644 --- a/tests/integration/conversion/BUILD.bazel +++ b/tests/integration/conversion/BUILD.bazel @@ -3,6 +3,9 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test") go_test( name = "go_default_test", srcs = ["integration_test.go"], + data = [ + "exported_testdata", # keep + ], importpath = "k8s.io/kops/tests/integration/conversion", deps = [ "//pkg/apis/kops:go_default_library", @@ -14,3 +17,9 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], ) + +filegroup( + name = "exported_testdata", + srcs = glob(["minimal/**"]), + visibility = ["//visibility:public"], +) From 581e95406278f0da285b65718df19013eab9b925 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 25 Nov 2017 16:36:46 -0500 Subject: [PATCH 17/27] Block etcd peer port from nodes Ports 2380 & 2381 should not be exposed to nodes. Fix #3746 --- pkg/model/firewall.go | 43 ++++++++++++++----- .../additional_user-data/cloudformation.json | 16 ++++++- .../update_cluster/complex/kubernetes.tf | 11 ++++- .../update_cluster/ha/kubernetes.tf | 11 ++++- .../lifecycle_phases/security-kubernetes.tf | 11 ++++- .../update_cluster/minimal-141/kubernetes.tf | 11 ++++- .../cloudformation.json | 16 ++++++- .../update_cluster/minimal/kubernetes.tf | 11 ++++- .../privatecalico/kubernetes.tf | 11 ++++- .../update_cluster/privatecanal/kubernetes.tf | 11 ++++- .../update_cluster/privatedns1/kubernetes.tf | 11 ++++- .../update_cluster/privatedns2/kubernetes.tf | 11 ++++- .../privateflannel/kubernetes.tf | 11 ++++- .../privatekopeio/kubernetes.tf | 11 ++++- .../update_cluster/privateweave/kubernetes.tf | 11 ++++- .../shared_subnet/kubernetes.tf | 11 ++++- .../update_cluster/shared_vpc/kubernetes.tf | 11 ++++- 17 files changed, 203 insertions(+), 26 deletions(-) diff --git a/pkg/model/firewall.go b/pkg/model/firewall.go index 01932dce46..4367d5e0ec 100644 --- a/pkg/model/firewall.go +++ b/pkg/model/firewall.go @@ -52,11 +52,9 @@ func (b *FirewallModelBuilder) Build(c *fi.ModelBuilderContext) error { } func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) error { - name := "nodes." 
+ b.ClusterName() - { t := &awstasks.SecurityGroup{ - Name: s(name), + Name: s(b.SecurityGroupName(kops.InstanceGroupRoleNode)), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), Description: s("Security group for nodes"), @@ -211,7 +209,16 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu // TODO: Make less hacky // TODO: Fix management - we need a wildcard matcher now - tcpRanges := []portRange{{From: 1, To: 4000}, {From: 4003, To: 65535}} + tcpBlocked := make(map[int]bool) + + // Don't allow nodes to access etcd client port + tcpBlocked[4001] = true + tcpBlocked[4002] = true + + // Don't allow nodes to access etcd peer port + tcpBlocked[2380] = true + tcpBlocked[2381] = true + udpRanges := []portRange{{From: 1, To: 65535}} protocols := []Protocol{} @@ -219,14 +226,14 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu // Calico needs to access etcd // TODO: Remove, replace with etcd in calico manifest glog.Warningf("Opening etcd port on masters for access from the nodes, for calico. This is unsafe in untrusted environments.") - tcpRanges = []portRange{{From: 1, To: 4001}, {From: 4003, To: 65535}} + tcpBlocked[4001] = false protocols = append(protocols, ProtocolIPIP) } if b.Cluster.Spec.Networking.Romana != nil { // Romana needs to access etcd glog.Warningf("Opening etcd port on masters for access from the nodes, for romana. This is unsafe in untrusted environments.") - tcpRanges = []portRange{{From: 1, To: 4001}, {From: 4003, To: 65535}} + tcpBlocked[4001] = false protocols = append(protocols, ProtocolIPIP) } @@ -245,6 +252,21 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu Protocol: s("udp"), }) } + + tcpRanges := []portRange{ + {From: 1, To: 0}, + } + for port := 1; port < 65536; port++ { + previous := &tcpRanges[len(tcpRanges)-1] + if !tcpBlocked[port] { + if (previous.To + 1) == port { + previous.To = port + } else { + tcpRanges = append(tcpRanges, portRange{From: port, To: port}) + } + } + } + for _, r := range tcpRanges { c.AddTask(&awstasks.SecurityGroupRule{ Name: s(fmt.Sprintf("node-to-master-tcp-%d-%d", r.From, r.To)), @@ -277,18 +299,19 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu } func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext) error { - name := "masters." 
+ b.ClusterName() - { t := &awstasks.SecurityGroup{ - Name: s(name), + Name: s(b.SecurityGroupName(kops.InstanceGroupRoleMaster)), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), Description: s("Security group for masters"), RemoveExtraRules: []string{ "port=22", // SSH "port=443", // k8s api - "port=4001", // etcd main (etcd events is 4002) + "port=2380", // etcd main peer + "port=2381", // etcd events peer + "port=4001", // etcd main + "port=4002", // etcd events "port=4789", // VXLAN "port=179", // Calico diff --git a/tests/integration/update_cluster/additional_user-data/cloudformation.json b/tests/integration/update_cluster/additional_user-data/cloudformation.json index b176d55370..9f01c66931 100644 --- a/tests/integration/update_cluster/additional_user-data/cloudformation.json +++ b/tests/integration/update_cluster/additional_user-data/cloudformation.json @@ -266,7 +266,7 @@ "CidrIp": "0.0.0.0/0" } }, - "AWSEC2SecurityGroupIngressnodetomastertcp14000": { + "AWSEC2SecurityGroupIngressnodetomastertcp12379": { "Type": "AWS::EC2::SecurityGroupIngress", "Properties": { "GroupId": { @@ -276,6 +276,20 @@ "Ref": "AWSEC2SecurityGroupnodesadditionaluserdataexamplecom" }, "FromPort": 1, + "ToPort": 2379, + "IpProtocol": "tcp" + } + }, + "AWSEC2SecurityGroupIngressnodetomastertcp23824000": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersadditionaluserdataexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": "AWSEC2SecurityGroupnodesadditionaluserdataexamplecom" + }, + "FromPort": 2382, "ToPort": 4000, "IpProtocol": "tcp" } diff --git a/tests/integration/update_cluster/complex/kubernetes.tf b/tests/integration/update_cluster/complex/kubernetes.tf index d2faabaa05..cc77b45e4a 100644 --- a/tests/integration/update_cluster/complex/kubernetes.tf +++ b/tests/integration/update_cluster/complex/kubernetes.tf @@ -339,11 +339,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-complex-example-com.id}" source_security_group_id = "${aws_security_group.nodes-complex-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-complex-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-complex-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/ha/kubernetes.tf b/tests/integration/update_cluster/ha/kubernetes.tf index 87977ff71a..ba25f70007 100644 --- a/tests/integration/update_cluster/ha/kubernetes.tf +++ b/tests/integration/update_cluster/ha/kubernetes.tf @@ -481,11 +481,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-ha-example-com.id}" source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-ha-example-com.id}" + source_security_group_id = 
"${aws_security_group.nodes-ha-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/lifecycle_phases/security-kubernetes.tf b/tests/integration/update_cluster/lifecycle_phases/security-kubernetes.tf index 0a20b671d9..6a20dc7208 100644 --- a/tests/integration/update_cluster/lifecycle_phases/security-kubernetes.tf +++ b/tests/integration/update_cluster/lifecycle_phases/security-kubernetes.tf @@ -250,11 +250,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-privateweave-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-privateweave-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/minimal-141/kubernetes.tf b/tests/integration/update_cluster/minimal-141/kubernetes.tf index c7187b420d..8a1455b7c9 100644 --- a/tests/integration/update_cluster/minimal-141/kubernetes.tf +++ b/tests/integration/update_cluster/minimal-141/kubernetes.tf @@ -311,11 +311,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-minimal-141-example-com.id}" source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-minimal-141-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/minimal-cloudformation/cloudformation.json b/tests/integration/update_cluster/minimal-cloudformation/cloudformation.json index 5d0d6aa193..8b034be017 100644 --- a/tests/integration/update_cluster/minimal-cloudformation/cloudformation.json +++ b/tests/integration/update_cluster/minimal-cloudformation/cloudformation.json @@ -266,7 +266,7 @@ "CidrIp": "0.0.0.0/0" } }, - "AWSEC2SecurityGroupIngressnodetomastertcp14000": { + "AWSEC2SecurityGroupIngressnodetomastertcp12379": { "Type": "AWS::EC2::SecurityGroupIngress", "Properties": { "GroupId": { @@ -276,6 +276,20 @@ "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" }, "FromPort": 1, + "ToPort": 2379, + "IpProtocol": "tcp" + } + }, + "AWSEC2SecurityGroupIngressnodetomastertcp23824000": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "FromPort": 2382, "ToPort": 4000, "IpProtocol": "tcp" } diff --git a/tests/integration/update_cluster/minimal/kubernetes.tf b/tests/integration/update_cluster/minimal/kubernetes.tf index 06fa93651f..9e55b58011 100644 --- 
a/tests/integration/update_cluster/minimal/kubernetes.tf +++ b/tests/integration/update_cluster/minimal/kubernetes.tf @@ -311,11 +311,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-minimal-example-com.id}" source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-minimal-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/privatecalico/kubernetes.tf b/tests/integration/update_cluster/privatecalico/kubernetes.tf index 034634c606..4484ee5059 100644 --- a/tests/integration/update_cluster/privatecalico/kubernetes.tf +++ b/tests/integration/update_cluster/privatecalico/kubernetes.tf @@ -591,11 +591,20 @@ resource "aws_security_group_rule" "node-to-master-protocol-ipip" { protocol = "4" } -resource "aws_security_group_rule" "node-to-master-tcp-1-4001" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-privatecalico-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4001" { + type = "ingress" + security_group_id = "${aws_security_group.masters-privatecalico-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}" + from_port = 2382 to_port = 4001 protocol = "tcp" } diff --git a/tests/integration/update_cluster/privatecanal/kubernetes.tf b/tests/integration/update_cluster/privatecanal/kubernetes.tf index 00f55c75cd..38161329cd 100644 --- a/tests/integration/update_cluster/privatecanal/kubernetes.tf +++ b/tests/integration/update_cluster/privatecanal/kubernetes.tf @@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-privatecanal-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatecanal-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-privatecanal-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-privatecanal-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/privatedns1/kubernetes.tf b/tests/integration/update_cluster/privatedns1/kubernetes.tf index 23e82caed3..5a330aa74c 100644 --- a/tests/integration/update_cluster/privatedns1/kubernetes.tf +++ b/tests/integration/update_cluster/privatedns1/kubernetes.tf @@ -587,11 +587,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource 
"aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-privatedns1-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-privatedns1-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/privatedns2/kubernetes.tf b/tests/integration/update_cluster/privatedns2/kubernetes.tf index b6ed0ad66b..85a2633719 100644 --- a/tests/integration/update_cluster/privatedns2/kubernetes.tf +++ b/tests/integration/update_cluster/privatedns2/kubernetes.tf @@ -573,11 +573,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-privatedns2-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-privatedns2-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/privateflannel/kubernetes.tf b/tests/integration/update_cluster/privateflannel/kubernetes.tf index fc6c677cfb..389bb7ee5e 100644 --- a/tests/integration/update_cluster/privateflannel/kubernetes.tf +++ b/tests/integration/update_cluster/privateflannel/kubernetes.tf @@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/privatekopeio/kubernetes.tf b/tests/integration/update_cluster/privatekopeio/kubernetes.tf index 66c3ebd2e0..9f7fc06186 100644 --- a/tests/integration/update_cluster/privatekopeio/kubernetes.tf +++ b/tests/integration/update_cluster/privatekopeio/kubernetes.tf @@ -573,11 +573,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-privatekopeio-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatekopeio-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" 
+} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-privatekopeio-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-privatekopeio-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/privateweave/kubernetes.tf b/tests/integration/update_cluster/privateweave/kubernetes.tf index bebc48ecf4..c9c424c2fc 100644 --- a/tests/integration/update_cluster/privateweave/kubernetes.tf +++ b/tests/integration/update_cluster/privateweave/kubernetes.tf @@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-privateweave-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-privateweave-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/shared_subnet/kubernetes.tf b/tests/integration/update_cluster/shared_subnet/kubernetes.tf index 1f3d1fbfd2..6f5fa07471 100644 --- a/tests/integration/update_cluster/shared_subnet/kubernetes.tf +++ b/tests/integration/update_cluster/shared_subnet/kubernetes.tf @@ -286,11 +286,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-sharedsubnet-example-com.id}" source_security_group_id = "${aws_security_group.nodes-sharedsubnet-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-sharedsubnet-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-sharedsubnet-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" } diff --git a/tests/integration/update_cluster/shared_vpc/kubernetes.tf b/tests/integration/update_cluster/shared_vpc/kubernetes.tf index abdfa15afc..a5f9ee3df6 100644 --- a/tests/integration/update_cluster/shared_vpc/kubernetes.tf +++ b/tests/integration/update_cluster/shared_vpc/kubernetes.tf @@ -302,11 +302,20 @@ resource "aws_security_group_rule" "node-egress" { cidr_blocks = ["0.0.0.0/0"] } -resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { +resource "aws_security_group_rule" "node-to-master-tcp-1-2379" { type = "ingress" security_group_id = "${aws_security_group.masters-sharedvpc-example-com.id}" source_security_group_id = "${aws_security_group.nodes-sharedvpc-example-com.id}" from_port = 1 + to_port = 2379 + protocol = "tcp" +} + +resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" { + type = "ingress" + security_group_id = "${aws_security_group.masters-sharedvpc-example-com.id}" + source_security_group_id = "${aws_security_group.nodes-sharedvpc-example-com.id}" + from_port = 2382 to_port = 4000 protocol = "tcp" 
} From e3c7f03aaa7cbba88fa7a99d8727655fc3e72e85 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 25 Nov 2017 17:19:40 -0500 Subject: [PATCH 18/27] Avoid generating a CA keypair on-demand Instead we must explicitly create it; this avoids races where we are reading the private key and creating CA certs. Issue #3875 --- upup/pkg/fi/clientset_castore.go | 10 ++----- upup/pkg/fi/fitasks/BUILD.bazel | 11 +++++++- upup/pkg/fi/fitasks/keypair_test.go | 44 +++++++++++++++++++++++++++++ upup/pkg/fi/vfs_castore.go | 11 ++++---- 4 files changed, 62 insertions(+), 14 deletions(-) create mode 100644 upup/pkg/fi/fitasks/keypair_test.go diff --git a/upup/pkg/fi/clientset_castore.go b/upup/pkg/fi/clientset_castore.go index 0344d21d2c..72bea46aa4 100644 --- a/upup/pkg/fi/clientset_castore.go +++ b/upup/pkg/fi/clientset_castore.go @@ -62,7 +62,8 @@ func NewClientsetCAStore(cluster *kops.Cluster, clientset kopsinternalversion.Ko return c } -// readCAKeypairs retrieves the CA keypair, generating a new keypair if not found +// readCAKeypairs retrieves the CA keypair. +// (No longer generates a keypair if not found.) func (c *ClientsetCAStore) readCAKeypairs(id string) (*keyset, error) { c.mutex.Lock() defer c.mutex.Unlock() @@ -78,14 +79,9 @@ func (c *ClientsetCAStore) readCAKeypairs(id string) (*keyset, error) { } if keyset == nil { - keyset, err = c.generateCACertificate(id) - if err != nil { - return nil, err - } - + return nil, nil } c.cachedCaKeysets[id] = keyset - return keyset, nil } diff --git a/upup/pkg/fi/fitasks/BUILD.bazel b/upup/pkg/fi/fitasks/BUILD.bazel index 3c4cf84b5f..a1c492607d 100644 --- a/upup/pkg/fi/fitasks/BUILD.bazel +++ b/upup/pkg/fi/fitasks/BUILD.bazel @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -27,3 +27,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) + +go_test( + name = "go_default_test", + size = "small", + srcs = ["keypair_test.go"], + importpath = "k8s.io/kops/upup/pkg/fi/fitasks", + library = ":go_default_library", + deps = ["//upup/pkg/fi:go_default_library"], +) diff --git a/upup/pkg/fi/fitasks/keypair_test.go b/upup/pkg/fi/fitasks/keypair_test.go new file mode 100644 index 0000000000..871c33043c --- /dev/null +++ b/upup/pkg/fi/fitasks/keypair_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fitasks + +import ( + "k8s.io/kops/upup/pkg/fi" + "strings" + "testing" +) + +func TestKeypairDeps(t *testing.T) { + ca := &Keypair{} + cert := &Keypair{ + Signer: ca, + } + + tasks := make(map[string]fi.Task) + tasks["ca"] = ca + tasks["cert"] = cert + + deps := fi.FindTaskDependencies(tasks) + + if strings.Join(deps["ca"], ",") != "" { + t.Errorf("unexpected dependencies for ca: %v", deps["ca"]) + } + + if strings.Join(deps["cert"], ",") != "ca" { + t.Errorf("unexpected dependencies for cert: %v", deps["cert"]) + } +} diff --git a/upup/pkg/fi/vfs_castore.go b/upup/pkg/fi/vfs_castore.go index df3fb6413e..fc83dd1651 100644 --- a/upup/pkg/fi/vfs_castore.go +++ b/upup/pkg/fi/vfs_castore.go @@ -68,7 +68,7 @@ func (s *VFSCAStore) VFSPath() vfs.Path { return s.basedir } -// Retrieves the CA keypair, generating a new keypair if not found +// Retrieves the CA keypair. No longer generates keypairs if not found. func (s *VFSCAStore) readCAKeypairs(id string) (*certificates, *privateKeys, error) { s.mutex.Lock() defer s.mutex.Unlock() @@ -98,16 +98,15 @@ func (s *VFSCAStore) readCAKeypairs(id string) (*certificates, *privateKeys, err } if caPrivateKeys == nil { - caCertificates, caPrivateKeys, err = s.generateCACertificate(id) - if err != nil { - return nil, nil, err - } - + // We no longer generate CA certificates automatically - too race-prone + return caCertificates, caPrivateKeys, nil } + cached = &cachedEntry{certificates: caCertificates, privateKeys: caPrivateKeys} s.cachedCAs[id] = cached return cached.certificates, cached.privateKeys, nil + } func BuildCAX509Template() *x509.Certificate { From b2cd5c961cb872089a3c7190f2c610ee3e87684f Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 26 Nov 2017 01:49:19 -0500 Subject: [PATCH 19/27] Use EnsureTask so we don't have to track directories as closely Issue #3921 --- nodeup/pkg/model/file_assets.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nodeup/pkg/model/file_assets.go b/nodeup/pkg/model/file_assets.go index 430809a3cf..fbc6347f39 100644 --- a/nodeup/pkg/model/file_assets.go +++ b/nodeup/pkg/model/file_assets.go @@ -40,7 +40,7 @@ func (f *FileAssetsBuilder) Build(c *fi.ModelBuilderContext) error { // used to keep track of previous file, so a instanceGroup can override a cluster wide one tracker := make(map[string]bool, 0) // ensure the default path exists - c.AddTask(&nodetasks.File{ + c.EnsureTask(&nodetasks.File{ Path: f.FileAssetsDefaultPath(), Type: nodetasks.FileType_Directory, Mode: s("0755"), @@ -88,8 +88,8 @@ func (f *FileAssetsBuilder) buildFileAssets(c *fi.ModelBuilderContext, assets [] content = string(decoded) } - // @check if the directory structure exist or create it - c.AddTask(&nodetasks.File{ + // We use EnsureTask so that we don't have to check if the asset directories have already been done + c.EnsureTask(&nodetasks.File{ Path: filepath.Dir(assetPath), Type: nodetasks.FileType_Directory, Mode: s("0755"), From d0944714f47a7114da30810f499aedecc8495650 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 26 Nov 2017 15:27:26 -0500 Subject: [PATCH 20/27] Update kopeio auth --- .../authentication.kope.io/k8s-1.6.yaml | 132 ------------- .../authentication.kope.io/k8s-1.8.yaml | 185 ++++++++++++++++++ .../pkg/fi/cloudup/bootstrapchannelbuilder.go | 8 +- 3 files changed, 189 insertions(+), 136 deletions(-) delete mode 100644 upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.6.yaml create mode 100644 
upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml diff --git a/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.6.yaml b/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.6.yaml deleted file mode 100644 index 63ad45f259..0000000000 --- a/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.6.yaml +++ /dev/null @@ -1,132 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" - ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: auth-portal - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - template: - metadata: - labels: - app: auth-portal - spec: - containers: - - name: auth-portal - image: kopeio/auth-portal:1.0.20170619 - ports: - - containerPort: 8080 - command: - - /auth-portal - ---- - -apiVersion: v1 -kind: Service -metadata: - name: auth-portal - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - selector: - app: auth-portal - ports: - - port: 80 - targetPort: 8080 - ---- - -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - updateStrategy: - type: RollingUpdate - template: - metadata: - labels: - app: auth-api - spec: - hostNetwork: true - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - containers: - - name: auth-api - image: kopeio/auth-api:1.0.20170619 - imagePullPolicy: Always - ports: - - containerPort: 9001 - command: - - /auth-api - - --listen=127.0.0.1:9001 - - --secure-port=9002 - - --server=https://127.0.0.1:9002 - - --insecure-skip-tls-verify - - --etcd-servers=http://127.0.0.1:4001 - - --v=8 - - --storage-backend=etcd2 - ---- - -apiVersion: v1 -kind: Service -metadata: - name: auth-api - namespace: kopeio-auth -spec: - selector: - app: auth-api - ports: - - port: 443 - targetPort: 9002 - ---- - -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1alpha1.auth.kope.io -spec: - insecureSkipTLSVerify: true - group: auth.kope.io - priority: 150 - service: - name: auth-api - namespace: kopeio-auth - version: v1alpha1 - ---- - -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1alpha1.config.auth.kope.io -spec: - insecureSkipTLSVerify: true - group: config.auth.kope.io - priority: 150 - service: - name: auth-api - namespace: kopeio-auth - version: v1alpha1 diff --git a/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml b/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml new file mode 100644 index 0000000000..62f4cdfcae --- /dev/null +++ b/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml @@ -0,0 +1,185 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: kopeio-auth + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" + +--- + +apiVersion: v1 +kind: Service +metadata: + name: auth-api + namespace: kopeio-auth + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" +spec: + selector: + app: auth-api + ports: + - port: 443 + targetPort: 9002 + +--- + +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: auth-api + 
namespace: kopeio-auth + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" +spec: + template: + metadata: + labels: + app: auth-api + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + serviceAccountName: auth-api + hostNetwork: true + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + - name: auth-api + image: kopeio/auth-api:1.0.20171125 + imagePullPolicy: Always + ports: + - containerPort: 9001 + command: + - /auth-api + - --listen=127.0.0.1:9001 + - --secure-port=9002 + - --etcd-servers=http://127.0.0.1:4001 + - --v=8 + - --storage-backend=etcd2 + +--- + +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1alpha1.auth.kope.io + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" +spec: + insecureSkipTLSVerify: true + group: auth.kope.io + groupPriorityMinimum: 1000 + versionPriority: 15 + service: + name: auth-api + namespace: kopeio-auth + version: v1alpha1 + +--- + +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1alpha1.config.auth.kope.io + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" +spec: + insecureSkipTLSVerify: true + group: config.auth.kope.io + groupPriorityMinimum: 1000 + versionPriority: 15 + service: + name: auth-api + namespace: kopeio-auth + version: v1alpha1 + +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: auth-api + namespace: kopeio-auth + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kopeio-auth:auth-api:auth-reader + namespace: kube-system + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: auth-api + namespace: kopeio-auth + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kopeio-auth:system:auth-delegator + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: auth-api + namespace: kopeio-auth + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: auth-api + namespace: kopeio-auth + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" +rules: +- apiGroups: ["auth.kope.io"] + resources: ["users"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: auth-api + namespace: kopeio-auth + labels: + k8s-addon: authentication.kope.io + role.kubernetes.io/authentication: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: auth-api +subjects: +- kind: ServiceAccount + name: auth-api + namespace: kopeio-auth diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index f5fe45bcea..b20b2b139f 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -619,18 +619,18 @@ func (b *BootstrapChannelBuilder) buildManifest() 
(*channelsapi.Addons, map[stri if b.cluster.Spec.Authentication != nil && b.cluster.Spec.Authentication.Kopeio != nil { key := "authentication.kope.io" - version := "1.0.20170619" + version := "1.0.20171125" { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" + location := key + "/k8s-1.8.yaml" + id := "k8s-1.8" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ Name: fi.String(key), Version: fi.String(version), Selector: authenticationSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0", + KubernetesVersion: ">=1.8.0", Id: id, }) manifests[key+"-"+id] = "addons/" + location From 079464c223f767fe517564f110b8b60955d13485 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 26 Nov 2017 17:05:59 -0500 Subject: [PATCH 21/27] Don't add .service extension if already there --- nodeup/pkg/model/hooks.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/nodeup/pkg/model/hooks.go b/nodeup/pkg/model/hooks.go index 3fdbb5ed48..74eb888128 100644 --- a/nodeup/pkg/model/hooks.go +++ b/nodeup/pkg/model/hooks.go @@ -53,12 +53,12 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error { var name string switch hook.Name { case "": - name = fmt.Sprintf("kops-hook-%d.service", j) + name = fmt.Sprintf("kops-hook-%d", j) if isInstanceGroup { - name = fmt.Sprintf("%s-ig.service", name) + name += "-ig" } default: - name = fmt.Sprintf("%s.service", hook.Name) + name = hook.Name } if _, found := hookNames[name]; found { @@ -72,7 +72,7 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error { enabled := false managed := true c.AddTask(&nodetasks.Service{ - Name: hook.Name, + Name: ensureSystemdSuffix(name), ManageState: &managed, Enabled: &enabled, Running: &enabled, @@ -94,6 +94,14 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error { return nil } +// ensureSystemdSuffix makes sure that we have a .service suffix on the name, needed on recent versions of systemd +func ensureSystemdSuffix(name string) string { + if !strings.HasSuffix(name, ".service") && !strings.HasSuffix(name, ".timer") { + name += ".service" + } + return name +} + // buildSystemdService is responsible for generating the service func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*nodetasks.Service, error) { // perform some basic validation @@ -130,7 +138,7 @@ func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*no } service := &nodetasks.Service{ - Name: name, + Name: ensureSystemdSuffix(name), Definition: s(unit.Render()), } From 726ce3651f9597bcc48abdd153e8490554b0841c Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 26 Nov 2017 23:11:44 -0500 Subject: [PATCH 22/27] Put the 1.8 image into the alpha channel --- channels/alpha | 3 +++ 1 file changed, 3 insertions(+) diff --git a/channels/alpha b/channels/alpha index 638125eae4..727c4fd532 100644 --- a/channels/alpha +++ b/channels/alpha @@ -13,6 +13,9 @@ spec: - name: kope.io/k8s-1.7-debian-jessie-amd64-hvm-ebs-2017-07-28 providerID: aws kubernetesVersion: ">=1.7.0" + - name: kope.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2017-11-27 + providerID: aws + kubernetesVersion: ">=1.8.0" - providerID: gce name: "cos-cloud/cos-stable-60-9592-90-0" cluster: From 660c45a01cded0bfa77ae1b29a50a71c4b19c836 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 26 Nov 2017 23:26:23 -0500 Subject: [PATCH 23/27] Add initial docs on the kops side of authentication --- docs/authentication.md | 34 ++++++++++++++++++++++++++++++++++ 1
file changed, 34 insertions(+) create mode 100644 docs/authentication.md diff --git a/docs/authentication.md b/docs/authentication.md new file mode 100644 index 0000000000..f8905f9588 --- /dev/null +++ b/docs/authentication.md @@ -0,0 +1,34 @@ +# Authentication + +Kops has support for configuring authentication systems. This support is +currently highly experimental, and should not be used with kubernetes versions +before 1.8.5 because of a serious bug with apimachinery [#55022](https://github.com/kubernetes/kubernetes/issues/55022). + +## kopeio authentication + +If you want to experiment with kopeio authentication, you can use +`--authentication kopeio`. However please be aware that kopeio authentication +has not yet been formally released, and thus there is not a lot of upstream +documentation. + +Alternatively, you can add this block to your cluster: + +``` +authentication: + kopeio: {} +``` + +For example: + +``` +apiVersion: kops/v1alpha2 +kind: Cluster +metadata: + name: cluster.example.com +spec: + authentication: + kopeio: {} + authorization: + rbac: {} +``` + From 0112cc225e68ab62a991921383ef125c1f8ec1f9 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 26 Nov 2017 23:16:37 -0500 Subject: [PATCH 24/27] Promote 1.5.8 and 1.6.11 to stable channel --- channels/stable | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/channels/stable b/channels/stable index bd72c3eea6..044d5bb27e 100644 --- a/channels/stable +++ b/channels/stable @@ -24,10 +24,10 @@ spec: recommendedVersion: 1.7.10 requiredVersion: 1.7.0 - range: ">=1.6.0" - recommendedVersion: 1.6.7 + recommendedVersion: 1.6.11 requiredVersion: 1.6.0 - range: ">=1.5.0" - recommendedVersion: 1.5.7 + recommendedVersion: 1.5.8 requiredVersion: 1.5.1 - range: "<1.5.0" recommendedVersion: 1.4.12 @@ -40,11 +40,11 @@ spec: - range: ">=1.6.0-alpha.1" #recommendedVersion: 1.6.0 #requiredVersion: 1.6.0 - kubernetesVersion: 1.6.7 + kubernetesVersion: 1.6.11 - range: ">=1.5.0-alpha1" recommendedVersion: 1.5.1 #requiredVersion: 1.5.1 - kubernetesVersion: 1.5.7 + kubernetesVersion: 1.5.8 - range: "<1.5.0" recommendedVersion: 1.4.4 #requiredVersion: 1.4.4 From 31326059b18e07dfd568e64804adf95c331611f4 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 26 Nov 2017 23:17:05 -0500 Subject: [PATCH 25/27] Bump alpha channel k8s versions --- channels/alpha | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/channels/alpha b/channels/alpha index 638125eae4..fc70a76719 100644 --- a/channels/alpha +++ b/channels/alpha @@ -21,13 +21,13 @@ spec: kubenet: {} kubernetesVersions: - range: ">=1.8.0" - recommendedVersion: 1.8.3 + recommendedVersion: 1.8.4 requiredVersion: 1.8.0 - range: ">=1.7.0" - recommendedVersion: 1.7.10 + recommendedVersion: 1.7.11 requiredVersion: 1.7.0 - range: ">=1.6.0" - recommendedVersion: 1.6.11 + recommendedVersion: 1.6.13 requiredVersion: 1.6.0 - range: ">=1.5.0" recommendedVersion: 1.5.8 @@ -39,15 +39,15 @@ spec: - range: ">=1.8.0-alpha.1" recommendedVersion: 1.8.0-beta.1 #requiredVersion: 1.8.0 - kubernetesVersion: 1.8.3 + kubernetesVersion: 1.8.4 - range: ">=1.7.0-alpha.1" recommendedVersion: 1.7.1 #requiredVersion: 1.7.0 - kubernetesVersion: 1.7.10 + kubernetesVersion: 1.7.11 - range: ">=1.6.0-alpha.1" #recommendedVersion: 1.6.0 #requiredVersion: 1.6.0 - kubernetesVersion: 1.6.11 + kubernetesVersion: 1.6.13 - range: ">=1.5.0-alpha1" recommendedVersion: 1.5.1 #requiredVersion: 1.5.1 From 2d59315b9670d7b370bd21fc2aa379c84878f80c Mon Sep 17 00:00:00 2001 From:
King'ori Maina Date: Mon, 27 Nov 2017 21:41:27 +0200 Subject: [PATCH 26/27] Map horizontal-pod-autoscaler-use-rest-clients flag --- pkg/apis/kops/componentconfig.go | 3 +++ pkg/apis/kops/v1alpha1/componentconfig.go | 3 +++ pkg/apis/kops/v1alpha2/componentconfig.go | 3 +++ 3 files changed, 9 insertions(+) diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index f61eb47637..0885a3d604 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct { // long the autoscaler has to wait before another upscale operation can // be performed after the current one has completed. HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"` + // HorizontalPodAutoscalerUseRestClients determines if the new-style clients + // should be used if support for custom metrics is enabled. + HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"` // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features. FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"` } diff --git a/pkg/apis/kops/v1alpha1/componentconfig.go b/pkg/apis/kops/v1alpha1/componentconfig.go index b8c42aad36..226a2b6ed5 100644 --- a/pkg/apis/kops/v1alpha1/componentconfig.go +++ b/pkg/apis/kops/v1alpha1/componentconfig.go @@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct { // long the autoscaler has to wait before another upscale operation can // be performed after the current one has completed. HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"` + // HorizontalPodAutoscalerUseRestClients determines if the new-style clients + // should be used if support for custom metrics is enabled. + HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"` // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features. FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"` } diff --git a/pkg/apis/kops/v1alpha2/componentconfig.go b/pkg/apis/kops/v1alpha2/componentconfig.go index c3a94929f2..4b61278732 100644 --- a/pkg/apis/kops/v1alpha2/componentconfig.go +++ b/pkg/apis/kops/v1alpha2/componentconfig.go @@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct { // long the autoscaler has to wait before another upscale operation can // be performed after the current one has completed. HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"` + // HorizontalPodAutoscalerUseRestClients determines if the new-style clients + // should be used if support for custom metrics is enabled. + HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"` // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features. 
 	FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
 }

From 28ff1bfe069f406ffcedc93864c2ef4c2c9d26f5 Mon Sep 17 00:00:00 2001
From: King'ori Maina
Date: Mon, 27 Nov 2017 23:12:33 +0200
Subject: [PATCH 27/27] Add API machinery generated code

---
 pkg/apis/kops/v1alpha1/zz_generated.conversion.go | 2 ++
 pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go   | 9 +++++++++
 pkg/apis/kops/v1alpha2/zz_generated.conversion.go | 2 ++
 pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go   | 9 +++++++++
 pkg/apis/kops/zz_generated.deepcopy.go            | 9 +++++++++
 5 files changed, 31 insertions(+)

diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
index f1f6e123db..b35d889184 100644
--- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
@@ -1944,6 +1944,7 @@ func autoConvert_v1alpha1_KubeControllerManagerConfig_To_kops_KubeControllerMana
 	out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
 	out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
 	out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
+	out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
 	out.FeatureGates = in.FeatureGates
 	return nil
 }
@@ -1979,6 +1980,7 @@ func autoConvert_kops_KubeControllerManagerConfig_To_v1alpha1_KubeControllerMana
 	out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
 	out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
 	out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
+	out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
 	out.FeatureGates = in.FeatureGates
 	return nil
 }
diff --git a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
index 60dc6022a7..e1d6228c36 100644
--- a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
@@ -2173,6 +2173,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
 			**out = **in
 		}
 	}
+	if in.HorizontalPodAutoscalerUseRestClients != nil {
+		in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(bool)
+			**out = **in
+		}
+	}
 	if in.FeatureGates != nil {
 		in, out := &in.FeatureGates, &out.FeatureGates
 		*out = make(map[string]string, len(*in))
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
index c86c197cca..b3697b091a 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
@@ -2206,6 +2206,7 @@ func autoConvert_v1alpha2_KubeControllerManagerConfig_To_kops_KubeControllerMana
 	out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
 	out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
 	out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
+	out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
 	out.FeatureGates = in.FeatureGates
 	return nil
 }
@@ -2241,6 +2242,7 @@ func autoConvert_kops_KubeControllerManagerConfig_To_v1alpha2_KubeControllerMana
 	out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
 	out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
 	out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
+	out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
 	out.FeatureGates = in.FeatureGates
 	return nil
 }
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
index d6a4c85b01..3c50685758 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
@@ -2299,6 +2299,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
 			**out = **in
 		}
 	}
+	if in.HorizontalPodAutoscalerUseRestClients != nil {
+		in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(bool)
+			**out = **in
+		}
+	}
 	if in.FeatureGates != nil {
 		in, out := &in.FeatureGates, &out.FeatureGates
 		*out = make(map[string]string, len(*in))
diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go
index e464bcf38b..2f3f1fc455 100644
--- a/pkg/apis/kops/zz_generated.deepcopy.go
+++ b/pkg/apis/kops/zz_generated.deepcopy.go
@@ -2518,6 +2518,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
 			**out = **in
 		}
 	}
+	if in.HorizontalPodAutoscalerUseRestClients != nil {
+		in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(bool)
+			**out = **in
+		}
+	}
 	if in.FeatureGates != nil {
 		in, out := &in.FeatureGates, &out.FeatureGates
 		*out = make(map[string]string, len(*in))
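
The `flag:"horizontal-pod-autoscaler-use-rest-clients"` struct tag in patch 26 is what maps the new field onto a kube-controller-manager argument: kops renders every non-nil tagged field into a `--flag=value` pair. Below is a minimal sketch of that tag-to-flag mechanism; `buildFlags` here is a hypothetical stand-in written for illustration, not kops' actual flag builder.

```go
package main

import (
	"fmt"
	"reflect"
)

// Hypothetical mirror of the kops pattern: the `flag` struct tag names the
// kube-controller-manager CLI flag a pointer field renders to when set.
type kubeControllerManagerConfig struct {
	HorizontalPodAutoscalerUseRestClients *bool `flag:"horizontal-pod-autoscaler-use-rest-clients"`
}

// buildFlags walks the struct and emits --name=value for every non-nil
// pointer field that carries a `flag` tag; unset fields emit nothing.
func buildFlags(config interface{}) []string {
	var flags []string
	v := reflect.ValueOf(config)
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("flag")
		field := v.Field(i)
		if tag == "" || (field.Kind() == reflect.Ptr && field.IsNil()) {
			continue // no tag, or left unset: produce no flag at all
		}
		flags = append(flags, fmt.Sprintf("--%s=%v", tag, field.Elem().Interface()))
	}
	return flags
}

func main() {
	useRestClients := true
	cfg := kubeControllerManagerConfig{
		HorizontalPodAutoscalerUseRestClients: &useRestClients,
	}
	fmt.Println(buildFlags(cfg))
	// Prints: [--horizontal-pod-autoscaler-use-rest-clients=true]
}
```

Because the field is a `*bool` with `omitempty` (note the nil guards in the generated deep-copy code above), leaving it unset emits no flag and kube-controller-manager keeps its own default; judging by the json tag, a user opting in would set `horizontalPodAutoscalerUseRestClients: true` under `spec.kubeControllerManager` in the cluster spec.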