From 454c47f92bee59ed4e3232dd22a1674f1f74908c Mon Sep 17 00:00:00 2001 From: justinsb Date: Sun, 15 Aug 2021 00:18:32 -0400 Subject: [PATCH 01/10] Support pruning in channels command We let the addon specify exactly what should be pruned; this approach is a little more verbose but we're likely generating this automatically in the common case anyway. In return for the verbosity, we can likely handle more future cases and edge cases (for example removing objects that aren't labelled or are in the wrong namespace). --- channels/pkg/api/channel.go | 13 +++ channels/pkg/channels/BUILD.bazel | 6 ++ channels/pkg/channels/addon.go | 16 +++- channels/pkg/channels/apply.go | 8 +- channels/pkg/channels/prune.go | 144 ++++++++++++++++++++++++++++++ channels/pkg/cmd/BUILD.bazel | 4 +- channels/pkg/cmd/apply_channel.go | 17 +++- channels/pkg/cmd/factory.go | 75 ++++++++++++---- pkg/kubemanifest/manifest.go | 38 ++++++++ 9 files changed, 291 insertions(+), 30 deletions(-) create mode 100644 channels/pkg/channels/prune.go diff --git a/channels/pkg/api/channel.go b/channels/pkg/api/channel.go index 9477ee81f2..97c0850815 100644 --- a/channels/pkg/api/channel.go +++ b/channels/pkg/api/channel.go @@ -69,6 +69,19 @@ type AddonSpec struct { NeedsPKI bool `json:"needsPKI,omitempty"` Version string `json:"version,omitempty"` + + Prune *PruneSpec `json:"prune,omitempty"` +} + +type PruneSpec struct { + Kinds []PruneKindSpec `json:"kinds,omitempty"` +} + +type PruneKindSpec struct { + Group string `json:"group,omitempty"` + Kind string `json:"kind,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` + LabelSelector string `json:"labelSelector,omitempty"` } func (a *Addons) Verify() error { diff --git a/channels/pkg/channels/BUILD.bazel b/channels/pkg/channels/BUILD.bazel index e03041dd70..7610d8bbf0 100644 --- a/channels/pkg/channels/BUILD.bazel +++ b/channels/pkg/channels/BUILD.bazel @@ -7,11 +7,13 @@ go_library( "addons.go", "apply.go", "channel_version.go", + "prune.go", ], importpath = "k8s.io/kops/channels/pkg/channels", visibility = ["//visibility:public"], deps = [ "//channels/pkg/api:go_default_library", + "//pkg/kubemanifest:go_default_library", "//pkg/pki:go_default_library", "//upup/pkg/fi/utils:go_default_library", "//util/pkg/vfs:go_default_library", @@ -21,9 +23,13 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/restmapper:go_default_library", "//vendor/k8s.io/klog/v2:go_default_library", ], ) diff --git a/channels/pkg/channels/addon.go b/channels/pkg/channels/addon.go index 732817178d..d51269507d 100644 --- a/channels/pkg/channels/addon.go +++ b/channels/pkg/channels/addon.go @@ -24,6 +24,7 @@ import ( "net/url" "k8s.io/kops/pkg/pki" + "k8s.io/kops/util/pkg/vfs" certmanager "github.com/jetstack/cert-manager/pkg/client/clientset/versioned" "k8s.io/apimachinery/pkg/api/errors" @@ -151,7 +152,7 @@ func (a *Addon) GetManifestFullUrl() (*url.URL, error) { return manifestURL, nil } -func (a *Addon) EnsureUpdated(ctx 
context.Context, k8sClient kubernetes.Interface, cmClient certmanager.Interface) (*AddonUpdate, error) { +func (a *Addon) EnsureUpdated(ctx context.Context, k8sClient kubernetes.Interface, cmClient certmanager.Interface, pruner *Pruner) (*AddonUpdate, error) { required, err := a.GetRequiredUpdates(ctx, k8sClient, cmClient) if err != nil { return nil, err @@ -167,9 +168,18 @@ func (a *Addon) EnsureUpdated(ctx context.Context, k8sClient kubernetes.Interfac } klog.Infof("Applying update from %q", manifestURL) - err = Apply(manifestURL.String()) + // We copy the manifest to a temp file because it is likely e.g. an s3 URL, which kubectl can't read + data, err := vfs.Context.ReadFile(manifestURL.String()) if err != nil { - return nil, fmt.Errorf("error applying update from %q: %v", manifestURL, err) + return nil, fmt.Errorf("error reading manifest: %w", err) + } + + if err := Apply(data); err != nil { + return nil, fmt.Errorf("error applying update from %q: %w", manifestURL, err) + } + + if err := pruner.Prune(ctx, data, a.Spec.Prune); err != nil { + return nil, fmt.Errorf("error pruning manifest from %q: %w", manifestURL, err) } if err := a.AddNeedsUpdateLabel(ctx, k8sClient, required); err != nil { diff --git a/channels/pkg/channels/apply.go b/channels/pkg/channels/apply.go index e408afbaa7..f34c30ea76 100644 --- a/channels/pkg/channels/apply.go +++ b/channels/pkg/channels/apply.go @@ -25,18 +25,12 @@ import ( "strings" "k8s.io/klog/v2" - "k8s.io/kops/util/pkg/vfs" ) // Apply calls kubectl apply to apply the manifest. // We will likely in future change this to create things directly (or more likely embed this logic into kubectl itself) -func Apply(manifest string) error { +func Apply(data []byte) error { // We copy the manifest to a temp file because it is likely e.g. an s3 URL, which kubectl can't read - data, err := vfs.Context.ReadFile(manifest) - if err != nil { - return fmt.Errorf("error reading manifest: %v", err) - } - tmpDir, err := ioutil.TempDir("", "channel") if err != nil { return fmt.Errorf("error creating temp dir: %v", err) diff --git a/channels/pkg/channels/prune.go b/channels/pkg/channels/prune.go new file mode 100644 index 0000000000..5e6bd404a3 --- /dev/null +++ b/channels/pkg/channels/prune.go @@ -0,0 +1,144 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package channels + +import ( + "context" + "fmt" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/restmapper" + "k8s.io/klog/v2" + "k8s.io/kops/channels/pkg/api" + "k8s.io/kops/pkg/kubemanifest" +) + +type Pruner struct { + Client dynamic.Interface + RESTMapper *restmapper.DeferredDiscoveryRESTMapper +} + +// Prune prunes objects not in the manifest, according to PruneSpec. 
+func (p *Pruner) Prune(ctx context.Context, manifest []byte, spec *api.PruneSpec) error { + if spec == nil { + return nil + } + + objects, err := kubemanifest.LoadObjectsFrom(manifest) + if err != nil { + return fmt.Errorf("failed to parse objects: %w", err) + } + + objectsByKind := make(map[schema.GroupKind][]*kubemanifest.Object) + for _, object := range objects { + gv, err := schema.ParseGroupVersion(object.APIVersion()) + if err != nil || gv.Version == "" { + return fmt.Errorf("failed to parse apiVersion %q", object.APIVersion()) + } + kind := object.Kind() + if kind == "" { + return fmt.Errorf("failed to find kind in object") + } + + gvk := gv.WithKind(kind) + gk := gvk.GroupKind() + objectsByKind[gk] = append(objectsByKind[gk], object) + } + + for i := range spec.Kinds { + pruneKind := &spec.Kinds[i] + gk := schema.GroupKind{Group: pruneKind.Group, Kind: pruneKind.Kind} + if err := p.pruneObjectsOfKind(ctx, gk, pruneKind, objectsByKind[gk]); err != nil { + return fmt.Errorf("failed to prune objects of kind %s: %w", gk, err) + } + } + + return nil +} + +func (p *Pruner) pruneObjectsOfKind(ctx context.Context, gk schema.GroupKind, spec *api.PruneKindSpec, keepObjects []*kubemanifest.Object) error { + restMapping, err := p.RESTMapper.RESTMapping(gk) + if err != nil { + return fmt.Errorf("unable to find resource for %s: %w", gk, err) + } + + gvr := restMapping.Resource + + var listOptions v1.ListOptions + listOptions.LabelSelector = spec.LabelSelector + + baseResource := p.Client.Resource(gvr) + if len(spec.Namespaces) == 0 { + objects, err := baseResource.List(ctx, listOptions) + if err != nil { + return fmt.Errorf("error listing objects: %w", err) + } + if err := p.pruneObjects(ctx, gvr, objects, keepObjects); err != nil { + return err + } + } else { + for _, namespace := range spec.Namespaces { + resource := baseResource.Namespace(namespace) + actualObjects, err := resource.List(ctx, listOptions) + if err != nil { + return fmt.Errorf("error listing objects in namespace %s: %w", namespace, err) + } + if err := p.pruneObjects(ctx, gvr, actualObjects, keepObjects); err != nil { + return err + } + } + } + + return nil +} + +func (p *Pruner) pruneObjects(ctx context.Context, gvr schema.GroupVersionResource, actualObjects *unstructured.UnstructuredList, keepObjects []*kubemanifest.Object) error { + keepMap := make(map[string]*kubemanifest.Object) + for _, keepObject := range keepObjects { + key := keepObject.GetNamespace() + "/" + keepObject.GetName() + keepMap[key] = keepObject + } + + for _, actualObject := range actualObjects.Items { + name := actualObject.GetName() + namespace := actualObject.GetNamespace() + key := namespace + "/" + name + if _, found := keepMap[key]; found { + // Object is in manifest, don't delete + continue + } + + klog.Infof("pruning %s %s", gvr, key) + + var resource dynamic.ResourceInterface + if namespace != "" { + resource = p.Client.Resource(gvr).Namespace(namespace) + } else { + resource = p.Client.Resource(gvr) + } + + var opts v1.DeleteOptions + if err := resource.Delete(ctx, name, opts); err != nil { + return fmt.Errorf("failed to delete %s: %w", key, err) + } + } + + return nil +} diff --git a/channels/pkg/cmd/BUILD.bazel b/channels/pkg/cmd/BUILD.bazel index bfd7954cfb..d2e09f30bc 100644 --- a/channels/pkg/cmd/BUILD.bazel +++ b/channels/pkg/cmd/BUILD.bazel @@ -22,10 +22,12 @@ go_library( "//vendor/github.com/spf13/viper:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/restmapper:go_default_library", "//vendor/k8s.io/klog/v2:go_default_library", ], ) diff --git a/channels/pkg/cmd/apply_channel.go b/channels/pkg/cmd/apply_channel.go index 3ac1945d93..3af655cd4d 100644 --- a/channels/pkg/cmd/apply_channel.go +++ b/channels/pkg/cmd/apply_channel.go @@ -66,6 +66,16 @@ func RunApplyChannel(ctx context.Context, f Factory, out io.Writer, options *App return err } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + + restMapper, err := f.RESTMapper() + if err != nil { + return err + } + kubernetesVersionInfo, err := k8sClient.Discovery().ServerVersion() if err != nil { return fmt.Errorf("error querying kubernetes version: %v", err) @@ -200,8 +210,13 @@ func RunApplyChannel(ctx context.Context, f Factory, out io.Writer, options *App return nil } + pruner := &channels.Pruner{ + Client: dynamicClient, + RESTMapper: restMapper, + } + for _, needUpdate := range needUpdates { - update, err := needUpdate.EnsureUpdated(ctx, k8sClient, cmClient) + update, err := needUpdate.EnsureUpdated(ctx, k8sClient, cmClient, pruner) if err != nil { fmt.Printf("error updating %q: %v", needUpdate.Name, err) } else if update != nil { diff --git a/channels/pkg/cmd/factory.go b/channels/pkg/cmd/factory.go index 1f4c306fcc..13b1237a49 100644 --- a/channels/pkg/cmd/factory.go +++ b/channels/pkg/cmd/factory.go @@ -19,9 +19,11 @@ package cmd import ( "fmt" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/restmapper" _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -31,37 +33,43 @@ import ( type Factory interface { KubernetesClient() (kubernetes.Interface, error) CertManagerClient() (certmanager.Interface, error) + RESTMapper() (*restmapper.DeferredDiscoveryRESTMapper, error) + DynamicClient() (dynamic.Interface, error) } type DefaultFactory struct { + ConfigFlags genericclioptions.ConfigFlags + kubernetesClient kubernetes.Interface certManagerClient certmanager.Interface + + cachedRESTConfig *rest.Config + dynamicClient dynamic.Interface + restMapper *restmapper.DeferredDiscoveryRESTMapper } var _ Factory = &DefaultFactory{} -func loadConfig() (*rest.Config, error) { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig - - configOverrides := &clientcmd.ConfigOverrides{ - ClusterDefaults: clientcmd.ClusterDefaults, +func (f *DefaultFactory) restConfig() (*rest.Config, error) { + if f.cachedRESTConfig == nil { + restConfig, err := f.ConfigFlags.ToRESTConfig() + if err != nil { + return nil, fmt.Errorf("cannot load kubecfg settings: %w", err) + } + f.cachedRESTConfig = restConfig } - - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - return kubeConfig.ClientConfig() - + return f.cachedRESTConfig, nil } func (f *DefaultFactory) KubernetesClient() (kubernetes.Interface, error) { if f.kubernetesClient == nil { - config, err := loadConfig() + 
restConfig, err := f.restConfig() if err != nil { - return nil, fmt.Errorf("cannot load kubecfg settings: %v", err) + return nil, err } - k8sClient, err := kubernetes.NewForConfig(config) + k8sClient, err := kubernetes.NewForConfig(restConfig) if err != nil { - return nil, fmt.Errorf("cannot build kube client: %v", err) + return nil, fmt.Errorf("cannot build kube client: %w", err) } f.kubernetesClient = k8sClient } @@ -69,13 +77,29 @@ func (f *DefaultFactory) KubernetesClient() (kubernetes.Interface, error) { return f.kubernetesClient, nil } +func (f *DefaultFactory) DynamicClient() (dynamic.Interface, error) { + if f.dynamicClient == nil { + restConfig, err := f.restConfig() + if err != nil { + return nil, fmt.Errorf("cannot load kubecfg settings: %w", err) + } + dynamicClient, err := dynamic.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("cannot build dynamicClient client: %v", err) + } + f.dynamicClient = dynamicClient + } + + return f.dynamicClient, nil +} + func (f *DefaultFactory) CertManagerClient() (certmanager.Interface, error) { if f.certManagerClient == nil { - config, err := loadConfig() + restConfig, err := f.restConfig() if err != nil { - return nil, fmt.Errorf("cannot load kubecfg settings: %v", err) + return nil, err } - certManagerClient, err := certmanager.NewForConfig(config) + certManagerClient, err := certmanager.NewForConfig(restConfig) if err != nil { return nil, fmt.Errorf("cannot build kube client: %v", err) } @@ -84,3 +108,18 @@ func (f *DefaultFactory) CertManagerClient() (certmanager.Interface, error) { return f.certManagerClient, nil } + +func (f *DefaultFactory) RESTMapper() (*restmapper.DeferredDiscoveryRESTMapper, error) { + if f.restMapper == nil { + discoveryClient, err := f.ConfigFlags.ToDiscoveryClient() + if err != nil { + return nil, err + } + + restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) + + f.restMapper = restMapper + } + + return f.restMapper, nil +} diff --git a/pkg/kubemanifest/manifest.go b/pkg/kubemanifest/manifest.go index bb67a6d5af..b66fd4a46d 100644 --- a/pkg/kubemanifest/manifest.go +++ b/pkg/kubemanifest/manifest.go @@ -140,6 +140,44 @@ func (m *Object) Kind() string { return s } +// GetNamespace returns the namespace field of the object, or "" if it cannot be found or is invalid +func (m *Object) GetNamespace() string { + metadata := m.metadata() + return getStringValue(metadata, "namespace") +} + +// GetName returns the namespace field of the object, or "" if it cannot be found or is invalid +func (m *Object) GetName() string { + metadata := m.metadata() + return getStringValue(metadata, "name") +} + +// getStringValue returns the specified field of the object, or "" if it cannot be found or is invalid +func getStringValue(m map[string]interface{}, key string) string { + v, found := m[key] + if !found { + return "" + } + s, ok := v.(string) + if !ok { + return "" + } + return s +} + +// metadata returns the metadata map of the object, or nil if it cannot be found or is invalid +func (m *Object) metadata() map[string]interface{} { + v, found := m.data["metadata"] + if !found { + return nil + } + metadata, ok := v.(map[string]interface{}) + if !ok { + return nil + } + return metadata +} + // APIVersion returns the apiVersion field of the object, or "" if it cannot be found or is invalid func (m *Object) APIVersion() string { v, found := m.data["apiVersion"] From 46d99cce433090c29ad691c049c5127527bcb0e0 Mon Sep 17 00:00:00 2001 From: justinsb Date: Sun, 15 Aug 2021 00:21:07 -0400 Subject: 
[PATCH 02/10] Automatically generate prune specifiers We use the "app.kubernetes.io/managed-by" and "addon.kops.k8s.io/name" labels, which are automatically added. --- ...opeio.example.com-addons-bootstrap_content | 17 ++ .../bootstrapchannelbuilder/BUILD.bazel | 4 + .../bootstrapchannelbuilder.go | 172 +++++++++++------- .../cloudup/bootstrapchannelbuilder/cilium.go | 8 +- .../bootstrapchannelbuilder/pruning.go | 111 +++++++++++ 5 files changed, 239 insertions(+), 73 deletions(-) create mode 100644 upup/pkg/fi/cloudup/bootstrapchannelbuilder/pruning.go diff --git a/tests/integration/update_cluster/privatekopeio/data/aws_s3_bucket_object_privatekopeio.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatekopeio/data/aws_s3_bucket_object_privatekopeio.example.com-addons-bootstrap_content index b1bb018adf..9dec74c17d 100644 --- a/tests/integration/update_cluster/privatekopeio/data/aws_s3_bucket_object_privatekopeio.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatekopeio/data/aws_s3_bucket_object_privatekopeio.example.com-addons-bootstrap_content @@ -56,6 +56,23 @@ spec: manifest: networking.kope.io/k8s-1.12.yaml manifestHash: 294272eb01da2938395ff6425ac74690788b6f7ebe80327a83a77b2951b63968 name: networking.kope.io + prune: + kinds: + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops selector: role.kubernetes.io/networking: "1" version: 9.99.0 diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/BUILD.bazel b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/BUILD.bazel index b5c01b8a56..cc45e57c72 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/BUILD.bazel +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "bootstrapchannelbuilder.go", "cilium.go", + "pruning.go", ], importpath = "k8s.io/kops/upup/pkg/fi/cloudup/bootstrapchannelbuilder", visibility = ["//visibility:public"], @@ -31,6 +32,9 @@ go_library( "//upup/pkg/fi/fitasks:go_default_library", "//upup/pkg/fi/utils:go_default_library", "//vendor/github.com/blang/semver/v4:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/klog/v2:go_default_library", ], ) diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go index 1e79637395..4bd03ec6f6 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go @@ -100,22 +100,18 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { return err } - if err := addons.Verify(); err != nil { - return err - } - - for _, a := range addons.Spec.Addons { + for _, a := range addons.Items { // Older versions of channels that may be running on 
the upgrading cluster requires Version to be set // We hardcode version to a high version to ensure an update is triggered on first run, and from then on // only a hash change will trigger an addon update. - a.Version = "9.99.0" + a.Spec.Version = "9.99.0" - key := *a.Name - if a.Id != "" { - key = key + "-" + a.Id + key := *a.Spec.Name + if a.Spec.Id != "" { + key = key + "-" + a.Spec.Id } name := b.Cluster.ObjectMeta.Name + "-addons-" + key - manifestPath := "addons/" + *a.Manifest + manifestPath := "addons/" + *a.Spec.Manifest klog.V(4).Infof("Addon %q", name) manifestResource := b.templates.Find(manifestPath) @@ -129,7 +125,7 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { } // Go through any transforms that are best expressed as code - remapped, err := addonmanifests.RemapAddonManifest(a, b.KopsModelContext, b.assetBuilder, manifestBytes) + remapped, err := addonmanifests.RemapAddonManifest(a.Spec, b.KopsModelContext, b.assetBuilder, manifestBytes) if err != nil { klog.Infof("invalid manifest: %s", string(manifestBytes)) return fmt.Errorf("error remapping manifest %s: %v", manifestPath, err) @@ -139,6 +135,8 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { // Trim whitespace manifestBytes = []byte(strings.TrimSpace(string(manifestBytes))) + a.ManifestData = manifestBytes + rawManifest := string(manifestBytes) klog.V(4).Infof("Manifest %v", rawManifest) @@ -147,7 +145,7 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { if err != nil { return fmt.Errorf("error hashing manifest: %v", err) } - a.ManifestHash = manifestHash + a.Spec.ManifestHash = manifestHash c.AddTask(&fitasks.ManagedFile{ Contents: fi.NewBytesResource(manifestBytes), @@ -202,7 +200,8 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { Name: fi.String(name), }) - addons.Spec.Addons = append(addons.Spec.Addons, &a.Spec) + addon := addons.Add(&a.Spec) + addon.ManifestData = manifestBytes } b.ClusterAddons = append(b.ClusterAddons, crds...) @@ -244,10 +243,25 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { Name: fi.String(name), }) - addons.Spec.Addons = append(addons.Spec.Addons, a) + addons.Add(a) } - addonsYAML, err := utils.YamlMarshal(addons) + if err := b.addPruneDirectives(addons); err != nil { + return err + } + + addonsObject := &channelsapi.Addons{} + addonsObject.Kind = "Addons" + addonsObject.ObjectMeta.Name = "bootstrap" + for _, addon := range addons.Items { + addonsObject.Spec.Addons = append(addonsObject.Spec.Addons, addon.Spec) + } + + if err := addonsObject.Verify(); err != nil { + return err + } + + addonsYAML, err := utils.YamlMarshal(addonsObject) if err != nil { return fmt.Errorf("error serializing addons yaml: %v", err) } @@ -264,12 +278,33 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { return nil } -func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*channelsapi.Addons, error) { - serviceAccountRoles := []iam.Subject{} +type AddonList struct { + Items []*Addon +} - addons := &channelsapi.Addons{} - addons.Kind = "Addons" - addons.ObjectMeta.Name = "bootstrap" +func (a *AddonList) Add(spec *channelsapi.AddonSpec) *Addon { + addon := &Addon{ + Spec: spec, + } + a.Items = append(a.Items, addon) + return addon +} + +type Addon struct { + // Spec is the spec that will (eventually) be passed to the channels binary. + Spec *channelsapi.AddonSpec + + // ManifestData is the object data loaded from the manifest. 
+ ManifestData []byte + + // BuildPrune is set if we should automatically build prune specifiers, based on the manifest. + BuildPrune bool +} + +func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*AddonList, error) { + addons := &AddonList{} + + serviceAccountRoles := []iam.Subject{} { key := "kops-controller.addons.k8s.io" @@ -278,7 +313,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.16.yaml" id := "k8s-1.16" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -293,7 +328,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann version := "1.4.0" location := key + "/v" + version + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -308,7 +343,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -338,7 +373,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -356,7 +391,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -387,7 +422,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.8.yaml" id := "k8s-1.8" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -407,7 +442,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.9.yaml" id := "k8s-1.9" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -421,7 +456,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann version := "1.5.0" location := key + "/v" + version + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -434,18 +469,15 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann if externalDNS == nil || !externalDNS.Disable { { key := "dns-controller.addons.k8s.io" + location := key + "/k8s-1.12.yaml" + id := "k8s-1.12" - { - location := key + 
"/k8s-1.12.yaml" - id := "k8s-1.12" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - Id: id, - }) - } + addons.Add(&channelsapi.AddonSpec{ + Name: fi.String(key), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, + }) } // Generate dns-controller ServiceAccount IAM permissions @@ -461,7 +493,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -485,7 +517,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -503,7 +535,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.15.yaml" id := "k8s-1.15" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -526,7 +558,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.11.yaml" id := "k8s-1.11" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-app": "metrics-server"}, Manifest: fi.String(location), @@ -545,7 +577,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.16.yaml" id := "k8s-1.16" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Manifest: fi.String(location), Id: id, @@ -564,7 +596,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.11.yaml" id := "k8s-1.11" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -587,7 +619,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.17.yaml" id := "k8s-1.17" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -606,7 +638,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.16.yaml" id := "k8s-1.16" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -623,7 +655,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.9.yaml" id := "k8s-1.9" - addons.Spec.Addons = append(addons.Spec.Addons, 
&channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -645,7 +677,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "v1.15.0" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -661,7 +693,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.8" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -677,7 +709,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "v1.7.0" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -693,7 +725,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "v1.14.0" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -711,7 +743,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "v0.1.12" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -727,12 +759,14 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addon := addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), Manifest: fi.String(location), Id: id, }) + + addon.BuildPrune = true } } @@ -743,7 +777,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), Manifest: fi.String(location), @@ -759,7 +793,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), Manifest: fi.String(location), @@ -775,7 +809,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.16" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), Manifest: fi.String(location), @@ -791,7 +825,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.16" 
location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), Manifest: fi.String(location), @@ -807,7 +841,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), Manifest: fi.String(location), @@ -823,7 +857,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.16" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), Manifest: fi.String(location), @@ -848,7 +882,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: authenticationSelector, Manifest: fi.String(location), @@ -863,7 +897,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: authenticationSelector, Manifest: fi.String(location), @@ -880,7 +914,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.16" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Manifest: fi.String(location), Selector: map[string]string{"k8s-addon": key}, @@ -896,7 +930,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.13.yaml" id := "k8s-1.13-ccm" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Manifest: fi.String(location), Selector: map[string]string{"k8s-addon": key}, @@ -910,7 +944,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/k8s-1.12.yaml" id := "k8s-1.12-ccm" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), @@ -928,7 +962,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann { id := "k8s-1.18" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Manifest: fi.String(location), Selector: map[string]string{"k8s-addon": key}, @@ -945,7 +979,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann { id := "k8s-1.17" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Manifest: fi.String(location), Selector: map[string]string{"k8s-addon": key}, @@ -964,7 +998,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c 
*fi.ModelBuilderContext) (*chann { id := "k8s-1.20" location := key + "/" + id + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Manifest: fi.String(location), Selector: map[string]string{"k8s-addon": key}, @@ -979,7 +1013,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann version := "1.7.0" location := key + "/v" + version + ".yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go index 7d1aefb15e..35cc3f26ae 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go @@ -24,7 +24,7 @@ import ( "k8s.io/kops/upup/pkg/fi" ) -func addCiliumAddon(b *BootstrapChannelBuilder, addons *api.Addons) error { +func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error { cilium := b.Cluster.Spec.Networking.Cilium if cilium != nil { @@ -39,7 +39,7 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *api.Addons) error { id := "k8s-1.12" location := key + "/" + id + "-v1.8.yaml" - addons.Spec.Addons = append(addons.Spec.Addons, &api.AddonSpec{ + addons.Add(&api.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), Manifest: fi.String(location), @@ -62,7 +62,7 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *api.Addons) error { if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) { addon.NeedsPKI = true } - addons.Spec.Addons = append(addons.Spec.Addons, addon) + addons.Add(addon) } } else if ver.Minor == 10 { { @@ -79,7 +79,7 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *api.Addons) error { if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) { addon.NeedsPKI = true } - addons.Spec.Addons = append(addons.Spec.Addons, addon) + addons.Add(addon) } } else { return fmt.Errorf("unknown cilium version: %q", cilium.Version) diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/pruning.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/pruning.go new file mode 100644 index 0000000000..bc1ad72c46 --- /dev/null +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/pruning.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bootstrapchannelbuilder + +import ( + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + channelsapi "k8s.io/kops/channels/pkg/api" + "k8s.io/kops/pkg/kubemanifest" +) + +func (b *BootstrapChannelBuilder) addPruneDirectives(addons *AddonList) error { + for _, addon := range addons.Items { + if !addon.BuildPrune { + continue + } + + id := *addon.Spec.Name + + if err := b.addPruneDirectivesForAddon(addon); err != nil { + return fmt.Errorf("failed to configure pruning for %s: %w", id, err) + } + } + return nil +} + +func (b *BootstrapChannelBuilder) addPruneDirectivesForAddon(addon *Addon) error { + addon.Spec.Prune = &channelsapi.PruneSpec{} + + // We add these labels to all objects we manage, so we reuse them for pruning. + selectorMap := map[string]string{ + "app.kubernetes.io/managed-by": "kops", + "addon.kops.k8s.io/name": *addon.Spec.Name, + } + selector, err := labels.ValidatedSelectorFromSet(selectorMap) + if err != nil { + return fmt.Errorf("error parsing selector %v: %w", selectorMap, err) + } + + objects, err := kubemanifest.LoadObjectsFrom(addon.ManifestData) + if err != nil { + return fmt.Errorf("failed to parse manifest: %w", err) + } + + byGroupKind := make(map[schema.GroupKind][]*kubemanifest.Object) + for _, object := range objects { + gv, err := schema.ParseGroupVersion(object.APIVersion()) + if err != nil || gv.Version == "" { + return fmt.Errorf("failed to parse apiVersion %q", object.APIVersion()) + } + gvk := gv.WithKind(object.Kind()) + if gvk.Kind == "" { + return fmt.Errorf("failed to get kind for object") + } + + gk := gvk.GroupKind() + byGroupKind[gk] = append(byGroupKind[gk], object) + } + + var groupKinds []schema.GroupKind + for gk := range byGroupKind { + groupKinds = append(groupKinds, gk) + } + sort.Slice(groupKinds, func(i, j int) bool { + if groupKinds[i].Group != groupKinds[j].Group { + return groupKinds[i].Group < groupKinds[j].Group + } + return groupKinds[i].Kind < groupKinds[j].Kind + }) + + for _, gk := range groupKinds { + pruneSpec := channelsapi.PruneKindSpec{} + pruneSpec.Group = gk.Group + pruneSpec.Kind = gk.Kind + + namespaces := sets.NewString() + for _, object := range byGroupKind[gk] { + namespace := object.GetNamespace() + if namespace != "" { + namespaces.Insert(namespace) + } + } + if namespaces.Len() != 0 { + pruneSpec.Namespaces = namespaces.List() + } + + pruneSpec.LabelSelector = selector.String() + + addon.Spec.Prune.Kinds = append(addon.Spec.Prune.Kinds, pruneSpec) + } + + return nil +} From 41d2779805801c73393e5e3caee1cef8f5b6c5be Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 7 Oct 2021 07:45:41 -0400 Subject: [PATCH 03/10] Use a fixed set of pruning kinds We don't want to prune some kinds that are considered dangerous (CustomResourceDefinitions, which delete all the instances of the CRD) and Namespaces (which delete all the objects in that Namespace). In addition, having a hard-coded list means we don't have to remember to manually specify the kind if we delete an object from the manifest (as long as it's in the known set). We can in future support specifying additional kinds, but we can cross that when we need it. 
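For illustration only: a trimmed sketch of the stanza this fixed list yields for a hypothetical addon named example.addons.k8s.io whose manifest contains a ServiceAccount and a Deployment in kube-system. The full fixed set of kinds is always emitted (the integration test output below shows the complete list); only two entries are sketched here, and the addon name is made up.

    prune:
      kinds:
      - kind: ServiceAccount
        labelSelector: addon.kops.k8s.io/name=example.addons.k8s.io,app.kubernetes.io/managed-by=kops
        namespaces:
        - kube-system
      - group: apps
        kind: Deployment
        labelSelector: addon.kops.k8s.io/name=example.addons.k8s.io,app.kubernetes.io/managed-by=kops
        namespaces:
        - kube-system
      # Namespace and CustomResourceDefinition never appear here, even if the
      # manifest defines them, because deleting them cascades to other objects.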
--- ...opeio.example.com-addons-bootstrap_content | 19 +++++++ .../bootstrapchannelbuilder/pruning.go | 50 +++++++++++++++++-- 2 files changed, 64 insertions(+), 5 deletions(-) diff --git a/tests/integration/update_cluster/privatekopeio/data/aws_s3_bucket_object_privatekopeio.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatekopeio/data/aws_s3_bucket_object_privatekopeio.example.com-addons-bootstrap_content index 9dec74c17d..8467d9a6e5 100644 --- a/tests/integration/update_cluster/privatekopeio/data/aws_s3_bucket_object_privatekopeio.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatekopeio/data/aws_s3_bucket_object_privatekopeio.example.com-addons-bootstrap_content @@ -58,6 +58,10 @@ spec: name: networking.kope.io prune: kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops - kind: ServiceAccount labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops namespaces: @@ -67,12 +71,27 @@ spec: labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops namespaces: - kube-system + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops - group: rbac.authorization.k8s.io kind: ClusterRole labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops - group: rbac.authorization.k8s.io kind: ClusterRoleBinding labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=networking.kope.io,app.kubernetes.io/managed-by=kops selector: role.kubernetes.io/networking: "1" version: 9.99.0 diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/pruning.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/pruning.go index bc1ad72c46..ce44cd77dc 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/pruning.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/pruning.go @@ -23,6 +23,8 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + channelsapi "k8s.io/kops/channels/pkg/api" "k8s.io/kops/pkg/kubemanifest" ) @@ -55,12 +57,42 @@ func (b *BootstrapChannelBuilder) addPruneDirectivesForAddon(addon *Addon) error return fmt.Errorf("error parsing selector %v: %w", selectorMap, err) } + // We always include a set of well-known group kinds, + // so that we prune even if we end up removing something from the manifest. 
+ alwaysPruneGroupKinds := []schema.GroupKind{ + {Group: "", Kind: "ConfigMap"}, + {Group: "", Kind: "Service"}, + {Group: "", Kind: "ServiceAccount"}, + {Group: "apps", Kind: "Deployment"}, + {Group: "apps", Kind: "DaemonSet"}, + {Group: "apps", Kind: "StatefulSet"}, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRoleBinding"}, + {Group: "rbac.authorization.k8s.io", Kind: "Role"}, + {Group: "rbac.authorization.k8s.io", Kind: "RoleBinding"}, + {Group: "policy", Kind: "PodDisruptionBudget"}, + } + pruneGroupKind := make(map[schema.GroupKind]bool) + for _, gk := range alwaysPruneGroupKinds { + pruneGroupKind[gk] = true + } + + // In addition, we deliberately exclude a few types that are riskier to delete: + // + // * Namespace: because it deletes anything else that happens to be in the namespace + // + // * CustomResourceDefinition: because it deletes all instances of the CRD + neverPruneGroupKinds := map[schema.GroupKind]bool{ + {Group: "", Kind: "Namespace"}: true, + {Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: true, + } + + // Parse the manifest; we use this to scope pruning to namespaces objects, err := kubemanifest.LoadObjectsFrom(addon.ManifestData) if err != nil { return fmt.Errorf("failed to parse manifest: %w", err) } - - byGroupKind := make(map[schema.GroupKind][]*kubemanifest.Object) + objectsByGK := make(map[schema.GroupKind][]*kubemanifest.Object) for _, object := range objects { gv, err := schema.ParseGroupVersion(object.APIVersion()) if err != nil || gv.Version == "" { @@ -72,13 +104,21 @@ func (b *BootstrapChannelBuilder) addPruneDirectivesForAddon(addon *Addon) error } gk := gvk.GroupKind() - byGroupKind[gk] = append(byGroupKind[gk], object) + objectsByGK[gk] = append(objectsByGK[gk], object) + + // Warn if there are objects in the manifest that we haven't considered + if !pruneGroupKind[gk] { + if !neverPruneGroupKinds[gk] { + klog.Warningf("manifest includes an object of GroupKind %v, which will not be pruned", gk) + } + } } var groupKinds []schema.GroupKind - for gk := range byGroupKind { + for gk := range pruneGroupKind { groupKinds = append(groupKinds, gk) } + sort.Slice(groupKinds, func(i, j int) bool { if groupKinds[i].Group != groupKinds[j].Group { return groupKinds[i].Group < groupKinds[j].Group @@ -92,7 +132,7 @@ func (b *BootstrapChannelBuilder) addPruneDirectivesForAddon(addon *Addon) error pruneSpec.Kind = gk.Kind namespaces := sets.NewString() - for _, object := range byGroupKind[gk] { + for _, object := range objectsByGK[gk] { namespace := object.GetNamespace() if namespace != "" { namespaces.Insert(namespace) From 0594eee373a8c9a7abb3c938b12fd608badcca87 Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 7 Oct 2021 08:00:37 -0400 Subject: [PATCH 04/10] pruning: Allow specification of FieldSelector FieldSelector allows for filtering by name (as well as a few other attributes). Though we aren't using this today, filtering by name is something we've discussed as being valuable in future, and by putting it in the API now we avoid version skew problems should we need to introduce it in future. 
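To make the new field concrete: a hand-written sketch (kops does not generate fieldSelector entries today) of how pruning could be scoped to a single named object. The Deployment name legacy-controller is hypothetical; metadata.name is the field selector the API server supports for every resource.

    prune:
      kinds:
      - group: apps
        kind: Deployment
        namespaces:
        - kube-system
        labelSelector: app.kubernetes.io/managed-by=kops
        fieldSelector: metadata.name=legacy-controller

Because the Pruner only deletes listed objects that are absent from the applied manifest, a selector like this narrows what may be deleted rather than forcing a deletion.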
--- channels/pkg/api/channel.go | 22 ++++++++++++++++++---- channels/pkg/channels/prune.go | 1 + 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/channels/pkg/api/channel.go b/channels/pkg/api/channel.go index 97c0850815..61657f5ea8 100644 --- a/channels/pkg/api/channel.go +++ b/channels/pkg/api/channel.go @@ -70,18 +70,32 @@ type AddonSpec struct { Version string `json:"version,omitempty"` + // PruneSpec specifies how old objects should be removed (pruned). Prune *PruneSpec `json:"prune,omitempty"` } +// PruneSpec specifies how old objects should be removed (pruned). type PruneSpec struct { + // Kinds specifies the objects to be pruned, by Kind. Kinds []PruneKindSpec `json:"kinds,omitempty"` } +// PruneKindSpec specifies pruning for a particular Kind of object. type PruneKindSpec struct { - Group string `json:"group,omitempty"` - Kind string `json:"kind,omitempty"` - Namespaces []string `json:"namespaces,omitempty"` - LabelSelector string `json:"labelSelector,omitempty"` + // Group specifies the object Group to be pruned (required). + Group string `json:"group,omitempty"` + // Kind specifies the object Kind to be pruned (required). + Kind string `json:"kind,omitempty"` + + // Namespaces limits pruning only to objects in certain namespaces. + Namespaces []string `json:"namespaces,omitempty"` + + // LabelSelector limits pruning only to objects matching the specified labels. + LabelSelector string `json:"labelSelector,omitempty"` + + // FieldSelector allows pruning only of objects matching the field selector. + // (This isn't currently used, but adding it now lets us start without worrying about version skew) + FieldSelector string `json:"fieldSelector,omitempty"` } func (a *Addons) Verify() error { diff --git a/channels/pkg/channels/prune.go b/channels/pkg/channels/prune.go index 5e6bd404a3..33c35bb4d6 100644 --- a/channels/pkg/channels/prune.go +++ b/channels/pkg/channels/prune.go @@ -83,6 +83,7 @@ func (p *Pruner) pruneObjectsOfKind(ctx context.Context, gk schema.GroupKind, sp var listOptions v1.ListOptions listOptions.LabelSelector = spec.LabelSelector + listOptions.FieldSelector = spec.FieldSelector baseResource := p.Client.Resource(gvr) if len(spec.Namespaces) == 0 { From 2be45ca3cbfd36e00baf86f21851362ddba17205 Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 13 Oct 2021 09:09:13 -0400 Subject: [PATCH 05/10] Add pruning for node-termination-handler.aws --- ...nimal.example.com-addons-bootstrap_content | 36 +++++++++++++++++++ ...nimal.example.com-addons-bootstrap_content | 36 +++++++++++++++++++ ...nimal.example.com-addons-bootstrap_content | 36 +++++++++++++++++++ ...rname.example.com-addons-bootstrap_content | 36 +++++++++++++++++++ .../bootstrapchannelbuilder.go | 3 +- 5 files changed, 146 insertions(+), 1 deletion(-) diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content index 5090843494..b6ef12fb08 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content @@ -62,6 +62,42 @@ spec: manifest: node-termination-handler.aws/k8s-1.11.yaml manifestHash: 4197a26e91677e28e68617b12e8b2e9f2825d397578dcd7d8b6500e47a05c4c2 name: node-termination-handler.aws + 
prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops selector: k8s-addon: node-termination-handler.aws version: 9.99.0 diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content index be7988190c..6e8b946885 100644 --- a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content @@ -62,6 +62,42 @@ spec: manifest: node-termination-handler.aws/k8s-1.11.yaml manifestHash: dd42ae2f7510700d37bf0214e0afdd87c12968c5c67ec88791f20f06fef90caf name: node-termination-handler.aws + prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: 
addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops selector: k8s-addon: node-termination-handler.aws version: 9.99.0 diff --git a/tests/integration/update_cluster/many-addons/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/many-addons/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content index 8b08b05c9c..0411bc72c5 100644 --- a/tests/integration/update_cluster/many-addons/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/many-addons/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content @@ -62,6 +62,42 @@ spec: manifest: node-termination-handler.aws/k8s-1.11.yaml manifestHash: dd42ae2f7510700d37bf0214e0afdd87c12968c5c67ec88791f20f06fef90caf name: node-termination-handler.aws + prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops selector: k8s-addon: node-termination-handler.aws version: 9.99.0 diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_bucket_object_nthsqsresources.longclustername.example.com-addons-bootstrap_content b/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_bucket_object_nthsqsresources.longclustername.example.com-addons-bootstrap_content index af08bf1d62..790719c1a3 100644 --- 
a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_bucket_object_nthsqsresources.longclustername.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_bucket_object_nthsqsresources.longclustername.example.com-addons-bootstrap_content @@ -49,6 +49,42 @@ spec: manifest: node-termination-handler.aws/k8s-1.11.yaml manifestHash: 424354959edcf24bcc3e1a3099b5b0a4525d59e2336a36940995ae51ead4ab08 name: node-termination-handler.aws + prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops selector: k8s-addon: node-termination-handler.aws version: 9.99.0 diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go index 4bd03ec6f6..52223a52d3 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go @@ -596,12 +596,13 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/k8s-1.11.yaml" id := "k8s-1.11" - addons.Add(&channelsapi.AddonSpec{ + addon := addons.Add(&channelsapi.AddonSpec{ Name: fi.String(key), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), Id: id, }) + addon.BuildPrune = true } if b.UseServiceAccountExternalPermissions() { From df2c55917be5f099c3f30dfbb04deefb2af7d295 Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 13 Oct 2021 10:33:57 -0400 Subject: [PATCH 06/10] More logging from pruning --- channels/pkg/channels/prune.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/channels/pkg/channels/prune.go b/channels/pkg/channels/prune.go index 33c35bb4d6..4d076ae5e4 100644 --- a/channels/pkg/channels/prune.go +++ b/channels/pkg/channels/prune.go @@ -37,6 +37,8 @@ type Pruner struct { // Prune prunes objects not in the manifest, according to PruneSpec.
func (p *Pruner) Prune(ctx context.Context, manifest []byte, spec *api.PruneSpec) error { + klog.Infof("Prune spec: %v", spec) + if spec == nil { return nil } @@ -74,6 +76,8 @@ func (p *Pruner) Prune(ctx context.Context, manifest []byte, spec *api.PruneSpec } func (p *Pruner) pruneObjectsOfKind(ctx context.Context, gk schema.GroupKind, spec *api.PruneKindSpec, keepObjects []*kubemanifest.Object) error { + klog.Infof("pruning objects of kind: %v", gk) + restMapping, err := p.RESTMapper.RESTMapping(gk) if err != nil { return fmt.Errorf("unable to find resource for %s: %w", gk, err) From d90885e7b89586ef05bdff9eded700296c5300da Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 13 Oct 2021 11:31:52 -0400 Subject: [PATCH 07/10] Update addon-resource-tracking test to roll control plane Otherwise we don't get the new version of channels that supports pruning. --- tests/e2e/scenarios/addon-resource-tracking/run-test.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/e2e/scenarios/addon-resource-tracking/run-test.sh b/tests/e2e/scenarios/addon-resource-tracking/run-test.sh index 1c763e0972..84eec4f954 100755 --- a/tests/e2e/scenarios/addon-resource-tracking/run-test.sh +++ b/tests/e2e/scenarios/addon-resource-tracking/run-test.sh @@ -19,7 +19,7 @@ source "${REPO_ROOT}"/tests/e2e/scenarios/lib/common.sh function haveds() { local ds=0 - kubectl get ds -n kube-system aws-node-termination-handler || ds=$? + kubectl get ds -n kube-system aws-node-termination-handler --show-labels || ds=$? return $ds } @@ -53,8 +53,8 @@ kops edit cluster "${CLUSTER_NAME}" "--set=cluster.spec.nodeTerminationHandler.e kops update cluster --allow-kops-downgrade kops update cluster --yes --allow-kops-downgrade -# wait for channels to deploy -sleep 90s +# Rolling-upgrade is needed so we get the new channels binary that supports prune +kops rolling-update cluster --instance-group-roles=master --yes # just make sure pods are ready kops validate cluster --wait=5m From 7eda9a41c570c7e827190bf7751b14d3e59e8e09 Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 14 Oct 2021 10:26:28 -0400 Subject: [PATCH 08/10] kubetest2-kops: get KOPS_BASE_URL in non-periodic jobs Otherwise we're not using the version we just built --- tests/e2e/kubetest2-kops/builder/build.go | 21 ++++++++++++++++++++- tests/e2e/scenarios/lib/common.sh | 3 +++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/tests/e2e/kubetest2-kops/builder/build.go b/tests/e2e/kubetest2-kops/builder/build.go index 38898b3ad9..d825aba169 100644 --- a/tests/e2e/kubetest2-kops/builder/build.go +++ b/tests/e2e/kubetest2-kops/builder/build.go @@ -18,7 +18,10 @@ package builder import ( "fmt" + "io/ioutil" "os" + "path/filepath" + "strings" "sigs.k8s.io/kubetest2/pkg/exec" ) @@ -39,5 +42,21 @@ func (b *BuildOptions) Build() error { ) cmd.SetDir(b.KopsRoot) exec.InheritOutput(cmd) - return cmd.Run() + if err := cmd.Run(); err != nil { + return err + } + + // Write some meta files so that other tooling can know e.g. 
KOPS_BASE_URL + metaDir := filepath.Join(b.KopsRoot, ".kubetest2") + + if err := os.MkdirAll(metaDir, 0755); err != nil { + return fmt.Errorf("failed to Mkdir(%q): %w", metaDir, err) + } + p := filepath.Join(metaDir, "kops-base-url") + kopsBaseURL := strings.Replace(b.StageLocation, "gs://", "https://storage.googleapis.com/", 1) + if err := ioutil.WriteFile(p, []byte(kopsBaseURL), 0644); err != nil { + return fmt.Errorf("failed to WriteFile(%q): %w", p, err) + } + + return nil } diff --git a/tests/e2e/scenarios/lib/common.sh b/tests/e2e/scenarios/lib/common.sh index 250eaac491..2a7901c0e6 100644 --- a/tests/e2e/scenarios/lib/common.sh +++ b/tests/e2e/scenarios/lib/common.sh @@ -108,6 +108,9 @@ function kops-acquire-latest() { fi $KUBETEST2 --build KOPS="${REPO_ROOT}/.bazelbuild/dist/linux/amd64/kops" + KOPS_BASE_URL=$(cat "${REPO_ROOT}/.kubetest2/kops-base-url") + export KOPS_BASE_URL + echo "KOPS_BASE_URL=$KOPS_BASE_URL" fi } From c34fd83365b9feb3fa56f7790c9a9e531029550c Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 14 Oct 2021 15:39:51 -0400 Subject: [PATCH 09/10] Add SystemGeneration to channel version tracker This allows us to reapply a manifest when we introduce new functionality, such as pruning. Otherwise an old version can apply the manifest, mark the manifest as applied, and we won't reapply. --- channels/pkg/api/channel.go | 2 +- channels/pkg/channels/addon.go | 11 +++--- channels/pkg/channels/addons.go | 2 +- channels/pkg/channels/addons_test.go | 32 +++++++++++++---- channels/pkg/channels/channel_version.go | 46 +++++++++++++++++------- nodeup/pkg/model/update_service.go | 2 +- 6 files changed, 68 insertions(+), 27 deletions(-) diff --git a/channels/pkg/api/channel.go b/channels/pkg/api/channel.go index 61657f5ea8..39ddd1da90 100644 --- a/channels/pkg/api/channel.go +++ b/channels/pkg/api/channel.go @@ -46,7 +46,7 @@ type AddonSpec struct { // Manifest is the URL to the manifest that should be applied Manifest *string `json:"manifest,omitempty"` - // Manifesthash is the sha256 hash of our manifest + // ManifestHash is the sha256 hash of our manifest ManifestHash string `json:"manifestHash,omitempty"` // KubernetesVersion is a semver version range on which this version of the addon can be applied diff --git a/channels/pkg/channels/addon.go b/channels/pkg/channels/addon.go index d51269507d..33abc2fcab 100644 --- a/channels/pkg/channels/addon.go +++ b/channels/pkg/channels/addon.go @@ -73,7 +73,7 @@ func (m *AddonMenu) MergeAddons(o *AddonMenu) { if existing == nil { m.Addons[k] = v } else { - if v.ChannelVersion().replaces(existing.ChannelVersion()) { + if v.ChannelVersion().replaces(k, existing.ChannelVersion()) { m.Addons[k] = v } } @@ -82,9 +82,10 @@ func (m *AddonMenu) MergeAddons(o *AddonMenu) { func (a *Addon) ChannelVersion() *ChannelVersion { return &ChannelVersion{ - Channel: &a.ChannelName, - Id: a.Spec.Id, - ManifestHash: a.Spec.ManifestHash, + Channel: &a.ChannelName, + Id: a.Spec.Id, + ManifestHash: a.Spec.ManifestHash, + SystemGeneration: CurrentSystemGeneration, } } @@ -120,7 +121,7 @@ func (a *Addon) GetRequiredUpdates(ctx context.Context, k8sClient kubernetes.Int } } - if existingVersion != nil && !newVersion.replaces(existingVersion) { + if existingVersion != nil && !newVersion.replaces(a.Name, existingVersion) { newVersion = nil } diff --git a/channels/pkg/channels/addons.go b/channels/pkg/channels/addons.go index e48ceaa41b..f6f023f678 100644 --- a/channels/pkg/channels/addons.go +++ b/channels/pkg/channels/addons.go @@ -74,7 +74,7 @@ func (a *Addons) 
GetCurrent(kubernetesVersion semver.Version) (*AddonMenu, error name := addon.Name existing := menu.Addons[name] - if existing == nil || addon.ChannelVersion().replaces(existing.ChannelVersion()) { + if existing == nil || addon.ChannelVersion().replaces(name, existing.ChannelVersion()) { menu.Addons[name] = addon } } diff --git a/channels/pkg/channels/addons_test.go b/channels/pkg/channels/addons_test.go index 0cd03ba49b..e4c0cc639c 100644 --- a/channels/pkg/channels/addons_test.go +++ b/channels/pkg/channels/addons_test.go @@ -85,6 +85,9 @@ func Test_Filtering(t *testing.T) { } func Test_Replacement(t *testing.T) { + hash1 := "3544de6578b2b582c0323b15b7b05a28c60b9430" + hash2 := "ea9e79bf29adda450446487d65a8fc6b3fdf8c2b" + grid := []struct { Old *ChannelVersion New *ChannelVersion @@ -92,23 +95,38 @@ func Test_Replacement(t *testing.T) { }{ //Test ManifestHash Changes { - Old: &ChannelVersion{Id: "a", ManifestHash: "3544de6578b2b582c0323b15b7b05a28c60b9430"}, - New: &ChannelVersion{Id: "a", ManifestHash: "3544de6578b2b582c0323b15b7b05a28c60b9430"}, + Old: &ChannelVersion{Id: "a", ManifestHash: hash1}, + New: &ChannelVersion{Id: "a", ManifestHash: hash1}, Replaces: false, }, { Old: &ChannelVersion{Id: "a", ManifestHash: ""}, - New: &ChannelVersion{Id: "a", ManifestHash: "3544de6578b2b582c0323b15b7b05a28c60b9430"}, + New: &ChannelVersion{Id: "a", ManifestHash: hash1}, Replaces: true, }, { - Old: &ChannelVersion{Id: "a", ManifestHash: "3544de6578b2b582c0323b15b7b05a28c60b9430"}, - New: &ChannelVersion{Id: "a", ManifestHash: "ea9e79bf29adda450446487d65a8fc6b3fdf8c2b"}, + Old: &ChannelVersion{Id: "a", ManifestHash: hash1}, + New: &ChannelVersion{Id: "a", ManifestHash: hash2}, Replaces: true, }, + { + Old: &ChannelVersion{Id: "a", ManifestHash: hash1}, + New: &ChannelVersion{Id: "a", ManifestHash: hash1, SystemGeneration: 1}, + Replaces: true, + }, + { + Old: &ChannelVersion{Id: "a", ManifestHash: hash1, SystemGeneration: 1}, + New: &ChannelVersion{Id: "a", ManifestHash: hash1}, + Replaces: false, + }, + { + Old: &ChannelVersion{Id: "a", ManifestHash: hash1, SystemGeneration: 1}, + New: &ChannelVersion{Id: "a", ManifestHash: hash1, SystemGeneration: 1}, + Replaces: false, + }, } for _, g := range grid { - actual := g.New.replaces(g.Old) + actual := g.New.replaces(t.Name(), g.Old) if actual != g.Replaces { t.Errorf("unexpected result from %v -> %v, expect %t. actual %v", g.Old, g.New, g.Replaces, actual) } @@ -218,7 +236,7 @@ func Test_NeedsRollingUpdate(t *testing.T) { ctx := context.Background() annotations := map[string]string{ - "addons.k8s.io/test": "{\"manifestHash\":\"originalHash\"}", + "addons.k8s.io/test": "{\"manifestHash\":\"originalHash\",\"systemGeneration\": 1}", } if len(g.originalAnnotations) > 0 { annotations = g.originalAnnotations diff --git a/channels/pkg/channels/channel_version.go b/channels/pkg/channels/channel_version.go index 3e4fb33444..54aca2fe48 100644 --- a/channels/pkg/channels/channel_version.go +++ b/channels/pkg/channels/channel_version.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "strconv" "strings" certmanager "github.com/jetstack/cert-manager/pkg/client/clientset/versioned" @@ -38,10 +39,20 @@ type Channel struct { Name string } +// CurrentSystemGeneration holds our current SystemGeneration value. +// Version history: +// 0 Pre-history (and the default value); versions prior to prune. +// 1 Prune functionality introduced. 
+const CurrentSystemGeneration = 1 + type ChannelVersion struct { Channel *string `json:"channel,omitempty"` Id string `json:"id,omitempty"` ManifestHash string `json:"manifestHash,omitempty"` + + // SystemGeneration holds the generation of the channels functionality. + // It is used so that we reapply when we introduce new features, such as prune. + SystemGeneration int `json:"systemGeneration,omitempty"` } func stringValue(s *string) string { @@ -59,6 +70,7 @@ func (c *ChannelVersion) String() string { if c.ManifestHash != "" { s += " ManifestHash=" + c.ManifestHash } + s += " SystemGeneration=" + strconv.Itoa(c.SystemGeneration) return s } @@ -102,21 +114,31 @@ func (c *Channel) AnnotationName() string { return AnnotationPrefix + c.Name } -func (c *ChannelVersion) replaces(existing *ChannelVersion) bool { - klog.V(4).Infof("Checking existing channel: %v compared to new channel: %v", existing, c) +func (c *ChannelVersion) replaces(name string, existing *ChannelVersion) bool { + klog.V(6).Infof("Checking existing config for %q: %v compared to new channel: %v", name, existing, c) - if c.Id == existing.Id { - // Same id; check manifests - if c.ManifestHash == existing.ManifestHash { - klog.V(4).Infof("Manifest Match") - return false - } - klog.V(4).Infof("Channels had same ids %q, %q but different ManifestHash (%q vs %q); will replace", c.Id, c.ManifestHash, existing.ManifestHash) - } else { - klog.V(4).Infof("Channels had different ids (%q vs %q); will replace", c.Id, existing.Id) + if c.Id != existing.Id { + klog.V(4).Infof("cluster has different ids for %q (%q vs %q); will replace", name, c.Id, existing.Id) + return true } - return true + if c.ManifestHash != existing.ManifestHash { + klog.V(4).Infof("cluster has different ManifestHash for %q (%q vs %q); will replace", name, c.ManifestHash, existing.ManifestHash) + return true + } + + if existing.SystemGeneration != c.SystemGeneration { + if existing.SystemGeneration > c.SystemGeneration { + klog.V(4).Infof("cluster has newer SystemGeneration for %q (%v vs %v), will not replace", name, existing.SystemGeneration, c.SystemGeneration) + return false + } else { + klog.V(4).Infof("cluster has different SystemGeneration for %q (%v vs %v); will replace", name, existing.SystemGeneration, c.SystemGeneration) + return true + } + } + + klog.V(4).Infof("manifest Match for %q: %v", name, existing) + return false } func (c *Channel) GetInstalledVersion(ctx context.Context, k8sClient kubernetes.Interface) (*ChannelVersion, error) { diff --git a/nodeup/pkg/model/update_service.go b/nodeup/pkg/model/update_service.go index 1a970fe7bb..80ff4769f5 100644 --- a/nodeup/pkg/model/update_service.go +++ b/nodeup/pkg/model/update_service.go @@ -92,7 +92,7 @@ func (b *UpdateServiceBuilder) buildDebianPackage(c *fi.ModelBuilderContext) { ` } else { - klog.Infof("Detected OS %s; installing %s package", b.Distribution, debianPackageName) + klog.Infof("Detected OS %v; installing %s package", b.Distribution, debianPackageName) c.AddTask(&nodetasks.Package{Name: debianPackageName}) contents = `APT::Periodic::Update-Package-Lists "1"; From 50cd7152d9c8f5c4658d8964465e02ba245185ff Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 14 Oct 2021 21:23:01 -0400 Subject: [PATCH 10/10] resource-tracking e2e: fixup bash conditions --- .../e2e/scenarios/addon-resource-tracking/run-test.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/e2e/scenarios/addon-resource-tracking/run-test.sh b/tests/e2e/scenarios/addon-resource-tracking/run-test.sh 
index 84eec4f954..7db909f283 100755 --- a/tests/e2e/scenarios/addon-resource-tracking/run-test.sh +++ b/tests/e2e/scenarios/addon-resource-tracking/run-test.sh @@ -39,7 +39,10 @@ ${KUBETEST2} \ --create-args="$ARGS" -haveds +if ! haveds; then + echo "Expected aws-node-termination-handler to exist" + exit 1 +fi # Upgrade to a version that should adopt existing resources and apply the change below kops-acquire-latest @@ -60,4 +63,7 @@ kops rolling-update cluster --instance-group-roles=master --yes kops validate cluster --wait=5m # We should no longer have a daemonset called aws-node-termination-handler -haveds && exit 1 \ No newline at end of file +if haveds; then + echo "Expected aws-node-termination-handler to have been pruned" + exit 1 +fi \ No newline at end of file
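
For readers following the series, the sketch below shows roughly how one PruneKindSpec-style entry (group, kind, namespaces, labelSelector) can be turned into list-and-delete calls through a RESTMapper and the dynamic client. It is an illustration only, not the channels Pruner itself: the helper name pruneKind and its signature are invented for this note, the real implementation also honours fieldSelector, and it keeps any object that is still present in the applied manifest.

package prunesketch

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// pruneKind deletes objects of a single GroupKind that match labelSelector,
// in the given namespaces (all namespaces if none are listed).
func pruneKind(ctx context.Context, client dynamic.Interface, restMapper meta.RESTMapper,
	group, kind, labelSelector string, namespaces []string) error {
	gk := schema.GroupKind{Group: group, Kind: kind}
	mapping, err := restMapper.RESTMapping(gk)
	if err != nil {
		return fmt.Errorf("unable to find resource for %s: %w", gk, err)
	}

	if len(namespaces) == 0 {
		// An empty namespace list means "all namespaces" (and covers cluster-scoped kinds).
		namespaces = []string{metav1.NamespaceAll}
	}

	for _, ns := range namespaces {
		resource := client.Resource(mapping.Resource).Namespace(ns)
		list, err := resource.List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
		if err != nil {
			return fmt.Errorf("listing %s: %w", gk, err)
		}
		for _, obj := range list.Items {
			// The real pruner skips objects that are still defined in the addon manifest.
			if err := resource.Delete(ctx, obj.GetName(), metav1.DeleteOptions{}); err != nil {
				return fmt.Errorf("deleting %s %s/%s: %w", gk, ns, obj.GetName(), err)
			}
		}
	}
	return nil
}

Pairing the labelSelector (addon.kops.k8s.io/name plus app.kubernetes.io/managed-by=kops) with an explicit kind list is what lets the generated prune spec remove objects that the addon no longer ships, without touching unrelated resources in the cluster.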