Add prune command to linkerd and to extensions (#10303)

Fixes: #10262 

When a resource is removed from the Linkerd manifests from one version to the next, we would like that resource to be removed from the user's cluster as part of the upgrade process.  Our current recommendation is to use the `linkerd upgrade` command in conjunction with `kubectl apply` and its `--prune` flag to remove resources which are no longer part of the manifest.  However, `--prune` has significant shortcomings: it does not detect resource kinds which are not part of the input manifest, nor does it detect cluster-scoped resources.  See https://linkerd.io/2.12/tasks/upgrade/#with-the-linkerd-cli
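
For reference, the workaround documented in the upgrade guide linked above looks something like this (cluster-scoped kinds must each be whitelisted explicitly; the exact flag values here are taken from that guide and may drift between versions):

```
> linkerd upgrade | kubectl apply --prune -l linkerd.io/control-plane-ns=linkerd \
    --prune-whitelist=rbac.authorization.k8s.io/v1/clusterrole \
    --prune-whitelist=rbac.authorization.k8s.io/v1/clusterrolebinding \
    --prune-whitelist=apiregistration.k8s.io/v1/apiservice -f -
```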

We add a `linkerd prune` command which locates all Linkerd resources on the cluster that are not part of the Linkerd manifest and prints their metadata so that users can delete them.  The recommended upgrade procedure then becomes:

```
> linkerd upgrade | kubectl apply -f -
> linkerd prune | kubectl delete -f -
```

Users must take special care to run the prune command with the CLI at the desired version, since the command prints every Linkerd resource on the cluster that is not included in that version's manifest.
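
For example, a quick sanity check before deleting anything (both are standard CLI invocations; `--short` prints just the version string):

```
> linkerd version --client --short
> linkerd prune | kubectl delete -f -
```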

We also add similar prune commands to each of the `viz`, `multicluster`, and `jaeger` extensions for deleting extension resources which are not in the extension manifest.
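
Each extension command follows the same pipe-to-`kubectl delete` pattern as the core command, e.g.:

```
> linkerd viz prune | kubectl delete -f -
> linkerd jaeger prune | kubectl delete -f -
> linkerd multicluster prune | kubectl delete -f -
```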

Signed-off-by: Alex Leong <alex@buoyant.io>
Author: Alex Leong 2023-02-17 10:44:30 -08:00, committed by GitHub
Commit: e9eac4c672 (parent f211080992)
10 changed files with 391 additions and 0 deletions

cli/cmd/prune.go (new file)
@@ -0,0 +1,64 @@
```go
package cmd

import (
    "errors"
    "fmt"
    "os"
    "strings"
    "time"

    pkgCmd "github.com/linkerd/linkerd2/pkg/cmd"
    "github.com/linkerd/linkerd2/pkg/k8s"
    "github.com/spf13/cobra"
    valuespkg "helm.sh/helm/v3/pkg/cli/values"
)

func newCmdPrune() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "prune [flags]",
        Args:  cobra.NoArgs,
        Short: "Output extraneous Kubernetes resources in the linkerd control plane",
        Long:  `Output extraneous Kubernetes resources in the linkerd control plane.`,
        Example: ` # Prune extraneous resources.
 linkerd prune | kubectl delete -f -
 `,
        RunE: func(cmd *cobra.Command, _ []string) error {
            k8sAPI, err := k8s.NewAPI(kubeconfigPath, kubeContext, impersonate, impersonateGroup, 30*time.Second)
            if err != nil {
                return err
            }

            values, err := loadStoredValues(cmd.Context(), k8sAPI)
            if err != nil {
                fmt.Fprintf(os.Stderr, "failed to load stored values: %s\n", err)
                os.Exit(1)
            }

            if values == nil {
                return errors.New(
                    `Could not find the linkerd-config-overrides secret.
Please note this command is only intended for instances of Linkerd that were installed via the CLI`)
            }

            err = validateValues(cmd.Context(), k8sAPI, values)
            if err != nil {
                return err
            }

            // Render the full install manifest (control plane and CRDs) for
            // the stored configuration; anything on the cluster that is not
            // in this manifest is extraneous.
            manifests := strings.Builder{}
            if err = renderControlPlane(&manifests, values, make(map[string]interface{})); err != nil {
                return err
            }
            if err = renderCRDs(&manifests, valuespkg.Options{}); err != nil {
                return err
            }

            return pkgCmd.Prune(cmd.Context(), k8sAPI, manifests.String(), k8s.ControllerNSLabel)
        },
    }

    return cmd
}
```

@@ -119,6 +119,7 @@ func init() {
```go
    RootCmd.AddCommand(newCmdUpgrade())
    RootCmd.AddCommand(newCmdVersion())
    RootCmd.AddCommand(newCmdUninstall())
    RootCmd.AddCommand(newCmdPrune())

    // Extension Sub Commands
    RootCmd.AddCommand(jaeger.NewCmdJaeger())
```

jaeger/cmd/prune.go (new file)
@@ -0,0 +1,59 @@
```go
package cmd

import (
    "fmt"
    "strings"
    "time"

    pkgCmd "github.com/linkerd/linkerd2/pkg/cmd"
    "github.com/linkerd/linkerd2/pkg/flags"
    "github.com/linkerd/linkerd2/pkg/healthcheck"
    "github.com/linkerd/linkerd2/pkg/k8s"
    "github.com/spf13/cobra"
    "helm.sh/helm/v3/pkg/cli/values"
)

func newCmdPrune() *cobra.Command {
    var cniEnabled bool
    var wait time.Duration
    var options values.Options

    cmd := &cobra.Command{
        Use:   "prune [flags]",
        Args:  cobra.NoArgs,
        Short: "Output extraneous Kubernetes resources in the linkerd-jaeger extension",
        Long:  `Output extraneous Kubernetes resources in the linkerd-jaeger extension.`,
        Example: ` # Prune extraneous resources.
 linkerd jaeger prune | kubectl delete -f -
 `,
        RunE: func(cmd *cobra.Command, _ []string) error {
            // Run the core health checks to establish a Kubernetes client and
            // to detect whether the CNI plugin is enabled, since that affects
            // the rendered manifest.
            hc := healthcheck.NewWithCoreChecks(&healthcheck.Options{
                ControlPlaneNamespace: controlPlaneNamespace,
                KubeConfig:            kubeconfigPath,
                KubeContext:           kubeContext,
                Impersonate:           impersonate,
                ImpersonateGroup:      impersonateGroup,
                APIAddr:               apiAddr,
                RetryDeadline:         time.Now().Add(wait),
            })
            hc.RunWithExitOnError()
            cniEnabled = hc.CNIEnabled

            manifests := strings.Builder{}
            err := install(&manifests, options, "", cniEnabled)
            if err != nil {
                return err
            }

            label := fmt.Sprintf("%s=%s", k8s.LinkerdExtensionLabel, JaegerExtensionName)
            return pkgCmd.Prune(cmd.Context(), hc.KubeAPIClient(), manifests.String(), label)
        },
    }

    cmd.Flags().DurationVar(&wait, "wait", 300*time.Second, "Wait for extension components to be available")
    flags.AddValueOptionsFlags(cmd.Flags(), &options)

    return cmd
}
```

@@ -70,6 +70,7 @@ func NewCmdJaeger() *cobra.Command {
```go
    jaegerCmd.AddCommand(newCmdInstall())
    jaegerCmd.AddCommand(newCmdList())
    jaegerCmd.AddCommand(newCmdUninstall())
    jaegerCmd.AddCommand(newCmdPrune())

    // resource-aware completion flag configurations
    pkgcmd.ConfigureNamespaceFlagCompletion(
```

multicluster/cmd/prune.go (new file)
@@ -0,0 +1,67 @@
```go
package cmd

import (
    "fmt"
    "os"
    "strings"
    "time"

    pkgCmd "github.com/linkerd/linkerd2/pkg/cmd"
    "github.com/linkerd/linkerd2/pkg/flags"
    "github.com/linkerd/linkerd2/pkg/healthcheck"
    "github.com/linkerd/linkerd2/pkg/k8s"
    "github.com/spf13/cobra"
    valuespkg "helm.sh/helm/v3/pkg/cli/values"
)

func newCmdPrune() *cobra.Command {
    var ha bool
    var cniEnabled bool
    var wait time.Duration

    options, err := newMulticlusterInstallOptionsWithDefault()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }

    var valuesOptions valuespkg.Options

    cmd := &cobra.Command{
        Use:   "prune [flags]",
        Args:  cobra.NoArgs,
        Short: "Output extraneous Kubernetes resources in the linkerd-multicluster extension",
        Long:  `Output extraneous Kubernetes resources in the linkerd-multicluster extension.`,
        Example: ` # Prune extraneous resources.
 linkerd multicluster prune | kubectl delete -f -
 `,
        RunE: func(cmd *cobra.Command, _ []string) error {
            hc := healthcheck.NewWithCoreChecks(&healthcheck.Options{
                ControlPlaneNamespace: controlPlaneNamespace,
                KubeConfig:            kubeconfigPath,
                KubeContext:           kubeContext,
                Impersonate:           impersonate,
                ImpersonateGroup:      impersonateGroup,
                APIAddr:               apiAddr,
                RetryDeadline:         time.Now().Add(wait),
            })
            hc.RunWithExitOnError()
            cniEnabled = hc.CNIEnabled

            manifests := strings.Builder{}
            err := install(cmd.Context(), &manifests, options, valuesOptions, ha, false, cniEnabled)
            if err != nil {
                return err
            }

            label := fmt.Sprintf("%s=%s", k8s.LinkerdExtensionLabel, MulticlusterExtensionName)
            return pkgCmd.Prune(cmd.Context(), hc.KubeAPIClient(), manifests.String(), label)
        },
    }

    cmd.Flags().BoolVar(&ha, "ha", false, `Set if Linkerd Multicluster Extension is installed in High Availability mode.`)
    cmd.Flags().DurationVar(&wait, "wait", 300*time.Second, "Wait for extension components to be available")
    flags.AddValueOptionsFlags(cmd.Flags(), &valuesOptions)

    return cmd
}
```

@@ -87,6 +87,7 @@ components on a cluster, manage credentials and link clusters together.`,
```go
    multiclusterCmd.AddCommand(newMulticlusterUninstallCommand())
    multiclusterCmd.AddCommand(newGatewaysCommand())
    multiclusterCmd.AddCommand(newAllowCommand())
    multiclusterCmd.AddCommand(newCmdPrune())

    // resource-aware completion flag configurations
    pkgcmd.ConfigureNamespaceFlagCompletion(
```

@@ -1,9 +1,11 @@
```go
package cmd

import (
    "bufio"
    "context"
    "errors"
    "fmt"
    "io"
    "os"
    "strings"
```

@@ -15,7 +17,9 @@ import (

```go
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
    yamlDecoder "k8s.io/apimachinery/pkg/util/yaml"
    "k8s.io/client-go/tools/clientcmd"
    "sigs.k8s.io/yaml"
)

// GetDefaultNamespace fetches the default namespace
```

@@ -61,6 +65,66 @@ func Uninstall(ctx context.Context, k8sAPI *k8s.KubernetesAPI, selector string)

```go
    return nil
}

// Prune takes an install manifest and prints all resources on the cluster which
// match the given label selector but are not in the given manifest. Users are
// expected to pipe these resources to `kubectl delete` to clean up resources
// left on the cluster which are no longer part of the install manifest.
func Prune(ctx context.Context, k8sAPI *k8s.KubernetesAPI, expectedManifests string, selector string) error {
    expectedResources := []resource.Kubernetes{}

    // Parse the expected manifests into a list of resources, one YAML
    // document at a time.
    reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(strings.NewReader(expectedManifests), 4096))
    for {
        manifest, err := reader.Read()
        if err != nil {
            if errors.Is(err, io.EOF) {
                break
            }
            return err
        }

        resource := resource.Kubernetes{}
        err = yaml.Unmarshal(manifest, &resource)
        if err != nil {
            fmt.Fprintf(os.Stderr, "error parsing manifest: %s", manifest)
            os.Exit(1)
        }
        expectedResources = append(expectedResources, resource)
    }

    listOptions := metav1.ListOptions{
        LabelSelector: selector,
    }

    resources, err := resource.FetchPrunableResources(ctx, k8sAPI, metav1.NamespaceAll, listOptions)
    if err != nil {
        fmt.Fprintf(os.Stderr, "error fetching resources: %s\n", err)
        os.Exit(1)
    }

    for _, resource := range resources {
        // If the resource is not in the expected resource list, render it for
        // pruning.
        if !resourceListContains(expectedResources, resource) {
            if err = resource.RenderResource(os.Stdout); err != nil {
                return fmt.Errorf("error rendering Kubernetes resource: %w", err)
            }
        }
    }
    return nil
}

func resourceListContains(list []resource.Kubernetes, a resource.Kubernetes) bool {
    for _, r := range list {
        if resourceEquals(a, r) {
            return true
        }
    }
    return false
}

func resourceEquals(a resource.Kubernetes, b resource.Kubernetes) bool {
    return a.GroupVersionKind().GroupKind() == b.GroupVersionKind().GroupKind() &&
        a.GetName() == b.GetName() &&
        a.GetNamespace() == b.GetNamespace()
}

// ConfigureNamespaceFlagCompletion sets up resource-aware completion for command
// flags that accept a namespace name
func ConfigureNamespaceFlagCompletion(
```

@@ -5,13 +5,22 @@ import (
"fmt"
"io"
link "github.com/linkerd/linkerd2/controller/gen/apis/link/v1alpha1"
policy "github.com/linkerd/linkerd2/controller/gen/apis/policy/v1alpha1"
profile "github.com/linkerd/linkerd2/controller/gen/apis/serviceprofile/v1alpha2"
"github.com/linkerd/linkerd2/pkg/k8s"
log "github.com/sirupsen/logrus"
admissionRegistration "k8s.io/api/admissionregistration/v1"
apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
core "k8s.io/api/core/v1"
k8sPolicy "k8s.io/api/policy/v1"
rbac "k8s.io/api/rbac/v1"
apiextension "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
apiRegistration "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
"sigs.k8s.io/yaml"
@@ -27,6 +36,39 @@ type Kubernetes struct {

```go
    metav1.ObjectMeta `json:"metadata"`
}

// prunableNamespaceResources is the list of namespaced resource types that
// Linkerd installs and that prune may therefore need to delete.
var prunableNamespaceResources []schema.GroupVersionResource = []schema.GroupVersionResource{
    core.SchemeGroupVersion.WithResource("configmaps"),
    batch.SchemeGroupVersion.WithResource("cronjobs"),
    apps.SchemeGroupVersion.WithResource("daemonsets"),
    apps.SchemeGroupVersion.WithResource("deployments"),
    batch.SchemeGroupVersion.WithResource("jobs"),
    policy.SchemeGroupVersion.WithResource("meshtlsauthentications"),
    policy.SchemeGroupVersion.WithResource("networkauthentications"),
    core.SchemeGroupVersion.WithResource("replicationcontrollers"),
    core.SchemeGroupVersion.WithResource("secrets"),
    core.SchemeGroupVersion.WithResource("services"),
    profile.SchemeGroupVersion.WithResource("serviceprofiles"),
    apps.SchemeGroupVersion.WithResource("statefulsets"),
    rbac.SchemeGroupVersion.WithResource("roles"),
    rbac.SchemeGroupVersion.WithResource("rolebindings"),
    core.SchemeGroupVersion.WithResource("serviceaccounts"),
    k8sPolicy.SchemeGroupVersion.WithResource("poddisruptionbudgets"),
    k8s.ServerGVR,
    k8s.SazGVR,
    k8s.AuthorizationPolicyGVR,
    link.SchemeGroupVersion.WithResource("links"),
    k8s.HTTPRouteGVR,
}

// prunableClusterResources is the equivalent list of cluster-scoped types.
var prunableClusterResources []schema.GroupVersionResource = []schema.GroupVersionResource{
    rbac.SchemeGroupVersion.WithResource("clusterroles"),
    rbac.SchemeGroupVersion.WithResource("clusterrolebindings"),
    apiRegistration.SchemeGroupVersion.WithResource("apiservices"),
    admissionRegistration.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"),
    admissionRegistration.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"),
    apiextension.SchemeGroupVersion.WithResource("customresourcedefinitions"),
}

// New returns a kubernetes resource with the given data
func New(apiVersion, kind, name string) Kubernetes {
    return Kubernetes{
```
@@ -265,3 +307,32 @@ func fetchAPIRegistrationResources(ctx context.Context, k *k8s.KubernetesAPI, op

```go
    return resources, nil
}

// FetchPrunableResources lists all resources of the prunable types which match
// the given list options, across both namespaced and cluster-scoped kinds.
func FetchPrunableResources(ctx context.Context, k *k8s.KubernetesAPI, namespace string, options metav1.ListOptions) ([]Kubernetes, error) {
    resources := []Kubernetes{}

    for _, gvr := range prunableNamespaceResources {
        items, err := k.DynamicClient.Resource(gvr).Namespace(namespace).List(ctx, options)
        if err != nil {
            // Tolerate types that aren't installed on this cluster; other
            // list failures are logged at debug level and skipped rather
            // than failing the whole prune.
            if !kerrors.IsNotFound(err) {
                log.Debugf("failed to list resources of type %s", gvr)
            }
            continue
        }
        for _, item := range items.Items {
            resources = append(resources, NewNamespaced(item.GetAPIVersion(), item.GetKind(), item.GetName(), item.GetNamespace()))
        }
    }

    for _, gvr := range prunableClusterResources {
        items, err := k.DynamicClient.Resource(gvr).List(ctx, options)
        if err != nil {
            log.Debugf("failed to list resources of type %s", gvr)
            continue
        }
        for _, item := range items.Items {
            resources = append(resources, New(item.GetAPIVersion(), item.GetKind(), item.GetName()))
        }
    }
    return resources, nil
}
```

viz/cmd/prune.go (new file)
@@ -0,0 +1,62 @@
```go
package cmd

import (
    "fmt"
    "strings"
    "time"

    pkgCmd "github.com/linkerd/linkerd2/pkg/cmd"
    "github.com/linkerd/linkerd2/pkg/flags"
    "github.com/linkerd/linkerd2/pkg/healthcheck"
    "github.com/linkerd/linkerd2/pkg/k8s"
    vizHealthcheck "github.com/linkerd/linkerd2/viz/pkg/healthcheck"
    "github.com/spf13/cobra"
    "helm.sh/helm/v3/pkg/cli/values"
)

func newCmdPrune() *cobra.Command {
    var ha bool
    var cniEnabled bool
    var wait time.Duration
    var options values.Options

    cmd := &cobra.Command{
        Use:   "prune [flags]",
        Args:  cobra.NoArgs,
        Short: "Output extraneous Kubernetes resources in the linkerd-viz extension",
        Long:  `Output extraneous Kubernetes resources in the linkerd-viz extension.`,
        Example: ` # Prune extraneous resources.
 linkerd viz prune | kubectl delete -f -
 `,
        RunE: func(cmd *cobra.Command, _ []string) error {
            hc := healthcheck.NewWithCoreChecks(&healthcheck.Options{
                ControlPlaneNamespace: controlPlaneNamespace,
                KubeConfig:            kubeconfigPath,
                KubeContext:           kubeContext,
                Impersonate:           impersonate,
                ImpersonateGroup:      impersonateGroup,
                APIAddr:               apiAddr,
                RetryDeadline:         time.Now().Add(wait),
            })
            hc.RunWithExitOnError()
            cniEnabled = hc.CNIEnabled

            manifests := strings.Builder{}
            err := install(&manifests, options, ha, cniEnabled)
            if err != nil {
                return err
            }

            label := fmt.Sprintf("%s=%s", k8s.LinkerdExtensionLabel, vizHealthcheck.VizExtensionName)
            return pkgCmd.Prune(cmd.Context(), hc.KubeAPIClient(), manifests.String(), label)
        },
    }

    cmd.Flags().BoolVar(&ha, "ha", false, `Set if Linkerd Viz Extension is installed in High Availability mode.`)
    cmd.Flags().DurationVar(&wait, "wait", 300*time.Second, "Wait for extension components to be available")
    flags.AddValueOptionsFlags(cmd.Flags(), &options)

    return cmd
}
```
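
Since the extension manifest is re-rendered locally for comparison, flags that change its contents must match how the extension was originally installed; for an HA install of viz that would presumably be:

```
> linkerd viz prune --ha | kubectl delete -f -
```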

@@ -90,6 +90,7 @@ func NewCmdViz() *cobra.Command {
```go
    vizCmd.AddCommand(NewCmdTop())
    vizCmd.AddCommand(newCmdUninstall())
    vizCmd.AddCommand(newCmdAllowScrapes())
    vizCmd.AddCommand(newCmdPrune())

    // resource-aware completion flag configurations
    pkgcmd.ConfigureNamespaceFlagCompletion(
```