fix kops get cluster for files

We were skipping tasks in `kops get assets`, but this
meant we would not discover assets for skipped tasks.

We are doing a dry-run here, so introduce a dry-run mode that
does not look for existing resources.  That is essentially as fast
as skipping the task, but means we visit all tasks.
This commit is contained in:
justinsb 2025-05-02 15:36:39 -04:00 committed by Ciprian Hacman
parent 47cba5aedc
commit ae9f90e616
6 changed files with 72 additions and 14 deletions

View File

@ -1,3 +1,52 @@
files:
- canonical: https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet
download: https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet
sha: 5ad4965598773d56a37a8e8429c3dc3d86b4c5c26d8417ab333ae345c053dae2
- canonical: https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl
download: https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl
sha: 646d58f6d98ee670a71d9cdffbf6625aeea2849d567f214bc43a35f8ccb7bf70
- canonical: https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/amd64/ecr-credential-provider-linux-amd64
download: https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/amd64/ecr-credential-provider-linux-amd64
sha: 5035d7814c95cd3cedbc5efb447ef25a4942ef05caab2159746d55ce1698c74a
- canonical: https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz
download: https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz
sha: 2503ce29ac445715ebe146073f45468153f9e28f45fa173cb060cfd9e735f563
- canonical: https://github.com/containerd/containerd/releases/download/v1.7.25/containerd-1.7.25-linux-amd64.tar.gz
download: https://github.com/containerd/containerd/releases/download/v1.7.25/containerd-1.7.25-linux-amd64.tar.gz
sha: 02990fa281c0a2c4b073c6d2415d264b682bd693aa7d86c5d8eb4b86d684a18c
- canonical: https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.amd64
download: https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.amd64
sha: e83565aa78ec8f52a4d2b4eb6c4ca262b74c5f6770c1f43670c3029c20175502
- canonical: https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-amd64.tar.gz
download: https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-amd64.tar.gz
sha: 71aee9d987b7fad0ff2ade50b038ad7e2356324edc02c54045960a3521b3e6a7
- canonical: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz
download: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz
sha: d16a1ffb3938f5a19d5c8f45d363bd091ef89c0bc4d44ad16b933eede32fdcbb
- canonical: https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet
download: https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet
sha: bda9b2324c96693b38c41ecea051bab4c7c434be5683050b5e19025b50dbc0bf
- canonical: https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl
download: https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl
sha: ba4004f98f3d3a7b7d2954ff0a424caa2c2b06b78c17b1dccf2acc76a311a896
- canonical: https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/arm64/ecr-credential-provider-linux-arm64
download: https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/arm64/ecr-credential-provider-linux-arm64
sha: b3d567bda9e2996fc1fbd9d13506bd16763d3865b5c7b0b3c4b48c6088c04481
- canonical: https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-arm64-v1.6.1.tgz
download: https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-arm64-v1.6.1.tgz
sha: f0f440b968ab50ad13d9d42d993ba98ec30b2ec666846f4ef1bddc7646a701cc
- canonical: https://github.com/containerd/containerd/releases/download/v1.7.25/containerd-1.7.25-linux-arm64.tar.gz
download: https://github.com/containerd/containerd/releases/download/v1.7.25/containerd-1.7.25-linux-arm64.tar.gz
sha: e9201d478e4c931496344b779eb6cb40ce5084ec08c8fff159a02cabb0c6b9bf
- canonical: https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.arm64
download: https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.arm64
sha: 285f6c4c3de1d78d9f536a0299ae931219527b2ebd9ad89df5a1072896b7e82a
- canonical: https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-arm64.tar.gz
download: https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-arm64.tar.gz
sha: d8df47708ca57b9cd7f498055126ba7dcfc811d9ba43aae1830c93a09e70e22d
- canonical: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-arm64.tar.gz
download: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-arm64.tar.gz
sha: 0b615cfa00c331fb9c4524f3d4058a61cc487b33a3436d1269e7832cf283f925
images:
- canonical: registry.k8s.io/kube-apiserver:v1.32.0
download: registry.k8s.io/kube-apiserver:v1.32.0

View File

@ -235,11 +235,6 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) (*ApplyResults, error) {
default:
return nil, fmt.Errorf("unknown phase %q", c.Phase)
}
if c.GetAssets {
networkLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
}
assetBuilder := assets.NewAssetBuilder(c.Clientset.VFSContext(), c.Cluster.Spec.Assets, c.GetAssets)
if len(c.ControlPlaneRunningVersion) > 0 && c.ControlPlaneRunningVersion != c.Cluster.Spec.KubernetesVersion {
@ -776,10 +771,14 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) (*ApplyResults, error) {
case TargetDryRun:
var out io.Writer = os.Stdout
checkExisting := true
if c.GetAssets {
out = io.Discard
// For `kops get assets`, there is no need to run Find,
// we are just trying to discover the assets.
checkExisting = false
}
target = fi.NewCloudupDryRunTarget(assetBuilder, out)
target = fi.NewCloudupDryRunTarget(assetBuilder, checkExisting, out)
// Avoid making changes on a dry-run
shouldPrecreateDNS = false

View File

@ -118,7 +118,8 @@ func checkNoChanges(t *testing.T, ctx context.Context, cloud fi.Cloud, allTasks
},
}
assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)
target := fi.NewCloudupDryRunTarget(assetBuilder, os.Stderr)
checkExisting := true
target := fi.NewCloudupDryRunTarget(assetBuilder, checkExisting, os.Stderr)
context, err := fi.NewCloudupContext(ctx, fi.DeletionProcessingModeDeleteIncludingDeferred, target, nil, cloud, nil, nil, nil, allTasks)
if err != nil {
t.Fatalf("error building context: %v", err)

View File

@ -119,7 +119,8 @@ func doDryRun(t *testing.T, ctx context.Context, cloud fi.Cloud, allTasks map[st
},
}
assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)
target := fi.NewCloudupDryRunTarget(assetBuilder, os.Stderr)
checkExisting := true
target := fi.NewCloudupDryRunTarget(assetBuilder, checkExisting, os.Stderr)
context, err := fi.NewCloudupContext(ctx, fi.DeletionProcessingModeDeleteIncludingDeferred, target, nil, cloud, nil, nil, nil, allTasks)
if err != nil {
t.Fatalf("error building context: %v", err)

View File

@ -45,6 +45,10 @@ type DryRunTarget[T SubContext] struct {
// assetBuilder records all assets used
assetBuilder *assets.AssetBuilder
// defaultCheckExisting will control whether we look for existing objects in our dry-run.
// This is normally true except for special-case dry-runs, like `kops get assets`
defaultCheckExisting bool
}
type NodeupDryRunTarget = DryRunTarget[NodeupSubContext]
@ -77,23 +81,26 @@ func (a DeletionByTaskName[T]) Less(i, j int) bool {
var _ Target[CloudupSubContext] = &DryRunTarget[CloudupSubContext]{}
func newDryRunTarget[T SubContext](assetBuilder *assets.AssetBuilder, out io.Writer) *DryRunTarget[T] {
// newDryRunTarget constructs a DryRunTarget for the given SubContext type.
// defaultCheckExisting controls whether the dry-run looks up existing resources
// (via Find) before rendering; see DefaultCheckExisting.
func newDryRunTarget[T SubContext](assetBuilder *assets.AssetBuilder, defaultCheckExisting bool, out io.Writer) *DryRunTarget[T] {
t := &DryRunTarget[T]{}
t.out = out
t.assetBuilder = assetBuilder
t.defaultCheckExisting = defaultCheckExisting
return t
}
func NewCloudupDryRunTarget(assetBuilder *assets.AssetBuilder, out io.Writer) *CloudupDryRunTarget {
return newDryRunTarget[CloudupSubContext](assetBuilder, out)
// NewCloudupDryRunTarget builds a dry-run target for cloudup.
// checkExisting should normally be true, but can be false for special-case dry-runs,
// such as `kops get assets`, where we only want to discover assets and do not need
// to look up existing resources.
func NewCloudupDryRunTarget(assetBuilder *assets.AssetBuilder, checkExisting bool, out io.Writer) *CloudupDryRunTarget {
return newDryRunTarget[CloudupSubContext](assetBuilder, checkExisting, out)
}
func NewNodeupDryRunTarget(assetBuilder *assets.AssetBuilder, out io.Writer) *NodeupDryRunTarget {
return newDryRunTarget[NodeupSubContext](assetBuilder, out)
return newDryRunTarget[NodeupSubContext](assetBuilder, true, out)
}
func (t *DryRunTarget[T]) DefaultCheckExisting() bool {
return true
return t.defaultCheckExisting
}
func (t *DryRunTarget[T]) Render(a, e, changes Task[T]) error {

View File

@ -69,7 +69,8 @@ func (*testTask) Run(_ *CloudupContext) error {
func Test_DryrunTarget_PrintReport(t *testing.T) {
builder := assets.NewAssetBuilder(vfs.Context, nil, false)
var stdout bytes.Buffer
target := newDryRunTarget[CloudupSubContext](builder, &stdout)
checkExisting := true
target := newDryRunTarget[CloudupSubContext](builder, checkExisting, &stdout)
tasks := map[string]CloudupTask{}
a := &testTask{
Name: PtrTo("TestName"),