Merge pull request #17385 from justinsb/fix_get_assets

fix get assets
This commit is contained in:
Kubernetes Prow Robot 2025-05-02 20:05:56 -07:00 committed by GitHub
commit abdc58dbad
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 143 additions and 14 deletions

View File

@ -75,6 +75,8 @@ type integrationTest struct {
nthRebalance bool
// enable GCE startup script
startupScript bool
// verify "kops get assets" functionality
testGetAssets bool
}
func newIntegrationTest(clusterName, srcDir string) *integrationTest {
@ -89,6 +91,11 @@ func newIntegrationTest(clusterName, srcDir string) *integrationTest {
}
}
// withTestGetAssets enables verification of the `kops get assets` output
// against the golden assets.yaml file in the test's source directory.
// It returns the receiver to support fluent chaining.
func (i *integrationTest) withTestGetAssets() *integrationTest {
	i.testGetAssets = true
	return i
}
func (i *integrationTest) withStartupScript() *integrationTest {
i.startupScript = true
return i
@ -1041,6 +1048,7 @@ func TestContainerd(t *testing.T) {
dnsControllerAddon,
awsCCMAddon,
).
withTestGetAssets().
runTestTerraformAWS(t)
}
@ -1319,6 +1327,22 @@ func (i *integrationTest) runTest(t *testing.T, ctx context.Context, h *testutil
}
}
}
if i.testGetAssets {
options := &GetAssetsOptions{}
options.GetOptions = &GetOptions{}
options.Output = "yaml"
options.ClusterName = i.clusterName
var assetsOut bytes.Buffer
err := RunGetAssets(ctx, factory, &assetsOut, options)
if err != nil {
t.Fatalf("error running get assets %q: %v", i.clusterName, err)
}
wantPath := filepath.Join(i.srcDir, "assets.yaml")
golden.AssertMatchesFile(t, assetsOut.String(), wantPath)
}
}
func (i *integrationTest) setupCluster(t *testing.T, ctx context.Context, inputYAML string, stdout bytes.Buffer) *util.Factory {

View File

@ -0,0 +1,96 @@
files:
- canonical: https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet
download: https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet
sha: 5ad4965598773d56a37a8e8429c3dc3d86b4c5c26d8417ab333ae345c053dae2
- canonical: https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl
download: https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl
sha: 646d58f6d98ee670a71d9cdffbf6625aeea2849d567f214bc43a35f8ccb7bf70
- canonical: https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/amd64/ecr-credential-provider-linux-amd64
download: https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/amd64/ecr-credential-provider-linux-amd64
sha: 5035d7814c95cd3cedbc5efb447ef25a4942ef05caab2159746d55ce1698c74a
- canonical: https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz
download: https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz
sha: 2503ce29ac445715ebe146073f45468153f9e28f45fa173cb060cfd9e735f563
- canonical: https://github.com/containerd/containerd/releases/download/v1.7.25/containerd-1.7.25-linux-amd64.tar.gz
download: https://github.com/containerd/containerd/releases/download/v1.7.25/containerd-1.7.25-linux-amd64.tar.gz
sha: 02990fa281c0a2c4b073c6d2415d264b682bd693aa7d86c5d8eb4b86d684a18c
- canonical: https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.amd64
download: https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.amd64
sha: e83565aa78ec8f52a4d2b4eb6c4ca262b74c5f6770c1f43670c3029c20175502
- canonical: https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-amd64.tar.gz
download: https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-amd64.tar.gz
sha: 71aee9d987b7fad0ff2ade50b038ad7e2356324edc02c54045960a3521b3e6a7
- canonical: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz
download: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz
sha: d16a1ffb3938f5a19d5c8f45d363bd091ef89c0bc4d44ad16b933eede32fdcbb
- canonical: https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet
download: https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet
sha: bda9b2324c96693b38c41ecea051bab4c7c434be5683050b5e19025b50dbc0bf
- canonical: https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl
download: https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl
sha: ba4004f98f3d3a7b7d2954ff0a424caa2c2b06b78c17b1dccf2acc76a311a896
- canonical: https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/arm64/ecr-credential-provider-linux-arm64
download: https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/arm64/ecr-credential-provider-linux-arm64
sha: b3d567bda9e2996fc1fbd9d13506bd16763d3865b5c7b0b3c4b48c6088c04481
- canonical: https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-arm64-v1.6.1.tgz
download: https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-arm64-v1.6.1.tgz
sha: f0f440b968ab50ad13d9d42d993ba98ec30b2ec666846f4ef1bddc7646a701cc
- canonical: https://github.com/containerd/containerd/releases/download/v1.7.25/containerd-1.7.25-linux-arm64.tar.gz
download: https://github.com/containerd/containerd/releases/download/v1.7.25/containerd-1.7.25-linux-arm64.tar.gz
sha: e9201d478e4c931496344b779eb6cb40ce5084ec08c8fff159a02cabb0c6b9bf
- canonical: https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.arm64
download: https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.arm64
sha: 285f6c4c3de1d78d9f536a0299ae931219527b2ebd9ad89df5a1072896b7e82a
- canonical: https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-arm64.tar.gz
download: https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-arm64.tar.gz
sha: d8df47708ca57b9cd7f498055126ba7dcfc811d9ba43aae1830c93a09e70e22d
- canonical: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-arm64.tar.gz
download: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-arm64.tar.gz
sha: 0b615cfa00c331fb9c4524f3d4058a61cc487b33a3436d1269e7832cf283f925
images:
- canonical: registry.k8s.io/kube-apiserver:v1.32.0
download: registry.k8s.io/kube-apiserver:v1.32.0
- canonical: registry.k8s.io/pause:3.9
download: registry.k8s.io/pause:3.9
- canonical: registry.k8s.io/kube-controller-manager:v1.32.0
download: registry.k8s.io/kube-controller-manager:v1.32.0
- canonical: registry.k8s.io/kube-scheduler:v1.32.0
download: registry.k8s.io/kube-scheduler:v1.32.0
- canonical: registry.k8s.io/kube-proxy:v1.32.0
download: registry.k8s.io/kube-proxy:v1.32.0
- canonical: registry.k8s.io/kops/kops-controller:1.32.0-beta.1
download: registry.k8s.io/kops/kops-controller:1.32.0-beta.1
- canonical: registry.k8s.io/coredns/coredns:v1.11.3
download: registry.k8s.io/coredns/coredns:v1.11.3
- canonical: registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.8.9
download: registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.8.9
- canonical: registry.k8s.io/kops/dns-controller:1.32.0-beta.1
download: registry.k8s.io/kops/dns-controller:1.32.0-beta.1
- canonical: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.22.0
download: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.22.0
- canonical: registry.k8s.io/provider-aws/cloud-controller-manager:v1.31.0
download: registry.k8s.io/provider-aws/cloud-controller-manager:v1.31.0
- canonical: public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver:v1.38.1
download: public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver:v1.38.1
- canonical: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.12.0-eks-1-32-1
download: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.12.0-eks-1-32-1
- canonical: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.14.0-eks-1-32-1
download: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.14.0-eks-1-32-1
- canonical: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v5.1.0-eks-1-32-1
download: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v5.1.0-eks-1-32-1
- canonical: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v4.7.0-eks-1-32-1
download: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v4.7.0-eks-1-32-1
- canonical: public.ecr.aws/ebs-csi-driver/volume-modifier-for-k8s:v0.5.0
download: public.ecr.aws/ebs-csi-driver/volume-modifier-for-k8s:v0.5.0
- canonical: public.ecr.aws/eks-distro/kubernetes-csi/external-resizer:v1.12.0-eks-1-32-1
download: public.ecr.aws/eks-distro/kubernetes-csi/external-resizer:v1.12.0-eks-1-32-1
- canonical: registry.k8s.io/kops/kube-apiserver-healthcheck:1.32.0-beta.1
download: registry.k8s.io/kops/kube-apiserver-healthcheck:1.32.0-beta.1
- canonical: registry.k8s.io/kops/kops-utils-cp:1.32.0-beta.1
download: registry.k8s.io/kops/kops-utils-cp:1.32.0-beta.1
- canonical: registry.k8s.io/etcd:3.4.13-0
download: registry.k8s.io/etcd:3.4.13-0
- canonical: registry.k8s.io/etcd:3.5.21-0
download: registry.k8s.io/etcd:3.5.21-0
- canonical: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20241012
download: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20241012

View File

@ -235,11 +235,6 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) (*ApplyResults, error) {
default:
return nil, fmt.Errorf("unknown phase %q", c.Phase)
}
if c.GetAssets {
networkLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
}
assetBuilder := assets.NewAssetBuilder(c.Clientset.VFSContext(), c.Cluster.Spec.Assets, c.GetAssets)
if len(c.ControlPlaneRunningVersion) > 0 && c.ControlPlaneRunningVersion != c.Cluster.Spec.KubernetesVersion {
@ -776,10 +771,14 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) (*ApplyResults, error) {
case TargetDryRun:
var out io.Writer = os.Stdout
checkExisting := true
if c.GetAssets {
out = io.Discard
// For `kops get assets`, there is no need to run Find,
// we are just trying to discover the assets.
checkExisting = false
}
target = fi.NewCloudupDryRunTarget(assetBuilder, out)
target = fi.NewCloudupDryRunTarget(assetBuilder, checkExisting, out)
// Avoid making changes on a dry-run
shouldPrecreateDNS = false

View File

@ -118,7 +118,8 @@ func checkNoChanges(t *testing.T, ctx context.Context, cloud fi.Cloud, allTasks
},
}
assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)
target := fi.NewCloudupDryRunTarget(assetBuilder, os.Stderr)
checkExisting := true
target := fi.NewCloudupDryRunTarget(assetBuilder, checkExisting, os.Stderr)
context, err := fi.NewCloudupContext(ctx, fi.DeletionProcessingModeDeleteIncludingDeferred, target, nil, cloud, nil, nil, nil, allTasks)
if err != nil {
t.Fatalf("error building context: %v", err)

View File

@ -119,7 +119,8 @@ func doDryRun(t *testing.T, ctx context.Context, cloud fi.Cloud, allTasks map[st
},
}
assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)
target := fi.NewCloudupDryRunTarget(assetBuilder, os.Stderr)
checkExisting := true
target := fi.NewCloudupDryRunTarget(assetBuilder, checkExisting, os.Stderr)
context, err := fi.NewCloudupContext(ctx, fi.DeletionProcessingModeDeleteIncludingDeferred, target, nil, cloud, nil, nil, nil, allTasks)
if err != nil {
t.Fatalf("error building context: %v", err)

View File

@ -45,6 +45,10 @@ type DryRunTarget[T SubContext] struct {
// assetBuilder records all assets used
assetBuilder *assets.AssetBuilder
// defaultCheckExisting will control whether we look for existing objects in our dry-run.
// This is normally true except for special-case dry-runs, like `kops get assets`
defaultCheckExisting bool
}
type NodeupDryRunTarget = DryRunTarget[NodeupSubContext]
@ -77,23 +81,26 @@ func (a DeletionByTaskName[T]) Less(i, j int) bool {
var _ Target[CloudupSubContext] = &DryRunTarget[CloudupSubContext]{}
func newDryRunTarget[T SubContext](assetBuilder *assets.AssetBuilder, out io.Writer) *DryRunTarget[T] {
func newDryRunTarget[T SubContext](assetBuilder *assets.AssetBuilder, defaultCheckExisting bool, out io.Writer) *DryRunTarget[T] {
t := &DryRunTarget[T]{}
t.out = out
t.assetBuilder = assetBuilder
t.defaultCheckExisting = defaultCheckExisting
return t
}
func NewCloudupDryRunTarget(assetBuilder *assets.AssetBuilder, out io.Writer) *CloudupDryRunTarget {
return newDryRunTarget[CloudupSubContext](assetBuilder, out)
// NewCloudupDryRunTarget builds a dry-run target.
// checkExisting should normally be true, but can be false for special-case dry-run, such as in `kops get assets`
func NewCloudupDryRunTarget(assetBuilder *assets.AssetBuilder, checkExisting bool, out io.Writer) *CloudupDryRunTarget {
return newDryRunTarget[CloudupSubContext](assetBuilder, checkExisting, out)
}
// NewNodeupDryRunTarget builds a dry-run target for nodeup tasks.
// Nodeup dry-runs always check for existing objects, so checkExisting is
// fixed to true here.
func NewNodeupDryRunTarget(assetBuilder *assets.AssetBuilder, out io.Writer) *NodeupDryRunTarget {
	return newDryRunTarget[NodeupSubContext](assetBuilder, true, out)
}
// DefaultCheckExisting reports whether this dry-run should look up existing
// objects before planning changes. It returns the value captured at
// construction time (false only for special-case dry-runs like `kops get assets`).
func (t *DryRunTarget[T]) DefaultCheckExisting() bool {
	return t.defaultCheckExisting
}
func (t *DryRunTarget[T]) Render(a, e, changes Task[T]) error {

View File

@ -69,7 +69,8 @@ func (*testTask) Run(_ *CloudupContext) error {
func Test_DryrunTarget_PrintReport(t *testing.T) {
builder := assets.NewAssetBuilder(vfs.Context, nil, false)
var stdout bytes.Buffer
target := newDryRunTarget[CloudupSubContext](builder, &stdout)
checkExisting := true
target := newDryRunTarget[CloudupSubContext](builder, checkExisting, &stdout)
tasks := map[string]CloudupTask{}
a := &testTask{
Name: PtrTo("TestName"),