mirror of https://github.com/kubernetes/kops.git
Remove unused kubernetesVersion from AssetBuilder
This field is no longer used, and can be removed.
parent 6dfbd4633b
commit 4a63a118b2
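
For downstream callers the change is mechanical: the kubernetesVersion argument is dropped from assets.NewAssetBuilder, and the AssetBuilder.KubernetesVersion field goes away with it. A minimal sketch of the call-site migration (not part of the commit), assuming cluster, vfsContext, and getAssets are already in scope as in the hunks below:

// Before this commit the constructor received and parsed the version itself:
//   assetBuilder := assets.NewAssetBuilder(vfsContext, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, getAssets)
// After this commit the version argument is simply dropped:
assetBuilder := assets.NewAssetBuilder(vfsContext, cluster.Spec.Assets, getAssets)

// Callers that still need a parsed version can obtain it directly from the
// cluster spec, as the kubelet options test below already does:
version, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
if err != nil {
	// illustrative error handling only
	return fmt.Errorf("parsing kubernetesVersion %q: %v", cluster.Spec.KubernetesVersion, err)
}
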
@@ -191,7 +191,7 @@ func (r *KopsConfigReconciler) buildBootstrapData(ctx context.Context) ([]byte,
 ig.Spec.Role = kops.InstanceGroupRoleNode

 getAssets := false
-assetBuilder := assets.NewAssetBuilder(vfsContext, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, getAssets)
+assetBuilder := assets.NewAssetBuilder(vfsContext, cluster.Spec.Assets, getAssets)

 encryptionConfigSecretHash := ""
 // if fi.ValueOf(c.Cluster.Spec.EncryptionConfig) {
@@ -651,7 +651,7 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr
 return err
 }

-assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, cluster, instanceGroups, cloud, assetBuilder)
 if err != nil {
 return err
@@ -263,7 +263,7 @@ func updateCluster(ctx context.Context, clientset simple.Clientset, oldCluster,
 return "", fmt.Errorf("error populating configuration: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), newCluster.Spec.Assets, newCluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), newCluster.Spec.Assets, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, newCluster, instanceGroups, cloud, assetBuilder)
 if err != nil {
 return fmt.Sprintf("error populating cluster spec: %s", err), nil
@@ -295,7 +295,7 @@ func updateInstanceGroup(ctx context.Context, clientset simple.Clientset, channe
 return "", fmt.Errorf("error populating configuration: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, cluster, []*api.InstanceGroup{newGroup}, cloud, assetBuilder)
 if err != nil {
 return fmt.Sprintf("error populating cluster spec: %s", err), nil
@@ -293,7 +293,7 @@ func BuildNodeupModelContext(model *testutils.Model) (*NodeupModelContext, error
 func mockedPopulateClusterSpec(ctx context.Context, c *kops.Cluster, instanceGroups []*kops.InstanceGroup, cloud fi.Cloud) (*kops.Cluster, error) {
 vfs.Context.ResetMemfsContext(true)

-assetBuilder := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, false)
 basePath, err := vfs.Context.BuildVfsPath("memfs://tests")
 if err != nil {
 return nil, fmt.Errorf("error building vfspath: %v", err)
@@ -25,13 +25,11 @@ import (
 "strings"
 "time"

-"github.com/blang/semver/v4"
 "github.com/google/go-containerregistry/pkg/authn"
 "github.com/google/go-containerregistry/pkg/crane"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/klog/v2"
 "k8s.io/kops/pkg/apis/kops"
-"k8s.io/kops/pkg/apis/kops/util"
 "k8s.io/kops/pkg/assets/assetdata"
 "k8s.io/kops/pkg/featureflag"
 "k8s.io/kops/pkg/kubemanifest"
@@ -48,9 +46,6 @@ type AssetBuilder struct {
 AssetsLocation *kops.AssetsSpec
 GetAssets bool

-// KubernetesVersion is the version of kubernetes we are installing
-KubernetesVersion semver.Version
-
 // KubeletSupportedVersion is the max version of kubelet that we are currently allowed to run on worker nodes.
 // This is used to avoid violating the kubelet supported version skew policy,
 // (we are not allowed to run a newer kubelet on a worker node than the control plane)
@@ -119,20 +114,13 @@ type FileAsset struct {
 }

 // NewAssetBuilder creates a new AssetBuilder.
-func NewAssetBuilder(vfsContext *vfs.VFSContext, assets *kops.AssetsSpec, kubernetesVersion string, getAssets bool) *AssetBuilder {
+func NewAssetBuilder(vfsContext *vfs.VFSContext, assets *kops.AssetsSpec, getAssets bool) *AssetBuilder {
 a := &AssetBuilder{
 vfsContext: vfsContext,
 AssetsLocation: assets,
 GetAssets: getAssets,
 }

-version, err := util.ParseKubernetesVersion(kubernetesVersion)
-if err != nil {
-// This should have already been validated
-klog.Fatalf("unexpected error from ParseKubernetesVersion %s: %v", kubernetesVersion, err)
-}
-a.KubernetesVersion = *version
-
 return a
 }

@@ -22,7 +22,6 @@ import (
 "testing"

 "k8s.io/kops/pkg/apis/kops"
-"k8s.io/kops/pkg/apis/kops/util"
 "k8s.io/kops/pkg/testutils/golden"
 )

@@ -78,9 +77,7 @@ func TestValidate_RemapImage_ContainerProxy_AppliesToSimplifiedKubernetesURL(t *
 proxyURL := "proxy.example.com/"
 image := "registry.k8s.io/kube-apiserver"
 expected := "proxy.example.com/kube-apiserver"
-version, _ := util.ParseKubernetesVersion("1.10")

-builder.KubernetesVersion = *version
 builder.AssetsLocation.ContainerProxy = &proxyURL

 remapped, err := builder.RemapImage(image)
@@ -118,9 +115,7 @@ func TestValidate_RemapImage_ContainerProxy_AppliesToImagesWithTags(t *testing.T
 proxyURL := "proxy.example.com/"
 image := "registry.k8s.io/kube-apiserver:1.2.3"
 expected := "proxy.example.com/kube-apiserver:1.2.3"
-version, _ := util.ParseKubernetesVersion("1.10")

-builder.KubernetesVersion = *version
 builder.AssetsLocation.ContainerProxy = &proxyURL

 remapped, err := builder.RemapImage(image)
@@ -139,9 +134,7 @@ func TestValidate_RemapImage_ContainerRegistry_MappingMultipleTimesConverges(t *
 mirrorURL := "proxy.example.com"
 image := "kube-apiserver:1.2.3"
 expected := "proxy.example.com/kube-apiserver:1.2.3"
-version, _ := util.ParseKubernetesVersion("1.10")

-builder.KubernetesVersion = *version
 builder.AssetsLocation.ContainerRegistry = &mirrorURL

 remapped := image
@@ -40,7 +40,7 @@ func UpdateCluster(ctx context.Context, clientset simple.Clientset, cluster *kop
 return fmt.Errorf("error populating configuration: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, cluster, instanceGroups, cloud, assetBuilder)
 if err != nil {
 return err
@@ -78,7 +78,7 @@ func UpdateInstanceGroup(ctx context.Context, clientset simple.Clientset, cluste
 return fmt.Errorf("error populating configuration: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, cluster, allInstanceGroups, cloud, assetBuilder)
 if err != nil {
 return err
@@ -55,7 +55,7 @@ func getTestSetupOS(t *testing.T, ctx context.Context) (*RollingUpdateCluster, *
 t.Fatalf("Failed to perform assignments: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(vfs.Context, inCluster.Spec.Assets, inCluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, inCluster.Spec.Assets, false)
 basePath, _ := vfs.Context.BuildVfsPath(inCluster.Spec.ConfigStore.Base)
 clientset := vfsclientset.NewVFSClientset(vfs.Context, basePath)
 cluster, err := cloudup.PopulateClusterSpec(ctx, clientset, inCluster, nil, mockcloud, assetBuilder)
@@ -45,7 +45,7 @@ func Test_Build_Containerd_Supported_Version(t *testing.T) {
 for _, v := range kubernetesVersions {

 c := buildContainerdCluster(v)
-b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, false)

 version, err := util.ParseKubernetesVersion(v)
 if err != nil {
@@ -53,7 +53,7 @@ func Test_RunEtcdManagerBuilder(t *testing.T) {

 builder := EtcdManagerBuilder{
 KopsModelContext: kopsModelContext,
-AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
+AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, false),
 }

 if err := builder.Build(context); err != nil {
@@ -78,7 +78,7 @@ func TestImage(t *testing.T) {
 }
 }

-assetBuilder := assets.NewAssetBuilder(vfs.Context, g.Cluster.Spec.Assets, g.Cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, g.Cluster.Spec.Assets, false)
 actual, err := Image(g.Component, &g.Cluster.Spec, assetBuilder)
 if err != nil {
 t.Errorf("unexpected error from image %q %v: %v",
@@ -50,7 +50,7 @@ func Test_RunKubeApiserverBuilder(t *testing.T) {

 builder := KubeApiserverBuilder{
 KopsModelContext: kopsModelContext,
-AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
+AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, false),
 }

 if err := builder.Build(context); err != nil {
@@ -47,7 +47,7 @@ func Test_Build_KCM_Builder(t *testing.T) {

 c := buildCluster()
 c.Spec.KubernetesVersion = v
-b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, false)

 kcm := &KubeControllerManagerOptionsBuilder{
 OptionsContext: &OptionsContext{
@@ -68,7 +68,7 @@ func Test_Build_KCM_Builder(t *testing.T) {

 func Test_Build_KCM_Builder_Change_Duration(t *testing.T) {
 c := buildCluster()
-b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, false)

 kcm := &KubeControllerManagerOptionsBuilder{
 OptionsContext: &OptionsContext{
@@ -143,7 +143,7 @@ func Test_Build_KCM_Builder_CIDR_Mask_Size(t *testing.T) {
 for _, tc := range grid {
 t.Run(tc.PodCIDR+":"+tc.ClusterCIDR, func(t *testing.T) {
 c := buildCluster()
-b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, false)

 kcm := &KubeControllerManagerOptionsBuilder{
 OptionsContext: &OptionsContext{
@@ -38,7 +38,7 @@ func buildKubeletTestCluster() *kops.Cluster {
 }

 func buildOptions(cluster *kops.Cluster) error {
-ab := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+ab := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)

 ver, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
 if err != nil {
@@ -51,7 +51,7 @@ func Test_RunKubeSchedulerBuilder(t *testing.T) {

 builder := KubeSchedulerBuilder{
 KopsModelContext: kopsModelContext,
-AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
+AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, false),
 }

 if err := builder.Build(context); err != nil {
@@ -31,7 +31,7 @@ func Test_Build_Scheduler(t *testing.T) {

 c := buildCluster()
 c.Spec.KubernetesVersion = v
-b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, false)

 version, err := util.ParseKubernetesVersion(v)
 if err != nil {
@@ -38,7 +38,7 @@ func Test_FindCNIAssetFromEnvironmentVariable(t *testing.T) {

 ig := &api.InstanceGroup{}

-assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)

 igModel, err := kopsmodel.ForInstanceGroup(cluster, ig)
 if err != nil {
@@ -73,7 +73,7 @@ func Test_FindCNIAssetFromDefaults122(t *testing.T) {
 t.Fatalf("building instance group model: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)

 asset, err := FindCNIAssets(igModel, assetBuilder, architectures.ArchitectureAmd64)
 if err != nil {
@@ -40,7 +40,7 @@ func Test_FindCrictlVersionHash(t *testing.T) {
 t.Fatalf("building instance group model: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)

 crictlAsset, err := FindCrictlAsset(igModel, assetBuilder, architectures.ArchitectureAmd64)
 if err != nil {
@@ -241,7 +241,7 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) (*ApplyResults, error) {
 clusterLifecycle = fi.LifecycleIgnore
 }

-assetBuilder := assets.NewAssetBuilder(c.Clientset.VFSContext(), c.Cluster.Spec.Assets, c.Cluster.Spec.KubernetesVersion, c.GetAssets)
+assetBuilder := assets.NewAssetBuilder(c.Clientset.VFSContext(), c.Cluster.Spec.Assets, c.GetAssets)
 if len(c.ControlPlaneRunningVersion) > 0 && c.ControlPlaneRunningVersion != c.Cluster.Spec.KubernetesVersion {
 assetBuilder.KubeletSupportedVersion = c.ControlPlaneRunningVersion
 }
@@ -117,7 +117,7 @@ func checkNoChanges(t *testing.T, ctx context.Context, cloud fi.Cloud, allTasks
 KubernetesVersion: "v1.9.0",
 },
 }
-assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)
 target := fi.NewCloudupDryRunTarget(assetBuilder, os.Stderr)
 context, err := fi.NewCloudupContext(ctx, fi.DeletionProcessingModeDeleteIncludingDeferred, target, nil, cloud, nil, nil, nil, allTasks)
 if err != nil {
@@ -164,7 +164,7 @@ func runChannelBuilderTest(t *testing.T, key string, addonManifests []string) {
 bcb := bootstrapchannelbuilder.NewBootstrapChannelBuilder(
 &kopsModel,
 fi.LifecycleSync,
-assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false),
+assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false),
 templates,
 nil,
 )
@@ -118,7 +118,7 @@ func doDryRun(t *testing.T, ctx context.Context, cloud fi.Cloud, allTasks map[st
 KubernetesVersion: "v1.23.0",
 },
 }
-assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, false)
 target := fi.NewCloudupDryRunTarget(assetBuilder, os.Stderr)
 context, err := fi.NewCloudupContext(ctx, fi.DeletionProcessingModeDeleteIncludingDeferred, target, nil, cloud, nil, nil, nil, allTasks)
 if err != nil {
@@ -110,7 +110,7 @@ func TestPopulateCluster_Subnets(t *testing.T) {
 func mockedPopulateClusterSpec(ctx context.Context, c *kopsapi.Cluster, cloud fi.Cloud) (*kopsapi.Cluster, error) {
 vfs.Context.ResetMemfsContext(true)

-assetBuilder := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, false)
 basePath, err := vfs.Context.BuildVfsPath("memfs://tests")
 if err != nil {
 return nil, fmt.Errorf("error building vfspath: %v", err)
@@ -67,7 +67,7 @@ func (*testTask) Run(_ *CloudupContext) error {
 }

 func Test_DryrunTarget_PrintReport(t *testing.T) {
-builder := assets.NewAssetBuilder(vfs.Context, nil, "1.17.3", false)
+builder := assets.NewAssetBuilder(vfs.Context, nil, false)
 var stdout bytes.Buffer
 target := newDryRunTarget[CloudupSubContext](builder, &stdout)
 tasks := map[string]CloudupTask{}
@@ -348,7 +348,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 Cloud: cloud,
 }
 case "dryrun":
-assetBuilder := assets.NewAssetBuilder(vfs.Context, nil, nodeupConfig.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, nil, false)
 target = fi.NewNodeupDryRunTarget(assetBuilder, out)
 default:
 return fmt.Errorf("unsupported target type %q", c.Target)