mirror of https://github.com/kubernetes/kops.git
Change NewAssetBuilder to take a kops.Cluster
parent e4fafa6065
commit b68f58d746
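The shape of the change, inferred from the call sites in the diff below: NewAssetBuilder now receives the whole *kops.Cluster instead of just cluster.Spec.Assets, presumably so the builder can also consult other cluster fields (the updated tests set spec.KubernetesVersion before constructing it). A minimal before/after sketch of one call site; the signatures shown for pkg/assets are assumptions reconstructed from these call sites, not copied from the package:

    // before: only the asset overrides were passed in
    // (assumed old signature: func NewAssetBuilder(assets *kops.Assets, phase string) *AssetBuilder)
    assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, "")

    // after: the whole cluster object is passed in
    // (assumed new signature: func NewAssetBuilder(cluster *kops.Cluster, phase string) *AssetBuilder)
    assetBuilder := assets.NewAssetBuilder(cluster, "")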
@@ -1058,7 +1058,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
         return err
     }
 
-    assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, "")
+    assetBuilder := assets.NewAssetBuilder(cluster, "")
     fullCluster, err := cloudup.PopulateClusterSpec(clientset, cluster, assetBuilder)
     if err != nil {
         return err
@@ -211,7 +211,7 @@ func RunEditCluster(f *util.Factory, cmd *cobra.Command, args []string, out io.W
             return preservedFile(fmt.Errorf("error populating configuration: %v", err), file, out)
         }
 
-        assetBuilder := assets.NewAssetBuilder(newCluster.Spec.Assets, "")
+        assetBuilder := assets.NewAssetBuilder(newCluster, "")
         fullCluster, err := cloudup.PopulateClusterSpec(clientset, newCluster, assetBuilder)
         if err != nil {
             results = editResults{
@@ -168,7 +168,7 @@ func RunEditInstanceGroup(f *util.Factory, cmd *cobra.Command, args []string, ou
         return fmt.Errorf("error populating configuration: %v", err)
     }
 
-    assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, "")
+    assetBuilder := assets.NewAssetBuilder(cluster, "")
     fullCluster, err := cloudup.PopulateClusterSpec(clientset, cluster, assetBuilder)
     if err != nil {
         return err
@@ -289,7 +289,7 @@ func (c *UpgradeClusterCmd) Run(args []string) error {
         return fmt.Errorf("error populating configuration: %v", err)
     }
 
-    assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, "")
+    assetBuilder := assets.NewAssetBuilder(cluster, "")
     fullCluster, err := cloudup.PopulateClusterSpec(clientset, cluster, assetBuilder)
     if err != nil {
         return err
@@ -262,7 +262,7 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*Protokube
     if etcdContainerImage != "" {
         image = etcdContainerImage
     }
-    assets := assets.NewAssetBuilder(t.Cluster.Spec.Assets, "")
+    assets := assets.NewAssetBuilder(t.Cluster, "")
     remapped, err := assets.RemapImage(image)
     if err != nil {
         return nil, fmt.Errorf("unable to remap container %q: %v", image, err)
@@ -124,7 +124,7 @@ func (b *Builder) Build(cluster *kops.Cluster, ig *kops.InstanceGroup) (*Data, e
 
     {
         phase := cloudup.PhaseCluster
-        assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, string(phase))
+        assetBuilder := assets.NewAssetBuilder(cluster, string(phase))
 
         applyCmd := &cloudup.ApplyClusterCmd{
             Cluster: cluster,
@@ -43,11 +43,12 @@ func buildCluster() *api.Cluster {
 
 func Test_Build_KCM_Builder_Lower_Version(t *testing.T) {
     versions := []string{"v1.4.0", "v1.4.7", "v1.5.0"}
-    b := assets.NewAssetBuilder(nil, "")
 
     for _, v := range versions {
 
         c := buildCluster()
+        c.Spec.KubernetesVersion = v
+        b := assets.NewAssetBuilder(c, "")
 
         kcm := &KubeControllerManagerOptionsBuilder{
             Context: &OptionsContext{
@@ -55,17 +56,13 @@ func Test_Build_KCM_Builder_Lower_Version(t *testing.T) {
             },
         }
 
-        spec := c.Spec
-
-        spec.KubernetesVersion = v
-        err := kcm.BuildOptions(&spec)
-
+        err := kcm.BuildOptions(&c.Spec)
         if err != nil {
             t.Fatalf("unexpected error from BuildOptions: %v", err)
         }
 
-        if spec.KubeControllerManager.AttachDetachReconcileSyncPeriod != nil {
-            t.Fatalf("AttachDetachReconcileSyncPeriod should not be set for old kubernetes version %s", spec.KubernetesVersion)
+        if c.Spec.KubeControllerManager.AttachDetachReconcileSyncPeriod != nil {
+            t.Fatalf("AttachDetachReconcileSyncPeriod should not be set for old kubernetes version %s", c.Spec.KubernetesVersion)
         }
     }
 
@@ -73,11 +70,11 @@ func Test_Build_KCM_Builder_Lower_Version(t *testing.T) {
 
 func Test_Build_KCM_Builder_High_Enough_Version(t *testing.T) {
     versions := []string{"v1.4.8", "v1.5.2", "v1.9.0", "v2.4.0"}
-    b := assets.NewAssetBuilder(nil, "")
     for _, v := range versions {
 
         c := buildCluster()
+        c.Spec.KubernetesVersion = v
+        b := assets.NewAssetBuilder(c, "")
 
         kcm := &KubeControllerManagerOptionsBuilder{
             Context: &OptionsContext{
@@ -85,15 +82,13 @@ func Test_Build_KCM_Builder_High_Enough_Version(t *testing.T) {
             },
         }
 
-        spec := c.Spec
-        err := kcm.BuildOptions(&spec)
-
+        err := kcm.BuildOptions(&c.Spec)
         if err != nil {
             t.Fatalf("unexpected error from BuildOptions %s", err)
         }
 
-        if spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration != time.Minute {
-            t.Fatalf("AttachDetachReconcileSyncPeriod should be set to 1m - %s, for k8s version %s", spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration.String(), spec.KubernetesVersion)
+        if c.Spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration != time.Minute {
+            t.Fatalf("AttachDetachReconcileSyncPeriod should be set to 1m - %s, for k8s version %s", c.Spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration.String(), c.Spec.KubernetesVersion)
         }
     }
 
@@ -103,30 +98,27 @@ func Test_Build_KCM_Builder_Change_Duration(t *testing.T) {
 
     c := buildCluster()
+    c.Spec.KubernetesVersion = "v1.5.2"
+    b := assets.NewAssetBuilder(c, "")
 
-    b := assets.NewAssetBuilder(nil, "")
     kcm := &KubeControllerManagerOptionsBuilder{
         Context: &OptionsContext{
             AssetBuilder: b,
         },
     }
 
-    spec := c.Spec
-
-    spec.KubeControllerManager = &api.KubeControllerManagerConfig{
+    c.Spec.KubeControllerManager = &api.KubeControllerManagerConfig{
         AttachDetachReconcileSyncPeriod: &metav1.Duration{},
     }
 
-    spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration = time.Minute * 5
-
-    err := kcm.BuildOptions(&spec)
+    c.Spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration = time.Minute * 5
+
+    err := kcm.BuildOptions(&c.Spec)
     if err != nil {
         t.Fatalf("unexpected error from BuildOptions %s", err)
     }
 
-    if spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration != time.Minute*5 {
-        t.Fatalf("AttachDetachReconcileSyncPeriod should be set to 5m - %s, for k8s version %s", spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration.String(), spec.KubernetesVersion)
+    if c.Spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration != time.Minute*5 {
+        t.Fatalf("AttachDetachReconcileSyncPeriod should be set to 5m - %s, for k8s version %s", c.Spec.KubeControllerManager.AttachDetachReconcileSyncPeriod.Duration.String(), c.Spec.KubernetesVersion)
     }
 
 }
@@ -23,20 +23,20 @@ import (
     "k8s.io/kops/pkg/assets"
 )
 
-func buildSpec() *kops.ClusterSpec {
-    spec := kops.ClusterSpec{
-        KubernetesVersion: "1.6.2",
-        ServiceClusterIPRange: "10.10.0.0/16",
-        Kubelet: &kops.KubeletConfigSpec{},
+func buildKubeletTestCluster() *kops.Cluster {
+    return &kops.Cluster{
+        Spec: kops.ClusterSpec{
+            KubernetesVersion: "1.6.2",
+            ServiceClusterIPRange: "10.10.0.0/16",
+            Kubelet: &kops.KubeletConfigSpec{},
+        },
     }
-
-    return &spec
 }
 
-func buildOptions(spec *kops.ClusterSpec) error {
-    ab := assets.NewAssetBuilder(nil, "")
+func buildOptions(cluster *kops.Cluster) error {
+    ab := assets.NewAssetBuilder(cluster, "")
 
-    ver, err := KubernetesVersion(spec)
+    ver, err := KubernetesVersion(&cluster.Spec)
     if err != nil {
         return err
     }
@@ -48,7 +48,7 @@ func buildOptions(spec *kops.ClusterSpec) error {
         },
     }
 
-    err = builder.BuildOptions(spec)
+    err = builder.BuildOptions(&cluster.Spec)
     if err != nil {
         return nil
     }
@@ -57,44 +57,44 @@ func buildOptions(spec *kops.ClusterSpec) error {
 }
 
 func TestFeatureGates(t *testing.T) {
-    spec := buildSpec()
-    err := buildOptions(spec)
+    cluster := buildKubeletTestCluster()
+    err := buildOptions(cluster)
     if err != nil {
         t.Fatal(err)
     }
 
-    gates := spec.Kubelet.FeatureGates
+    gates := cluster.Spec.Kubelet.FeatureGates
     if gates["ExperimentalCriticalPodAnnotation"] != "true" {
         t.Errorf("ExperimentalCriticalPodAnnotation feature gate should be enabled by default")
     }
 }
 
 func TestFeatureGatesKubernetesVersion(t *testing.T) {
-    spec := buildSpec()
-    spec.KubernetesVersion = "1.4.0"
-    err := buildOptions(spec)
+    cluster := buildKubeletTestCluster()
+    cluster.Spec.KubernetesVersion = "1.4.0"
+    err := buildOptions(cluster)
     if err != nil {
         t.Fatal(err)
     }
 
-    gates := spec.Kubelet.FeatureGates
+    gates := cluster.Spec.Kubelet.FeatureGates
     if _, found := gates["ExperimentalCriticalPodAnnotation"]; found {
         t.Errorf("ExperimentalCriticalPodAnnotation feature gate should not be added on Kubernetes < 1.5.2")
     }
 }
 
 func TestFeatureGatesOverride(t *testing.T) {
-    spec := buildSpec()
-    spec.Kubelet.FeatureGates = map[string]string{
+    cluster := buildKubeletTestCluster()
+    cluster.Spec.Kubelet.FeatureGates = map[string]string{
         "ExperimentalCriticalPodAnnotation": "false",
     }
 
-    err := buildOptions(spec)
+    err := buildOptions(cluster)
     if err != nil {
         t.Fatal(err)
     }
 
-    gates := spec.Kubelet.FeatureGates
+    gates := cluster.Spec.Kubelet.FeatureGates
     if gates["ExperimentalCriticalPodAnnotation"] != "false" {
         t.Errorf("ExperimentalCriticalPodAnnotation feature should be disalbled")
     }
@@ -24,13 +24,13 @@ import (
     "k8s.io/kops/pkg/assets"
 )
 
-func buildSchedulerConfigMapCluster() *api.Cluster {
+func buildSchedulerConfigMapCluster(version string) *api.Cluster {
     usePolicyConfigMap := true
 
     return &api.Cluster{
         Spec: api.ClusterSpec{
             CloudProvider: "aws",
-            KubernetesVersion: "v1.4.0",
+            KubernetesVersion: version,
             KubeScheduler: &api.KubeSchedulerConfig{
                 UsePolicyConfigMap: &usePolicyConfigMap,
             },
@@ -40,14 +40,14 @@ func buildSchedulerConfigMapCluster() *api.Cluster {
 
 func Test_Build_Scheduler_Without_PolicyConfigMap(t *testing.T) {
     versions := []string{"v1.6.0", "v1.6.4", "v1.7.0", "v1.7.4"}
-    b := assets.NewAssetBuilder(nil, "")
-
     for _, v := range versions {
 
         c := buildCluster()
+        c.Spec.KubernetesVersion = v
+        b := assets.NewAssetBuilder(c, "")
 
         version, err := util.ParseKubernetesVersion(v)
 
         if err != nil {
             t.Fatalf("unexpected error from ParseKubernetesVersion %s: %v", v, err)
         }
@@ -59,10 +59,7 @@ func Test_Build_Scheduler_Without_PolicyConfigMap(t *testing.T) {
             },
         }
 
-        spec := c.Spec
-
-        spec.KubernetesVersion = v
-        err = ks.BuildOptions(&spec)
+        err = ks.BuildOptions(&c.Spec)
 
         if err != nil {
             t.Fatalf("unexpected error from BuildOptions: %v", err)
@@ -72,14 +69,13 @@ func Test_Build_Scheduler_Without_PolicyConfigMap(t *testing.T) {
 }
 func Test_Build_Scheduler_PolicyConfigMap_Unsupported_Version(t *testing.T) {
     versions := []string{"v1.6.0", "v1.6.4"}
-    b := assets.NewAssetBuilder(nil, "")
-
     for _, v := range versions {
 
-        c := buildSchedulerConfigMapCluster()
+        c := buildSchedulerConfigMapCluster(v)
+        b := assets.NewAssetBuilder(c, "")
 
         version, err := util.ParseKubernetesVersion(v)
 
         if err != nil {
             t.Fatalf("unexpected error from ParseKubernetesVersion %s: %v", v, err)
         }
@@ -91,11 +87,7 @@ func Test_Build_Scheduler_PolicyConfigMap_Unsupported_Version(t *testing.T) {
             },
         }
 
-        spec := c.Spec
-
-        spec.KubernetesVersion = v
-        err = ks.BuildOptions(&spec)
-
+        err = ks.BuildOptions(&c.Spec)
         if err == nil {
             t.Fatalf("error is expected, but none are returned")
         }
@@ -105,14 +97,13 @@ func Test_Build_Scheduler_PolicyConfigMap_Unsupported_Version(t *testing.T) {
 
 func Test_Build_Scheduler_PolicyConfigMap_Supported_Version(t *testing.T) {
     versions := []string{"v1.7.0", "v1.7.4", "v1.8.0"}
-    b := assets.NewAssetBuilder(nil, "")
-
     for _, v := range versions {
 
-        c := buildSchedulerConfigMapCluster()
+        c := buildSchedulerConfigMapCluster(v)
+        b := assets.NewAssetBuilder(c, "")
 
         version, err := util.ParseKubernetesVersion(v)
 
         if err != nil {
             t.Fatalf("unexpected error from ParseKubernetesVersion %s: %v", v, err)
         }
@@ -124,11 +115,7 @@ func Test_Build_Scheduler_PolicyConfigMap_Supported_Version(t *testing.T) {
             },
         }
 
-        spec := c.Spec
-
-        spec.KubernetesVersion = v
-        err = ks.BuildOptions(&spec)
-
+        err = ks.BuildOptions(&c.Spec)
         if err != nil {
             t.Fatalf("unexpected error from BuildOptions %s: %v", v, err)
         }
@@ -206,7 +206,7 @@ func (c *ApplyClusterCmd) Run() error {
     // This is kinda a hack. Need to move phases out of fi. If we use Phase here we introduce a circular
     // go dependency.
     phase := string(c.Phase)
-    assetBuilder := assets.NewAssetBuilder(c.Cluster.Spec.Assets, phase)
+    assetBuilder := assets.NewAssetBuilder(c.Cluster, phase)
     err = c.upgradeSpecs(assetBuilder)
     if err != nil {
         return err
@@ -65,6 +65,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/diff:go_default_library",
+        "//pkg/apis/kops:go_default_library",
         "//pkg/featureflag:go_default_library",
         "//upup/pkg/fi:go_default_library",
         "//upup/pkg/fi/cloudup/awsup:go_default_library",
@@ -24,7 +24,9 @@ import (
     "time"
 
     "github.com/aws/aws-sdk-go/service/ec2"
+
     "k8s.io/kops/cloudmock/aws/mockec2"
+    "k8s.io/kops/pkg/apis/kops"
     "k8s.io/kops/pkg/assets"
     "k8s.io/kops/upup/pkg/fi"
     "k8s.io/kops/upup/pkg/fi/cloudup/awsup"
@@ -105,7 +107,12 @@ func TestElasticIPCreate(t *testing.T) {
 }
 
 func checkNoChanges(t *testing.T, cloud fi.Cloud, allTasks map[string]fi.Task) {
-    assetBuilder := assets.NewAssetBuilder(nil, "")
+    cluster := &kops.Cluster{
+        Spec: kops.ClusterSpec{
+            KubernetesVersion: "v1.9.0",
+        },
+    }
+    assetBuilder := assets.NewAssetBuilder(cluster, "")
     target := fi.NewDryRunTarget(assetBuilder, os.Stderr)
     context, err := fi.NewContext(target, nil, cloud, nil, nil, nil, true, allTasks)
     if err != nil {
@@ -78,7 +78,7 @@ func runChannelBuilderTest(t *testing.T, key string) {
     bcb := BootstrapChannelBuilder{
         cluster: cluster,
         templates: templates,
-        assetBuilder: assets.NewAssetBuilder(nil, ""),
+        assetBuilder: assets.NewAssetBuilder(cluster, ""),
     }
 
     context := &fi.ModelBuilderContext{
@@ -33,7 +33,9 @@ func Test_FindCNIAssetFromEnvironmentVariable(t *testing.T) {
     }()
 
     cluster := &api.Cluster{}
-    assetBuilder := assets.NewAssetBuilder(nil, "")
+    cluster.Spec.KubernetesVersion = "v1.9.0"
+
+    assetBuilder := assets.NewAssetBuilder(cluster, "")
     cniAsset, cniAssetHashString, err := findCNIAssets(cluster, assetBuilder)
 
     if err != nil {
@@ -51,9 +53,9 @@ func Test_FindCNIAssetFromEnvironmentVariable(t *testing.T) {
 
 func Test_FindCNIAssetDefaultValue1_6(t *testing.T) {
 
-    cluster := &api.Cluster{Spec: api.ClusterSpec{}}
+    cluster := &api.Cluster{}
     cluster.Spec.KubernetesVersion = "v1.7.0"
-    assetBuilder := assets.NewAssetBuilder(nil, "")
+    assetBuilder := assets.NewAssetBuilder(cluster, "")
     cniAsset, cniAssetHashString, err := findCNIAssets(cluster, assetBuilder)
 
     if err != nil {
@@ -72,9 +74,9 @@ func Test_FindCNIAssetDefaultValue1_6(t *testing.T) {
 
 func Test_FindCNIAssetDefaultValue1_5(t *testing.T) {
 
-    cluster := &api.Cluster{Spec: api.ClusterSpec{}}
+    cluster := &api.Cluster{}
     cluster.Spec.KubernetesVersion = "v1.5.12"
-    assetBuilder := assets.NewAssetBuilder(nil, "")
+    assetBuilder := assets.NewAssetBuilder(cluster, "")
     cniAsset, cniAssetHashString, err := findCNIAssets(cluster, assetBuilder)
 
     if err != nil {
|
@ -104,7 +104,7 @@ func TestPopulateCluster_Default_NoError(t *testing.T) {
|
|||
func mockedPopulateClusterSpec(c *api.Cluster) (*api.Cluster, error) {
|
||||
vfs.Context.ResetMemfsContext(true)
|
||||
|
||||
assetBuilder := assets.NewAssetBuilder(nil, "")
|
||||
assetBuilder := assets.NewAssetBuilder(c, "")
|
||||
basePath, err := vfs.Context.BuildVfsPath("memfs://tests")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error building vfspath: %v", err)
|
||||
|
|
|
@@ -272,7 +272,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
             Tags: nodeTags,
         }
     case "dryrun":
-        assetBuilder := assets.NewAssetBuilder(c.cluster.Spec.Assets, "")
+        assetBuilder := assets.NewAssetBuilder(c.cluster, "")
         target = fi.NewDryRunTarget(assetBuilder, out)
     case "cloudinit":
         checkExisting = false
@@ -106,7 +106,7 @@ func (x *ConvertKubeupCluster) Upgrade() error {
         delete(cluster.ObjectMeta.Annotations, api.AnnotationNameManagement)
     }
 
-    assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, "")
+    assetBuilder := assets.NewAssetBuilder(cluster, "")
     fullCluster, err := cloudup.PopulateClusterSpec(x.Clientset, cluster, assetBuilder)
     if err != nil {
         return err