mirror of https://github.com/kubernetes/kops.git
Merge pull request #15663 from johngmyers/vfscontext
More VFSContext refactoring
commit e18eaae082
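In summary, this change threads an explicit *vfs.VFSContext through the asset and addon code paths instead of reaching for the global vfs.Context: assets.NewAssetBuilder, assets.Copy, clusteraddons.LoadClusterAddon, the CopyFile task, and the wellknownoperators.Builder now take the context as a parameter or field. Production callers obtain it from the clientset (clientset.VFSContext(), f.VFSContext(), c.Clientset.VFSContext()), while tests keep passing the package-level vfs.Context. A minimal caller sketch under those assumptions; the helper name below is hypothetical, the NewAssetBuilder signature is the one introduced in the hunks that follow:

package sketch

import (
	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/assets"
	"k8s.io/kops/util/pkg/vfs"
)

// newClusterAssetBuilder is a hypothetical helper that shows the post-refactor
// call shape: the *vfs.VFSContext is supplied explicitly by the caller.
func newClusterAssetBuilder(vfsContext *vfs.VFSContext, cluster *kops.Cluster) *assets.AssetBuilder {
	return assets.NewAssetBuilder(vfsContext, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
}
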
@@ -656,7 +656,7 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr
 return err
 }

-assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, cluster, cloud, assetBuilder)
 if err != nil {
 return err
@@ -673,7 +673,7 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr
 }

 for _, p := range c.AddonPaths {
-addon, err := clusteraddons.LoadClusterAddon(p)
+addon, err := clusteraddons.LoadClusterAddon(clientset.VFSContext(), p)
 if err != nil {
 return fmt.Errorf("error loading cluster addon %s: %v", p, err)
 }

@@ -260,7 +260,7 @@ func updateCluster(ctx context.Context, clientset simple.Clientset, oldCluster,
 return "", fmt.Errorf("error populating configuration: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(newCluster.Spec.Assets, newCluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), newCluster.Spec.Assets, newCluster.Spec.KubernetesVersion, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, newCluster, cloud, assetBuilder)
 if err != nil {
 return fmt.Sprintf("error populating cluster spec: %s", err), nil
@@ -295,7 +295,7 @@ func updateInstanceGroup(ctx context.Context, clientset simple.Clientset, channe
 return "", fmt.Errorf("error populating configuration: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, cluster, cloud, assetBuilder)
 if err != nil {
 return fmt.Sprintf("error populating cluster spec: %s", err), nil

@@ -141,7 +141,7 @@ func RunGetAssets(ctx context.Context, f *util.Factory, out io.Writer, options *
 }

 if options.Copy {
-err := assets.Copy(updateClusterResults.ImageAssets, updateClusterResults.FileAssets, updateClusterResults.Cluster)
+err := assets.Copy(updateClusterResults.ImageAssets, updateClusterResults.FileAssets, f.VFSContext(), updateClusterResults.Cluster)
 if err != nil {
 return err
 }

@@ -292,7 +292,7 @@ func BuildNodeupModelContext(model *testutils.Model) (*NodeupModelContext, error
 func mockedPopulateClusterSpec(ctx context.Context, c *kops.Cluster, cloud fi.Cloud) (*kops.Cluster, error) {
 vfs.Context.ResetMemfsContext(true)

-assetBuilder := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
 basePath, err := vfs.Context.BuildVfsPath("memfs://tests")
 if err != nil {
 return nil, fmt.Errorf("error building vfspath: %v", err)

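Tests, as in the hunk above, keep using the package-level vfs.Context but now pass it explicitly. A hedged sketch of that test-side wiring (the test name and cluster values are illustrative, not taken from the commit):

package sketch

import (
	"testing"

	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/assets"
	"k8s.io/kops/util/pkg/vfs"
)

// TestAssetBuilderWiring is a hypothetical test showing the pattern used across
// the test hunks in this commit: the global vfs.Context is handed to
// NewAssetBuilder explicitly instead of being read inside the builder.
func TestAssetBuilderWiring(t *testing.T) {
	c := &kops.Cluster{}
	c.Spec.KubernetesVersion = "v1.22.0"

	b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
	if b == nil {
		t.Fatal("expected a non-nil AssetBuilder")
	}
}
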
@@ -42,6 +42,7 @@ import (

 // AssetBuilder discovers and remaps assets.
 type AssetBuilder struct {
+vfsContext *vfs.VFSContext
 ImageAssets []*ImageAsset
 FileAssets []*FileAsset
 AssetsLocation *kops.AssetsSpec
@@ -101,8 +102,9 @@ type FileAsset struct {
 }

 // NewAssetBuilder creates a new AssetBuilder.
-func NewAssetBuilder(assets *kops.AssetsSpec, kubernetesVersion string, getAssets bool) *AssetBuilder {
+func NewAssetBuilder(vfsContext *vfs.VFSContext, assets *kops.AssetsSpec, kubernetesVersion string, getAssets bool) *AssetBuilder {
 a := &AssetBuilder{
+vfsContext: vfsContext,
 AssetsLocation: assets,
 GetAssets: getAssets,
 }
@@ -336,7 +338,7 @@ func (a *AssetBuilder) findHash(file *FileAsset) (*hashing.Hash, error) {
 for _, mirror := range mirrors.FindUrlMirrors(u.String()) {
 hashURL := mirror + ext
 klog.V(3).Infof("Trying to read hash fie: %q", hashURL)
-b, err := vfs.Context.ReadFile(hashURL, vfs.WithBackoff(backoff))
+b, err := a.vfsContext.ReadFile(hashURL, vfs.WithBackoff(backoff))
 if err != nil {
 // Try to log without being too alarming - issue #7550
 klog.V(2).Infof("Unable to read hash file %q: %v", hashURL, err)

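Inside the builder itself, the injected context is stored in the unexported vfsContext field and findHash now reads hash files through a.vfsContext.ReadFile rather than the global. A minimal sketch of the same injection pattern, using a hypothetical type as a stand-in for AssetBuilder:

package sketch

import (
	"fmt"

	"k8s.io/kops/util/pkg/vfs"
)

// hashReader is a hypothetical stand-in for AssetBuilder: it stores the injected
// *vfs.VFSContext and performs all reads through it, mirroring findHash above.
type hashReader struct {
	vfsContext *vfs.VFSContext
}

func (r *hashReader) readHash(hashURL string) ([]byte, error) {
	b, err := r.vfsContext.ReadFile(hashURL)
	if err != nil {
		return nil, fmt.Errorf("unable to read hash file %q: %w", hashURL, err)
	}
	return b, nil
}
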
@@ -22,13 +22,14 @@ import (

 "k8s.io/klog/v2"
 "k8s.io/kops/pkg/apis/kops"
+"k8s.io/kops/util/pkg/vfs"
 )

 type assetTask interface {
 Run() error
 }

-func Copy(imageAssets []*ImageAsset, fileAssets []*FileAsset, cluster *kops.Cluster) error {
+func Copy(imageAssets []*ImageAsset, fileAssets []*FileAsset, vfsContext *vfs.VFSContext, cluster *kops.Cluster) error {
 tasks := map[string]assetTask{}

 for _, imageAsset := range imageAssets {
@@ -56,6 +57,7 @@ func Copy(imageAssets []*ImageAsset, fileAssets []*FileAsset, cluster *kops.Clus
 TargetFile: fileAsset.DownloadURL.String(),
 SourceFile: fileAsset.CanonicalURL.String(),
 SHA: fileAsset.SHAValue,
+VFSContext: vfsContext,
 Cluster: cluster,
 }

@@ -38,6 +38,7 @@ type CopyFile struct {
 SourceFile string
 TargetFile string
 SHA string
+VFSContext *vfs.VFSContext
 Cluster *kops.Cluster
 }

@@ -66,7 +67,7 @@ func (e *CopyFile) Run() error {

 targetSHAFile := e.TargetFile + shaExtension

-targetSHABytes, err := vfs.Context.ReadFile(targetSHAFile)
+targetSHABytes, err := e.VFSContext.ReadFile(targetSHAFile)
 if err != nil {
 if os.IsNotExist(err) {
 klog.V(4).Infof("unable to download: %q, assuming target file is not present, and if not present may not be an error: %v",
@@ -91,7 +92,7 @@ func (e *CopyFile) Run() error {

 klog.V(2).Infof("copying bits from %q to %q", source, target)

-if err := transferFile(ctx, e.Cluster, source, target, sourceSha); err != nil {
+if err := transferFile(ctx, e.VFSContext, e.Cluster, source, target, sourceSha); err != nil {
 return fmt.Errorf("unable to transfer %q to %q: %v", source, target, err)
 }

@@ -100,11 +101,11 @@ func (e *CopyFile) Run() error {

 // transferFile downloads a file from the source location, validates the file matches the SHA,
 // and uploads the file to the target location.
-func transferFile(ctx context.Context, cluster *kops.Cluster, source string, target string, sha string) error {
+func transferFile(ctx context.Context, vfsContext *vfs.VFSContext, cluster *kops.Cluster, source string, target string, sha string) error {
 // TODO drop file to disk, as vfs reads file into memory. We load kubelet into memory for instance.
 // TODO in s3 can we do a copy file ... would need to test

-data, err := vfs.Context.ReadFile(source)
+data, err := vfsContext.ReadFile(source)
 if err != nil {
 if os.IsNotExist(err) {
 return fmt.Errorf("file not found %q: %v", source, err)
@@ -118,7 +119,7 @@ func transferFile(ctx context.Context, cluster *kops.Cluster, source string, tar
 return err
 }

-uploadVFS, err := vfs.Context.BuildVfsPath(objectStore)
+uploadVFS, err := vfsContext.BuildVfsPath(objectStore)
 if err != nil {
 return fmt.Errorf("error building path %q: %v", objectStore, err)
 }
@@ -129,7 +130,7 @@ func transferFile(ctx context.Context, cluster *kops.Cluster, source string, tar
 }

 shaTarget := objectStore + shaExtension
-shaVFS, err := vfs.Context.BuildVfsPath(shaTarget)
+shaVFS, err := vfsContext.BuildVfsPath(shaTarget)
 if err != nil {
 return fmt.Errorf("error building path %q: %v", shaTarget, err)
 }

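Taken together, these hunks thread the context from the task into transferFile: CopyFile now carries a VFSContext field, and the SHA read, the source read, and the upload-path construction all go through it. A hedged local sketch of that flow (copyTask is a stand-in, not the real type; the real Run also verifies the SHA and uploads the data):

package sketch

import (
	"fmt"

	"k8s.io/klog/v2"
	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/util/pkg/vfs"
)

// copyTask is a hypothetical stand-in for the CopyFile task above; it carries the
// injected VFSContext and uses it for every read, mirroring Run and transferFile.
type copyTask struct {
	SourceFile string
	TargetFile string
	SHA        string
	VFSContext *vfs.VFSContext
	Cluster    *kops.Cluster
}

func (t *copyTask) run() error {
	// Read the source through the injected context (previously vfs.Context.ReadFile).
	data, err := t.VFSContext.ReadFile(t.SourceFile)
	if err != nil {
		return fmt.Errorf("error reading %q: %w", t.SourceFile, err)
	}

	// Build the target path through the same context (previously vfs.Context.BuildVfsPath);
	// the real task would upload the data and its SHA file here.
	if _, err := t.VFSContext.BuildVfsPath(t.TargetFile); err != nil {
		return fmt.Errorf("error building path %q: %w", t.TargetFile, err)
	}
	klog.V(2).Infof("would copy %d bytes to %q", len(data), t.TargetFile)
	return nil
}
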
@@ -31,7 +31,7 @@ type ClusterAddon struct {
 }

 // LoadClusterAddon loads a set of objects from the specified VFS location
-func LoadClusterAddon(location string) (*ClusterAddon, error) {
+func LoadClusterAddon(vfsContext *vfs.VFSContext, location string) (*ClusterAddon, error) {
 u, err := url.Parse(location)
 if err != nil {
 return nil, fmt.Errorf("invalid addon location: %q", location)
@@ -41,7 +41,7 @@ func LoadClusterAddon(location string) (*ClusterAddon, error) {

 resolved := u.String()
 klog.V(2).Infof("Loading addon from %q", resolved)
-addonBytes, err := vfs.Context.ReadFile(resolved)
+addonBytes, err := vfsContext.ReadFile(resolved)
 if err != nil {
 return nil, fmt.Errorf("error reading addon %q: %v", resolved, err)
 }

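The addon loader follows the same pattern, and the RunCreateCluster hunk near the top shows the call site passing clientset.VFSContext(). A sketch of the new call shape, assuming the package is imported from k8s.io/kops/pkg/clusteraddons (the import path itself does not appear in these hunks):

package sketch

import (
	"fmt"

	"k8s.io/kops/pkg/clusteraddons" // assumed import path; only the call shape appears in the diff
	"k8s.io/kops/util/pkg/vfs"
)

// loadAddon shows the new two-argument call: the VFSContext comes first,
// followed by the addon location, matching LoadClusterAddon above.
func loadAddon(vfsContext *vfs.VFSContext, location string) (*clusteraddons.ClusterAddon, error) {
	addon, err := clusteraddons.LoadClusterAddon(vfsContext, location)
	if err != nil {
		return nil, fmt.Errorf("error loading cluster addon %s: %v", location, err)
	}
	return addon, nil
}
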
@@ -40,7 +40,7 @@ func UpdateCluster(ctx context.Context, clientset simple.Clientset, cluster *kop
 return fmt.Errorf("error populating configuration: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, cluster, cloud, assetBuilder)
 if err != nil {
 return err
@@ -78,7 +78,7 @@ func UpdateInstanceGroup(ctx context.Context, clientset simple.Clientset, cluste
 return fmt.Errorf("error populating configuration: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
 fullCluster, err := cloudup.PopulateClusterSpec(ctx, clientset, cluster, cloud, assetBuilder)
 if err != nil {
 return err

@@ -58,7 +58,7 @@ func getTestSetupOS(t *testing.T, ctx context.Context) (*RollingUpdateCluster, *
 t.Fatalf("Failed to perform assignments: %v", err)
 }

-assetBuilder := assets.NewAssetBuilder(inCluster.Spec.Assets, inCluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, inCluster.Spec.Assets, inCluster.Spec.KubernetesVersion, false)
 basePath, _ := vfs.Context.BuildVfsPath(inCluster.Spec.ConfigBase)
 clientset := vfsclientset.NewVFSClientset(vfs.Context, basePath)
 cluster, err := cloudup.PopulateClusterSpec(ctx, clientset, inCluster, mockcloud, assetBuilder)

@@ -22,6 +22,7 @@ import (
 kopsapi "k8s.io/kops/pkg/apis/kops"
 "k8s.io/kops/pkg/apis/kops/util"
 "k8s.io/kops/pkg/assets"
+"k8s.io/kops/util/pkg/vfs"
 )

 func buildContainerdCluster(version string) *kopsapi.Cluster {
@@ -45,7 +46,7 @@ func Test_Build_Containerd_Supported_Version(t *testing.T) {

 c := buildContainerdCluster(v)
 c.Spec.ContainerRuntime = "containerd"
-b := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

 version, err := util.ParseKubernetesVersion(v)
 if err != nil {
@@ -80,7 +81,7 @@ func Test_Build_Containerd_Unneeded_Runtime(t *testing.T) {
 c.Spec.Docker = &kopsapi.DockerConfig{
 Version: &v,
 }
-b := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

 ob := &ContainerdOptionsBuilder{
 &OptionsContext{
@@ -109,7 +110,7 @@ func Test_Build_Containerd_Needed_Runtime(t *testing.T) {
 c.Spec.Docker = &kopsapi.DockerConfig{
 Version: &v,
 }
-b := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

 ob := &ContainerdOptionsBuilder{
 &OptionsContext{

@@ -27,6 +27,7 @@ import (
 "k8s.io/kops/pkg/model/iam"
 "k8s.io/kops/pkg/testutils"
 "k8s.io/kops/upup/pkg/fi"
+"k8s.io/kops/util/pkg/vfs"
 )

 func Test_RunEtcdManagerBuilder(t *testing.T) {
@@ -52,7 +53,7 @@ func Test_RunEtcdManagerBuilder(t *testing.T) {

 builder := EtcdManagerBuilder{
 KopsModelContext: kopsModelContext,
-AssetBuilder: assets.NewAssetBuilder(kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
+AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
 }

 if err := builder.Build(context); err != nil {

@@ -78,7 +78,7 @@ func TestImage(t *testing.T) {
 }
 }

-assetBuilder := assets.NewAssetBuilder(g.Cluster.Spec.Assets, g.Cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, g.Cluster.Spec.Assets, g.Cluster.Spec.KubernetesVersion, false)
 actual, err := Image(g.Component, &g.Cluster.Spec, assetBuilder)
 if err != nil {
 t.Errorf("unexpected error from image %q %v: %v",

@@ -27,6 +27,7 @@ import (
 "k8s.io/kops/pkg/model/iam"
 "k8s.io/kops/pkg/testutils"
 "k8s.io/kops/upup/pkg/fi"
+"k8s.io/kops/util/pkg/vfs"
 )

 func Test_RunKubeApiserverBuilder(t *testing.T) {
@@ -49,7 +50,7 @@ func Test_RunKubeApiserverBuilder(t *testing.T) {

 builder := KubeApiserverBuilder{
 KopsModelContext: kopsModelContext,
-AssetBuilder: assets.NewAssetBuilder(kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
+AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
 }

 if err := builder.Build(context); err != nil {

@@ -26,6 +26,7 @@ import (
 api "k8s.io/kops/pkg/apis/kops"
 "k8s.io/kops/pkg/assets"
 "k8s.io/kops/upup/pkg/fi"
+"k8s.io/kops/util/pkg/vfs"
 )

 func buildCluster() *api.Cluster {
@@ -46,7 +47,7 @@ func Test_Build_KCM_Builder(t *testing.T) {

 c := buildCluster()
 c.Spec.KubernetesVersion = v
-b := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

 kcm := &KubeControllerManagerOptionsBuilder{
 OptionsContext: &OptionsContext{
@@ -67,7 +68,7 @@ func Test_Build_KCM_Builder(t *testing.T) {

 func Test_Build_KCM_Builder_Change_Duration(t *testing.T) {
 c := buildCluster()
-b := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

 kcm := &KubeControllerManagerOptionsBuilder{
 OptionsContext: &OptionsContext{
@@ -142,7 +143,7 @@ func Test_Build_KCM_Builder_CIDR_Mask_Size(t *testing.T) {
 for _, tc := range grid {
 t.Run(tc.PodCIDR+":"+tc.ClusterCIDR, func(t *testing.T) {
 c := buildCluster()
-b := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

 kcm := &KubeControllerManagerOptionsBuilder{
 OptionsContext: &OptionsContext{

@@ -22,6 +22,7 @@ import (
 "k8s.io/kops/pkg/apis/kops"
 "k8s.io/kops/pkg/apis/kops/util"
 "k8s.io/kops/pkg/assets"
+"k8s.io/kops/util/pkg/vfs"
 )

 func buildKubeletTestCluster() *kops.Cluster {
@@ -37,7 +38,7 @@ func buildKubeletTestCluster() *kops.Cluster {
 }

 func buildOptions(cluster *kops.Cluster) error {
-ab := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+ab := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)

 ver, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
 if err != nil {

@@ -27,6 +27,7 @@ import (
 "k8s.io/kops/pkg/model/iam"
 "k8s.io/kops/pkg/testutils"
 "k8s.io/kops/upup/pkg/fi"
+"k8s.io/kops/util/pkg/vfs"
 )

 func Test_RunKubeSchedulerBuilder(t *testing.T) {
@@ -50,7 +51,7 @@ func Test_RunKubeSchedulerBuilder(t *testing.T) {

 builder := KubeSchedulerBuilder{
 KopsModelContext: kopsModelContext,
-AssetBuilder: assets.NewAssetBuilder(kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
+AssetBuilder: assets.NewAssetBuilder(vfs.Context, kopsModelContext.Cluster.Spec.Assets, kopsModelContext.Cluster.Spec.KubernetesVersion, false),
 }

 if err := builder.Build(context); err != nil {

@@ -22,6 +22,7 @@ import (
 api "k8s.io/kops/pkg/apis/kops"
 "k8s.io/kops/pkg/apis/kops/util"
 "k8s.io/kops/pkg/assets"
+"k8s.io/kops/util/pkg/vfs"
 )

 func buildSchedulerConfigMapCluster(version string) *api.Cluster {
@@ -47,7 +48,7 @@ func Test_Build_Scheduler_Without_PolicyConfigMap(t *testing.T) {

 c := buildCluster()
 c.Spec.KubernetesVersion = v
-b := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

 version, err := util.ParseKubernetesVersion(v)
 if err != nil {
@@ -75,7 +76,7 @@ func Test_Build_Scheduler_PolicyConfigMap_Supported_Version(t *testing.T) {
 for _, v := range versions {

 c := buildSchedulerConfigMapCluster(v)
-b := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

 version, err := util.ParseKubernetesVersion(v)
 if err != nil {

@@ -37,7 +37,8 @@ type Package struct {
 }

 type Builder struct {
-Cluster *kops.Cluster
+VFSContext *vfs.VFSContext
+Cluster *kops.Cluster
 }

 func (b *Builder) Build(objects kubemanifest.ObjectList) ([]*Package, kubemanifest.ObjectList, error) {
@@ -94,7 +95,7 @@ func (b *Builder) loadClusterPackage(u *unstructured.Unstructured) (*Package, er

 locationURL := channelURL.ResolveReference(&url.URL{Path: location}).String()

-manifestBytes, err := vfs.Context.ReadFile(locationURL)
+manifestBytes, err := b.VFSContext.ReadFile(locationURL)
 if err != nil {
 return nil, fmt.Errorf("error reading operator manifest %q: %v", locationURL, err)
 }

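The operator builder gets the same treatment: an explicit VFSContext field next to Cluster, and loadClusterPackage reads the manifest through b.VFSContext; the bootstrap channel builder hunk further down wires it with the global vfs.Context for now. A local stand-in sketch:

package sketch

import (
	"fmt"

	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/util/pkg/vfs"
)

// operatorBuilder is a hypothetical stand-in for wellknownoperators.Builder,
// showing the new field pairing: an explicit VFSContext next to the Cluster.
type operatorBuilder struct {
	VFSContext *vfs.VFSContext
	Cluster    *kops.Cluster
}

func (b *operatorBuilder) loadManifest(locationURL string) ([]byte, error) {
	// Previously vfs.Context.ReadFile(locationURL); now the injected context is used.
	manifestBytes, err := b.VFSContext.ReadFile(locationURL)
	if err != nil {
		return nil, fmt.Errorf("error reading operator manifest %q: %v", locationURL, err)
	}
	return manifestBytes, nil
}
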
@@ -248,7 +248,7 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
 clusterLifecycle = fi.LifecycleIgnore
 }

-assetBuilder := assets.NewAssetBuilder(c.Cluster.Spec.Assets, c.Cluster.Spec.KubernetesVersion, c.GetAssets)
+assetBuilder := assets.NewAssetBuilder(c.Clientset.VFSContext(), c.Cluster.Spec.Assets, c.Cluster.Spec.KubernetesVersion, c.GetAssets)
 err = c.upgradeSpecs(ctx, assetBuilder)
 if err != nil {
 return err

@@ -25,6 +25,7 @@ import (
 "time"

 "github.com/aws/aws-sdk-go/service/ec2"
+"k8s.io/kops/util/pkg/vfs"

 "k8s.io/kops/cloudmock/aws/mockec2"
 "k8s.io/kops/pkg/apis/kops"
@@ -127,7 +128,7 @@ func checkNoChanges(t *testing.T, ctx context.Context, cloud fi.Cloud, allTasks
 KubernetesVersion: "v1.9.0",
 },
 }
-assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
 target := fi.NewCloudupDryRunTarget(assetBuilder, os.Stderr)
 context, err := fi.NewCloudupContext(ctx, target, nil, cloud, nil, nil, nil, allTasks)
 if err != nil {

@@ -22,7 +22,6 @@ import (

 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/klog/v2"

 channelsapi "k8s.io/kops/channels/pkg/api"
 "k8s.io/kops/pkg/apis/kops"
 "k8s.io/kops/pkg/assets"
@@ -47,6 +46,7 @@ import (
 "k8s.io/kops/upup/pkg/fi"
 "k8s.io/kops/upup/pkg/fi/fitasks"
 "k8s.io/kops/upup/pkg/fi/utils"
+"k8s.io/kops/util/pkg/vfs"
 )

 // BootstrapChannelBuilder is responsible for handling the addons in channels
@@ -162,7 +162,8 @@ func (b *BootstrapChannelBuilder) Build(c *fi.CloudupModelBuilderContext) error

 if featureflag.UseAddonOperators.Enabled() {
 ob := &wellknownoperators.Builder{
-Cluster: b.Cluster,
+VFSContext: vfs.Context,
+Cluster: b.Cluster,
 }

 addonPackages, clusterAddons, err := ob.Build(b.ClusterAddons)

@@ -162,7 +162,7 @@ func runChannelBuilderTest(t *testing.T, key string, addonManifests []string) {
 bcb := bootstrapchannelbuilder.NewBootstrapChannelBuilder(
 &kopsModel,
 fi.LifecycleSync,
-assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false),
+assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false),
 templates,
 nil,
 )

@@ -28,6 +28,7 @@ import (
 "k8s.io/kops/pkg/assets"
 "k8s.io/kops/upup/pkg/fi"
 "k8s.io/kops/upup/pkg/fi/cloudup/gce"
+"k8s.io/kops/util/pkg/vfs"
 )

 func TestServiceAccount(t *testing.T) {
@@ -117,7 +118,7 @@ func doDryRun(t *testing.T, ctx context.Context, cloud fi.Cloud, allTasks map[st
 KubernetesVersion: "v1.23.0",
 },
 }
-assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
 target := fi.NewCloudupDryRunTarget(assetBuilder, os.Stderr)
 context, err := fi.NewCloudupContext(ctx, target, nil, cloud, nil, nil, nil, allTasks)
 if err != nil {

@@ -22,6 +22,7 @@ import (
 api "k8s.io/kops/pkg/apis/kops"
 "k8s.io/kops/pkg/assets"
 "k8s.io/kops/util/pkg/architectures"
+"k8s.io/kops/util/pkg/vfs"
 )

 func Test_FindCNIAssetFromEnvironmentVariable(t *testing.T) {
@@ -34,7 +35,7 @@ func Test_FindCNIAssetFromEnvironmentVariable(t *testing.T) {
 cluster := &api.Cluster{}
 cluster.Spec.KubernetesVersion = "v1.18.0"

-assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
 cniAsset, cniAssetHash, err := findCNIAssets(cluster, assetBuilder, architectures.ArchitectureAmd64)
 if err != nil {
 t.Errorf("Unable to parse CNI version %s", err)
@@ -56,7 +57,7 @@ func Test_FindCNIAssetFromDefaults122(t *testing.T) {
 cluster := &api.Cluster{}
 cluster.Spec.KubernetesVersion = "v1.22.0"

-assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)
 cniAsset, cniAssetHash, err := findCNIAssets(cluster, assetBuilder, architectures.ArchitectureAmd64)
 if err != nil {
 t.Errorf("Unable to parse CNI version %s", err)

@@ -110,7 +110,7 @@ func TestPopulateCluster_Subnets(t *testing.T) {
 func mockedPopulateClusterSpec(ctx context.Context, c *kopsapi.Cluster, cloud fi.Cloud) (*kopsapi.Cluster, error) {
 vfs.Context.ResetMemfsContext(true)

-assetBuilder := assets.NewAssetBuilder(c.Spec.Assets, c.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)
 basePath, err := vfs.Context.BuildVfsPath("memfs://tests")
 if err != nil {
 return nil, fmt.Errorf("error building vfspath: %v", err)

@@ -23,6 +23,7 @@ import (

 "github.com/stretchr/testify/assert"
 "k8s.io/kops/pkg/assets"
+"k8s.io/kops/util/pkg/vfs"
 )

 func Test_tryResourceAsString(t *testing.T) {
@@ -66,7 +67,7 @@ func (*testTask) Run(_ *CloudupContext) error {
 }

 func Test_DryrunTarget_PrintReport(t *testing.T) {
-builder := assets.NewAssetBuilder(nil, "1.17.3", false)
+builder := assets.NewAssetBuilder(vfs.Context, nil, "1.17.3", false)
 var stdout bytes.Buffer
 target := newDryRunTarget[CloudupSubContext](builder, &stdout)
 tasks := map[string]CloudupTask{}

@@ -376,7 +376,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 Cloud: cloud,
 }
 case "dryrun":
-assetBuilder := assets.NewAssetBuilder(c.cluster.Spec.Assets, c.cluster.Spec.KubernetesVersion, false)
+assetBuilder := assets.NewAssetBuilder(vfs.Context, c.cluster.Spec.Assets, c.cluster.Spec.KubernetesVersion, false)
 target = fi.NewNodeupDryRunTarget(assetBuilder, out)
 default:
 return fmt.Errorf("unsupported target type %q", c.Target)