cluster-api-provider-rke2/test/e2e/e2e_suite_test.go

//go:build e2e
// +build e2e

/*
Copyright 2023 SUSE.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
bootstrapv1 "github.com/rancher/cluster-api-provider-rke2/bootstrap/api/v1beta1"
controlplanev1alpha1 "github.com/rancher/cluster-api-provider-rke2/controlplane/api/v1alpha1"
controlplanev1 "github.com/rancher/cluster-api-provider-rke2/controlplane/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/bootstrap"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
dockerinfrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
)

// Test suite flags
var (
	// configPath is the path to the e2e config file.
	configPath string

	// useExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply).
	useExistingCluster bool

	// artifactFolder is the folder to store e2e test artifacts.
	artifactFolder string

	// skipCleanup prevents cleanup of test resources e.g. for debug purposes.
	skipCleanup bool
)

// Test suite global vars
var (
	ctx = ctrl.SetupSignalHandler()

	// e2eConfig to be used for this test, read from configPath.
	e2eConfig *clusterctl.E2EConfig

	// clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository
	// with the providers specified in the configPath.
	clusterctlConfigPath string

	// bootstrapClusterProvider manages provisioning of the bootstrap cluster to be used for the e2e tests.
	// Please note that provisioning will be skipped if e2e.use-existing-cluster is provided.
	bootstrapClusterProvider bootstrap.ClusterProvider

	// bootstrapClusterProxy allows interacting with the bootstrap cluster to be used for the e2e tests.
	bootstrapClusterProxy framework.ClusterProxy

	// kubetestConfigFilePath is the path to the kubetest configuration file.
	kubetestConfigFilePath string

	// kubetestRepoListPath is the path to the kubetest repo-list file.
	kubetestRepoListPath string

	// useCIArtifacts specifies whether or not to use the latest build from the main branch of the Kubernetes repository.
	useCIArtifacts bool

	// usePRArtifacts specifies whether or not to use the build from a PR of the Kubernetes repository.
	usePRArtifacts bool
)

func init() {
	flag.StringVar(&configPath, "e2e.config", "", "path to the e2e config file")
	flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "", "folder where e2e test artifacts should be stored")
	flag.BoolVar(&useCIArtifacts, "kubetest.use-ci-artifacts", false, "use the latest build from the main branch of the Kubernetes repository. Set KUBERNETES_VERSION environment variable to latest-1.xx to use the build from the 1.xx release branch.")
	flag.BoolVar(&usePRArtifacts, "kubetest.use-pr-artifacts", false, "use the build from a PR of the Kubernetes repository")
	flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped")
	flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)")
	flag.StringVar(&kubetestConfigFilePath, "kubetest.config-file", "", "path to the kubetest configuration file")
	flag.StringVar(&kubetestRepoListPath, "kubetest.repo-list-path", "", "path to the kubetest repo-list file")
}
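
// TestE2E is the entry point for the suite. An invocation typically looks like the
// following (flag values and paths are illustrative, adjust them to your checkout):
//
//	go test -tags=e2e ./test/e2e/... -run TestE2E \
//	  -e2e.config=$(pwd)/test/e2e/config/e2e_conf.yaml \
//	  -e2e.artifacts-folder=$(pwd)/_artifacts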
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)
	ctrl.SetLogger(klog.Background())
	RunSpecs(t, "caprke2-e2e")
}

// Using a SynchronizedBeforeSuite for controlling how to create resources shared across ParallelNodes (~ginkgo threads).
// The local clusterctl repository & the bootstrap cluster are created once and shared across all the tests.
var _ = SynchronizedBeforeSuite(func() []byte {
	// Before all ParallelNodes.
	Expect(configPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
	Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", artifactFolder)

	By("Initializing a runtime.Scheme with all the GVK relevant for this test")
	scheme := initScheme()

	By(fmt.Sprintf("Loading the e2e test configuration from %q", configPath))
	e2eConfig = loadE2EConfig(configPath)

	By(fmt.Sprintf("Creating a clusterctl local repository into %q", artifactFolder))
	clusterctlConfigPath = createClusterctlLocalRepository(e2eConfig, filepath.Join(artifactFolder, "repository"))

	By("Setting up the bootstrap cluster")
	bootstrapClusterProvider, bootstrapClusterProxy = setupBootstrapCluster(e2eConfig, scheme, useExistingCluster)

	return []byte(
		strings.Join([]string{
			artifactFolder,
			configPath,
			clusterctlConfigPath,
			bootstrapClusterProxy.GetKubeconfigPath(),
		}, ","),
	)
}, func(data []byte) {
	// Before each ParallelNode.
	parts := strings.Split(string(data), ",")
	Expect(parts).To(HaveLen(4))

	artifactFolder = parts[0]
	configPath = parts[1]
	clusterctlConfigPath = parts[2]
	kubeconfigPath := parts[3]

	e2eConfig = loadE2EConfig(configPath)
	bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(), framework.WithMachineLogCollector(framework.DockerLogCollector{}))
})

// Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads).
// The bootstrap cluster is shared across all the tests, so it should be deleted only after all ParallelNodes complete.
// The local clusterctl repository is preserved like everything else created in the artifact folder.
var _ = SynchronizedAfterSuite(func() {
	// After each ParallelNode.
}, func() {
	// After all ParallelNodes.
	By("Dumping logs from the bootstrap cluster")
	dumpBootstrapClusterLogs(bootstrapClusterProxy)

	By("Tearing down the management cluster")
	if !skipCleanup {
		tearDown(bootstrapClusterProvider, bootstrapClusterProxy)
	}
})
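
// initScheme builds the runtime.Scheme used by the cluster proxies in this suite,
// registering the CAPI core and experimental types, the Docker infrastructure types,
// and the RKE2 bootstrap and control-plane API types.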
func initScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	framework.TryAddDefaultSchemes(scheme)
	Expect(controlplanev1.AddToScheme(scheme)).To(Succeed())
	Expect(controlplanev1alpha1.AddToScheme(scheme)).To(Succeed())
	Expect(bootstrapv1.AddToScheme(scheme)).To(Succeed())
	Expect(clusterv1.AddToScheme(scheme)).To(Succeed())
	Expect(clusterv1exp.AddToScheme(scheme)).To(Succeed())
	Expect(dockerinfrav1.AddToScheme(scheme)).To(Succeed())

	return scheme
}
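
// loadE2EConfig reads the e2e test configuration from configPath and fails the suite if it cannot be loaded.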
func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
	// TODO: This is commented out as it assumes kubeadm and errors if it's not there.
	// Remove localLoadE2EConfig and use the line below when this issue is resolved:
	// https://github.com/kubernetes-sigs/cluster-api/issues/3983
	// config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
	config := localLoadE2EConfig(configPath)
	Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath)

	return config
}
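
// createClusterctlLocalRepository generates a local clusterctl repository under repositoryFolder with the
// providers declared in the e2e config, and returns the path to the resulting clusterctl config file.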
func createClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFolder string) string {
	createRepositoryInput := clusterctl.CreateRepositoryInput{
		E2EConfig:        config,
		RepositoryFolder: repositoryFolder,
	}

	// NOTE: if we wanted to test an externally installed CNI we could add it here.
	clusterctlConfig := clusterctl.CreateRepository(ctx, createRepositoryInput)
	Expect(clusterctlConfig).To(BeAnExistingFile(), "The clusterctl config file does not exist in the local repository %s", repositoryFolder)

	return clusterctlConfig
}
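
// setupBootstrapCluster creates a kind bootstrap (management) cluster and loads the required images into it,
// unless useExistingCluster is set, in which case the current cluster is used (default discovery rules apply).
// It returns the cluster provider and a proxy for interacting with the cluster.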
func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme, useExistingCluster bool) (bootstrap.ClusterProvider, framework.ClusterProxy) {
	var clusterProvider bootstrap.ClusterProvider

	kubeconfigPath := ""
	if !useExistingCluster {
		By("Creating the bootstrap cluster")
		clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
			Name:               config.ManagementClusterName,
			KubernetesVersion:  config.MustGetVariable(KubernetesVersionManagement),
			RequiresDockerSock: config.HasDockerProvider(),
			Images:             config.Images,
			IPFamily:           config.MustGetVariable(IPFamily),
			LogFolder:          filepath.Join(artifactFolder, "kind"),
			ExtraPortMappings: []v1alpha4.PortMapping{
				{
					// This is used by postgresql for the external datastore tests.
					ContainerPort: 30000,
					HostPort:      30000,
					Protocol:      v1alpha4.PortMappingProtocolTCP,
				},
			},
		})
		Expect(clusterProvider).ToNot(BeNil(), "Failed to create a bootstrap cluster")
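
		// StoreImages and the LocalImages variable are helpers defined elsewhere in this e2e package;
		// they write the configured images to the localImagesPath directory.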
		localImagesPath := e2eConfig.MustGetVariable(LocalImages)
		Byf("Storing Images in %s", localImagesPath)
		Expect(StoreImages(ctx, StoreImagesInput{
			Directory: localImagesPath,
			Images:    config.Images,
		})).Should(Succeed())

		kubeconfigPath = clusterProvider.GetKubeconfigPath()
		Expect(kubeconfigPath).To(BeAnExistingFile(), "Failed to get the kubeconfig file for the bootstrap cluster")
	} else {
		By("Using an existing bootstrap cluster")
	}

	clusterProxy := framework.NewClusterProxy("bootstrap", kubeconfigPath, scheme)
	Expect(clusterProxy).ToNot(BeNil(), "Failed to get a bootstrap cluster proxy")

	return clusterProvider, clusterProxy
}

// initBootstrapCluster initializes a bootstrap cluster with the latest minor version.
func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) {
	InitManagementCluster(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{
		ClusterProxy:              bootstrapClusterProxy,
		ClusterctlConfigPath:      clusterctlConfig,
		InfrastructureProviders:   config.InfrastructureProviders(),
		IPAMProviders:             config.IPAMProviders(),
		RuntimeExtensionProviders: config.RuntimeExtensionProviders(),
		BootstrapProviders:        []string{"rke2-bootstrap"},
		ControlPlaneProviders:     []string{"rke2-control-plane"},
		LogFolder:                 filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
		DisableMetricsCollection:  true,
	}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
}

// initUpgradableBootstrapCluster initializes a bootstrap cluster with the latest released minor version of the providers
// and is used to perform an upgrade to the next version.
// Make sure to update the version in the providers list to the latest released minor version.
func initUpgradableBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) {
	InitManagementCluster(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{
		ClusterProxy:              bootstrapClusterProxy,
		ClusterctlConfigPath:      clusterctlConfig,
		InfrastructureProviders:   config.InfrastructureProviders(),
		IPAMProviders:             config.IPAMProviders(),
		RuntimeExtensionProviders: config.RuntimeExtensionProviders(),
		BootstrapProviders:        []string{"rke2-bootstrap:v0.20.0"},
		ControlPlaneProviders:     []string{"rke2-control-plane:v0.20.0"},
		LogFolder:                 filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
		DisableMetricsCollection:  true,
	}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
}
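
// dumpBootstrapClusterLogs collects the logs of every node in the bootstrap cluster and stores them under the artifact folder.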
func dumpBootstrapClusterLogs(bootstrapClusterProxy framework.ClusterProxy) {
	if bootstrapClusterProxy == nil {
		return
	}

	clusterLogCollector := bootstrapClusterProxy.GetLogCollector()
	if clusterLogCollector == nil {
		return
	}

	nodes, err := bootstrapClusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		fmt.Printf("Failed to get nodes for the bootstrap cluster: %v\n", err)
		return
	}

	for i := range nodes.Items {
		nodeName := nodes.Items[i].GetName()
		err = clusterLogCollector.CollectMachineLog(
			ctx,
			bootstrapClusterProxy.GetClient(),
			// The bootstrap cluster is not expected to be a CAPI cluster, so in order to re-use the logCollector,
			// we create a fake machine that wraps the node.
			// NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to the bootstrap clusters generated with kind.
			// This might not work if you are using an existing bootstrap cluster provided by other means.
			&clusterv1.Machine{
				Spec:       clusterv1.MachineSpec{ClusterName: nodeName},
				ObjectMeta: metav1.ObjectMeta{Name: nodeName},
			},
			filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "machines", nodeName),
		)
		if err != nil {
			fmt.Printf("Failed to get logs for the bootstrap cluster node %s: %v\n", nodeName, err)
		}
	}
}
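
// tearDown disposes of the bootstrap cluster proxy and, when the bootstrap cluster was created by this suite, deletes it.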
func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
	if bootstrapClusterProxy != nil {
		bootstrapClusterProxy.Dispose(ctx)
	}

	if bootstrapClusterProvider != nil {
		bootstrapClusterProvider.Dispose(ctx)
	}
}