add operator e2e suites

Signed-off-by: zhzhuang-zju <m17799853869@163.com>
This commit is contained in:
zhzhuang-zju 2025-02-26 10:35:15 +08:00
parent 00f936b565
commit 4e592c1bee
31 changed files with 758 additions and 73 deletions


@ -161,3 +161,59 @@ jobs:
with:
name: karmada_kind_log_${{ matrix.k8s }}
path: /tmp/karmada/
e2e-operator:
name: operator e2e test
needs: build
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
# Support the latest three minor releases of Kubernetes here, which roughly matches
# the Kubernetes End of Life policy: https://kubernetes.io/releases/
# Please remember to update the CI Schedule Workflow when we add a new version.
k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
steps:
# Free up disk space on Ubuntu
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed if set to "true", but it frees about 6 GB
tool-cache: false
# all of these default to true, but feel free to set to "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: false
docker-images: false
swap-storage: false
- name: checkout code
uses: actions/checkout@v4
with:
# Number of commits to fetch. 0 indicates all history for all branches and tags.
# We need to guess version via git tags.
fetch-depth: 0
- name: install Go
uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: setup operator e2e test environment
run: |
export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }}
hack/operator-e2e-environment.sh
- name: run e2e
run: |
export ARTIFACTS_PATH=${{ github.workspace }}/karmada-operator-e2e-logs/${{ matrix.k8s }}/
hack/run-e2e-operator.sh
- name: upload logs
if: always()
uses: actions/upload-artifact@v4
with:
name: karmada_operator_e2e_log_${{ matrix.k8s }}
path: ${{ github.workspace }}/karmada-operator-e2e-logs/${{ matrix.k8s }}/
- name: upload kind logs
if: always()
uses: actions/upload-artifact@v4
with:
name: karmada_operator_kind_log_${{ matrix.k8s }}
path: /tmp/karmada/


@ -0,0 +1,82 @@
#!/usr/bin/env bash
# Copyright 2025 The Karmada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# This script is used in the CI workflow to set up a local karmada-operator e2e testing environment.
# It deploys karmada-operator and related resources to the host cluster.
# This script depends on utils in: ${REPO_ROOT}/hack/util.sh.
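# Example invocation (illustrative; mirrors the GitHub Actions job above):
#   export CLUSTER_VERSION=kindest/node:v1.31.0
#   hack/operator-e2e-environment.sh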
function usage() {
echo "Usage:"
echo " hack/operator-e2e-environment.sh [-h]"
echo " h: print help information"
}
function getCrdsDir() {
local path=$1
local url=$2
local key=$(echo "$url" | xargs) # Trim whitespace using xargs
local hash=$(echo -n "$key" | sha256sum | awk '{print $1}') # Calculate SHA256 hash
local hashedKey=${hash:0:64} # Take the first 64 characters of the hash
echo "${path}/cache/${hashedKey}"
}
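# Example (illustrative): with DATA_DIR=/var/lib/karmada and CRDTARBALL_URL=http://local (as set below),
# getCrdsDir prints /var/lib/karmada/cache/<sha256 of "http://local">, which is the cache directory the
# karmada-operator checks before attempting to download the CRD tarball.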
while getopts 'h' OPT; do
case $OPT in
h)
usage
exit 0
;;
?)
usage
exit 1
;;
esac
done
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${REPO_ROOT}"/hack/util.sh
KARMADA_SYSTEM_NAMESPACE="karmada-system"
# variable define
export KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
export MAIN_KUBECONFIG=${MAIN_KUBECONFIG:-"${KUBECONFIG_PATH}/karmada.config"}
export HOST_CLUSTER_NAME=${HOST_CLUSTER_NAME:-"karmada-host"}
# step1. set up a base development environment
"${REPO_ROOT}"/hack/setup-dev-base.sh
export KUBECONFIG="${MAIN_KUBECONFIG}"
# step2. deploy karmada-operator
"${REPO_ROOT}"/hack/deploy-karmada-operator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}"
# step3. install karmada instance by karmada-operator
# step3.1 prepare the local crds
echo "Prepare the local crds"
cd ${REPO_ROOT}/charts/karmada/
cp -r _crds crds
tar -zcvf ../../crds.tar.gz crds
cd -
# step3.2 copy the local crds.tar.gz file into the karmada-operator's CRD cache directory so that the operator will skip downloading the CRD tarball.
CRDTARBALL_URL="http://local"
DATA_DIR="/var/lib/karmada"
CRD_CACHE_DIR=$(getCrdsDir "${DATA_DIR}" "${CRDTARBALL_URL}")
OPERATOR_POD_NAME=$(kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" get pods -n ${KARMADA_SYSTEM_NAMESPACE} -l karmada-app=karmada-operator -o custom-columns=NAME:.metadata.name --no-headers)
kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" exec -i ${OPERATOR_POD_NAME} -n ${KARMADA_SYSTEM_NAMESPACE} -- mkdir -p ${CRD_CACHE_DIR}
kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" cp ${REPO_ROOT}/crds.tar.gz ${KARMADA_SYSTEM_NAMESPACE}/${OPERATOR_POD_NAME}:${CRD_CACHE_DIR}
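# Optional sanity check (illustrative, not part of the workflow): confirm the tarball landed in the
# operator's CRD cache directory before running the e2e suite.
# kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" exec -i ${OPERATOR_POD_NAME} -n ${KARMADA_SYSTEM_NAMESPACE} -- ls -l ${CRD_CACHE_DIR}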

hack/run-e2e-operator.sh Executable file

@ -0,0 +1,63 @@
#!/usr/bin/env bash
# Copyright 2025 The Karmada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# This script runs the e2e tests against a Kubernetes cluster on which the karmada-operator has been deployed.
# Prepare the environment in advance, for example with hack/operator-e2e-environment.sh.
#
# Usage: hack/run-e2e-operator.sh
# Example 1: hack/run-e2e-operator.sh (run operator e2e with default config)
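# Example 2 (illustrative, assuming a non-kind host cluster): skip kind log collection and write artifacts elsewhere:
#   RUNNING_ON_KIND=false ARTIFACTS_PATH=/tmp/karmada-operator-e2e-logs hack/run-e2e-operator.sh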
KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
HOST_KUBECONFIG=${HOST_KUBECONFIG:-"$KUBECONFIG_PATH/karmada.config"}
# RUNNING_ON_KIND indicates whether the tests are running against a kind cluster.
# Defaults to true.
# For kind clusters, the kind-related logs will be collected after the tests finish.
RUNNING_ON_KIND=${RUNNING_ON_KIND:-true}
HOST_CLUSTER_NAME=${HOST_CLUSTER_NAME:-"karmada-host"}
ARTIFACTS_PATH=${ARTIFACTS_PATH:-"${HOME}/karmada-operator-e2e-logs"}
mkdir -p "$ARTIFACTS_PATH"
# Install ginkgo
GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo
# Run e2e
export KUBECONFIG=${HOST_KUBECONFIG}
set +e
ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/suites/operator
TESTING_RESULT=$?
# Collect logs
echo "Collect logs to $ARTIFACTS_PATH..."
cp "$HOST_KUBECONFIG" "$ARTIFACTS_PATH"
if [ "$RUNNING_ON_KIND" = true ]; then
echo "Collecting $HOST_CLUSTER_NAME logs..."
mkdir -p "$ARTIFACTS_PATH/$HOST_CLUSTER_NAME"
kind export logs --name="$HOST_CLUSTER_NAME" "$ARTIFACTS_PATH/$HOST_CLUSTER_NAME"
fi
echo "Collected logs at $ARTIFACTS_PATH:"
ls -al "$ARTIFACTS_PATH"
exit $TESTING_RESULT


@ -264,7 +264,7 @@ func UpdateClusterLabels(client karmada.Interface, clusterName string, labels ma
return false, err
}
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// DeleteClusterLabels deletes cluster labels if they exist.
@ -286,7 +286,7 @@ func DeleteClusterLabels(client karmada.Interface, clusterName string, labels ma
return false, err
}
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// GetClusterNamesFromClusters will get Clusters' names from the Clusters object.
@ -306,7 +306,7 @@ func WaitClusterFitWith(c client.Client, clusterName string, fit func(cluster *c
return false, err
}
return fit(currentCluster), nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// LoadRESTClientConfig creates a rest.Config using the passed kubeconfig. If context is empty, current context in kubeconfig will be used.
@ -363,5 +363,5 @@ func UpdateClusterStatusCondition(client karmada.Interface, clusterName string,
return false, err
}
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}


@ -34,5 +34,5 @@ func WaitClusterResourceBindingFitWith(client karmada.Interface, name string, fi
return false
}
return fit(clusterResourceBinding)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}


@ -68,7 +68,7 @@ func WaitConfigMapPresentOnClusterFitWith(cluster, namespace, name string, fit f
return false
}
return fit(configmap)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// UpdateConfigMapWithPatch update configmap with patch bytes.
@ -99,7 +99,7 @@ func WaitConfigMapDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get configmap(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitConfigMapDisappearOnClusters wait configmap disappear on member clusters until timeout.


@ -19,10 +19,10 @@ package framework
import "time"
const (
// pollInterval defines the interval time for a poll operation.
pollInterval = 5 * time.Second
// pollTimeout defines the time after which the poll operation times out.
pollTimeout = 420 * time.Second
// PollInterval defines the interval time for a poll operation.
PollInterval = 5 * time.Second
// PollTimeout defines the time after which the poll operation times out.
PollTimeout = 420 * time.Second
// metricsCreationDelay defines the maximum time metrics not yet available for pod.
metricsCreationDelay = 2 * time.Minute
)


@ -74,7 +74,7 @@ func WaitCRDPresentOnClusters(client karmada.Interface, clusters []string, crdAP
cluster, err := FetchCluster(client, clusterName)
g.Expect(err).NotTo(gomega.HaveOccurred())
return helper.IsAPIEnabled(cluster.Status.APIEnablements, crdAPIVersion, crdKind), nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
})
}
@ -90,7 +90,7 @@ func WaitCRDDisappearedOnClusters(clusters []string, crdName string) {
gomega.Eventually(func() bool {
_, err := clusterDynamicClient.Resource(crdGVR).Get(context.TODO(), crdName, metav1.GetOptions{})
return apierrors.IsNotFound(err)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
})
}
@ -108,5 +108,5 @@ func WaitCRDFitWith(client dynamic.Interface, crdName string, fit func(crd *apie
return false
}
return fit(crd)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}


@ -55,7 +55,7 @@ func UpdateDeploymentPaused(client kubernetes.Interface, deployment *appsv1.Depl
deploy.Spec.Paused = paused
_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -79,7 +79,7 @@ func WaitDeploymentPresentOnClusterFitWith(cluster, namespace, name string, fit
return false
}
return fit(dep)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitDeploymentFitWith wait deployment sync with fit func.
@ -90,7 +90,7 @@ func WaitDeploymentFitWith(client kubernetes.Interface, namespace, name string,
return false
}
return fit(dep)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitDeploymentPresentOnClustersFitWith wait deployment present on cluster sync with fit func.
@ -111,7 +111,7 @@ func WaitDeploymentStatus(client kubernetes.Interface, deployment *appsv1.Deploy
return false
}
return CheckDeploymentReadyStatus(deploy, replicas)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
})
}
@ -132,7 +132,7 @@ func WaitDeploymentDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get deployment(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitDeploymentDisappearOnClusters wait deployment disappear on member clusters until timeout.
@ -155,7 +155,7 @@ func UpdateDeploymentReplicas(client kubernetes.Interface, deployment *appsv1.De
deploy.Spec.Replicas = &replicas
_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -170,7 +170,7 @@ func UpdateDeploymentAnnotations(client kubernetes.Interface, deployment *appsv1
deploy.Annotations = annotations
_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -190,7 +190,7 @@ func AppendDeploymentAnnotations(client kubernetes.Interface, deployment *appsv1
}
_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -205,7 +205,7 @@ func UpdateDeploymentLabels(client kubernetes.Interface, deployment *appsv1.Depl
deploy.Labels = labels
_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -220,7 +220,7 @@ func UpdateDeploymentVolumes(client kubernetes.Interface, deployment *appsv1.Dep
deploy.Spec.Template.Spec.Volumes = volumes
_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -235,7 +235,7 @@ func UpdateDeploymentServiceAccountName(client kubernetes.Interface, deployment
deploy.Spec.Template.Spec.ServiceAccountName = serviceAccountName
_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -252,7 +252,7 @@ func ExtractTargetClustersFrom(c client.Client, deployment *appsv1.Deployment) [
return false, nil
}
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
targetClusterNames := make([]string, 0, len(binding.Spec.Clusters))
for _, cluster := range binding.Spec.Clusters {
@ -282,7 +282,7 @@ func WaitDeploymentGetByClientFitWith(client kubernetes.Interface, namespace, na
return false
}
return fit(dep)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
})
}
@ -305,6 +305,6 @@ func WaitDeploymentReplicasFitWith(clusters []string, namespace, name string, ex
}
klog.Infof("The total replicas of deployment(%s/%s) is %d", namespace, name, totalReplicas)
return totalReplicas == expectReplicas
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
})
}


@ -35,5 +35,5 @@ func WaitEventFitWith(kubeClient kubernetes.Interface, namespace string, involve
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
return slices.ContainsFunc(eventList.Items, fit)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}


@ -86,6 +86,6 @@ func WaitFederatedResourceQuotaCollectStatus(client karmada.Interface, namespace
}
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
})
}


@ -67,7 +67,7 @@ func WaitJobPresentOnClusterFitWith(cluster, namespace, name string, fit func(jo
return false
}
return fit(dep)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitJobPresentOnClustersFitWith wait job present on cluster sync with fit func.
@ -96,7 +96,7 @@ func WaitJobDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get job(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitJobDisappearOnClusters wait job disappear on member clusters until timeout.


@ -48,7 +48,7 @@ func UpdateMultiClusterService(client karmada.Interface, mcs *networkingv1alpha1
mcsExist.Spec = mcs.Spec
_, err = client.NetworkingV1alpha1().MultiClusterServices(mcsExist.Namespace).Update(context.TODO(), mcsExist, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -68,5 +68,5 @@ func WaitMultiClusterServicePresentOnClustersFitWith(client karmada.Interface, n
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
return fit(mcs)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}


@ -52,7 +52,7 @@ func WaitNamespacePresentOnClusterByClient(client kubernetes.Interface, name str
_, err := client.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitNamespacePresentOnCluster wait namespace present on cluster until timeout.
@ -65,7 +65,7 @@ func WaitNamespacePresentOnCluster(cluster, name string) {
_, err := clusterClient.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitNamespacePresentOnClusters wait namespace present on clusters until timeout.
@ -94,7 +94,7 @@ func WaitNamespaceDisappearOnCluster(cluster, name string) {
klog.Errorf("Failed to get namespace(%s) on cluster(%s), err: %v", name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitNamespaceDisappearOnClusters wait namespace disappear on clusters until timeout.
@ -117,6 +117,6 @@ func UpdateNamespaceLabels(client kubernetes.Interface, namespace *corev1.Namesp
ns.Labels = labels
_, err = client.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}


@ -66,7 +66,7 @@ func WaitPVCPresentOnClusterFitWith(cluster, namespace, name string, fit func(pv
return false
}
return fit(pvc)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitPVCDisappearOnCluster wait PersistentVolumeClaim disappear on cluster until timeout.
@ -86,7 +86,7 @@ func WaitPVCDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get PersistentVolumeClaim(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitPVCDisappearOnClusters Wait for the PersistentVolumeClaim to disappear on member clusters until timeout.


@ -64,7 +64,7 @@ func WaitPodPresentOnClusterFitWith(cluster, namespace, name string, fit func(po
return false
}
return fit(pod)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitPodMetricsReady wait podMetrics to be ready.
@ -90,7 +90,7 @@ func WaitPodMetricsReady(kubeClient kubernetes.Interface, karmadaClient karmada.
return false
}
return true
}, metricsCreationDelay, pollInterval).Should(gomega.Equal(true))
}, metricsCreationDelay, PollInterval).Should(gomega.Equal(true))
}
// WaitPodPresentOnClustersFitWith wait pod present on cluster sync with fit func.
@ -119,7 +119,7 @@ func WaitPodDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get pod(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitPodDisappearOnClusters wait pod disappear on member clusters until timeout.


@ -94,5 +94,5 @@ func WaitPropagationPolicyFitWith(client karmada.Interface, namespace, name stri
return false
}
return fit(policy)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}


@ -70,7 +70,7 @@ func WaitRolePresentOnClusterFitWith(cluster, namespace, name string, fit func(r
return false
}
return fit(role)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitRoleDisappearOnClusters wait role disappear on member clusters until timeout.
@ -99,7 +99,7 @@ func WaitRoleDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get role(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// CreateClusterRole create clusterRole.
@ -142,7 +142,7 @@ func WaitClusterRolePresentOnClusterFitWith(cluster, name string, fit func(clust
return false
}
return fit(clusterRole)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitClusterRoleDisappearOnClusters wait clusterRole disappear on member clusters until timeout.
@ -171,7 +171,7 @@ func WaitClusterRoleDisappearOnCluster(cluster, name string) {
klog.Errorf("Failed to get clusterRole(%s) on cluster(%s), err: %v", name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitClusterRoleGetByClientFitWith wait clusterRole get by client fit with func.
@ -183,7 +183,7 @@ func WaitClusterRoleGetByClientFitWith(client kubernetes.Interface, name string,
return false
}
return fit(clusterRole)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
})
}
@ -227,7 +227,7 @@ func WaitRoleBindingPresentOnClusterFitWith(cluster, namespace, name string, fit
return false
}
return fit(roleBinding)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitRoleBindingDisappearOnClusters wait roleBinding disappear on member clusters until timeout.
@ -256,7 +256,7 @@ func WaitRoleBindingDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get roleBinding(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// CreateClusterRoleBinding create clusterRoleBinding.
@ -299,7 +299,7 @@ func WaitClusterRoleBindingPresentOnClusterFitWith(cluster, name string, fit fun
return false
}
return fit(clusterRoleBinding)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitClusterRoleBindingDisappearOnClusters wait clusterRoleBinding disappear on member clusters until timeout.
@ -328,7 +328,7 @@ func WaitClusterRoleBindingDisappearOnCluster(cluster, name string) {
klog.Errorf("Failed to get clusterRoleBinding(%s) on cluster(%s), err: %v", name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// CreateServiceAccount create serviceaccount.
@ -362,7 +362,7 @@ func WaitServiceAccountPresentOnClusterFitWith(cluster, namespace, name string,
return false
}
return fit(sa)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitServiceAccountPresentOnClustersFitWith wait sa present on cluster sync with fit func.
@ -391,7 +391,7 @@ func WaitServiceAccountDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get sa(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitServiceAccountDisappearOnClusters wait sa disappear on member clusters until timeout.


@ -0,0 +1,87 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operator
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
operator "github.com/karmada-io/karmada/operator/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/test/e2e/framework"
)
// WaitKarmadaReady waits for the karmada instance to become ready, or fails once the poll timeout is reached.
// Since the karmada-operator updates `karmada.spec` first and `karmada.status` afterwards, this function checks the
// `lastTransitionTime` of the `Ready` condition to ensure that the condition reflects the latest `karmada.spec`.
func WaitKarmadaReady(client operator.Interface, namespace, name string, lastTransitionTime time.Time) {
klog.Infof("Waiting for karmada instance %s/%s ready", namespace, name)
ginkgo.By(fmt.Sprintf("Waiting for karmada instance %s/%s ready", namespace, name), func() {
gomega.Eventually(func(g gomega.Gomega) bool {
karmada, err := client.OperatorV1alpha1().Karmadas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())
for _, condition := range karmada.Status.Conditions {
if condition.Type == "Ready" && condition.Status == "True" && condition.LastTransitionTime.After(lastTransitionTime) {
return true
}
}
return false
}, framework.PollTimeout, framework.PollInterval).Should(gomega.Equal(true))
})
}
// CreateKarmadaInstance creates a karmada instance.
func CreateKarmadaInstance(operatorClient operator.Interface, karmada *operatorv1alpha1.Karmada) error {
_, err := operatorClient.OperatorV1alpha1().Karmadas(karmada.GetNamespace()).Create(context.TODO(), karmada, metav1.CreateOptions{})
if err != nil {
if apierrors.IsAlreadyExists(err) {
return nil
}
return err
}
return nil
}
// UpdateKarmadaInstanceWithSpec updates karmada instance with spec.
func UpdateKarmadaInstanceWithSpec(client operator.Interface, namespace, name string, karmadaSpec operatorv1alpha1.KarmadaSpec) {
ginkgo.By(fmt.Sprintf("Updating Karmada(%s/%s) spec", namespace, name), func() {
karmada, err := client.OperatorV1alpha1().Karmadas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
karmada.Spec = karmadaSpec
_, err = client.OperatorV1alpha1().Karmadas(namespace).Update(context.TODO(), karmada, metav1.UpdateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// GetLastTransitionTime gets the last transition time of the given condition, returning time.Now() if it is not found.
func GetLastTransitionTime(karmada *operatorv1alpha1.Karmada, conditionType operatorv1alpha1.ConditionType) time.Time {
for _, condition := range karmada.Status.Conditions {
if condition.Type == string(conditionType) {
return condition.LastTransitionTime.Time
}
}
return time.Now()
}
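// Illustrative usage (mirrors the spec-update flow in the operator e2e specs): record the Ready
// condition's lastTransitionTime before mutating the spec, then wait for a newer Ready condition:
//
//	lastTransition := GetLastTransitionTime(karmadaObject, operatorv1alpha1.Ready)
//	UpdateKarmadaInstanceWithSpec(client, namespace, name, karmadaObject.Spec)
//	WaitKarmadaReady(client, namespace, name, lastTransition)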


@ -38,7 +38,7 @@ func WaitResourceBindingFitWith(client karmada.Interface, namespace, name string
return false
}
return fit(resourceBinding)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// AssertBindingScheduledClusters wait deployment present on member clusters sync with fit func.
@ -61,7 +61,7 @@ func AssertBindingScheduledClusters(client karmada.Interface, namespace, name st
}
}
return fmt.Errorf("scheduled clusters: %+v, expected possible results: %+v", scheduledClusters, expectedResults)
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -77,6 +77,6 @@ func WaitGracefulEvictionTasksDone(client karmada.Interface, namespace, name str
return fmt.Errorf("%d GracefulEvictionTasks is being processing", len(binding.Spec.GracefulEvictionTasks))
}
return nil
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}


@ -46,7 +46,7 @@ func WaitResourceQuotaPresentOnCluster(cluster, namespace, name string) {
_, err := clusterClient.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitResourceQuotaDisappearOnClusters wait resourceQuota disappear on clusters until timeout.
@ -75,5 +75,5 @@ func WaitResourceQuotaDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get resourceQuota(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}


@ -66,7 +66,7 @@ func WaitSecretPresentOnClusterFitWith(cluster, namespace, name string, fit func
return false
}
return fit(secret)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitSecretDisappearOnCluster wait secret disappear on cluster until timeout.
@ -86,7 +86,7 @@ func WaitSecretDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get secret(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitSecretDisappearOnClusters wait service disappear on member clusters until timeout.


@ -59,7 +59,7 @@ func WaitServicePresentOnClusterFitWith(cluster, namespace, name string, fit fun
return false
}
return fit(svc)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitServicePresentOnClustersFitWith wait service present on cluster sync with fit func.
@ -88,7 +88,7 @@ func WaitServiceDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get service(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitServiceDisappearOnClusters wait service disappear on member clusters until timeout.


@ -50,6 +50,6 @@ func UpdateStatefulSetReplicas(client kubernetes.Interface, statefulSet *appsv1.
gomega.Eventually(func() error {
_, err := client.AppsV1().StatefulSets(statefulSet.Namespace).Update(context.TODO(), statefulSet, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}


@ -40,5 +40,5 @@ func WaitForWorkToDisappear(client karmada.Interface, namespace, name string) {
return fmt.Errorf("failed to get work(%s/%s), err: %w", namespace, name, err)
}
return nil
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
}


@ -55,7 +55,7 @@ func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workloa
gomega.Eventually(func() error {
_, err = client.Resource(workloadGVR).Namespace(workload.Namespace).Update(context.TODO(), newUnstructuredObj, metav1.UpdateOptions{}, subresources...)
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -69,7 +69,7 @@ func GetWorkload(client dynamic.Interface, namespace, name string) *workloadv1al
gomega.Eventually(func() error {
unstructuredObj, err = client.Resource(workloadGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.UnstructuredContent(), workload)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
@ -101,7 +101,7 @@ func WaitWorkloadPresentOnClusterFitWith(cluster, namespace, name string, fit fu
err = runtime.DefaultUnstructuredConverter.FromUnstructured(workload.UnstructuredContent(), typedObj)
g.Expect(err).ShouldNot(gomega.HaveOccurred())
return fit(typedObj), nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitWorkloadPresentOnClustersFitWith waits workload present on member clusters sync with fit func.
@ -130,7 +130,7 @@ func WaitWorkloadDisappearOnCluster(cluster, namespace, name string) {
klog.Errorf("Failed to get workload(%s/%s) on cluster(%s), err: %v", namespace, name, cluster, err)
return false
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}, PollTimeout, PollInterval).Should(gomega.Equal(true))
}
// WaitWorkloadDisappearOnClusters wait workload disappear on member clusters until timeout.


@ -64,7 +64,7 @@ func UpdateWorkloadRebalancer(client karmada.Interface, name string, workloads *
}
_, err = client.AppsV1alpha1().WorkloadRebalancers().Update(context.TODO(), rebalancer, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -80,7 +80,7 @@ func WaitRebalancerObservedWorkloads(client karmada.Interface, name string, expe
return fmt.Errorf("observedWorkloads: %+v, expectedWorkloads: %+v", rebalancer.Status.ObservedWorkloads, expectedWorkloads)
}
return nil
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}
@ -96,6 +96,6 @@ func WaitRebalancerDisappear(client karmada.Interface, name string) {
return err
}
return fmt.Errorf("WorkloadRebalancer %s still exist: %+v", name, rebalancer)
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
})
}


@ -0,0 +1,82 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
"github.com/karmada-io/karmada/test/e2e/framework/resource/operator"
)
var _ = ginkgo.Describe("PriorityClass configuration testing", func() {
var karmadaName string
var karmadaObject *operatorv1alpha1.Karmada
var err error
ginkgo.Context("PriorityClass configuration testing", func() {
ginkgo.BeforeEach(func() {
karmadaName = KarmadaInstanceNamePrefix + rand.String(RandomStrLength)
InitializeKarmadaInstance(operatorClient, testNamespace, karmadaName)
})
ginkgo.AfterEach(func() {
err = operatorClient.OperatorV1alpha1().Karmadas(testNamespace).Delete(context.TODO(), karmadaName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("Custom priorityClass configuration", func() {
ginkgo.By("Check if default value is system-node-critical", func() {
// take etcd as a representative of StatefulSet.
etcd, err := kubeClient.AppsV1().StatefulSets(testNamespace).Get(context.TODO(), karmadaName+"-etcd", metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(etcd.Spec.Template.Spec.PriorityClassName).Should(gomega.Equal("system-node-critical"))
// take karmada-apiserver as a representative of Deployment.
karmadaApiserver, err := kubeClient.AppsV1().Deployments(testNamespace).Get(context.TODO(), karmadaName+"-apiserver", metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(karmadaApiserver.Spec.Template.Spec.PriorityClassName).Should(gomega.Equal("system-node-critical"))
})
ginkgo.By("Set priorityClass to system-cluster-critical", func() {
karmadaObject, err = operatorClient.OperatorV1alpha1().Karmadas(testNamespace).Get(context.TODO(), karmadaName, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
karmadaObject.Spec.Components.Etcd.Local.PriorityClassName = "system-cluster-critical"
karmadaObject.Spec.Components.KarmadaAPIServer.PriorityClassName = "system-cluster-critical"
operator.UpdateKarmadaInstanceWithSpec(operatorClient, testNamespace, karmadaName, karmadaObject.Spec)
operator.WaitKarmadaReady(operatorClient, testNamespace, karmadaName, operator.GetLastTransitionTime(karmadaObject, operatorv1alpha1.Ready))
})
ginkgo.By("Check if the PriorityClass is applied correctly", func() {
// take etcd as a representative of StatefulSet.
etcd, err := kubeClient.AppsV1().StatefulSets(testNamespace).Get(context.TODO(), karmadaName+"-etcd", metav1.GetOptions{ResourceVersion: "0"})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(etcd.Spec.Template.Spec.PriorityClassName).Should(gomega.Equal("system-cluster-critical"))
// take karmada-apiserver as a representative of Deployment.
karmadaApiserver, err := kubeClient.AppsV1().Deployments(testNamespace).Get(context.TODO(), karmadaName+"-apiserver", metav1.GetOptions{ResourceVersion: "0"})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(karmadaApiserver.Spec.Template.Spec.PriorityClassName).Should(gomega.Equal("system-cluster-critical"))
})
})
})
})


@ -0,0 +1,86 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
operatorutil "github.com/karmada-io/karmada/operator/pkg/util"
)
var _ = ginkgo.Describe("Status testing", func() {
var karmadaName string
var karmadaObject *operatorv1alpha1.Karmada
var err error
ginkgo.Context("Karmada instance status testing", func() {
ginkgo.BeforeEach(func() {
karmadaName = KarmadaInstanceNamePrefix + rand.String(RandomStrLength)
InitializeKarmadaInstance(operatorClient, testNamespace, karmadaName)
})
ginkgo.AfterEach(func() {
err = operatorClient.OperatorV1alpha1().Karmadas(testNamespace).Delete(context.TODO(), karmadaName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("Check if the karmada status meets the expectations", func() {
ginkgo.By("Get the latest karmada instance", func() {
karmadaObject, err = operatorClient.OperatorV1alpha1().Karmadas(testNamespace).Get(context.TODO(), karmadaName, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("Check if status.conditions meets the expectations", func() {
conditions := karmadaObject.Status.Conditions
gomega.Expect(len(conditions)).Should(gomega.BeNumerically(">", 0))
// check if the Ready condition is true
hasReadyCondition := false
for i := range karmadaObject.Status.Conditions {
switch karmadaObject.Status.Conditions[i].Type {
case string(operatorv1alpha1.Ready):
gomega.Expect(karmadaObject.Status.Conditions[i].Status).Should(gomega.Equal(metav1.ConditionTrue))
hasReadyCondition = true
}
}
gomega.Expect(hasReadyCondition).Should(gomega.BeTrue())
})
ginkgo.By("Check if the status.SecretRef can ref to the right secret", func() {
secretRef := karmadaObject.Status.SecretRef
gomega.Expect(secretRef).ShouldNot(gomega.BeNil())
gomega.Expect(secretRef.Namespace).Should(gomega.Equal(karmadaObject.GetNamespace()))
gomega.Expect(secretRef.Name).Should(gomega.Equal(operatorutil.AdminKarmadaConfigSecretName(karmadaObject.GetName())))
_, err := kubeClient.CoreV1().Secrets(secretRef.Namespace).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("Check if the status.apiServerService can ref to the right service", func() {
apiServerService := karmadaObject.Status.APIServerService
gomega.Expect(apiServerService).ShouldNot(gomega.BeNil())
gomega.Expect(apiServerService.Name).Should(gomega.Equal(operatorutil.KarmadaAPIServerName(karmadaObject.GetName())))
_, err := kubeClient.CoreV1().Services(karmadaObject.GetNamespace()).Get(context.TODO(), apiServerService.Name, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})
})
})


@ -0,0 +1,142 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"flag"
"fmt"
"os"
"os/exec"
"strings"
"testing"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
operator "github.com/karmada-io/karmada/operator/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/test/e2e/framework"
operatorresource "github.com/karmada-io/karmada/test/e2e/framework/resource/operator"
"github.com/karmada-io/karmada/test/helper"
)
const (
// RandomStrLength represents the random string length to combine names.
RandomStrLength = 5
// KarmadaInstanceNamePrefix the prefix of the karmada instance name.
KarmadaInstanceNamePrefix = "karmadatest-"
)
var (
// pollInterval defines the interval time for a poll operation.
pollInterval time.Duration
// pollTimeout defines the time after which the poll operation times out.
pollTimeout time.Duration
)
var (
hostContext string
kubeconfig string
karmadactlPath string
restConfig *rest.Config
kubeClient kubernetes.Interface
testNamespace string
operatorClient operator.Interface
)
func init() {
// usage ginkgo -- --poll-interval=5s --poll-timeout=5m
// eg. ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/ -- --poll-interval=5s --poll-timeout=5m
flag.DurationVar(&pollInterval, "poll-interval", 5*time.Second, "poll-interval defines the interval time for a poll operation")
flag.DurationVar(&pollTimeout, "poll-timeout", 300*time.Second, "poll-timeout defines the time after which the poll operation times out")
flag.StringVar(&hostContext, "host-context", "karmada-host", "Name of the host cluster context in control plane kubeconfig file.")
}
func TestE2E(t *testing.T) {
gomega.RegisterFailHandler(ginkgo.Fail)
ginkgo.RunSpecs(t, "E2E Operator Suite")
}
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
return nil
}, func([]byte) {
kubeconfig = os.Getenv("KUBECONFIG")
gomega.Expect(kubeconfig).ShouldNot(gomega.BeEmpty())
goPathCmd := exec.Command("go", "env", "GOPATH")
goPath, err := goPathCmd.CombinedOutput()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
formatGoPath := strings.Trim(string(goPath), "\n")
karmadactlPath = formatGoPath + "/bin/karmadactl"
gomega.Expect(karmadactlPath).ShouldNot(gomega.BeEmpty())
restConfig, err = framework.LoadRESTClientConfig(kubeconfig, hostContext)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
kubeClient, err = kubernetes.NewForConfig(restConfig)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
testNamespace = fmt.Sprintf("operatortest-%s", rand.String(RandomStrLength))
err = setupTestNamespace(testNamespace, kubeClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
operatorClient, err = operator.NewForConfig(restConfig)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
var _ = ginkgo.SynchronizedAfterSuite(func() {
// cleanup the test namespace we created in the host cluster.
// It will not return an error even if the namespace does not exist, which may happen in case setup failed.
err := cleanupTestNamespace(testNamespace, kubeClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, func() {})
// setupTestNamespace creates a namespace in the host cluster; most cases will run against it.
// A separate namespace makes it easier to clean up the resources deployed by the tests.
func setupTestNamespace(namespace string, kubeClient kubernetes.Interface) error {
namespaceObj := helper.NewNamespace(namespace)
_, err := util.CreateNamespace(kubeClient, namespaceObj)
if err != nil {
return err
}
return nil
}
// cleanupTestNamespace removes the namespace that was set up for the whole test run.
func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface) error {
err := util.DeleteNamespace(kubeClient, namespace)
if err != nil {
return err
}
return nil
}
// InitializeKarmadaInstance initializes a karmada instance.
func InitializeKarmadaInstance(client operator.Interface, namespace, name string) {
karmada := helper.NewKarmada(namespace, name)
now := time.Now()
err := operatorresource.CreateKarmadaInstance(client, karmada)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
operatorresource.WaitKarmadaReady(client, namespace, name, now)
}

test/helper/karmada.go Normal file

@ -0,0 +1,87 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
)
// NewKarmada returns a new Karmada instance.
func NewKarmada(namespace string, name string) *operatorv1alpha1.Karmada {
return &operatorv1alpha1.Karmada{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: operatorv1alpha1.KarmadaSpec{
CRDTarball: &operatorv1alpha1.CRDTarball{
HTTPSource: &operatorv1alpha1.HTTPSource{URL: "http://local"},
},
Components: &operatorv1alpha1.KarmadaComponents{
Etcd: &operatorv1alpha1.Etcd{},
KarmadaAggregatedAPIServer: &operatorv1alpha1.KarmadaAggregatedAPIServer{
CommonSettings: operatorv1alpha1.CommonSettings{
Image: operatorv1alpha1.Image{
ImageRepository: "docker.io/karmada/karmada-aggregated-apiserver",
ImageTag: "latest",
},
Replicas: ptr.To[int32](1),
},
},
KarmadaControllerManager: &operatorv1alpha1.KarmadaControllerManager{
CommonSettings: operatorv1alpha1.CommonSettings{
Image: operatorv1alpha1.Image{
ImageRepository: "docker.io/karmada/karmada-controller-manager",
ImageTag: "latest",
},
Replicas: ptr.To[int32](1),
},
},
KarmadaScheduler: &operatorv1alpha1.KarmadaScheduler{
CommonSettings: operatorv1alpha1.CommonSettings{
Image: operatorv1alpha1.Image{
ImageRepository: "docker.io/karmada/karmada-scheduler",
ImageTag: "latest",
},
Replicas: ptr.To[int32](1),
},
},
KarmadaWebhook: &operatorv1alpha1.KarmadaWebhook{
CommonSettings: operatorv1alpha1.CommonSettings{
Image: operatorv1alpha1.Image{
ImageRepository: "docker.io/karmada/karmada-webhook",
ImageTag: "latest",
},
Replicas: ptr.To[int32](1),
},
},
KarmadaMetricsAdapter: &operatorv1alpha1.KarmadaMetricsAdapter{
CommonSettings: operatorv1alpha1.CommonSettings{
Image: operatorv1alpha1.Image{
ImageRepository: "docker.io/karmada/karmada-metrics-adapter",
ImageTag: "latest",
},
Replicas: ptr.To[int32](1),
},
},
},
},
}
}
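// Illustrative usage (mirrors InitializeKarmadaInstance in test/e2e/suites/operator):
//
//	karmada := helper.NewKarmada(namespace, name)
//	err := operatorresource.CreateKarmadaInstance(operatorClient, karmada)
//	operatorresource.WaitKarmadaReady(operatorClient, namespace, name, time.Now())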