rename membercluster to cluster in relevant implementations

Signed-off-by: Kevin Wang <kevinwzf0126@gmail.com>
Kevin Wang 2021-01-22 22:32:13 +08:00 committed by Hongcai Ren
parent 03120e6923
commit e33559250c
26 changed files with 472 additions and 283 deletions

View File

@@ -29,7 +29,7 @@ ETCD stores the karmada API objects, the API Server is the REST endpoint all oth
The Karmada Controller Manager runs the various controllers; the controllers watch Karmada objects and then talk to the underlying clusters' API servers to create regular Kubernetes resources.
1. Cluster Controller: attach kubernetes clusters to Karmada for managing the lifecycle of the clusters by creating membercluster object.
1. Cluster Controller: attaches Kubernetes clusters to Karmada and manages the lifecycle of the clusters by creating Cluster objects.
2. Policy Controller: the controller watches PropagationPolicy objects. When a PropagationPolicy object is added, it selects a group of resources matching the resourceSelector and creates a PropagationBinding for each single resource object.
3. Binding Controller: the controller watches PropagationBinding objects and, for each target cluster, creates a PropagationWork object carrying a single resource manifest, as sketched below.
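All of these controllers share the same controller-runtime shape: watch one Karmada object type and reconcile the objects derived from it. A pared-down, hypothetical sketch of that shape (this is not the actual Binding Controller; the reconcile body is elided):

```go
package binding

import (
	controllerruntime "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/karmada-io/karmada/pkg/apis/propagationstrategy/v1alpha1"
)

// Controller sketches the common controller shape.
type Controller struct {
	client.Client
}

// Reconcile would fetch the PropagationBinding named by req and then
// create or update one PropagationWork per target cluster.
func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
	return controllerruntime.Result{}, nil
}

// SetupWithManager registers the controller for its watched type, mirroring
// the cluster controller added in this commit.
func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
	return controllerruntime.NewControllerManagedBy(mgr).
		For(&v1alpha1.PropagationBinding{}).
		Complete(c)
}
```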
@@ -144,12 +144,12 @@ Then, install `karmadactl` command and join the member cluster:
# go get github.com/karmada-io/karmada/cmd/karmadactl
# karmadactl join member1 --member-cluster-kubeconfig=/root/.kube/member1.config
```
The `karmadactl join` command will create a `MemberCluster` object to reflect the member cluster.
The `karmadactl join` command will create a `Cluster` object to reflect the member cluster.
### 3. Check member cluster status
Now, check the member clusters from the karmada control plane with the following command:
```
# kubectl get membercluster
# kubectl get cluster
NAME VERSION READY AGE
member1 v1.19.1 True 66s
```
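The programmatic equivalent goes through the generated clientset and the `ClusterV1alpha1().Clusters()` accessor this commit introduces; a minimal sketch (the kubeconfig path and error handling are illustrative):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

func main() {
	// Build a client against the karmada control plane (path is illustrative).
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/karmada.config")
	if err != nil {
		panic(err)
	}
	client := karmadaclientset.NewForConfigOrDie(config)

	// Equivalent of `kubectl get cluster`.
	clusters, err := client.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range clusters.Items {
		fmt.Printf("%s\t%s\n", c.Name, c.Status.KubernetesVersion)
	}
}
```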

View File

@@ -1,27 +1,27 @@
apiVersion: cluster.karmada.io/v1alpha1
kind: MemberCluster
metadata:
  name: cluster-foo
  namespace: karmada-cluster
spec:
  manageMode: Delegation
  accepted: true
  apiEndpoint: https://10.10.10.10:6339
  secretRef:
    namespace: karmada-cluster
    name: secret-foo
  provider: huaweicloud
  region: ap-southeast-1
  zone: az-1
---
apiVersion: v1
kind: Secret
metadata:
  name: secret-foo
  namespace: karmada-cluster
type: Opaque
stringData:
  token: dummy
  caBundle: dummy
apiVersion: cluster.karmada.io/v1alpha1
kind: Cluster
metadata:
  name: cluster-foo
  namespace: karmada-cluster
spec:
  manageMode: Delegation
  accepted: true
  apiEndpoint: https://10.10.10.10:6339
  secretRef:
    namespace: karmada-cluster
    name: secret-foo
  provider: huaweicloud
  region: ap-southeast-1
  zone: az-1
---
apiVersion: v1
kind: Secret
metadata:
  name: secret-foo
  namespace: karmada-cluster
type: Opaque
stringData:
  token: dummy
  caBundle: dummy

View File

@@ -14,8 +14,8 @@ import (
"github.com/karmada-io/karmada/cmd/controller-manager/app/options"
"github.com/karmada-io/karmada/pkg/controllers/binding"
"github.com/karmada-io/karmada/pkg/controllers/cluster"
"github.com/karmada-io/karmada/pkg/controllers/execution"
"github.com/karmada-io/karmada/pkg/controllers/membercluster"
"github.com/karmada-io/karmada/pkg/controllers/policy"
"github.com/karmada-io/karmada/pkg/controllers/status"
"github.com/karmada-io/karmada/pkg/util/gclient"
@@ -83,22 +83,22 @@ func setupControllers(mgr controllerruntime.Manager, stopChan <-chan struct{}) {
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), kubeClientSet, mgr.GetRESTMapper())
overridemanager := overridemanager.New(mgr.GetClient())
MemberClusterController := &membercluster.Controller{
ClusterController := &cluster.Controller{
Client: mgr.GetClient(),
KubeClientSet: kubeClientSet,
EventRecorder: mgr.GetEventRecorderFor(membercluster.ControllerName),
EventRecorder: mgr.GetEventRecorderFor(cluster.ControllerName),
}
if err := MemberClusterController.SetupWithManager(mgr); err != nil {
if err := ClusterController.SetupWithManager(mgr); err != nil {
klog.Fatalf("Failed to setup cluster controller: %v", err)
}
MemberClusterStatusController := &status.MemberClusterStatusController{
ClusterStatusController := &status.ClusterStatusController{
Client: mgr.GetClient(),
KubeClientSet: kubeClientSet,
EventRecorder: mgr.GetEventRecorderFor(status.ControllerName),
}
if err := MemberClusterStatusController.SetupWithManager(mgr); err != nil {
klog.Fatalf("Failed to setup memberclusterstatus controller: %v", err)
if err := ClusterStatusController.SetupWithManager(mgr); err != nil {
klog.Fatalf("Failed to setup clusterstatus controller: %v", err)
}
policyController := &policy.PropagationPolicyController{

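For orientation, setupControllers hangs all of these controllers off one controller-runtime manager. A minimal sketch of the surrounding bootstrap, assuming the same-package setupControllers from this diff and the stop-channel-based controller-runtime API this code uses (scheme and flag wiring elided):

```go
package app

import (
	"k8s.io/klog/v2"
	controllerruntime "sigs.k8s.io/controller-runtime"
)

// run sketches the bootstrap around setupControllers: build a manager from
// the kubeconfig/in-cluster REST config, register the controllers, start.
func run() {
	mgr, err := controllerruntime.NewManager(controllerruntime.GetConfigOrDie(), controllerruntime.Options{})
	if err != nil {
		klog.Fatalf("Failed to build controller manager: %v", err)
	}

	stopChan := controllerruntime.SetupSignalHandler()
	setupControllers(mgr, stopChan)

	if err := mgr.Start(stopChan); err != nil {
		klog.Fatalf("Controller manager exits unexpectedly: %v", err)
	}
}
```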
View File

@@ -66,7 +66,7 @@ function installCRDs() {
# install APIs
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/membercluster.karmada.io_memberclusters.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/cluster.karmada.io_clusters.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationpolicies.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationbindings.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationworks.yaml"

View File

@@ -73,7 +73,7 @@ function installCRDs() {
# install APIs
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/membercluster.karmada.io_memberclusters.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/cluster.karmada.io_clusters.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationpolicies.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationbindings.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationworks.yaml"

View File

@@ -130,7 +130,7 @@ func (c *PropagationBindingController) findOrphanWorks(ownerLabel string, cluste
var orphanWorks []v1alpha1.PropagationWork
expectClusters := sets.NewString(clusterNames...)
for _, work := range propagationWorkList.Items {
workTargetCluster, err := names.GetMemberClusterName(work.GetNamespace())
workTargetCluster, err := names.GetClusterName(work.GetNamespace())
if err != nil {
klog.Errorf("Failed to get cluster name which PropagationWork %s/%s belongs to. Error: %v.",
work.GetNamespace(), work.GetName(), err)

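The `names` helpers used here encode the target cluster into the PropagationWork's namespace (the cluster's "execution space") and decode it back. A minimal sketch of that round trip, assuming the `karmada-es-` prefix convention (the real constant lives in `pkg/util/names`):

```go
package main

import (
	"fmt"
	"strings"
)

// executionSpacePrefix is an assumption for illustration; the actual value
// is defined in github.com/karmada-io/karmada/pkg/util/names.
const executionSpacePrefix = "karmada-es-"

// generateExecutionSpaceName mirrors names.GenerateExecutionSpaceName.
func generateExecutionSpaceName(clusterName string) (string, error) {
	if clusterName == "" {
		return "", fmt.Errorf("cluster name is empty")
	}
	return executionSpacePrefix + clusterName, nil
}

// getClusterName mirrors names.GetClusterName: recover the cluster name
// from a PropagationWork namespace.
func getClusterName(executionSpaceName string) (string, error) {
	if !strings.HasPrefix(executionSpaceName, executionSpacePrefix) {
		return "", fmt.Errorf("%s is not an execution space", executionSpaceName)
	}
	return strings.TrimPrefix(executionSpaceName, executionSpacePrefix), nil
}

func main() {
	ns, _ := generateExecutionSpaceName("member1") // "karmada-es-member1"
	cluster, _ := getClusterName(ns)               // "member1"
	fmt.Println(ns, cluster)
}
```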
View File

@@ -0,0 +1,189 @@
package cluster

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
	controllerruntime "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	"github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/names"
)

const (
	// ControllerName is the controller name that will be used when reporting events.
	ControllerName = "cluster-controller"

	executionSpaceLabelKey   = "karmada.io/executionspace"
	executionSpaceLabelValue = ""
)

// Controller is to sync Cluster.
type Controller struct {
	client.Client                      // used to operate Cluster resources.
	KubeClientSet kubernetes.Interface // used to get kubernetes resources.
	EventRecorder record.EventRecorder
}

// Reconcile performs a full reconciliation for the object referred to by the Request.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
	klog.V(4).Infof("Reconciling cluster %s", req.NamespacedName.Name)

	cluster := &v1alpha1.Cluster{}
	if err := c.Client.Get(context.TODO(), req.NamespacedName, cluster); err != nil {
		// The resource may no longer exist, in which case we stop processing.
		if apierrors.IsNotFound(err) {
			return controllerruntime.Result{}, nil
		}
		return controllerruntime.Result{Requeue: true}, err
	}

	if !cluster.DeletionTimestamp.IsZero() {
		return c.removeCluster(cluster)
	}

	return c.syncCluster(cluster)
}

// SetupWithManager creates a controller and registers it to the controller manager.
func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
	return controllerruntime.NewControllerManagedBy(mgr).For(&v1alpha1.Cluster{}).Complete(c)
}

func (c *Controller) syncCluster(cluster *v1alpha1.Cluster) (controllerruntime.Result, error) {
	// create execution space
	err := c.createExecutionSpace(cluster)
	if err != nil {
		return controllerruntime.Result{Requeue: true}, err
	}

	// ensure finalizer
	return c.ensureFinalizer(cluster)
}

func (c *Controller) removeCluster(cluster *v1alpha1.Cluster) (controllerruntime.Result, error) {
	err := c.removeExecutionSpace(cluster)
	if apierrors.IsNotFound(err) {
		return c.removeFinalizer(cluster)
	}
	if err != nil {
		klog.Errorf("Failed to remove execution space %v, err is %v", cluster.Name, err)
		return controllerruntime.Result{Requeue: true}, err
	}

	// make sure the given execution space has been deleted
	existES, err := c.ensureRemoveExecutionSpace(cluster)
	if err != nil {
		klog.Errorf("Failed to check whether the execution space exists in the given member cluster or not, error is: %v", err)
		return controllerruntime.Result{Requeue: true}, err
	} else if existES {
		return controllerruntime.Result{Requeue: true}, fmt.Errorf("requeuing operation until the execution space %v is deleted", cluster.Name)
	}

	return c.removeFinalizer(cluster)
}

// removeExecutionSpace deletes the given execution space
func (c *Controller) removeExecutionSpace(cluster *v1alpha1.Cluster) error {
	executionSpace, err := names.GenerateExecutionSpaceName(cluster.Name)
	if err != nil {
		klog.Errorf("Failed to generate execution space name for member cluster %s, err is %v", cluster.Name, err)
		return err
	}

	if err := c.KubeClientSet.CoreV1().Namespaces().Delete(context.TODO(), executionSpace, v1.DeleteOptions{}); err != nil {
		klog.Errorf("Error while deleting namespace %s: %s", executionSpace, err)
		return err
	}
	return nil
}

// ensureRemoveExecutionSpace makes sure the given execution space has been deleted
func (c *Controller) ensureRemoveExecutionSpace(cluster *v1alpha1.Cluster) (bool, error) {
	executionSpace, err := names.GenerateExecutionSpaceName(cluster.Name)
	if err != nil {
		klog.Errorf("Failed to generate execution space name for member cluster %s, err is %v", cluster.Name, err)
		return false, err
	}

	_, err = c.KubeClientSet.CoreV1().Namespaces().Get(context.TODO(), executionSpace, v1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		klog.Errorf("Failed to get execution space %v, err is %v", executionSpace, err)
		return false, err
	}
	return true, nil
}

func (c *Controller) removeFinalizer(cluster *v1alpha1.Cluster) (controllerruntime.Result, error) {
	if !controllerutil.ContainsFinalizer(cluster, util.ClusterControllerFinalizer) {
		return controllerruntime.Result{}, nil
	}

	controllerutil.RemoveFinalizer(cluster, util.ClusterControllerFinalizer)
	err := c.Client.Update(context.TODO(), cluster)
	if err != nil {
		return controllerruntime.Result{Requeue: true}, err
	}

	return controllerruntime.Result{}, nil
}

func (c *Controller) ensureFinalizer(cluster *v1alpha1.Cluster) (controllerruntime.Result, error) {
	if controllerutil.ContainsFinalizer(cluster, util.ClusterControllerFinalizer) {
		return controllerruntime.Result{}, nil
	}

	controllerutil.AddFinalizer(cluster, util.ClusterControllerFinalizer)
	err := c.Client.Update(context.TODO(), cluster)
	if err != nil {
		return controllerruntime.Result{Requeue: true}, err
	}

	return controllerruntime.Result{}, nil
}

// createExecutionSpace creates the execution space for the given cluster when it joins
func (c *Controller) createExecutionSpace(cluster *v1alpha1.Cluster) error {
	executionSpace, err := names.GenerateExecutionSpaceName(cluster.Name)
	if err != nil {
		klog.Errorf("Failed to generate execution space name for member cluster %s, err is %v", cluster.Name, err)
		return err
	}

	// create the execution space when the member cluster joins
	_, err = c.KubeClientSet.CoreV1().Namespaces().Get(context.TODO(), executionSpace, v1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			clusterES := &corev1.Namespace{
				ObjectMeta: v1.ObjectMeta{
					Name:   executionSpace,
					Labels: map[string]string{executionSpaceLabelKey: executionSpaceLabelValue},
				},
			}
			_, err = c.KubeClientSet.CoreV1().Namespaces().Create(context.TODO(), clusterES, v1.CreateOptions{})
			if err != nil {
				klog.Errorf("Failed to create execution space for cluster %v", cluster.Name)
				return err
			}
		} else {
			klog.Errorf("Could not get %s namespace: %v", executionSpace, err)
			return err
		}
	}
	return nil
}

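Since the controller labels every execution-space namespace with `karmada.io/executionspace`, the spaces it manages can be enumerated with a plain label selector; a small sketch (client construction omitted):

```go
package cluster

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listExecutionSpaces prints the namespaces carrying the label set by
// createExecutionSpace above; a bare key in a selector means "label exists".
func listExecutionSpaces(client kubernetes.Interface) error {
	nsList, err := client.CoreV1().Namespaces().List(context.TODO(),
		metav1.ListOptions{LabelSelector: "karmada.io/executionspace"})
	if err != nil {
		return err
	}
	for _, ns := range nsList.Items {
		fmt.Println(ns.Name)
	}
	return nil
}
```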
View File

@@ -102,21 +102,21 @@ func (c *Controller) isResourceApplied(propagationWorkStatus *propagationstrateg
// tryDeleteWorkload tries to delete resource in the given member cluster.
// Abort the deletion when the member cluster is unready; otherwise we could never unjoin an unready member cluster.
func (c *Controller) tryDeleteWorkload(propagationWork *propagationstrategy.PropagationWork) error {
memberClusterName, err := names.GetMemberClusterName(propagationWork.Namespace)
clusterName, err := names.GetClusterName(propagationWork.Namespace)
if err != nil {
klog.Errorf("Failed to get member cluster name for propagationWork %s/%s", propagationWork.Namespace, propagationWork.Name)
return err
}
memberCluster, err := util.GetMemberCluster(c.Client, memberClusterName)
cluster, err := util.GetCluster(c.Client, clusterName)
if err != nil {
klog.Errorf("Failed to get the given member cluster %s", memberClusterName)
klog.Errorf("Failed to get the given member cluster %s", clusterName)
return err
}
// Do not clean up resources in the member cluster while its status is unready
if !util.IsMemberClusterReady(&memberCluster.Status) {
klog.Infof("Do not clean up resource in the given member cluster if the status of the given member cluster %s is unready", memberCluster.Name)
if !util.IsClusterReady(&cluster.Status) {
klog.Infof("Do not clean up resource in the given member cluster if the status of the given member cluster %s is unready", cluster.Name)
return nil
}
@@ -128,9 +128,9 @@ func (c *Controller) tryDeleteWorkload(propagationWork *propagationstrategy.Prop
return err
}
err = c.ObjectWatcher.Delete(memberClusterName, workload)
err = c.ObjectWatcher.Delete(clusterName, workload)
if err != nil {
klog.Errorf("Failed to delete resource in the given member cluster %v, err is %v", memberCluster.Name, err)
klog.Errorf("Failed to delete resource in the given member cluster %v, err is %v", cluster.Name, err)
return err
}
}
@@ -139,24 +139,24 @@ func (c *Controller) tryDeleteWorkload(propagationWork *propagationstrategy.Prop
}
func (c *Controller) dispatchPropagationWork(propagationWork *propagationstrategy.PropagationWork) error {
memberClusterName, err := names.GetMemberClusterName(propagationWork.Namespace)
clusterName, err := names.GetClusterName(propagationWork.Namespace)
if err != nil {
klog.Errorf("Failed to get member cluster name for propagationWork %s/%s", propagationWork.Namespace, propagationWork.Name)
return err
}
memberCluster, err := util.GetMemberCluster(c.Client, memberClusterName)
cluster, err := util.GetCluster(c.Client, clusterName)
if err != nil {
klog.Errorf("Failed to the get given member cluster %s", memberClusterName)
klog.Errorf("Failed to the get given member cluster %s", clusterName)
return err
}
if !util.IsMemberClusterReady(&memberCluster.Status) {
klog.Errorf("The status of the given member cluster %s is unready", memberCluster.Name)
return fmt.Errorf("cluster %s is not ready, requeuing operation until cluster state is ready", memberCluster.Name)
if !util.IsClusterReady(&cluster.Status) {
klog.Errorf("The status of the given member cluster %s is unready", cluster.Name)
return fmt.Errorf("cluster %s is not ready, requeuing operation until cluster state is ready", cluster.Name)
}
err = c.syncToMemberClusters(memberCluster, propagationWork)
err = c.syncToClusters(cluster, propagationWork)
if err != nil {
klog.Errorf("Failed to dispatch propagationWork %v, namespace is %v, err is %v", propagationWork.Name, propagationWork.Namespace, err)
return err
@@ -165,9 +165,9 @@ func (c *Controller) dispatchPropagationWork(propagationWork *propagationstrateg
return nil
}
// syncToMemberClusters ensures that the state of the given object is synchronized to member clusters.
func (c *Controller) syncToMemberClusters(memberCluster *v1alpha1.Cluster, propagationWork *propagationstrategy.PropagationWork) error {
memberClusterDynamicClient, err := util.NewClusterDynamicClientSet(memberCluster, c.KubeClientSet)
// syncToClusters ensures that the state of the given object is synchronized to member clusters.
func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, propagationWork *propagationstrategy.PropagationWork) error {
clusterDynamicClient, err := util.NewClusterDynamicClientSet(cluster, c.KubeClientSet)
if err != nil {
return err
}
@@ -184,28 +184,28 @@ func (c *Controller) syncToMemberClusters(memberCluster *v1alpha1.Cluster, propa
applied := c.isResourceApplied(&propagationWork.Status)
if applied {
// todo: get memberClusterObj from cache
// todo: get clusterObj from cache
dynamicResource, err := restmapper.GetGroupVersionResource(c.RESTMapper, workload.GroupVersionKind())
if err != nil {
klog.Errorf("Failed to get resource(%s/%s) as mapping GVK to GVR failed: %v", workload.GetNamespace(), workload.GetName(), err)
return err
}
memberClusterObj, err := memberClusterDynamicClient.DynamicClientSet.Resource(dynamicResource).Namespace(workload.GetNamespace()).Get(context.TODO(), workload.GetName(), v1.GetOptions{})
clusterObj, err := clusterDynamicClient.DynamicClientSet.Resource(dynamicResource).Namespace(workload.GetNamespace()).Get(context.TODO(), workload.GetName(), v1.GetOptions{})
if err != nil {
klog.Errorf("Failed to get resource %v from member cluster, err is %v ", workload.GetName(), err)
return err
}
err = c.ObjectWatcher.Update(memberCluster.Name, memberClusterObj, workload)
err = c.ObjectWatcher.Update(cluster.Name, clusterObj, workload)
if err != nil {
klog.Errorf("Failed to update resource in the given member cluster %s, err is %v", memberCluster.Name, err)
klog.Errorf("Failed to update resource in the given member cluster %s, err is %v", cluster.Name, err)
return err
}
} else {
err = c.ObjectWatcher.Create(memberCluster.Name, workload)
err = c.ObjectWatcher.Create(cluster.Name, workload)
if err != nil {
klog.Errorf("Failed to create resource in the given member cluster %s, err is %v", memberCluster.Name, err)
klog.Errorf("Failed to create resource in the given member cluster %s, err is %v", cluster.Name, err)
return err
}

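`restmapper.GetGroupVersionResource` above is a Karmada helper; the underlying client-go mechanism it presumably wraps is a `meta.RESTMapper` lookup. A minimal sketch of that mapping step (mapper construction omitted):

```go
package util

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// gvkToGVR resolves the GroupVersionKind parsed from a manifest into the
// GroupVersionResource a dynamic client needs.
func gvkToGVR(mapper meta.RESTMapper, gvk schema.GroupVersionKind) (schema.GroupVersionResource, error) {
	mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return schema.GroupVersionResource{}, err
	}
	return mapping.Resource, nil
}
```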
View File

@@ -32,8 +32,8 @@ const (
clusterNotReachableMsg = "cluster is not reachable"
)
// MemberClusterStatusController is to sync status of Cluster.
type MemberClusterStatusController struct {
// ClusterStatusController is to sync status of Cluster.
type ClusterStatusController struct {
client.Client // used to operate Cluster resources.
KubeClientSet kubernetes.Interface // used to get kubernetes resources.
EventRecorder record.EventRecorder
@@ -42,11 +42,11 @@ type MemberClusterStatusController struct {
// Reconcile syncs status of the given member cluster.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will requeue the reconcile key after the duration.
func (c *MemberClusterStatusController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).Infof("Syncing memberCluster status: %s", req.NamespacedName.String())
func (c *ClusterStatusController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).Infof("Syncing cluster status: %s", req.NamespacedName.String())
memberCluster := &v1alpha1.Cluster{}
if err := c.Client.Get(context.TODO(), req.NamespacedName, memberCluster); err != nil {
cluster := &v1alpha1.Cluster{}
if err := c.Client.Get(context.TODO(), req.NamespacedName, cluster); err != nil {
// The resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
return controllerruntime.Result{}, nil
@@ -55,82 +55,82 @@ func (c *MemberClusterStatusController) Reconcile(req controllerruntime.Request)
return controllerruntime.Result{Requeue: true}, err
}
if !memberCluster.DeletionTimestamp.IsZero() {
if !cluster.DeletionTimestamp.IsZero() {
return controllerruntime.Result{}, nil
}
// start syncing status only when the finalizer is present on the given Cluster to
// avoid conflict with cluster controller.
if !controllerutil.ContainsFinalizer(memberCluster, util.MemberClusterControllerFinalizer) {
klog.V(2).Infof("waiting finalizer present for member cluster: %s", memberCluster.Name)
if !controllerutil.ContainsFinalizer(cluster, util.ClusterControllerFinalizer) {
klog.V(2).Infof("waiting finalizer present for member cluster: %s", cluster.Name)
return controllerruntime.Result{Requeue: true}, nil
}
return c.syncMemberClusterStatus(memberCluster)
return c.syncClusterStatus(cluster)
}
// SetupWithManager creates a controller and registers it to the controller manager.
func (c *MemberClusterStatusController) SetupWithManager(mgr controllerruntime.Manager) error {
func (c *ClusterStatusController) SetupWithManager(mgr controllerruntime.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).For(&v1alpha1.Cluster{}).Complete(c)
}
func (c *MemberClusterStatusController) syncMemberClusterStatus(memberCluster *v1alpha1.Cluster) (controllerruntime.Result, error) {
func (c *ClusterStatusController) syncClusterStatus(cluster *v1alpha1.Cluster) (controllerruntime.Result, error) {
// create a ClusterClient for the given member cluster
clusterClient, err := util.NewClusterClientSet(memberCluster, c.KubeClientSet)
clusterClient, err := util.NewClusterClientSet(cluster, c.KubeClientSet)
if err != nil {
klog.Errorf("Failed to create a ClusterClient for the given member cluster: %v, err is : %v", memberCluster.Name, err)
klog.Errorf("Failed to create a ClusterClient for the given member cluster: %v, err is : %v", cluster.Name, err)
return controllerruntime.Result{Requeue: true}, err
}
var currentClusterStatus = v1alpha1.ClusterStatus{}
// get the health status of member cluster
online, healthy := getMemberClusterHealthStatus(clusterClient)
online, healthy := getClusterHealthStatus(clusterClient)
if !online || !healthy {
// generate conditions according to the health status of member cluster
currentClusterStatus.Conditions = generateReadyCondition(online, healthy)
setTransitionTime(&memberCluster.Status, &currentClusterStatus)
return c.updateStatusIfNeeded(memberCluster, currentClusterStatus)
setTransitionTime(&cluster.Status, &currentClusterStatus)
return c.updateStatusIfNeeded(cluster, currentClusterStatus)
}
clusterVersion, err := getKubernetesVersion(clusterClient)
if err != nil {
klog.Errorf("Failed to get server version of the member cluster: %v, err is : %v", memberCluster.Name, err)
klog.Errorf("Failed to get server version of the member cluster: %v, err is : %v", cluster.Name, err)
return controllerruntime.Result{Requeue: true}, err
}
// get the list of APIs installed in the member cluster
apiEnables, err := getAPIEnablements(clusterClient)
if err != nil {
klog.Errorf("Failed to get APIs installed in the member cluster: %v, err is : %v", memberCluster.Name, err)
klog.Errorf("Failed to get APIs installed in the member cluster: %v, err is : %v", cluster.Name, err)
return controllerruntime.Result{Requeue: true}, err
}
// get the summary of nodes status in the member cluster
nodeSummary, err := getNodeSummary(clusterClient)
if err != nil {
klog.Errorf("Failed to get summary of nodes status in the member cluster: %v, err is : %v", memberCluster.Name, err)
klog.Errorf("Failed to get summary of nodes status in the member cluster: %v, err is : %v", cluster.Name, err)
return controllerruntime.Result{Requeue: true}, err
}
currentClusterStatus.Conditions = generateReadyCondition(online, healthy)
setTransitionTime(&memberCluster.Status, &currentClusterStatus)
setTransitionTime(&cluster.Status, &currentClusterStatus)
currentClusterStatus.KubernetesVersion = clusterVersion
currentClusterStatus.APIEnablements = apiEnables
currentClusterStatus.NodeSummary = nodeSummary
return c.updateStatusIfNeeded(memberCluster, currentClusterStatus)
return c.updateStatusIfNeeded(cluster, currentClusterStatus)
}
// updateStatusIfNeeded calls updateStatus only if the status of the member cluster is not the same as the old status
func (c *MemberClusterStatusController) updateStatusIfNeeded(memberCluster *v1alpha1.Cluster, currentClusterStatus v1alpha1.ClusterStatus) (controllerruntime.Result, error) {
if !equality.Semantic.DeepEqual(memberCluster.Status, currentClusterStatus) {
klog.V(4).Infof("Start to update memberCluster status: %s", memberCluster.Name)
memberCluster.Status = currentClusterStatus
err := c.Client.Status().Update(context.TODO(), memberCluster)
func (c *ClusterStatusController) updateStatusIfNeeded(cluster *v1alpha1.Cluster, currentClusterStatus v1alpha1.ClusterStatus) (controllerruntime.Result, error) {
if !equality.Semantic.DeepEqual(cluster.Status, currentClusterStatus) {
klog.V(4).Infof("Start to update cluster status: %s", cluster.Name)
cluster.Status = currentClusterStatus
err := c.Client.Status().Update(context.TODO(), cluster)
if err != nil {
klog.Errorf("Failed to update health status of the member cluster: %v, err is : %v", memberCluster.Name, err)
klog.Errorf("Failed to update health status of the member cluster: %v, err is : %v", cluster.Name, err)
return controllerruntime.Result{Requeue: true}, err
}
}
@@ -138,7 +138,7 @@ func (c *MemberClusterStatusController) updateStatusIfNeeded(memberCluster *v1al
return controllerruntime.Result{RequeueAfter: 10 * time.Second}, nil
}
func getMemberClusterHealthStatus(clusterClient *util.ClusterClient) (online, healthy bool) {
func getClusterHealthStatus(clusterClient *util.ClusterClient) (online, healthy bool) {
healthStatus, err := healthEndpointCheck(clusterClient.KubeClient, "/readyz")
if err != nil && healthStatus == http.StatusNotFound {
// do health check with healthz endpoint if the readyz endpoint is not installed in member cluster
@@ -207,7 +207,7 @@ func generateReadyCondition(online, healthy bool) []v1.Condition {
func setTransitionTime(oldClusterStatus, newClusterStatus *v1alpha1.ClusterStatus) {
// preserve the last transition time if the status of the member cluster has not changed
if util.IsMemberClusterReady(oldClusterStatus) == util.IsMemberClusterReady(newClusterStatus) {
if util.IsClusterReady(oldClusterStatus) == util.IsClusterReady(newClusterStatus) {
if len(oldClusterStatus.Conditions) != 0 {
for i := 0; i < len(newClusterStatus.Conditions); i++ {
newClusterStatus.Conditions[i].LastTransitionTime = oldClusterStatus.Conditions[0].LastTransitionTime

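`getClusterHealthStatus` relies on the API server's health endpoints, falling back from `/readyz` to `/healthz` on a 404 (older clusters do not serve `/readyz`). A minimal sketch of such a probe with a raw REST call (function names are illustrative, not Karmada's `healthEndpointCheck`):

```go
package status

import (
	"context"
	"net/http"

	"k8s.io/client-go/kubernetes"
)

// probeHealth sketches the readyz-then-healthz probe: online means the API
// server answered at all, healthy means it answered 200 OK.
func probeHealth(client kubernetes.Interface) (online, healthy bool) {
	code := probe(client, "/readyz")
	if code == http.StatusNotFound {
		// /readyz is not installed; fall back to /healthz.
		code = probe(client, "/healthz")
	}
	if code == 0 {
		return false, false // no HTTP status at all: cluster not reachable
	}
	return true, code == http.StatusOK
}

func probe(client kubernetes.Interface, path string) int {
	var code int
	client.Discovery().RESTClient().Get().AbsPath(path).Do(context.TODO()).StatusCode(&code)
	return code
}
```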
View File

@@ -142,7 +142,7 @@ func (c *PropagationWorkStatusController) syncPropagationWorkStatus(key string)
util.MergeLabel(desireObj, util.OwnerLabel, names.GenerateOwnerLabelValue(workObject.GetNamespace(), workObject.GetName()))
clusterName, err := names.GetMemberClusterName(ownerNamespace)
clusterName, err := names.GetClusterName(ownerNamespace)
if err != nil {
klog.Errorf("Failed to get member cluster name: %v", err)
return err
@@ -362,13 +362,13 @@ func (c *PropagationWorkStatusController) getObjectFromCache(key string) (*unstr
// registerInformersAndStart builds an informer manager for the cluster if it doesn't exist, then constructs informers for gvr
// and starts it.
func (c *PropagationWorkStatusController) registerInformersAndStart(work *v1alpha1.PropagationWork) error {
memberClusterName, err := names.GetMemberClusterName(work.GetNamespace())
clusterName, err := names.GetClusterName(work.GetNamespace())
if err != nil {
klog.Errorf("Failed to get member cluster name by %s. Error: %v.", work.GetNamespace(), err)
return err
}
singleClusterInformerManager, err := c.getSingleClusterManager(memberClusterName)
singleClusterInformerManager, err := c.getSingleClusterManager(clusterName)
if err != nil {
return err
}
@@ -382,11 +382,11 @@ func (c *PropagationWorkStatusController) registerInformersAndStart(work *v1alph
singleClusterInformerManager.ForResource(gvr, c.getEventHandler())
}
c.InformerManager.Start(memberClusterName, c.StopChan)
synced := c.InformerManager.WaitForCacheSync(memberClusterName, c.StopChan)
c.InformerManager.Start(clusterName, c.StopChan)
synced := c.InformerManager.WaitForCacheSync(clusterName, c.StopChan)
if synced == nil {
klog.Errorf("No informerFactory for cluster %s exist.", memberClusterName)
return fmt.Errorf("no informerFactory for cluster %s exist", memberClusterName)
klog.Errorf("No informerFactory for cluster %s exist.", clusterName)
return fmt.Errorf("no informerFactory for cluster %s exist", clusterName)
}
for gvr := range gvrTargets {
if !synced[gvr] {
@@ -419,14 +419,14 @@ func (c *PropagationWorkStatusController) getGVRsFromPropagationWork(work *v1alp
// getSingleClusterManager gets singleClusterInformerManager with clusterName.
// If the manager does not exist, create it; otherwise get it from the map.
func (c *PropagationWorkStatusController) getSingleClusterManager(memberClusterName string) (informermanager.SingleClusterInformerManager, error) {
func (c *PropagationWorkStatusController) getSingleClusterManager(clusterName string) (informermanager.SingleClusterInformerManager, error) {
// TODO(chenxianpao): If cluster A is removed and a new cluster also named A joins karmada,
// the cache in informer manager should be updated.
singleClusterInformerManager := c.InformerManager.GetSingleClusterManager(memberClusterName)
singleClusterInformerManager := c.InformerManager.GetSingleClusterManager(clusterName)
if singleClusterInformerManager == nil {
dynamicClusterClient, err := util.BuildDynamicClusterClient(c.Client, c.KubeClientSet, memberClusterName)
dynamicClusterClient, err := util.BuildDynamicClusterClient(c.Client, c.KubeClientSet, clusterName)
if err != nil {
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", memberClusterName)
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", clusterName)
return nil, err
}
singleClusterInformerManager = c.InformerManager.ForCluster(dynamicClusterClient.ClusterName, dynamicClusterClient.DynamicClientSet, 0)

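The `InformerManager` above caches one informer factory per member cluster; under the hood this is the standard client-go dynamic informer pattern. A condensed sketch of what a single cluster's factory involves (names are illustrative, not the Karmada `informermanager` API):

```go
package status

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"k8s.io/client-go/tools/cache"
)

// watchResource builds a dynamic informer for one GVR in one member cluster
// and blocks until its cache has synced.
func watchResource(client dynamic.Interface, gvr schema.GroupVersionResource,
	handler cache.ResourceEventHandler, stopCh <-chan struct{}) error {
	factory := dynamicinformer.NewDynamicSharedInformerFactory(client, 0)
	informer := factory.ForResource(gvr).Informer()
	informer.AddEventHandler(handler)

	factory.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		return fmt.Errorf("cache for %s never synced", gvr.String())
	}
	return nil
}
```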
View File

@@ -17,7 +17,7 @@ import (
kubeclient "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
memberclusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
clusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
"github.com/karmada-io/karmada/pkg/apis/propagationstrategy/v1alpha1"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/karmadactl/options"
@@ -94,14 +94,14 @@ func NewCmdJoin(cmdOut io.Writer, karmadaConfig KarmadaConfig) *cobra.Command {
type CommandJoinOption struct {
options.GlobalCommandOptions
// MemberClusterName is the member cluster's name that we are going to join with.
MemberClusterName string
// ClusterName is the member cluster's name that we are going to join with.
ClusterName string
// MemberClusterContext is the member cluster's context that we are going to join with.
MemberClusterContext string
// ClusterContext is the member cluster's context that we are going to join with.
ClusterContext string
// MemberClusterKubeConfig is the member cluster's kubeconfig path.
MemberClusterKubeConfig string
// ClusterKubeConfig is the member cluster's kubeconfig path.
ClusterKubeConfig string
}
// Complete ensures that options are valid and marshals them if necessary.
@@ -110,11 +110,11 @@ func (j *CommandJoinOption) Complete(args []string) error {
if len(args) == 0 {
return errors.New("member cluster name is required")
}
j.MemberClusterName = args[0]
j.ClusterName = args[0]
// If '--member-cluster-context' not specified, take the cluster name as the context.
if len(j.MemberClusterContext) == 0 {
j.MemberClusterContext = j.MemberClusterName
if len(j.ClusterContext) == 0 {
j.ClusterContext = j.ClusterName
}
return nil
@@ -124,16 +124,16 @@ func (j *CommandJoinOption) Complete(args []string) error {
func (j *CommandJoinOption) AddFlags(flags *pflag.FlagSet) {
j.GlobalCommandOptions.AddFlags(flags)
flags.StringVar(&j.MemberClusterContext, "member-cluster-context", "",
flags.StringVar(&j.ClusterContext, "member-cluster-context", "",
"Context name of member cluster in kubeconfig. Only works when there are multiple contexts in the kubeconfig.")
flags.StringVar(&j.MemberClusterKubeConfig, "member-cluster-kubeconfig", "",
flags.StringVar(&j.ClusterKubeConfig, "member-cluster-kubeconfig", "",
"Path of the member cluster's kubeconfig.")
}
// RunJoin is the implementation of the 'join' command.
// TODO(RainbowMango): consider to remove the 'KarmadaConfig'.
func RunJoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandJoinOption) error {
klog.V(1).Infof("joining member cluster. member cluster name: %s", opts.MemberClusterName)
klog.V(1).Infof("joining member cluster. member cluster name: %s", opts.ClusterName)
klog.V(1).Infof("joining member cluster. cluster namespace: %s", opts.ClusterNamespace)
// Get control plane kube-apiserver client
@@ -148,29 +148,29 @@ func RunJoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandJoinOpti
controlPlaneKubeClient := kubeclient.NewForConfigOrDie(controlPlaneRestConfig)
// Get member cluster config
memberClusterConfig, err := karmadaConfig.GetRestConfig(opts.MemberClusterContext, opts.MemberClusterKubeConfig)
clusterConfig, err := karmadaConfig.GetRestConfig(opts.ClusterContext, opts.ClusterKubeConfig)
if err != nil {
klog.V(1).Infof("failed to get joining member cluster config. error: %v", err)
return err
}
memberClusterKubeClient := kubeclient.NewForConfigOrDie(memberClusterConfig)
clusterKubeClient := kubeclient.NewForConfigOrDie(clusterConfig)
klog.V(1).Infof("joining member cluster config. endpoint: %s", memberClusterConfig.Host)
klog.V(1).Infof("joining member cluster config. endpoint: %s", clusterConfig.Host)
// ensure the namespace where the member cluster object is stored exists in the control plane.
if _, err := ensureNamespaceExist(controlPlaneKubeClient, opts.ClusterNamespace, opts.DryRun); err != nil {
return err
}
// ensure the namespace where the karmada control plane credential is stored exists in the member cluster.
if _, err := ensureNamespaceExist(memberClusterKubeClient, opts.ClusterNamespace, opts.DryRun); err != nil {
if _, err := ensureNamespaceExist(clusterKubeClient, opts.ClusterNamespace, opts.DryRun); err != nil {
return err
}
// create a ServiceAccount in member cluster.
serviceAccountObj := &corev1.ServiceAccount{}
serviceAccountObj.Namespace = opts.ClusterNamespace
serviceAccountObj.Name = names.GenerateServiceAccountName(opts.MemberClusterName)
if serviceAccountObj, err = ensureServiceAccountExist(memberClusterKubeClient, serviceAccountObj, opts.DryRun); err != nil {
serviceAccountObj.Name = names.GenerateServiceAccountName(opts.ClusterName)
if serviceAccountObj, err = ensureServiceAccountExist(clusterKubeClient, serviceAccountObj, opts.DryRun); err != nil {
return err
}
@@ -178,7 +178,7 @@ func RunJoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandJoinOpti
clusterRole := &rbacv1.ClusterRole{}
clusterRole.Name = names.GenerateRoleName(serviceAccountObj.Name)
clusterRole.Rules = clusterPolicyRules
if _, err := ensureClusterRoleExist(memberClusterKubeClient, clusterRole, opts.DryRun); err != nil {
if _, err := ensureClusterRoleExist(clusterKubeClient, clusterRole, opts.DryRun); err != nil {
return err
}
@@ -187,19 +187,19 @@ func RunJoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandJoinOpti
clusterRoleBinding.Name = clusterRole.Name
clusterRoleBinding.Subjects = buildRoleBindingSubjects(serviceAccountObj.Name, serviceAccountObj.Namespace)
clusterRoleBinding.RoleRef = buildClusterRoleReference(clusterRole.Name)
if _, err := ensureClusterRoleBindingExist(memberClusterKubeClient, clusterRoleBinding, opts.DryRun); err != nil {
if _, err := ensureClusterRoleBindingExist(clusterKubeClient, clusterRoleBinding, opts.DryRun); err != nil {
return err
}
var memberClusterSecret *corev1.Secret
var clusterSecret *corev1.Secret
// It will take a short time to create service account secret for member cluster.
err = wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) {
serviceAccountObj, err = memberClusterKubeClient.CoreV1().ServiceAccounts(serviceAccountObj.Namespace).Get(context.TODO(), serviceAccountObj.Name, metav1.GetOptions{})
serviceAccountObj, err = clusterKubeClient.CoreV1().ServiceAccounts(serviceAccountObj.Namespace).Get(context.TODO(), serviceAccountObj.Name, metav1.GetOptions{})
if err != nil {
klog.Errorf("Failed to retrieve service account(%s/%s) from member cluster. err: %v", serviceAccountObj.Namespace, serviceAccountObj.Name, err)
return false, err
}
memberClusterSecret, err = util.GetTargetSecret(memberClusterKubeClient, serviceAccountObj.Secrets, corev1.SecretTypeServiceAccountToken, opts.ClusterNamespace)
clusterSecret, err = util.GetTargetSecret(clusterKubeClient, serviceAccountObj.Secrets, corev1.SecretTypeServiceAccountToken, opts.ClusterNamespace)
if err != nil {
return false, err
}
@@ -213,15 +213,15 @@ func RunJoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandJoinOpti
// create secret in control plane
secretNamespace := opts.ClusterNamespace
secretName := opts.MemberClusterName
secretName := opts.ClusterName
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: secretNamespace,
Name: secretName,
},
Data: map[string][]byte{
caDataKey: memberClusterSecret.Data["ca.crt"], // TODO(RainbowMango): change ca bundle key to 'ca.crt'.
tokenKey: memberClusterSecret.Data[tokenKey],
caDataKey: clusterSecret.Data["ca.crt"], // TODO(RainbowMango): change ca bundle key to 'ca.crt'.
tokenKey: clusterSecret.Data[tokenKey],
},
}
secret, err = util.CreateSecret(controlPlaneKubeClient, secret)
@@ -234,23 +234,23 @@ func RunJoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandJoinOpti
return nil
}
memberClusterObj := &memberclusterapi.Cluster{}
memberClusterObj.Name = opts.MemberClusterName
memberClusterObj.Spec.APIEndpoint = memberClusterConfig.Host
memberClusterObj.Spec.SecretRef = &memberclusterapi.LocalSecretReference{
clusterObj := &clusterapi.Cluster{}
clusterObj.Name = opts.ClusterName
clusterObj.Spec.APIEndpoint = clusterConfig.Host
clusterObj.Spec.SecretRef = &clusterapi.LocalSecretReference{
Namespace: secretNamespace,
Name: secretName,
}
memberCluster, err := createMemberClusterObject(controlPlaneKarmadaClient, memberClusterObj, false)
cluster, err := createClusterObject(controlPlaneKarmadaClient, clusterObj, false)
if err != nil {
klog.Errorf("failed to create member cluster object. cluster name: %s, error: %v", opts.MemberClusterName, err)
klog.Errorf("failed to create member cluster object. cluster name: %s, error: %v", opts.ClusterName, err)
return err
}
patchSecretBody := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(memberCluster, resourceKind),
*metav1.NewControllerRef(cluster, resourceKind),
},
},
}
@@ -372,34 +372,34 @@ func ensureClusterRoleBindingExist(client kubeclient.Interface, clusterRoleBindi
return createdObj, nil
}
func createMemberClusterObject(controlPlaneClient *karmadaclientset.Clientset, memberClusterObj *memberclusterapi.Cluster, errorOnExisting bool) (*memberclusterapi.Cluster, error) {
memberCluster, exist, err := GetMemberCluster(controlPlaneClient, memberClusterObj.Namespace, memberClusterObj.Name)
func createClusterObject(controlPlaneClient *karmadaclientset.Clientset, clusterObj *clusterapi.Cluster, errorOnExisting bool) (*clusterapi.Cluster, error) {
cluster, exist, err := GetCluster(controlPlaneClient, clusterObj.Namespace, clusterObj.Name)
if err != nil {
klog.Errorf("failed to create member cluster object. member cluster: %s/%s, error: %v", memberClusterObj.Namespace, memberClusterObj.Name, err)
klog.Errorf("failed to create member cluster object. member cluster: %s/%s, error: %v", clusterObj.Namespace, clusterObj.Name, err)
return nil, err
}
if exist {
if errorOnExisting {
klog.Errorf("failed to create member cluster object. member cluster: %s/%s, error: %v", memberClusterObj.Namespace, memberClusterObj.Name, err)
return memberCluster, err
klog.Errorf("failed to create member cluster object. member cluster: %s/%s, error: %v", clusterObj.Namespace, clusterObj.Name, err)
return cluster, err
}
klog.V(1).Infof("create member cluster succeed as already exist. member cluster: %s/%s", memberClusterObj.Namespace, memberClusterObj.Name)
return memberCluster, nil
klog.V(1).Infof("create member cluster succeed as already exist. member cluster: %s/%s", clusterObj.Namespace, clusterObj.Name)
return cluster, nil
}
if memberCluster, err = CreateMemberCluster(controlPlaneClient, memberClusterObj); err != nil {
klog.Warningf("failed to create member cluster. member cluster: %s/%s, error: %v", memberClusterObj.Namespace, memberClusterObj.Name, err)
if cluster, err = CreateCluster(controlPlaneClient, clusterObj); err != nil {
klog.Warningf("failed to create member cluster. member cluster: %s/%s, error: %v", clusterObj.Namespace, clusterObj.Name, err)
return nil, err
}
return memberCluster, nil
return cluster, nil
}
// GetMemberCluster tells if a member cluster (namespace/name) already joined to control plane.
func GetMemberCluster(client karmadaclientset.Interface, namespace string, name string) (*memberclusterapi.Cluster, bool, error) {
memberCluster, err := client.MemberclusterV1alpha1().MemberClusters().Get(context.TODO(), name, metav1.GetOptions{})
// GetCluster reports whether a member cluster (namespace/name) has already joined the control plane.
func GetCluster(client karmadaclientset.Interface, namespace string, name string) (*clusterapi.Cluster, bool, error) {
cluster, err := client.ClusterV1alpha1().Clusters().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return nil, false, nil
@@ -409,18 +409,18 @@ func GetMemberCluster(client karmadaclientset.Interface, namespace string, name
return nil, false, err
}
return memberCluster, true, nil
return cluster, true, nil
}
// CreateMemberCluster creates a new member cluster object in control plane.
func CreateMemberCluster(controlPlaneClient karmadaclientset.Interface, cluster *memberclusterapi.Cluster) (*memberclusterapi.Cluster, error) {
memberCluster, err := controlPlaneClient.MemberclusterV1alpha1().MemberClusters().Create(context.TODO(), cluster, metav1.CreateOptions{})
// CreateCluster creates a new member cluster object in control plane.
func CreateCluster(controlPlaneClient karmadaclientset.Interface, cluster *clusterapi.Cluster) (*clusterapi.Cluster, error) {
cluster, err := controlPlaneClient.ClusterV1alpha1().Clusters().Create(context.TODO(), cluster, metav1.CreateOptions{})
if err != nil {
klog.Warningf("failed to create member cluster. member cluster: %s/%s, error: %v", cluster.Namespace, cluster.Name, err)
return memberCluster, err
return cluster, err
}
return memberCluster, nil
return cluster, nil
}
// buildRoleBindingSubjects will generate a subject as per service account.

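The `patchSecretBody` earlier in RunJoin makes the control-plane secret owned by the Cluster object, so deleting the Cluster lets Kubernetes garbage-collect the secret. A minimal sketch of establishing such ownership (the GroupVersionKind literal is an assumption; the command itself uses a prepared `resourceKind`):

```go
package karmadactl

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	clusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
)

// ownSecret marks the secret as controlled by the Cluster object, so deleting
// the Cluster cascades to the secret via garbage collection.
func ownSecret(cluster *clusterapi.Cluster, secret *corev1.Secret) {
	gvk := schema.GroupVersionKind{
		Group:   "cluster.karmada.io", // group taken from the CRD manifests in this commit
		Version: "v1alpha1",
		Kind:    "Cluster",
	}
	secret.OwnerReferences = append(secret.OwnerReferences, *metav1.NewControllerRef(cluster, gvk))
}
```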
View File

@ -62,14 +62,14 @@ func NewCmdUnjoin(cmdOut io.Writer, karmadaConfig KarmadaConfig) *cobra.Command
type CommandUnjoinOption struct {
options.GlobalCommandOptions
// MemberClusterName is the member cluster's name that we are going to join with.
MemberClusterName string
// ClusterName is the member cluster's name that we are going to unjoin.
ClusterName string
// MemberClusterContext is the member cluster's context that we are going to join with.
MemberClusterContext string
// ClusterContext is the member cluster's context that we are going to unjoin.
ClusterContext string
// MemberClusterKubeConfig is the member cluster's kubeconfig path.
MemberClusterKubeConfig string
// ClusterKubeConfig is the member cluster's kubeconfig path.
ClusterKubeConfig string
forceDeletion bool
}
@@ -80,11 +80,11 @@ func (j *CommandUnjoinOption) Complete(args []string) error {
if len(args) == 0 {
return errors.New("member cluster name is required")
}
j.MemberClusterName = args[0]
j.ClusterName = args[0]
// If '--member-cluster-context' not specified, take the cluster name as the context.
if len(j.MemberClusterContext) == 0 {
j.MemberClusterContext = j.MemberClusterName
if len(j.ClusterContext) == 0 {
j.ClusterContext = j.ClusterName
}
return nil
@@ -94,9 +94,9 @@ func (j *CommandUnjoinOption) AddFlags(flags *pflag.FlagSet) {
func (j *CommandUnjoinOption) AddFlags(flags *pflag.FlagSet) {
j.GlobalCommandOptions.AddFlags(flags)
flags.StringVar(&j.MemberClusterContext, "member-cluster-context", "",
flags.StringVar(&j.ClusterContext, "member-cluster-context", "",
"Context name of member cluster in kubeconfig. Only works when there are multiple contexts in the kubeconfig.")
flags.StringVar(&j.MemberClusterKubeConfig, "member-cluster-kubeconfig", "",
flags.StringVar(&j.ClusterKubeConfig, "member-cluster-kubeconfig", "",
"Path of the member cluster's kubeconfig.")
flags.BoolVar(&j.forceDeletion, "force", false,
"Delete cluster and secret resources even if resources in the member cluster targeted for unjoin are not removed successfully.")
@@ -105,7 +105,7 @@ func (j *CommandUnjoinOption) AddFlags(flags *pflag.FlagSet) {
// RunUnjoin is the implementation of the 'unjoin' command.
// TODO(RainbowMango): consider to remove the 'KarmadaConfig'.
func RunUnjoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandUnjoinOption) error {
klog.V(1).Infof("unjoining member cluster. member cluster name: %s", opts.MemberClusterName)
klog.V(1).Infof("unjoining member cluster. member cluster name: %s", opts.ClusterName)
klog.V(1).Infof("unjoining member cluster. cluster namespace: %s", opts.ClusterNamespace)
// Get control plane kube-apiserver client
@@ -121,7 +121,7 @@ func RunUnjoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandUnjoin
// todo: taint member cluster object instead of deleting execution space.
// Once the member cluster is tainted, eviction controller will delete all propagationwork in the execution space of the member cluster.
executionSpaceName, err := names.GenerateExecutionSpaceName(opts.MemberClusterName)
executionSpaceName, err := names.GenerateExecutionSpaceName(opts.ClusterName)
if err != nil {
return err
}
@@ -134,43 +134,43 @@ func RunUnjoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandUnjoin
// Attempt to delete the cluster role, cluster rolebindings and service account from the unjoining member cluster
// if user provides the kubeconfig of member cluster
if opts.MemberClusterKubeConfig != "" {
if opts.ClusterKubeConfig != "" {
// Get member cluster config
memberClusterConfig, err := karmadaConfig.GetRestConfig(opts.MemberClusterContext, opts.MemberClusterKubeConfig)
clusterConfig, err := karmadaConfig.GetRestConfig(opts.ClusterContext, opts.ClusterKubeConfig)
if err != nil {
klog.V(1).Infof("failed to get unjoining member cluster config. error: %v", err)
return err
}
memberClusterKubeClient := kubeclient.NewForConfigOrDie(memberClusterConfig)
clusterKubeClient := kubeclient.NewForConfigOrDie(clusterConfig)
klog.V(1).Infof("unjoining member cluster config. endpoint: %s", memberClusterConfig.Host)
klog.V(1).Infof("unjoining member cluster config. endpoint: %s", clusterConfig.Host)
// delete RBAC resource from unjoining member cluster
err = deleteRBACResources(memberClusterKubeClient, opts.MemberClusterName, opts.forceDeletion, opts.DryRun)
err = deleteRBACResources(clusterKubeClient, opts.ClusterName, opts.forceDeletion, opts.DryRun)
if err != nil {
klog.Errorf("Failed to delete RBAC resource in unjoining member cluster %q: %v", opts.MemberClusterName, err)
klog.Errorf("Failed to delete RBAC resource in unjoining member cluster %q: %v", opts.ClusterName, err)
return err
}
// delete service account from unjoining member cluster
err = deleteServiceAccount(memberClusterKubeClient, opts.ClusterNamespace, opts.MemberClusterName, opts.forceDeletion, opts.DryRun)
err = deleteServiceAccount(clusterKubeClient, opts.ClusterNamespace, opts.ClusterName, opts.forceDeletion, opts.DryRun)
if err != nil {
klog.Errorf("Failed to delete service account in unjoining member cluster %q: %v", opts.MemberClusterName, err)
klog.Errorf("Failed to delete service account in unjoining member cluster %q: %v", opts.ClusterName, err)
return err
}
// delete namespace from unjoining member cluster
err = deleteNamespaceFromUnjoinCluster(memberClusterKubeClient, opts.ClusterNamespace, opts.MemberClusterName, opts.forceDeletion, opts.DryRun)
err = deleteNamespaceFromUnjoinCluster(clusterKubeClient, opts.ClusterNamespace, opts.ClusterName, opts.forceDeletion, opts.DryRun)
if err != nil {
klog.Errorf("Failed to delete namespace in unjoining member cluster %q: %v", opts.MemberClusterName, err)
klog.Errorf("Failed to delete namespace in unjoining member cluster %q: %v", opts.ClusterName, err)
return err
}
}
// delete the member cluster object in host cluster that associates the unjoining member cluster
err = deleteMemberClusterObject(controlPlaneKarmadaClient, opts.MemberClusterName, opts.DryRun)
err = deleteClusterObject(controlPlaneKarmadaClient, opts.ClusterName, opts.DryRun)
if err != nil {
klog.Errorf("Failed to delete member cluster object. cluster name: %s, error: %v", opts.MemberClusterName, err)
klog.Errorf("Failed to delete member cluster object. cluster name: %s, error: %v", opts.ClusterName, err)
return err
}
@@ -178,7 +178,7 @@ func RunUnjoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandUnjoin
}
// deleteRBACResources deletes the cluster role, cluster rolebindings from the unjoining member cluster.
func deleteRBACResources(memberClusterKubeClient kubeclient.Interface, unjoiningClusterName string, forceDeletion, dryRun bool) error {
func deleteRBACResources(clusterKubeClient kubeclient.Interface, unjoiningClusterName string, forceDeletion, dryRun bool) error {
if dryRun {
return nil
}
@@ -187,7 +187,7 @@ func deleteRBACResources(memberClusterKubeClient kubeclient.Interface, unjoining
clusterRoleName := names.GenerateRoleName(serviceAccountName)
clusterRoleBindingName := clusterRoleName
err := util.DeleteClusterRoleBinding(memberClusterKubeClient, clusterRoleBindingName)
err := util.DeleteClusterRoleBinding(clusterKubeClient, clusterRoleBindingName)
if err != nil {
if !forceDeletion {
return err
@@ -195,7 +195,7 @@ func deleteRBACResources(memberClusterKubeClient kubeclient.Interface, unjoining
klog.Errorf("Force deletion. Could not delete cluster role binding %q for service account %q in unjoining member cluster %q: %v.", clusterRoleBindingName, serviceAccountName, unjoiningClusterName, err)
}
err = util.DeleteClusterRole(memberClusterKubeClient, clusterRoleName)
err = util.DeleteClusterRole(clusterKubeClient, clusterRoleName)
if err != nil {
if !forceDeletion {
return err
@@ -207,13 +207,13 @@ func deleteRBACResources(memberClusterKubeClient kubeclient.Interface, unjoining
}
// deleteServiceAccount deletes the service account from the unjoining member cluster.
func deleteServiceAccount(memberClusterKubeClient kubeclient.Interface, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
func deleteServiceAccount(clusterKubeClient kubeclient.Interface, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
if dryRun {
return nil
}
serviceAccountName := names.GenerateServiceAccountName(unjoiningClusterName)
err := util.DeleteServiceAccount(memberClusterKubeClient, namespace, serviceAccountName)
err := util.DeleteServiceAccount(clusterKubeClient, namespace, serviceAccountName)
if err != nil {
if !forceDeletion {
return err
@@ -225,12 +225,12 @@ func deleteServiceAccount(memberClusterKubeClient kubeclient.Interface, namespac
}
// deleteNamespaceFromUnjoinCluster deletes the namespace from the unjoining member cluster.
func deleteNamespaceFromUnjoinCluster(memberClusterKubeClient kubeclient.Interface, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
func deleteNamespaceFromUnjoinCluster(clusterKubeClient kubeclient.Interface, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
if dryRun {
return nil
}
err := util.DeleteNamespace(memberClusterKubeClient, namespace)
err := util.DeleteNamespace(clusterKubeClient, namespace)
if err != nil {
if !forceDeletion {
return err
@@ -272,36 +272,36 @@ func deleteExecutionSpace(hostClient kubeclient.Interface, namespace string, dry
return nil
}
// deleteMemberClusterObject delete the member cluster object in host cluster that associates the unjoining member cluster
func deleteMemberClusterObject(controlPlaneKarmadaClient *karmadaclientset.Clientset, memberClusterName string, dryRun bool) error {
// deleteClusterObject deletes the member cluster object in the host cluster that associates the unjoining member cluster
func deleteClusterObject(controlPlaneKarmadaClient *karmadaclientset.Clientset, clusterName string, dryRun bool) error {
if dryRun {
return nil
}
err := controlPlaneKarmadaClient.MemberclusterV1alpha1().MemberClusters().Delete(context.TODO(), memberClusterName, metav1.DeleteOptions{})
err := controlPlaneKarmadaClient.ClusterV1alpha1().Clusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
klog.Errorf("Failed to delete member cluster object. cluster name: %s, error: %v", memberClusterName, err)
klog.Errorf("Failed to delete member cluster object. cluster name: %s, error: %v", clusterName, err)
return err
}
// make sure the given member cluster object has been deleted
err = wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) {
_, err = controlPlaneKarmadaClient.MemberclusterV1alpha1().MemberClusters().Get(context.TODO(), memberClusterName, metav1.GetOptions{})
_, err = controlPlaneKarmadaClient.ClusterV1alpha1().Clusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil
}
if err != nil {
klog.Errorf("Failed to get member cluster %s. err: %v", memberClusterName, err)
klog.Errorf("Failed to get member cluster %s. err: %v", clusterName, err)
return false, err
}
klog.Infof("Waiting for the member cluster object %s to be deleted", memberClusterName)
klog.Infof("Waiting for the member cluster object %s to be deleted", clusterName)
return false, nil
})
if err != nil {
klog.Errorf("Failed to delete member cluster object. cluster name: %s, error: %v", memberClusterName, err)
klog.Errorf("Failed to delete member cluster object. cluster name: %s, error: %v", clusterName, err)
return err
}

View File

@@ -14,7 +14,7 @@ func NewEmptySnapshot() *Snapshot {
return &Snapshot{}
}
// NumOfClusters returns the number of memberClusters.
// NumOfClusters returns the number of clusters.
func (s *Snapshot) NumOfClusters() int {
return len(s.clusterInfoList)
}

View File

@@ -6,7 +6,7 @@ import (
"k8s.io/klog/v2"
memberclusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
clusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
"github.com/karmada-io/karmada/pkg/apis/propagationstrategy/v1alpha1"
lister "github.com/karmada-io/karmada/pkg/generated/listers/propagationstrategy/v1alpha1"
"github.com/karmada-io/karmada/pkg/scheduler/cache"
@@ -92,8 +92,8 @@ func (g *genericScheduler) findClustersThatFit(
ctx context.Context,
fwk framework.Framework,
placement *v1alpha1.Placement,
clusterInfo *cache.Snapshot) ([]*memberclusterapi.Cluster, error) {
var out []*memberclusterapi.Cluster
clusterInfo *cache.Snapshot) ([]*clusterapi.Cluster, error) {
var out []*clusterapi.Cluster
clusters := clusterInfo.GetClusters()
for _, c := range clusters {
resMap := fwk.RunFilterPlugins(ctx, placement, c.Cluster())
@@ -113,7 +113,7 @@ func (g *genericScheduler) prioritizeClusters(
ctx context.Context,
fwk framework.Framework,
placement *v1alpha1.Placement,
clusters []*memberclusterapi.Cluster) (result framework.ClusterScoreList, err error) {
clusters []*clusterapi.Cluster) (result framework.ClusterScoreList, err error) {
scoresMap, err := fwk.RunScorePlugins(ctx, placement, clusters)
if err != nil {
return result, err

View File

@@ -5,7 +5,7 @@ import (
"errors"
"strings"
membercluster "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
cluster "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
"github.com/karmada-io/karmada/pkg/apis/propagationstrategy/v1alpha1"
)
@ -15,10 +15,10 @@ type Framework interface {
// RunFilterPlugins runs the set of configured Filter plugins for resources on
// the given cluster.
RunFilterPlugins(ctx context.Context, placement *v1alpha1.Placement, cluster *membercluster.Cluster) PluginToResult
RunFilterPlugins(ctx context.Context, placement *v1alpha1.Placement, cluster *cluster.Cluster) PluginToResult
// RunScorePlugins runs the set of configured Score plugins; it returns a map of plugin name to cluster scores
RunScorePlugins(ctx context.Context, placement *v1alpha1.Placement, clusters []*membercluster.Cluster) (PluginToClusterScores, error)
RunScorePlugins(ctx context.Context, placement *v1alpha1.Placement, clusters []*cluster.Cluster) (PluginToClusterScores, error)
}
// Plugin is the parent type for all the scheduling framework plugins.
@ -31,7 +31,7 @@ type Plugin interface {
type FilterPlugin interface {
Plugin
// Filter is called by the scheduling framework.
Filter(ctx context.Context, placement *v1alpha1.Placement, cluster *membercluster.Cluster) *Result
Filter(ctx context.Context, placement *v1alpha1.Placement, cluster *cluster.Cluster) *Result
}
// Result indicates the result of running a plugin. It consists of a code, a
@ -124,7 +124,7 @@ type ScorePlugin interface {
// Score is called on each filtered cluster. It must return success and an integer
// indicating the rank of the cluster. All scoring plugins must return success or
// the resource will be rejected.
Score(ctx context.Context, placement *v1alpha1.Placement, cluster *membercluster.Cluster) (float64, *Result)
Score(ctx context.Context, placement *v1alpha1.Placement, cluster *cluster.Cluster) (float64, *Result)
}
// ClusterScore represents the cluster score.
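
The `FilterPlugin` and `ScorePlugin` contracts can be exercised with toy types. A minimal sketch of one plugin satisfying both shapes follows; every name in it is illustrative, not the real API.

```go
package main

import "fmt"

// Toy stand-ins for the framework's Placement, Cluster, and Result types.
type toyPlacement struct{ excludeClusters []string }
type toyCluster struct{ name string }
type toyResult struct {
	ok     bool
	reason string
}

// toyPlugin satisfies both the Filter and Score contracts in miniature.
type toyPlugin struct{}

func (p toyPlugin) Name() string { return "toy" }

// Filter rejects clusters named in the placement's exclude list,
// mirroring the FilterPlugin contract above.
func (p toyPlugin) Filter(placement toyPlacement, c toyCluster) toyResult {
	for _, name := range placement.excludeClusters {
		if name == c.name {
			return toyResult{ok: false, reason: "cluster is excluded by placement"}
		}
	}
	return toyResult{ok: true}
}

// Score ranks a surviving cluster; returning a constant keeps all clusters equal.
func (p toyPlugin) Score(placement toyPlacement, c toyCluster) float64 { return 0 }

func main() {
	p := toyPlugin{}
	fmt.Println(p.Filter(toyPlacement{excludeClusters: []string{"member2"}}, toyCluster{name: "member2"}))
}
```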

View File

@ -3,7 +3,7 @@ package clusteraffinity
import (
"context"
membercluster "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
cluster "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
"github.com/karmada-io/karmada/pkg/apis/propagationstrategy/v1alpha1"
"github.com/karmada-io/karmada/pkg/scheduler/framework"
)
@ -30,7 +30,7 @@ func (p *ClusterAffinity) Name() string {
}
// Filter checks if the cluster matches the placement cluster affinity constraint.
func (p *ClusterAffinity) Filter(ctx context.Context, placement *v1alpha1.Placement, cluster *membercluster.Cluster) *framework.Result {
func (p *ClusterAffinity) Filter(ctx context.Context, placement *v1alpha1.Placement, cluster *cluster.Cluster) *framework.Result {
affinity := placement.ClusterAffinity
if affinity != nil {
for _, clusterName := range affinity.ExcludeClusters {
@ -54,6 +54,6 @@ func (p *ClusterAffinity) Filter(ctx context.Context, placement *v1alpha1.Placem
}
// Score calculates the score on the candidate cluster.
func (p *ClusterAffinity) Score(ctx context.Context, placement *v1alpha1.Placement, cluster *membercluster.Cluster) (float64, *framework.Result) {
func (p *ClusterAffinity) Score(ctx context.Context, placement *v1alpha1.Placement, cluster *cluster.Cluster) (float64, *framework.Result) {
return 0, nil
}
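
The affinity check reduces to two list scans: an exclude list that always wins, and an optional allow list. A standalone sketch under that assumption (field names patterned on the snippet above, not taken from the API):

```go
package main

import "fmt"

// affinity mirrors the shape of a placement affinity constraint in miniature.
type affinity struct {
	ExcludeClusters []string
	ClusterNames    []string
}

// matches reports whether a cluster with the given name satisfies the affinity.
func (a *affinity) matches(name string) bool {
	if a == nil {
		return true // no affinity constraint means every cluster fits
	}
	for _, excluded := range a.ExcludeClusters {
		if excluded == name {
			return false
		}
	}
	if len(a.ClusterNames) == 0 {
		return true // no allow list: anything not excluded fits
	}
	for _, allowed := range a.ClusterNames {
		if allowed == name {
			return true
		}
	}
	return false
}

func main() {
	a := &affinity{ExcludeClusters: []string{"member3"}, ClusterNames: []string{"member1", "member2"}}
	fmt.Println(a.matches("member1"), a.matches("member3")) // true false
}
```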

View File

@ -7,7 +7,7 @@ import (
"k8s.io/klog/v2"
membercluster "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
cluster "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
"github.com/karmada-io/karmada/pkg/apis/propagationstrategy/v1alpha1"
"github.com/karmada-io/karmada/pkg/scheduler/framework"
plugins2 "github.com/karmada-io/karmada/pkg/scheduler/framework/plugins"
@ -52,7 +52,7 @@ func NewFramework(plugins []string) framework.Framework {
// RunFilterPlugins runs the set of configured Filter plugins for resources on the cluster.
// If any plugin returns a non-success result, the cluster is not suited for the resource.
func (frw *frameworkImpl) RunFilterPlugins(ctx context.Context, placement *v1alpha1.Placement, cluster *membercluster.Cluster) framework.PluginToResult {
func (frw *frameworkImpl) RunFilterPlugins(ctx context.Context, placement *v1alpha1.Placement, cluster *cluster.Cluster) framework.PluginToResult {
result := make(framework.PluginToResult, len(frw.filterPlugins))
for _, p := range frw.filterPlugins {
pluginResult := p.Filter(ctx, placement, cluster)
@ -64,7 +64,7 @@ func (frw *frameworkImpl) RunFilterPlugins(ctx context.Context, placement *v1alp
// RunScorePlugins runs the set of configured Score plugins for resources on the cluster.
// It returns a map of plugin name to the scores assigned to each cluster.
func (frw *frameworkImpl) RunScorePlugins(ctx context.Context, placement *v1alpha1.Placement, clusters []*membercluster.Cluster) (framework.PluginToClusterScores, error) {
func (frw *frameworkImpl) RunScorePlugins(ctx context.Context, placement *v1alpha1.Placement, clusters []*cluster.Cluster) (framework.PluginToClusterScores, error) {
result := make(framework.PluginToClusterScores, len(frw.scorePlugins))
for _, p := range frw.scorePlugins {
for i, cluster := range clusters {
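
`RunScorePlugins` accumulates a two-level structure: plugin name mapped to a per-cluster score list. A toy sketch of that accumulation, with plain strings in place of the framework types:

```go
package main

import "fmt"

type clusterScore struct {
	Cluster string
	Score   float64
}

// runScorePlugins mirrors the loop above: for every plugin, score every
// cluster, accumulating a plugin-name -> score-list map.
func runScorePlugins(plugins, clusters []string, scoreFn func(plugin, cluster string) float64) map[string][]clusterScore {
	result := make(map[string][]clusterScore, len(plugins))
	for _, p := range plugins {
		scores := make([]clusterScore, 0, len(clusters))
		for _, c := range clusters {
			scores = append(scores, clusterScore{Cluster: c, Score: scoreFn(p, c)})
		}
		result[p] = scores
	}
	return result
}

func main() {
	out := runScorePlugins([]string{"ClusterAffinity"}, []string{"member1", "member2"},
		func(plugin, cluster string) float64 { return 0 })
	fmt.Println(out)
}
```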

View File

@ -82,7 +82,7 @@ func NewScheduler(dynamicClient dynamic.Interface, karmadaClient karmadaclientse
UpdateFunc: sched.onPropagationBindingUpdate,
})
memclusterInformer := factory.Membercluster().V1alpha1().MemberClusters().Informer()
clusterInformer := factory.Cluster().V1alpha1().Clusters().Informer()
clusterInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: sched.addCluster,
@ -195,14 +195,14 @@ func (s *Scheduler) handleErr(err error, key interface{}) {
}
func (s *Scheduler) addCluster(obj interface{}) {
membercluster, ok := obj.(*memclusterapi.Cluster)
cluster, ok := obj.(*memclusterapi.Cluster)
if !ok {
klog.Errorf("cannot convert to Cluster: %v", obj)
return
}
klog.V(3).Infof("add event for cluster %s", membercluster.Name)
klog.V(3).Infof("add event for cluster %s", cluster.Name)
s.schedulerCache.AddCluster(membercluster)
s.schedulerCache.AddCluster(cluster)
}
func (s *Scheduler) updateCluster(_, newObj interface{}) {
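
Informer callbacks receive `interface{}`, so the handler must type-assert before touching the cache, bailing out with a log on mismatch. A self-contained sketch of that convert-or-bail pattern, with a toy type in place of the Cluster API object:

```go
package main

import "fmt"

type toyCluster struct{ Name string }

// cache is a hypothetical stand-in for the scheduler cache.
type cache struct{ clusters []*toyCluster }

func (c *cache) AddCluster(cl *toyCluster) { c.clusters = append(c.clusters, cl) }

// addCluster mirrors the handler above: assert the dynamic type first,
// log and return on mismatch, and only then touch the cache.
func addCluster(c *cache, obj interface{}) {
	cluster, ok := obj.(*toyCluster)
	if !ok {
		fmt.Printf("cannot convert to Cluster: %v\n", obj)
		return
	}
	fmt.Printf("add event for cluster %s\n", cluster.Name)
	c.AddCluster(cluster)
}

func main() {
	c := &cache{}
	addCluster(c, &toyCluster{Name: "member1"}) // handled
	addCluster(c, "not-a-cluster")              // rejected with a log
}
```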

View File

@ -11,8 +11,8 @@ import (
"github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
)
// IsMemberClusterReady tells whether the cluster status in 'Ready' condition.
func IsMemberClusterReady(clusterStatus *v1alpha1.ClusterStatus) bool {
// IsClusterReady tells whether the cluster status is in the 'Ready' condition.
func IsClusterReady(clusterStatus *v1alpha1.ClusterStatus) bool {
for _, condition := range clusterStatus.Conditions {
// TODO(RainbowMango): Condition type should be defined in API, and after that update this hard code accordingly.
if condition.Type == v1alpha1.ClusterConditionReady {
@ -24,24 +24,24 @@ func IsMemberClusterReady(clusterStatus *v1alpha1.ClusterStatus) bool {
return false
}
// GetMemberCluster returns the given Cluster resource
func GetMemberCluster(hostClient client.Client, clusterName string) (*v1alpha1.Cluster, error) {
memberCluster := &v1alpha1.Cluster{}
if err := hostClient.Get(context.TODO(), types.NamespacedName{Name: clusterName}, memberCluster); err != nil {
// GetCluster returns the given Cluster resource
func GetCluster(hostClient client.Client, clusterName string) (*v1alpha1.Cluster, error) {
cluster := &v1alpha1.Cluster{}
if err := hostClient.Get(context.TODO(), types.NamespacedName{Name: clusterName}, cluster); err != nil {
return nil, err
}
return memberCluster, nil
return cluster, nil
}
// BuildDynamicClusterClient builds dynamic client for informerFactory by clusterName,
// it will build kubeconfig from memberCluster resource and construct dynamic client.
func BuildDynamicClusterClient(hostClient client.Client, kubeClient kubeclientset.Interface, cluster string) (*DynamicClusterClient, error) {
memberCluster, err := GetMemberCluster(hostClient, cluster)
// it builds a kubeconfig from the Cluster resource and constructs a dynamic client.
func BuildDynamicClusterClient(hostClient client.Client, kubeClient kubeclientset.Interface, clusterName string) (*DynamicClusterClient, error) {
cluster, err := GetCluster(hostClient, clusterName)
if err != nil {
return nil, err
}
dynamicClusterClient, err := NewClusterDynamicClientSet(memberCluster, kubeClient)
dynamicClusterClient, err := NewClusterDynamicClientSet(cluster, kubeClient)
if err != nil {
return nil, err
}
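
`BuildDynamicClusterClient` is a lookup-then-construct helper: resolve the Cluster resource first, then derive a client from it. The sketch below reproduces that shape; `registry`, `getCluster`, and `newDynamicClientSet` are hypothetical stand-ins for the real store and constructors.

```go
package main

import (
	"errors"
	"fmt"
)

type toyCluster struct{ Name, APIEndpoint string }
type dynamicClient struct{ ClusterName string }

// registry is a hypothetical stand-in for the control-plane store.
var registry = map[string]*toyCluster{
	"member1": {Name: "member1", APIEndpoint: "https://10.10.10.10:6339"},
}

func getCluster(name string) (*toyCluster, error) {
	c, ok := registry[name]
	if !ok {
		return nil, errors.New("cluster not found: " + name)
	}
	return c, nil
}

func newDynamicClientSet(c *toyCluster) (*dynamicClient, error) {
	return &dynamicClient{ClusterName: c.Name}, nil
}

// buildDynamicClusterClient mirrors the fetch-then-construct flow above:
// resolve the Cluster resource first, then build a client from it.
func buildDynamicClusterClient(clusterName string) (*dynamicClient, error) {
	cluster, err := getCluster(clusterName)
	if err != nil {
		return nil, err
	}
	return newDynamicClientSet(cluster)
}

func main() {
	client, err := buildDynamicClusterClient("member1")
	fmt.Println(client, err)
}
```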

View File

@ -26,9 +26,9 @@ const (
// Define finalizers used by karmada system.
const (
// MemberClusterControllerFinalizer is added to MemberCluster to ensure PropagationWork as well as the
// ClusterControllerFinalizer is added to Cluster to ensure PropagationWork as well as the
// execution space (namespace) are deleted before the cluster object itself is deleted.
MemberClusterControllerFinalizer = "karmada.io/cluster-controller"
ClusterControllerFinalizer = "karmada.io/cluster-controller"
// ExecutionControllerFinalizer is added to PropagationWork to ensure the manifests propagated to a member cluster
// are deleted before the PropagationWork itself is deleted.
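
A finalizer such as `ClusterControllerFinalizer` keeps the object in a terminating state until the controller finishes cleanup and removes the string from `metadata.finalizers`. A minimal sketch of add/remove on a plain string slice (the real code operates on ObjectMeta):

```go
package main

import "fmt"

const clusterControllerFinalizer = "karmada.io/cluster-controller"

// addFinalizer appends the finalizer if it is not already present.
func addFinalizer(finalizers []string, f string) []string {
	for _, existing := range finalizers {
		if existing == f {
			return finalizers
		}
	}
	return append(finalizers, f)
}

// removeFinalizer drops the finalizer, unblocking deletion once the
// controller has cleaned up the execution space and PropagationWork.
func removeFinalizer(finalizers []string, f string) []string {
	var out []string
	for _, existing := range finalizers {
		if existing != f {
			out = append(out, existing)
		}
	}
	return out
}

func main() {
	fs := addFinalizer(nil, clusterControllerFinalizer)
	fmt.Println(fs)
	fmt.Println(removeFinalizer(fs, clusterControllerFinalizer))
}
```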

View File

@ -6,7 +6,7 @@ import (
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
memberclusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
propagationv1alpha1 "github.com/karmada-io/karmada/pkg/apis/propagationstrategy/v1alpha1"
)
@ -14,9 +14,9 @@ import (
var aggregatedScheme = runtime.NewScheme()
func init() {
var _ = scheme.AddToScheme(aggregatedScheme) // add Kubernetes schemes
var _ = propagationv1alpha1.AddToScheme(aggregatedScheme) // add propagation schemes
var _ = memberclusterv1alpha1.AddToScheme(aggregatedScheme) // add cluster schemes
var _ = scheme.AddToScheme(aggregatedScheme) // add Kubernetes schemes
var _ = propagationv1alpha1.AddToScheme(aggregatedScheme) // add propagation schemes
var _ = clusterv1alpha1.AddToScheme(aggregatedScheme) // add cluster schemes
}
// NewSchema returns a singleton schema set which aggregates Kubernetes' schemes and the extended schemes.
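
Scheme aggregation is additive registration against a single `runtime.Scheme`. Below is a sketch using client-go's scheme package plus one stubbed extension; `addExtendedToScheme` is a hypothetical stand-in for the generated karmada `AddToScheme` functions.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
)

// addExtendedToScheme is a hypothetical stand-in for an extension
// group's generated AddToScheme (e.g. the cluster or propagation APIs).
func addExtendedToScheme(s *runtime.Scheme) error {
	// A real generated AddToScheme registers its GroupVersion's types here.
	return nil
}

// newAggregatedScheme mirrors the init above: one scheme, many registrations.
func newAggregatedScheme() (*runtime.Scheme, error) {
	aggregated := runtime.NewScheme()
	if err := scheme.AddToScheme(aggregated); err != nil { // Kubernetes schemes
		return nil, err
	}
	if err := addExtendedToScheme(aggregated); err != nil { // extended schemes
		return nil, err
	}
	return aggregated, nil
}

func main() {
	s, err := newAggregatedScheme()
	fmt.Println(s != nil, err)
}
```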

View File

@ -9,16 +9,16 @@ import (
const executionSpacePrefix = "karmada-es-"
// GenerateExecutionSpaceName generates the execution space name for the given member cluster
func GenerateExecutionSpaceName(memberClusterName string) (string, error) {
if memberClusterName == "" {
func GenerateExecutionSpaceName(clusterName string) (string, error) {
if clusterName == "" {
return "", fmt.Errorf("the member cluster name is empty")
}
executionSpace := executionSpacePrefix + memberClusterName
executionSpace := executionSpacePrefix + clusterName
return executionSpace, nil
}
// GetMemberClusterName returns member cluster name for the given execution space
func GetMemberClusterName(executionSpaceName string) (string, error) {
// GetClusterName returns the member cluster name for the given execution space
func GetClusterName(executionSpaceName string) (string, error) {
if !strings.HasPrefix(executionSpaceName, executionSpacePrefix) {
return "", fmt.Errorf("the execution space name is in wrong format")
}
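
`GenerateExecutionSpaceName` and `GetClusterName` are inverse prefix operations, so a round trip should return the original cluster name. A standalone sketch of that round trip:

```go
package main

import (
	"fmt"
	"strings"
)

const executionSpacePrefix = "karmada-es-"

// generateExecutionSpaceName prefixes the cluster name, rejecting empty input.
func generateExecutionSpaceName(clusterName string) (string, error) {
	if clusterName == "" {
		return "", fmt.Errorf("the member cluster name is empty")
	}
	return executionSpacePrefix + clusterName, nil
}

// getClusterName strips the prefix, rejecting names that lack it.
func getClusterName(executionSpaceName string) (string, error) {
	if !strings.HasPrefix(executionSpaceName, executionSpacePrefix) {
		return "", fmt.Errorf("the execution space name is in the wrong format")
	}
	return strings.TrimPrefix(executionSpaceName, executionSpacePrefix), nil
}

func main() {
	space, _ := generateExecutionSpaceName("member1")
	name, _ := getClusterName(space)
	fmt.Println(space, name) // karmada-es-member1 member1
}
```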

View File

@ -4,7 +4,7 @@ import "testing"
func TestGenerateExecutionSpaceName(t *testing.T) {
type args struct {
memberClusterName string
clusterName string
}
tests := []struct {
name string
@ -13,19 +13,19 @@ func TestGenerateExecutionSpaceName(t *testing.T) {
wantErr bool
}{
{name: "normal cluster name",
args: args{memberClusterName: "member-cluster-normal"},
args: args{clusterName: "member-cluster-normal"},
want: "karmada-es-member-cluster-normal",
wantErr: false,
},
{name: "empty member cluster name",
args: args{memberClusterName: ""},
args: args{clusterName: ""},
want: "",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := GenerateExecutionSpaceName(tt.args.memberClusterName)
got, err := GenerateExecutionSpaceName(tt.args.clusterName)
if (err != nil) != tt.wantErr {
t.Errorf("GenerateExecutionSpaceName() error = %v, wantErr %v", err, tt.wantErr)
return
@ -37,7 +37,7 @@ func TestGenerateExecutionSpaceName(t *testing.T) {
}
}
func TestGetMemberClusterName(t *testing.T) {
func TestGetClusterName(t *testing.T) {
type args struct {
executionSpaceName string
}
@ -65,13 +65,13 @@ func TestGetMemberClusterName(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := GetMemberClusterName(tt.args.executionSpaceName)
got, err := GetClusterName(tt.args.executionSpaceName)
if (err != nil) != tt.wantErr {
t.Errorf("GetMemberClusterName() error = %v, wantErr %v", err, tt.wantErr)
t.Errorf("GetClusterName() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("GetMemberClusterName() got = %v, want %v", got, tt.want)
t.Errorf("GetClusterName() got = %v, want %v", got, tt.want)
}
})
}

View File

@ -108,7 +108,7 @@ func getClusterNameFromLabel(resource *unstructured.Unstructured) (string, error
klog.Errorf("Failed to get executionNamespace from label %s", value)
return "", err
}
cluster, err := names.GetMemberClusterName(executionNamespace)
cluster, err := names.GetClusterName(executionNamespace)
if err != nil {
klog.Errorf("Failed to get member cluster name by %s. Error: %v.", value, err)
return "", err

View File

@ -34,14 +34,14 @@ var _ = ginkgo.Describe("[propagation policy] propagation policy functionality t
ginkgo.It("propagate deployment", func() {
policyName := rand.String(6)
policyNamespace := testNamespace // keep policy in the same namespace with the resource
policy := helper.NewPolicyWithSingleDeployment(policyNamespace, policyName, deployment, memberClusterNames)
policy := helper.NewPolicyWithSingleDeployment(policyNamespace, policyName, deployment, clusterNames)
ginkgo.By(fmt.Sprintf("creating policy: %s/%s", policyNamespace, policyName), func() {
_, err = karmadaClient.PropagationstrategyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("check if resource appear in member clusters", func() {
for _, cluster := range memberClusters {
for _, cluster := range clusters {
clusterClient := getClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
@ -61,7 +61,7 @@ var _ = ginkgo.Describe("[propagation policy] propagation policy functionality t
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("check if resource disappear from member clusters", func() {
for _, cluster := range memberClusters {
for _, cluster := range clusters {
clusterClient := getClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

View File

@ -34,19 +34,19 @@ const (
// pollTimeout defines the time after which the poll operation times out.
pollTimeout = 60 * time.Second
// MinimumMemberCluster represents the minimum number of member clusters to run E2E test.
MinimumMemberCluster = 2
// MinimumCluster represents the minimum number of member clusters required to run the E2E tests.
MinimumCluster = 2
)
var (
kubeconfig string
restConfig *rest.Config
kubeClient kubernetes.Interface
karmadaClient karmada.Interface
memberClusters []*clusterapi.Cluster
memberClusterNames []string
memberClusterClients []*util.ClusterClient
testNamespace = fmt.Sprintf("karmada-e2e-%s", rand.String(3))
kubeconfig string
restConfig *rest.Config
kubeClient kubernetes.Interface
karmadaClient karmada.Interface
clusters []*clusterapi.Cluster
clusterNames []string
clusterClients []*util.ClusterClient
testNamespace = fmt.Sprintf("karmada-e2e-%s", rand.String(3))
)
func TestE2E(t *testing.T) {
@ -68,37 +68,37 @@ var _ = ginkgo.BeforeSuite(func() {
karmadaClient, err = karmada.NewForConfig(restConfig)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
memberClusters, err = fetchMemberClusters(karmadaClient)
clusters, err = fetchClusters(karmadaClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
var meetRequirement bool
meetRequirement, err = isMemberClusterMeetRequirements(memberClusters)
meetRequirement, err = isClusterMeetRequirements(clusters)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(meetRequirement).Should(gomega.BeTrue())
for _, cluster := range memberClusters {
memberClusterNames = append(memberClusterNames, cluster.Name)
for _, cluster := range clusters {
clusterNames = append(clusterNames, cluster.Name)
memberClusterClient, err := util.NewClusterClientSet(cluster, kubeClient)
clusterClient, err := util.NewClusterClientSet(cluster, kubeClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
memberClusterClients = append(memberClusterClients, memberClusterClient)
clusterClients = append(clusterClients, clusterClient)
}
gomega.Expect(memberClusterNames).Should(gomega.HaveLen(len(memberClusters)))
gomega.Expect(clusterNames).Should(gomega.HaveLen(len(clusters)))
err = setupTestNamespace(testNamespace, kubeClient, memberClusterClients)
err = setupTestNamespace(testNamespace, kubeClient, clusterClients)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteSetupTimeOut.Seconds())
var _ = ginkgo.AfterSuite(func() {
// clean up all namespaces we created in both the control plane and the member clusters.
// It will not return an error even if the namespace no longer exists, which may happen if setup failed.
err := cleanupTestNamespace(testNamespace, kubeClient, memberClusterClients)
err := cleanupTestNamespace(testNamespace, kubeClient, clusterClients)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteTeardownTimeOut.Seconds())
// fetchMemberClusters will fetch all member clusters we have.
func fetchMemberClusters(client karmada.Interface) ([]*clusterapi.Cluster, error) {
clusterList, err := client.MemberclusterV1alpha1().MemberClusters().List(context.TODO(), v1.ListOptions{})
// fetchClusters will fetch all member clusters we have.
func fetchClusters(client karmada.Interface) ([]*clusterapi.Cluster, error) {
clusterList, err := client.ClusterV1alpha1().Clusters().List(context.TODO(), v1.ListOptions{})
if err != nil {
return nil, err
}
@ -112,16 +112,16 @@ func fetchMemberClusters(client karmada.Interface) ([]*clusterapi.Cluster, error
return clusters, nil
}
// isMemberClusterMeetRequirements checks if current environment meet the requirements of E2E.
func isMemberClusterMeetRequirements(clusters []*clusterapi.Cluster) (bool, error) {
// isClusterMeetRequirements checks if the current environment meets the requirements of E2E.
func isClusterMeetRequirements(clusters []*clusterapi.Cluster) (bool, error) {
// check if the number of member clusters meets the requirement
if len(clusters) < MinimumMemberCluster {
return false, fmt.Errorf("needs at lease %d member cluster to run, but got: %d", MinimumMemberCluster, len(clusters))
if len(clusters) < MinimumCluster {
return false, fmt.Errorf("needs at lease %d member cluster to run, but got: %d", MinimumCluster, len(clusters))
}
// check if every member cluster's status is Ready
for _, cluster := range clusters {
if !util.IsMemberClusterReady(&cluster.Status) {
if !util.IsClusterReady(&cluster.Status) {
return false, fmt.Errorf("cluster %s not ready", cluster.GetName())
}
}
@ -132,14 +132,14 @@ func isMemberClusterMeetRequirements(clusters []*clusterapi.Cluster) (bool, erro
// setupTestNamespace will create a namespace in the control plane and all member clusters; most cases will run against it.
// We need a separate namespace because it makes it easier to clean up the resources deployed by the tests.
func setupTestNamespace(namespace string, kubeClient kubernetes.Interface, memberClusterClients []*util.ClusterClient) error {
func setupTestNamespace(namespace string, kubeClient kubernetes.Interface, clusterClients []*util.ClusterClient) error {
namespaceObj := helper.NewNamespace(namespace)
_, err := util.CreateNamespace(kubeClient, namespaceObj)
if err != nil {
return err
}
for _, clusterClient := range memberClusterClients {
for _, clusterClient := range clusterClients {
_, err = util.CreateNamespace(clusterClient.KubeClient, namespaceObj)
if err != nil {
return err
@ -150,13 +150,13 @@ func setupTestNamespace(namespace string, kubeClient kubernetes.Interface, membe
}
// cleanupTestNamespace will remove the namespace we set up earlier for the whole test run.
func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface, memberClusterClients []*util.ClusterClient) error {
func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface, clusterClients []*util.ClusterClient) error {
err := util.DeleteNamespace(kubeClient, namespace)
if err != nil {
return err
}
for _, clusterClient := range memberClusterClients {
for _, clusterClient := range clusterClients {
err = util.DeleteNamespace(clusterClient.KubeClient, namespace)
if err != nil {
return err
@ -167,7 +167,7 @@ func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface, mem
}
func getClusterClient(clusterName string) kubernetes.Interface {
for _, client := range memberClusterClients {
for _, client := range clusterClients {
if client.ClusterName == clusterName {
return client.KubeClient
}
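
The suite gates on two conditions before any spec runs: enough member clusters, and every one of them Ready. A condensed sketch of that gate; `toyCluster` and `isReady` are stand-ins for the Cluster API type and `util.IsClusterReady`.

```go
package main

import "fmt"

const minimumCluster = 2 // minimum member clusters needed by the suite

type toyCluster struct {
	Name  string
	Ready bool
}

// isReady stands in for util.IsClusterReady's condition scan.
func isReady(c *toyCluster) bool { return c.Ready }

// meetsRequirements mirrors isClusterMeetRequirements: enough clusters,
// and every one of them Ready.
func meetsRequirements(clusters []*toyCluster) (bool, error) {
	if len(clusters) < minimumCluster {
		return false, fmt.Errorf("needs at least %d member clusters to run, but got: %d", minimumCluster, len(clusters))
	}
	for _, c := range clusters {
		if !isReady(c) {
			return false, fmt.Errorf("cluster %s not ready", c.Name)
		}
	}
	return true, nil
}

func main() {
	ok, err := meetsRequirements([]*toyCluster{{"member1", true}, {"member2", true}})
	fmt.Println(ok, err) // true <nil>
}
```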