refactor membercluster controller and execution controller with controller-runtime (#55)
parent 28ecd3b0ff
commit 75d912aa5c
@@ -12,6 +12,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/component-base/logs"

@@ -26,7 +27,6 @@ import (
"github.com/huawei-cloudnative/karmada/pkg/controllers/execution"
"github.com/huawei-cloudnative/karmada/pkg/controllers/membercluster"
"github.com/huawei-cloudnative/karmada/pkg/controllers/policy"
"github.com/huawei-cloudnative/karmada/pkg/controllers/util"
karmadaclientset "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned"
)

@@ -134,23 +134,23 @@ func Run(opts *options.Options, stopChan <-chan struct{}) error {
}

func startControllers(opts *options.Options, stopChan <-chan struct{}) {
controllerConfig := &util.ControllerConfig{
HeadClusterConfig: opts.KubeConfig,
}

if err := membercluster.StartMemberClusterController(controllerConfig, stopChan); err != nil {
klog.Fatalf("Failed to start member cluster controller. error: %v", err)
}

if err := execution.StartExecutionController(controllerConfig, stopChan); err != nil {
klog.Fatalf("Failed to start execution controller. error: %v", err)
}
}

func setupControllers(mgr controllerruntime.Manager) {
resetConfig := mgr.GetConfig()
dynamicClientSet := dynamic.NewForConfigOrDie(resetConfig)
karmadaClient := karmadaclientset.NewForConfigOrDie(resetConfig)
kubeClientSet := kubernetes.NewForConfigOrDie(resetConfig)

MemberClusterController := &membercluster.Controller{
Client: mgr.GetClient(),
KubeClientSet: kubeClientSet,
EventRecorder: mgr.GetEventRecorderFor(membercluster.ControllerName),
}
if err := MemberClusterController.SetupWithManager(mgr); err != nil {
klog.Fatalf("Failed to setup membercluster controller: %v", err)
}

policyController := &policy.PropagationPolicyController{
Client: mgr.GetClient(),

@@ -171,4 +171,15 @@ func setupControllers(mgr controllerruntime.Manager) {
if err := bindingController.SetupWithManager(mgr); err != nil {
klog.Fatalf("Failed to setup binding controller: %v", err)
}

executionController := &execution.Controller{
Client: mgr.GetClient(),
KubeClientSet: kubeClientSet,
KarmadaClient: karmadaClient,
EventRecorder: mgr.GetEventRecorderFor(execution.ControllerName),
}
if err := executionController.SetupWithManager(mgr); err != nil {
klog.Fatalf("Failed to setup execution controller: %v", err)
}
}
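The hunks above appear to come from the controller-manager startup path: the removed startControllers launched each controller with its own stop channel, while the added setupControllers registers reconcilers with a shared controller-runtime manager. The new Run wiring is not part of these hunks, so the following is only a sketch of how such a setup function is usually driven at the controller-runtime version this commit targets; the function name runControllerManager and the empty manager Options are assumptions, not code from this commit.

package app

import (
    "k8s.io/klog/v2"
    controllerruntime "sigs.k8s.io/controller-runtime"
)

// runControllerManager is a sketch only (not part of this commit).
func runControllerManager(stopChan <-chan struct{}) error {
    // The manager owns the shared cache, client and event recorder that
    // setupControllers hands to each reconciler via mgr.GetClient() and
    // mgr.GetEventRecorderFor().
    mgr, err := controllerruntime.NewManager(controllerruntime.GetConfigOrDie(), controllerruntime.Options{})
    if err != nil {
        return err
    }

    setupControllers(mgr)

    // Start blocks until stopChan is closed; it starts the cache, waits for
    // it to sync, then runs every registered controller.
    if err := mgr.Start(stopChan); err != nil {
        klog.Errorf("controller manager exited: %v", err)
        return err
    }
    return nil
}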
@@ -3,289 +3,97 @@ package execution

import (
"context"
"fmt"
"time"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/huawei-cloudnative/karmada/pkg/apis/membercluster/v1alpha1"
pagationstrategy "github.com/huawei-cloudnative/karmada/pkg/apis/propagationstrategy/v1alpha1"
propagationstrategy "github.com/huawei-cloudnative/karmada/pkg/apis/propagationstrategy/v1alpha1"
"github.com/huawei-cloudnative/karmada/pkg/controllers/util"
clientset "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned"
karmadakubecheme "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned/scheme"
informers "github.com/huawei-cloudnative/karmada/pkg/generated/informers/externalversions"
listers "github.com/huawei-cloudnative/karmada/pkg/generated/listers/propagationstrategy/v1alpha1"
karmadaclientset "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned"
)

const (
controllerAgentName = "execution-controller"
finalizer = "karmada.io/execution-controller"
memberClusterNS = "karmada-cluster"
// ControllerName is the controller name that will be used when reporting events.
ControllerName = "execution-controller"
finalizer = "karmada.io/execution-controller"
memberClusterNS = "karmada-cluster"
)

// Controller is the controller implementation for PropagationWork resources
// Controller is to sync PropagationWork.
type Controller struct {
// karmadaClientSet is the clientset for our own API group.
karmadaClientSet clientset.Interface

// kubeClientSet is a standard kubernetes clientset.
kubeClientSet kubernetes.Interface

karmadaInformerFactory informers.SharedInformerFactory
propagationWorkLister listers.PropagationWorkLister
propagationWorkSynced cache.InformerSynced

// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface

// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
eventRecorder record.EventRecorder
client.Client // used to operate PropagationWork resources.
KubeClientSet kubernetes.Interface // used to get kubernetes resources.
KarmadaClient karmadaclientset.Interface // used to get MemberCluster resources.
EventRecorder record.EventRecorder
}

// StartExecutionController starts a new execution controller.
func StartExecutionController(config *util.ControllerConfig, stopChan <-chan struct{}) error {
controller, err := newExecutionController(config)
if err != nil {
return err
}
klog.Infof("Starting execution controller")
// Reconcile performs a full reconciliation for the object referred to by the Request.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).Infof("Reconciling PropagationWork %s", req.NamespacedName.String())

go wait.Until(func() {
if err := controller.Run(2, stopChan); err != nil {
klog.Errorf("controller exit unexpected! will restart later, controller: %s, error: %v", controllerAgentName, err)
}
}, 1*time.Second, stopChan)

return nil
}

// newExecutionController returns a new controller.
func newExecutionController(config *util.ControllerConfig) (*Controller, error) {
headClusterConfig := rest.CopyConfig(config.HeadClusterConfig)
kubeClientSet := kubernetes.NewForConfigOrDie(headClusterConfig)

karmadaClientSet := clientset.NewForConfigOrDie(headClusterConfig)
karmadaInformerFactory := informers.NewSharedInformerFactory(karmadaClientSet, 0)
PropagationWorkInformer := karmadaInformerFactory.Propagationstrategy().V1alpha1().PropagationWorks()

// Add karmada types to the default Kubernetes Scheme so Events can be logged for karmada types.
utilruntime.Must(karmadakubecheme.AddToScheme(scheme.Scheme))

// Create event broadcaster
klog.V(1).Infof("Creating event broadcaster for %s", controllerAgentName)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClientSet.CoreV1().Events("")})

controller := &Controller{
karmadaClientSet: karmadaClientSet,
kubeClientSet: kubeClientSet,
karmadaInformerFactory: karmadaInformerFactory,
propagationWorkLister: PropagationWorkInformer.Lister(),
propagationWorkSynced: PropagationWorkInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}),
}

klog.Info("Setting up event handlers")
PropagationWorkInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
klog.Infof("Received add event. just add to queue.")
controller.enqueueEventResource(obj)
},
UpdateFunc: func(old, new interface{}) {
klog.Infof("Received update event. just add to queue.")
controller.enqueueEventResource(new)
},
DeleteFunc: func(obj interface{}) {
klog.Infof("Received delete event. just add to queue.")
controller.enqueueEventResource(obj)
},
})

return controller, nil
}

// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(workerNumber int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()

klog.Infof("Run controller: %s", controllerAgentName)
c.karmadaInformerFactory.Start(stopCh)

// Wait for the caches to be synced before starting workers
klog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.propagationWorkSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}

klog.Infof("Starting workers for controller. worker number: %d, controller: %s", workerNumber, controllerAgentName)
for i := 0; i < workerNumber; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}

// Controller will block here until stopCh is closed.
<-stopCh
klog.Info("Shutting down workers")

return nil
}

// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}

// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()

if shutdown {
return false
}

// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// PropagateStrategy resource to be synced.
if err := c.syncHandler(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
klog.Infof("Successfully synced '%s'", key)
return nil
}(obj)

if err != nil {
utilruntime.HandleError(err)
return true
}

return true
}

// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the PropagateStrategy resource
// with the current status of the resource.
func (c *Controller) syncHandler(key string) error {
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}

// Get the resource with this namespace/name
propagationWork, err := c.propagationWorkLister.PropagationWorks(namespace).Get(name)
if err != nil {
// The PropagationWork resource may no longer exist, in which case we stop
// processing.
work := &propagationstrategy.PropagationWork{}
if err := c.Client.Get(context.TODO(), req.NamespacedName, work); err != nil {
// The resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("PropagationWork '%s' in work queue no longer exists", key))
return nil
return controllerruntime.Result{}, nil
}

return err
return controllerruntime.Result{Requeue: true}, err
}

if propagationWork.GetDeletionTimestamp() != nil {
applied := c.isResourceApplied(&propagationWork.Status)
if !work.DeletionTimestamp.IsZero() {
applied := c.isResourceApplied(&work.Status)
if applied {
err = c.deletePropagationWork(propagationWork)
err := c.deletePropagationWork(work)
if err != nil {
klog.Infof("Failed to delete propagationwork %v, err is %v", propagationWork.Name, err)
return err
klog.Errorf("Failed to delete propagationWork %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
return controllerruntime.Result{Requeue: true}, err
}
}
return c.removeFinalizer(propagationWork)
return c.removeFinalizer(work)
}

return c.syncWork(work)
}

// SetupWithManager creates a controller and register to controller manager.
func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).For(&propagationstrategy.PropagationWork{}).Complete(c)
}
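The hunk above interleaves the removed informer/workqueue plumbing with the added controller-runtime code, which can be hard to read without the original +/- markers. A distilled view of just the new reconcile path, with the finalizer bookkeeping and dispatch details elided, might look like this (a sketch, not a verbatim copy of the commit; it assumes the package imports shown above):

func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
    work := &propagationstrategy.PropagationWork{}
    if err := c.Client.Get(context.TODO(), req.NamespacedName, work); err != nil {
        if errors.IsNotFound(err) {
            // Already gone; nothing left to do.
            return controllerruntime.Result{}, nil
        }
        // Transient read error: ask for a retry.
        return controllerruntime.Result{Requeue: true}, err
    }

    if !work.DeletionTimestamp.IsZero() {
        // The full version first deletes the dispatched resources from the
        // member cluster, then drops the finalizer.
        return c.removeFinalizer(work)
    }
    // Ensure the finalizer and dispatch the workload to the member cluster.
    return c.syncWork(work)
}

SetupWithManager then registers this reconciler with the manager, whose shared cache watches PropagationWork objects and feeds change events into Reconcile; the hand-rolled event handlers, workqueue and worker goroutines above become unnecessary.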

func (c *Controller) syncWork(propagationWork *propagationstrategy.PropagationWork) (controllerruntime.Result, error) {
// ensure finalizer
updated, err := c.ensureFinalizer(propagationWork)
if err != nil {
klog.Infof("Failed to ensure finalizer for propagationwork %q", propagationWork.Name)
return err
klog.Errorf("Failed to ensure finalizer for propagationWork %q, namespace is %v, err is %v", propagationWork.Name, propagationWork.Namespace, err)
return controllerruntime.Result{Requeue: true}, err
} else if updated {
return nil
return controllerruntime.Result{}, nil
}

err = c.dispatchPropagationWork(propagationWork)
if err != nil {
return err
klog.Errorf("Failed to dispatch propagationWork %q, namespace is %v, err is %v", propagationWork.Name, propagationWork.Namespace, err)
return controllerruntime.Result{Requeue: true}, err
}

klog.Infof("Sync propagationWork: %s/%s", propagationWork.Namespace, propagationWork.Name)
return nil
return controllerruntime.Result{}, nil
}

// enqueueFoo takes a resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than PropagationWork.
func (c *Controller) enqueueEventResource(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
c.workqueue.Add(key)
}

// IsMemberClusterReady checking readiness for the given member cluster
// isMemberClusterReady checking readiness for the given member cluster
func (c *Controller) isMemberClusterReady(clusterStatus *v1alpha1.MemberClusterStatus) bool {
for _, condition := range clusterStatus.Conditions {
if condition.Type == "ClusterReady" {

@@ -297,8 +105,8 @@ func (c *Controller) isMemberClusterReady(clusterStatus *v1alpha1.MemberClusterS
return false
}

// IsResourceExist checking weather resource exist in host cluster
func (c *Controller) isResourceApplied(propagationWorkStatus *pagationstrategy.PropagationWorkStatus) bool {
// isResourceApplied checking weather resource has been dispatched to member cluster or not
func (c *Controller) isResourceApplied(propagationWorkStatus *propagationstrategy.PropagationWorkStatus) bool {
for _, condition := range propagationWorkStatus.Conditions {
if condition.Type == "Applied" {
if condition.Status == v1.ConditionTrue {

@@ -309,22 +117,21 @@ func (c *Controller) isResourceApplied(propagationWorkStatus *pagationstrategy.P
return false
}

func (c *Controller) deletePropagationWork(propagationWork *pagationstrategy.PropagationWork) error {
func (c *Controller) deletePropagationWork(propagationWork *propagationstrategy.PropagationWork) error {
// TODO(RainbowMango): retrieve member cluster from the local cache instead of a real request to API server.
membercluster, err := c.karmadaClientSet.MemberclusterV1alpha1().MemberClusters(memberClusterNS).Get(context.TODO(), propagationWork.Namespace, v1.GetOptions{})
memberCluster, err := c.KarmadaClient.MemberclusterV1alpha1().MemberClusters(memberClusterNS).Get(context.TODO(), propagationWork.Namespace, v1.GetOptions{})
if err != nil {
klog.Errorf("Failed to get status of the given member cluster")
klog.Errorf("Failed to get status of the given member cluster %s", propagationWork.Namespace)
return err
}

if !c.isMemberClusterReady(&membercluster.Status) {
klog.Errorf("The status of the given member cluster is unready")
return fmt.Errorf("cluster %s not ready, requeuing operation until cluster state is ready", membercluster.Name)
if !c.isMemberClusterReady(&memberCluster.Status) {
klog.Errorf("The status of the given member cluster %s is unready", memberCluster.Name)
return fmt.Errorf("cluster %s not ready, requeuing operation until cluster state is ready", memberCluster.Name)
}

memberclusterDynamicClient, err := util.NewClusterDynamicClientSet(membercluster, c.kubeClientSet, membercluster.Spec.SecretRef.Namespace)
memberClusterDynamicClient, err := util.NewClusterDynamicClientSet(memberCluster, c.KubeClientSet)
if err != nil {
c.eventRecorder.Eventf(membercluster, corev1.EventTypeWarning, "MalformedClusterConfig", err.Error())
return err
}

@@ -336,9 +143,9 @@ func (c *Controller) deletePropagationWork(propagationWork *pagationstrategy.Pro
return err
}

err = c.deleteResource(memberclusterDynamicClient, workload)
err = c.deleteResource(memberClusterDynamicClient, workload)
if err != nil {
klog.Errorf("Failed to delete resource in the given member cluster, err is %v", err)
klog.Errorf("Failed to delete resource in the given member cluster %v, err is %v", memberCluster.Name, err)
return err
}
}

@@ -346,22 +153,22 @@ func (c *Controller) deletePropagationWork(propagationWork *pagationstrategy.Pro
return nil
}

func (c *Controller) dispatchPropagationWork(propagationWork *pagationstrategy.PropagationWork) error {
func (c *Controller) dispatchPropagationWork(propagationWork *propagationstrategy.PropagationWork) error {
// TODO(RainbowMango): retrieve member cluster from the local cache instead of a real request to API server.
membercluster, err := c.karmadaClientSet.MemberclusterV1alpha1().MemberClusters(memberClusterNS).Get(context.TODO(), propagationWork.Namespace, v1.GetOptions{})
memberCluster, err := c.KarmadaClient.MemberclusterV1alpha1().MemberClusters(memberClusterNS).Get(context.TODO(), propagationWork.Namespace, v1.GetOptions{})
if err != nil {
klog.Errorf("Failed to get status of the given member cluster")
klog.Errorf("Failed to get status of the given member cluster %s", propagationWork.Namespace)
return err
}

if !c.isMemberClusterReady(&membercluster.Status) {
klog.Errorf("The status of the given member cluster is unready")
return fmt.Errorf("cluster %s not ready, requeuing operation until cluster state is ready", membercluster.Name)
if !c.isMemberClusterReady(&memberCluster.Status) {
klog.Errorf("The status of the given member cluster %s is unready", memberCluster.Name)
return fmt.Errorf("cluster %s is not ready, requeuing operation until cluster state is ready", memberCluster.Name)
}

err = c.syncToMemberClusters(membercluster, propagationWork)
err = c.syncToMemberClusters(memberCluster, propagationWork)
if err != nil {
klog.Infof("Failed to delete propagationwork %v, err is %v", propagationWork.Name, err)
klog.Errorf("Failed to dispatch propagationWork %v, namespace is %v, err is %v", propagationWork.Name, propagationWork.Namespace, err)
return err
}

@@ -369,10 +176,9 @@ func (c *Controller) dispatchPropagationWork(propagationWork *pagationstrategy.P
}

// syncToMemberClusters ensures that the state of the given object is synchronized to member clusters.
func (c *Controller) syncToMemberClusters(membercluster *v1alpha1.MemberCluster, propagationWork *pagationstrategy.PropagationWork) error {
memberclusterDynamicClient, err := util.NewClusterDynamicClientSet(membercluster, c.kubeClientSet, membercluster.Spec.SecretRef.Namespace)
func (c *Controller) syncToMemberClusters(membercluster *v1alpha1.MemberCluster, propagationWork *propagationstrategy.PropagationWork) error {
memberClusterDynamicClient, err := util.NewClusterDynamicClientSet(membercluster, c.KubeClientSet)
if err != nil {
c.eventRecorder.Eventf(membercluster, corev1.EventTypeWarning, "MalformedClusterConfig", err.Error())
return err
}

@@ -386,21 +192,21 @@ func (c *Controller) syncToMemberClusters(membercluster *v1alpha1.MemberCluster,

applied := c.isResourceApplied(&propagationWork.Status)
if applied {
err = c.updateResource(memberclusterDynamicClient, workload)
err = c.updateResource(memberClusterDynamicClient, workload)
if err != nil {
klog.Errorf("Failed to update resource in the given member cluster, err is %v", err)
klog.Errorf("Failed to update resource in the given member cluster %s, err is %v", membercluster.Name, err)
return err
}
} else {
err = c.createResource(memberclusterDynamicClient, workload)
err = c.createResource(memberClusterDynamicClient, workload)
if err != nil {
klog.Errorf("Failed to create resource in the given member cluster,err is %v", err)
klog.Errorf("Failed to create resource in the given member cluster %s, err is %v", membercluster.Name, err)
return err
}

err := c.updateAppliedCondition(propagationWork)
if err != nil {
klog.Errorf("Failed to update applied status for given propagationwork %v, err is %v", propagationWork.Name, err)
klog.Errorf("Failed to update applied status for given propagationWork %v, namespace is %v, err is %v", propagationWork.Name, propagationWork.Namespace, err)
return err
}
}

@@ -409,20 +215,21 @@ func (c *Controller) syncToMemberClusters(membercluster *v1alpha1.MemberCluster,
}

// deleteResource delete resource in member cluster
func (c *Controller) deleteResource(memberclusterDynamicClient *util.DynamicClusterClient, workload *unstructured.Unstructured) error {
func (c *Controller) deleteResource(memberClusterDynamicClient *util.DynamicClusterClient, workload *unstructured.Unstructured) error {
// start delete resource in member cluster
// todo: get GVR by RESTMappings
groupVersion, err := schema.ParseGroupVersion(workload.GetAPIVersion())
if err != nil {
return fmt.Errorf("can't get parse groupVersion[namespace: %s name: %s kind: %s]. error: %v", workload.GetNamespace(),
return fmt.Errorf("can't parse groupVersion[namespace: %s name: %s kind: %s]. error: %v", workload.GetNamespace(),
workload.GetName(), util.ResourceKindMap[workload.GetKind()], err)
}
dynamicResource := schema.GroupVersionResource{Group: groupVersion.Group, Version: groupVersion.Version, Resource: util.ResourceKindMap[workload.GetKind()]}
err = memberclusterDynamicClient.DynamicClientSet.Resource(dynamicResource).Namespace(workload.GetNamespace()).Delete(context.TODO(), workload.GetName(), v1.DeleteOptions{})
err = memberClusterDynamicClient.DynamicClientSet.Resource(dynamicResource).Namespace(workload.GetNamespace()).Delete(context.TODO(), workload.GetName(), v1.DeleteOptions{})
if apierrors.IsNotFound(err) {
err = nil
}
if err != nil {
klog.Infof("Failed to delete resource %v, err is %v ", workload.GetName(), err)
klog.Errorf("Failed to delete resource %v, err is %v ", workload.GetName(), err)
return err
}
return nil

@@ -431,9 +238,10 @@ func (c *Controller) deleteResource(memberclusterDynamicClus
// createResource create resource in member cluster
func (c *Controller) createResource(memberclusterDynamicClient *util.DynamicClusterClient, workload *unstructured.Unstructured) error {
// start create resource in member cluster
// todo: get GVR by RESTMapping
groupVersion, err := schema.ParseGroupVersion(workload.GetAPIVersion())
if err != nil {
return fmt.Errorf("can't get parse groupVersion[namespace: %s name: %s kind: %s]. error: %v", workload.GetNamespace(),
return fmt.Errorf("can't parse groupVersion[namespace: %s name: %s kind: %s]. error: %v", workload.GetNamespace(),
workload.GetName(), util.ResourceKindMap[workload.GetKind()], err)
}
dynamicResource := schema.GroupVersionResource{Group: groupVersion.Group, Version: groupVersion.Version, Resource: util.ResourceKindMap[workload.GetKind()]}

@@ -442,7 +250,7 @@ func (c *Controller) createResource(memberclusterDynamicClus
if apierrors.IsAlreadyExists(err) {
return nil
}
klog.Infof("Failed to create resource %v, err is %v ", workload.GetName(), err)
klog.Errorf("Failed to create resource %v, err is %v ", workload.GetName(), err)
return err
}
return nil

@@ -451,38 +259,42 @@ func (c *Controller) createResource(memberclusterDynamicClus
// updateResource update resource in member cluster
func (c *Controller) updateResource(memberclusterDynamicClient *util.DynamicClusterClient, workload *unstructured.Unstructured) error {
// start update resource in member cluster
// todo: get GVR by RESTMapping
groupVersion, err := schema.ParseGroupVersion(workload.GetAPIVersion())
if err != nil {
return fmt.Errorf("can't get parse groupVersion[namespace: %s name: %s kind: %s]. error: %v", workload.GetNamespace(),
return fmt.Errorf("can't parse groupVersion[namespace: %s name: %s kind: %s]. error: %v", workload.GetNamespace(),
workload.GetName(), util.ResourceKindMap[workload.GetKind()], err)
}
dynamicResource := schema.GroupVersionResource{Group: groupVersion.Group, Version: groupVersion.Version, Resource: util.ResourceKindMap[workload.GetKind()]}
_, err = memberclusterDynamicClient.DynamicClientSet.Resource(dynamicResource).Namespace(workload.GetNamespace()).Update(context.TODO(), workload, v1.UpdateOptions{})
if err != nil {
klog.Infof("Failed to update resource %v, err is %v ", workload.GetName(), err)
klog.Errorf("Failed to update resource %v, err is %v ", workload.GetName(), err)
return err
}
return nil
}

// removeFinalizer remove finalizer from the given propagationWork
func (c *Controller) removeFinalizer(propagationWork *pagationstrategy.PropagationWork) error {
func (c *Controller) removeFinalizer(propagationWork *propagationstrategy.PropagationWork) (controllerruntime.Result, error) {
accessor, err := meta.Accessor(propagationWork)
if err != nil {
return err
return controllerruntime.Result{Requeue: true}, err
}
finalizers := sets.NewString(accessor.GetFinalizers()...)
if !finalizers.Has(finalizer) {
return nil
return controllerruntime.Result{}, nil
}
finalizers.Delete(finalizer)
accessor.SetFinalizers(finalizers.List())
_, err = c.karmadaClientSet.PropagationstrategyV1alpha1().PropagationWorks(propagationWork.Namespace).Update(context.TODO(), propagationWork, v1.UpdateOptions{})
return err
err = c.Client.Update(context.TODO(), propagationWork)
if err != nil {
return controllerruntime.Result{Requeue: true}, err
}
return controllerruntime.Result{}, nil
}

// ensureFinalizer ensure finalizer for the given PropagationWork
func (c *Controller) ensureFinalizer(propagationWork *pagationstrategy.PropagationWork) (bool, error) {
func (c *Controller) ensureFinalizer(propagationWork *propagationstrategy.PropagationWork) (bool, error) {
accessor, err := meta.Accessor(propagationWork)
if err != nil {
return false, err

@@ -493,12 +305,12 @@ func (c *Controller) ensureFinalizer(propagationWork *pagationstrategy.Propagati
}
finalizers.Insert(finalizer)
accessor.SetFinalizers(finalizers.List())
_, err = c.karmadaClientSet.PropagationstrategyV1alpha1().PropagationWorks(propagationWork.Namespace).Update(context.TODO(), propagationWork, v1.UpdateOptions{})
err = c.Client.Update(context.TODO(), propagationWork)
return true, err
}

// updateAppliedCondition update the Applied condition for the given PropagationWork
func (c *Controller) updateAppliedCondition(propagationWork *pagationstrategy.PropagationWork) error {
func (c *Controller) updateAppliedCondition(propagationWork *propagationstrategy.PropagationWork) error {
currentTime := v1.Now()
propagationWorkApplied := "Applied"
appliedSuccess := "AppliedSuccess"

@@ -511,6 +323,6 @@ func (c *Controller) updateAppliedCondition(propagationWork *pagationstrategy.Pr
LastTransitionTime: currentTime,
}
propagationWork.Status.Conditions = append(propagationWork.Status.Conditions, newPropagationWorkAppliedCondition)
_, err := c.karmadaClientSet.PropagationstrategyV1alpha1().PropagationWorks(propagationWork.Namespace).Update(context.TODO(), propagationWork, v1.UpdateOptions{})
err := c.Client.Update(context.TODO(), propagationWork)
return err
}
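Across the rewritten execution controller the reconcile path settles on three return shapes. The helper below summarizes how controller-runtime treats each of them; it is illustrative only, not part of the commit, and assumes the package's existing imports (time, controllerruntime):

// reconcileOutcome is an illustrative helper, not code from this commit.
func reconcileOutcome(err error, needsPeriodicResync bool) (controllerruntime.Result, error) {
    if err != nil {
        // A non-nil error always requeues the request with exponential
        // backoff; the explicit Requeue flag used throughout the diff is
        // belt-and-braces rather than strictly required.
        return controllerruntime.Result{Requeue: true}, err
    }
    if needsPeriodicResync {
        // Requeue after a fixed delay even on success; the member cluster
        // controller below uses this to refresh cluster status every 10s.
        return controllerruntime.Result{RequeueAfter: 10 * time.Second}, nil
    }
    // Done: the request is dropped until the next watch event arrives.
    return controllerruntime.Result{}, nil
}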
@@ -10,305 +10,108 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/huawei-cloudnative/karmada/pkg/apis/membercluster/v1alpha1"
"github.com/huawei-cloudnative/karmada/pkg/controllers/util"
clientset "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned"
karmadakubecheme "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned/scheme"
informers "github.com/huawei-cloudnative/karmada/pkg/generated/informers/externalversions"
listers "github.com/huawei-cloudnative/karmada/pkg/generated/listers/membercluster/v1alpha1"
)

const (
controllerAgentName = "membercluster-controller"
// ControllerName is the controller name that will be used when reporting events.
ControllerName = "membercluster-controller"
finalizer = "karmada.io/membercluster-controller"
executionSpaceLabelKey = "karmada.io/executionspace"
executionSpaceLabelValue = ""
)

// Controller is the controller implementation for membercluster resources
// Controller is to sync MemberCluster.
type Controller struct {
// karmadaClientSet is the clientset for our own API group.
karmadaClientSet clientset.Interface

// kubeClientSet is a standard kubernetes clientset.
kubeClientSet kubernetes.Interface

karmadaInformerFactory informers.SharedInformerFactory
memberclusterLister listers.MemberClusterLister
memberclusterSynced cache.InformerSynced

// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface

// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
eventRecorder record.EventRecorder
client.Client // used to operate MemberCluster resources.
KubeClientSet kubernetes.Interface // used to get kubernetes resources.
EventRecorder record.EventRecorder
}

// StartMemberClusterController starts a new cluster controller.
func StartMemberClusterController(config *util.ControllerConfig, stopChan <-chan struct{}) error {
controller, err := newMemberClusterController(config)
if err != nil {
return err
}
klog.Infof("Starting member cluster controller")
// Reconcile performs a full reconciliation for the object referred to by the Request.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).Infof("Reconciling memberCluster %s", req.NamespacedName.String())

go wait.Until(func() {
if err := controller.Run(2, stopChan); err != nil {
klog.Errorf("controller exit unexpected! will restart later, controller: %s, error: %v", controllerAgentName, err)
}
}, 1*time.Second, stopChan)

return nil
}

// newMemberClusterController returns a new controller.
func newMemberClusterController(config *util.ControllerConfig) (*Controller, error) {

headClusterConfig := rest.CopyConfig(config.HeadClusterConfig)
kubeClientSet := kubernetes.NewForConfigOrDie(headClusterConfig)

karmadaClientSet := clientset.NewForConfigOrDie(headClusterConfig)
karmadaInformerFactory := informers.NewSharedInformerFactory(karmadaClientSet, 10*time.Second)
memberclusterInformer := karmadaInformerFactory.Membercluster().V1alpha1().MemberClusters()

// Add karmada types to the default Kubernetes Scheme so Events can be logged for karmada types.
utilruntime.Must(karmadakubecheme.AddToScheme(scheme.Scheme))

// Create event broadcaster
klog.V(1).Infof("Creating event broadcaster for %s", controllerAgentName)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClientSet.CoreV1().Events("")})

controller := &Controller{
karmadaClientSet: karmadaClientSet,
kubeClientSet: kubeClientSet,
karmadaInformerFactory: karmadaInformerFactory,
memberclusterLister: memberclusterInformer.Lister(),
memberclusterSynced: memberclusterInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}),
}

klog.Info("Setting up event handlers")
memberclusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
controller.enqueueEventResource(obj)
},
UpdateFunc: func(old, new interface{}) {
controller.enqueueEventResource(new)
},
DeleteFunc: func(obj interface{}) {
controller.enqueueEventResource(obj)
},
})

return controller, nil
}

// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(workerNumber int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()

klog.Infof("Run controller: %s", controllerAgentName)
c.karmadaInformerFactory.Start(stopCh)

// Wait for the caches to be synced before starting workers
klog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.memberclusterSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}

klog.Infof("Starting workers for controller. worker number: %d, controller: %s", workerNumber, controllerAgentName)
for i := 0; i < workerNumber; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}

// Controller will block here until stopCh is closed.
<-stopCh
klog.Info("Shutting down workers")

return nil
}

// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}

// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()

if shutdown {
return false
}

// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// PropagateStrategy resource to be synced.
if err := c.syncHandler(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
return nil
}(obj)

if err != nil {
utilruntime.HandleError(err)
return true
}

return true
}

// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the PropagateStrategy resource
// with the current status of the resource.
func (c *Controller) syncHandler(key string) error {
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}

// Get the resource with this namespace/name
membercluster, err := c.memberclusterLister.MemberClusters(namespace).Get(name)
if err != nil {
// The membercluster resource may no longer exist, in which case we stop
// processing.
memberCluster := &v1alpha1.MemberCluster{}
if err := c.Client.Get(context.TODO(), req.NamespacedName, memberCluster); err != nil {
// The resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("membercluster '%s' in work queue no longer exists", key))
return nil
return controllerruntime.Result{}, nil
}

return err
return controllerruntime.Result{Requeue: true}, err
}

if membercluster.GetDeletionTimestamp() != nil {
return c.removeMemberCluster(membercluster)
if !memberCluster.DeletionTimestamp.IsZero() {
return c.removeMemberCluster(memberCluster)
}

err = c.createExecutionSpace(membercluster)
return c.syncMemberCluster(memberCluster)
}

// SetupWithManager creates a controller and register to controller manager.
func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).For(&v1alpha1.MemberCluster{}).Complete(c)
}

func (c *Controller) syncMemberCluster(memberCluster *v1alpha1.MemberCluster) (controllerruntime.Result, error) {
// create execution space
err := c.createExecutionSpace(memberCluster)
if err != nil {
return err
return controllerruntime.Result{Requeue: true}, err
}

// ensure finalizer
updated, err := c.ensureFinalizer(membercluster)
updated, err := c.ensureFinalizer(memberCluster)
if err != nil {
klog.Infof("Failed to ensure finalizer for membercluster %q", membercluster.Name)
return err
return controllerruntime.Result{Requeue: true}, err
} else if updated {
return nil
}

// create a ClusterClient for the given member cluster
memberclusterClient, err := util.NewClusterClientSet(membercluster, c.kubeClientSet, membercluster.Spec.SecretRef.Namespace)
if err != nil {
c.eventRecorder.Eventf(membercluster, corev1.EventTypeWarning, "MalformedClusterConfig", err.Error())
return err
return controllerruntime.Result{}, nil
}

// update status of the given member cluster
// TODO(RainbowMango): need to check errors and return error if update status failed.
updateIndividualClusterStatus(membercluster, c.karmadaClientSet, memberclusterClient)
// TODO: update status of member cluster in status controller.
updateIndividualClusterStatus(memberCluster, c.Client, c.KubeClientSet)

return nil
return controllerruntime.Result{RequeueAfter: 10 * time.Second}, nil
}

// enqueueFoo takes a resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than membercluster.
func (c *Controller) enqueueEventResource(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
c.workqueue.Add(key)
}

func (c *Controller) removeMemberCluster(membercluster *v1alpha1.MemberCluster) error {
err := c.removeExecutionSpace(membercluster)
func (c *Controller) removeMemberCluster(memberCluster *v1alpha1.MemberCluster) (controllerruntime.Result, error) {
err := c.removeExecutionSpace(memberCluster)
if apierrors.IsNotFound(err) {
return c.removeFinalizer(membercluster)
return c.removeFinalizer(memberCluster)
}
if err != nil {
klog.Errorf("Failed to remove execution space %v, err is %v", membercluster.Name, err)
return err
klog.Errorf("Failed to remove execution space %v, err is %v", memberCluster.Name, err)
return controllerruntime.Result{Requeue: true}, err
}

// make sure the given execution space has been deleted
existES, err := c.ensureRemoveExecutionSpace(membercluster)
existES, err := c.ensureRemoveExecutionSpace(memberCluster)
if err != nil {
klog.Errorf("Failed to check weather the execution space exist in the given member cluster or not, error is: %v", err)
return err
return controllerruntime.Result{Requeue: true}, err
} else if existES {
return fmt.Errorf("the execution space %v still exist", membercluster.Name)
return controllerruntime.Result{Requeue: true}, fmt.Errorf("requeuing operation until the execution space %v deleted, ", memberCluster.Name)
}

return c.removeFinalizer(membercluster)
return c.removeFinalizer(memberCluster)
}

// removeExecutionSpace delete the given execution space
func (c *Controller) removeExecutionSpace(memberCluster *v1alpha1.MemberCluster) error {
// todo: executionSpace := "karmada-es-" + memberCluster.Name
executionSpace := memberCluster.Name
if err := c.kubeClientSet.CoreV1().Namespaces().Delete(context.TODO(), executionSpace, v1.DeleteOptions{}); err != nil {
if err := c.KubeClientSet.CoreV1().Namespaces().Delete(context.TODO(), executionSpace, v1.DeleteOptions{}); err != nil {
klog.Errorf("Error while deleting namespace %s: %s", executionSpace, err)
return err
}

@@ -319,30 +122,33 @@ func (c *Controller) removeExecutionSpace(memberCluster *v1alpha1.MemberCluster)
func (c *Controller) ensureRemoveExecutionSpace(memberCluster *v1alpha1.MemberCluster) (bool, error) {
// todo: executionSpace := "karmada-es-" + memberCluster.Name
executionSpace := memberCluster.Name
_, err := c.kubeClientSet.CoreV1().Namespaces().Get(context.TODO(), executionSpace, v1.GetOptions{})
_, err := c.KubeClientSet.CoreV1().Namespaces().Get(context.TODO(), executionSpace, v1.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
}
if err != nil {
klog.Infof("Failed to get execution space %v, err is %v ", executionSpace, err)
klog.Errorf("Failed to get execution space %v, err is %v ", executionSpace, err)
return false, err
}
return true, nil
}

func (c *Controller) removeFinalizer(memberCluster *v1alpha1.MemberCluster) error {
func (c *Controller) removeFinalizer(memberCluster *v1alpha1.MemberCluster) (controllerruntime.Result, error) {
accessor, err := meta.Accessor(memberCluster)
if err != nil {
return err
return controllerruntime.Result{Requeue: true}, err
}
finalizers := sets.NewString(accessor.GetFinalizers()...)
if !finalizers.Has(finalizer) {
return nil
return controllerruntime.Result{}, nil
}
finalizers.Delete(finalizer)
accessor.SetFinalizers(finalizers.List())
_, err = c.karmadaClientSet.MemberclusterV1alpha1().MemberClusters(memberCluster.Namespace).Update(context.TODO(), memberCluster, v1.UpdateOptions{})
return err
err = c.Client.Update(context.TODO(), memberCluster)
if err != nil {
return controllerruntime.Result{Requeue: true}, err
}
return controllerruntime.Result{}, nil
}

func (c *Controller) ensureFinalizer(memberCluster *v1alpha1.MemberCluster) (bool, error) {

@@ -356,7 +162,7 @@ func (c *Controller) ensureFinalizer(memberCluster *v1alpha1.MemberCluster) (boo
}
finalizers.Insert(finalizer)
accessor.SetFinalizers(finalizers.List())
_, err = c.karmadaClientSet.MemberclusterV1alpha1().MemberClusters(memberCluster.Namespace).Update(context.TODO(), memberCluster, v1.UpdateOptions{})
err = c.Client.Update(context.TODO(), memberCluster)
return true, err
}

@@ -365,7 +171,7 @@ func (c *Controller) createExecutionSpace(membercluster *v1alpha1.MemberCluster)
// todo: executionSpace := "karmada-es-" + membercluster.Name
executionSpace := membercluster.Name
// create member cluster execution space when member cluster joined
_, err := c.kubeClientSet.CoreV1().Namespaces().Get(context.TODO(), executionSpace, v1.GetOptions{})
_, err := c.KubeClientSet.CoreV1().Namespaces().Get(context.TODO(), executionSpace, v1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
memberclusterES := &corev1.Namespace{

@@ -374,13 +180,13 @@ func (c *Controller) createExecutionSpace(membercluster *v1alpha1.MemberCluster)
Labels: map[string]string{executionSpaceLabelKey: executionSpaceLabelValue},
},
}
_, err = c.kubeClientSet.CoreV1().Namespaces().Create(context.TODO(), memberclusterES, v1.CreateOptions{})
_, err = c.KubeClientSet.CoreV1().Namespaces().Create(context.TODO(), memberclusterES, v1.CreateOptions{})
if err != nil {
klog.Errorf("Failed to create execution space for membercluster %v", membercluster.Name)
return err
}
} else {
klog.V(2).Infof("Could not get %s namespace: %v", executionSpace, err)
klog.Errorf("Could not get %s namespace: %v", executionSpace, err)
return err
}
}
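Both rewritten controllers carry the same finalizer pattern: mutate the object's finalizer set and write the object back through the controller-runtime client instead of the typed karmada clientset. Distilled from the member cluster controller above (simplified; the real methods also translate failures into controllerruntime.Result values and requeue):

// ensureFinalizerSketch is a simplified illustration, not code from this commit.
func ensureFinalizerSketch(ctx context.Context, cl client.Client, memberCluster *v1alpha1.MemberCluster) (bool, error) {
    finalizers := sets.NewString(memberCluster.GetFinalizers()...)
    if finalizers.Has(finalizer) {
        // Already present; nothing to write.
        return false, nil
    }
    finalizers.Insert(finalizer)
    memberCluster.SetFinalizers(finalizers.List())
    // client.Update sends the whole object; a conflict simply causes the
    // reconcile to be retried on the next pass.
    return true, cl.Update(ctx, memberCluster)
}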
@@ -5,11 +5,12 @@ import (
"strings"

v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/huawei-cloudnative/karmada/pkg/apis/membercluster/v1alpha1"
"github.com/huawei-cloudnative/karmada/pkg/controllers/util"
clientset "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned"
)

const (

@@ -24,13 +25,19 @@ const (
clusterOffline = "Offline"
)

func updateIndividualClusterStatus(cluster *v1alpha1.MemberCluster, hostClient clientset.Interface, clusterClient *util.ClusterClient) {
func updateIndividualClusterStatus(cluster *v1alpha1.MemberCluster, hostClient client.Client, kubeClient kubernetes.Interface) {
// create a ClusterClient for the given member cluster
clusterClient, err := util.NewClusterClientSet(cluster, kubeClient)
if err != nil {
return
}

// update the health status of member cluster
currentClusterStatus, err := getMemberClusterHealthStatus(clusterClient)
if err != nil {
klog.Warningf("Failed to get health status of the member cluster: %v, err is : %v", cluster.Name, err)
cluster.Status = *currentClusterStatus
_, err = hostClient.MemberclusterV1alpha1().MemberClusters("karmada-cluster").Update(context.TODO(), cluster, v1.UpdateOptions{})
err = hostClient.Update(context.TODO(), cluster)
if err != nil {
klog.Warningf("Failed to update health status of the member cluster: %v, err is : %v", cluster.Name, err)
return

@@ -39,14 +46,13 @@ func updateIndividualClusterStatus(cluster *v1alpha1.MemberCluster, hostClient c
}

// update the cluster version of member cluster
clusterVersion, err := clusterClient.KubeClient.Discovery().ServerVersion()
currentClusterStatus, err = getKubernetesVersion(currentClusterStatus, clusterClient)
if err != nil {
klog.Warningf("Failed to get server version of the member cluster: %v, err is : %v", cluster.Name, err)
}

currentClusterStatus.KubernetesVersion = clusterVersion.GitVersion
cluster.Status = *currentClusterStatus
_, err = hostClient.MemberclusterV1alpha1().MemberClusters("karmada-cluster").Update(context.TODO(), cluster, v1.UpdateOptions{})
err = hostClient.Update(context.TODO(), cluster)
if err != nil {
klog.Warningf("Failed to update health status of the member cluster: %v, err is : %v", cluster.Name, err)
return

@@ -109,3 +115,13 @@ func getMemberClusterHealthStatus(clusterClient *util.ClusterClient) (*v1alpha1.

return &clusterStatus, err
}

func getKubernetesVersion(currentClusterStatus *v1alpha1.MemberClusterStatus, clusterClient *util.ClusterClient) (*v1alpha1.MemberClusterStatus, error) {
clusterVersion, err := clusterClient.KubeClient.Discovery().ServerVersion()
if err != nil {
return currentClusterStatus, err
}

currentClusterStatus.KubernetesVersion = clusterVersion.GitVersion
return currentClusterStatus, nil
}
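The hunks above also interleave the old and new bodies of updateIndividualClusterStatus. Stripped of logging and early returns, the new flow reads roughly as follows (a sketch in the same package; the real function bails out and writes a partial status on each failure):

// updateStatusSketch is an illustrative condensation, not code from this commit.
func updateStatusSketch(cluster *v1alpha1.MemberCluster, hostClient client.Client, kubeClient kubernetes.Interface) {
    // Build the per-cluster client from the kubeconfig secret referenced by
    // the MemberCluster spec; the caller no longer constructs it.
    clusterClient, err := util.NewClusterClientSet(cluster, kubeClient)
    if err != nil {
        return
    }
    currentStatus, _ := getMemberClusterHealthStatus(clusterClient)
    currentStatus, _ = getKubernetesVersion(currentStatus, clusterClient)

    cluster.Status = *currentStatus
    // The whole MemberCluster object, status included, is written back
    // through the host cluster's controller-runtime client.
    _ = hostClient.Update(context.TODO(), cluster)
}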
@@ -35,8 +35,8 @@ type DynamicClusterClient struct {
}

// NewClusterClientSet returns a ClusterClient for the given member cluster.
func NewClusterClientSet(c *v1alpha1.MemberCluster, client kubeclientset.Interface, namespace string) (*ClusterClient, error) {
clusterConfig, err := buildMemberClusterConfig(c, client, namespace)
func NewClusterClientSet(c *v1alpha1.MemberCluster, client kubeclientset.Interface) (*ClusterClient, error) {
clusterConfig, err := buildMemberClusterConfig(c, client)
if err != nil {
return nil, err
}

@@ -49,8 +49,8 @@ func NewClusterClientSet(c *v1alpha1.MemberCluster, client kubeclientset.Interfa
}

// NewClusterDynamicClientSet returns a dynamic client for the given member cluster.
func NewClusterDynamicClientSet(c *v1alpha1.MemberCluster, client kubeclientset.Interface, namespace string) (*DynamicClusterClient, error) {
clusterConfig, err := buildMemberClusterConfig(c, client, namespace)
func NewClusterDynamicClientSet(c *v1alpha1.MemberCluster, client kubeclientset.Interface) (*DynamicClusterClient, error) {
clusterConfig, err := buildMemberClusterConfig(c, client)
if err != nil {
return nil, err
}

@@ -62,7 +62,7 @@ func NewClusterDynamicClientSet(c *v1alpha1.MemberCluster, client kubeclientset.
return &clusterClientSet, nil
}

func buildMemberClusterConfig(memberCluster *v1alpha1.MemberCluster, client kubeclientset.Interface, namespace string) (*rest.Config, error) {
func buildMemberClusterConfig(memberCluster *v1alpha1.MemberCluster, client kubeclientset.Interface) (*rest.Config, error) {
clusterName := memberCluster.Name
apiEndpoint := memberCluster.Spec.APIEndpoint
if apiEndpoint == "" {

@@ -74,7 +74,7 @@ func buildMemberClusterConfig(memberCluster *v1alpha1.MemberCluster, client kube
return nil, fmt.Errorf("cluster %s does not have a secret name", clusterName)
}

secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
secret, err := client.CoreV1().Secrets(memberCluster.Spec.SecretRef.Namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil {
return nil, err
}
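With the namespace parameter gone, callers of the cluster client helpers pass only the MemberCluster and a kube clientset; buildMemberClusterConfig now resolves the kubeconfig secret from memberCluster.Spec.SecretRef.Namespace itself. A minimal caller-side sketch (the wrapper name is hypothetical):

// clusterDynamicClientFor shows the post-change call shape used by the
// execution controller; it is an illustration, not code from this commit.
func clusterDynamicClientFor(memberCluster *v1alpha1.MemberCluster, kubeClient kubernetes.Interface) (*util.DynamicClusterClient, error) {
    // No separate namespace argument any more; the secret location comes
    // from the MemberCluster spec.
    return util.NewClusterDynamicClientSet(memberCluster, kubeClient)
}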