Merge pull request #746 from dddddai/get-member-workload-from-cache

Get member workload from cache instead of api server

commit d625a87783
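Before this change, every attempt to update a propagated workload issued a live GET against the member cluster's API server; after it, the execution controller answers that read from a synced informer cache, removing one network round trip per reconcile and shifting read load off member API servers. A minimal sketch of the two read paths, written against plain client-go rather than the karmada helpers (function names here are illustrative):

// Sketch only: the two read paths, in plain client-go rather than the
// karmada helpers. Function names are illustrative.
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
)

// Old path: one round trip to the member cluster's API server per call.
func getViaAPIServer(client dynamic.Interface, gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) {
	return client.Resource(gvr).Namespace(ns).Get(context.TODO(), name, metav1.GetOptions{})
}

// New path: serve the read from the informer's local store; after the
// initial List/Watch, the lookup costs no network traffic.
func getViaCache(factory dynamicinformer.DynamicSharedInformerFactory, gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) {
	return factory.ForResource(gvr).Lister().ByNamespace(ns).Get(name)
}

The usual informer trade-off applies: the cache can briefly lag the API server, but a stale read only delays convergence until the next watch event triggers another reconcile.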
@@ -121,7 +121,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 		RESTMapper:           mgr.GetRESTMapper(),
 		ObjectWatcher:        objectWatcher,
 		PredicateFunc:        helper.NewExecutionPredicateOnAgent(),
-		ClusterClientSetFunc: util.NewClusterDynamicClientSetForAgent,
+		InformerManager:      informermanager.GetInstance(),
 	}
 	if err := executionController.SetupWithManager(mgr); err != nil {
 		klog.Fatalf("Failed to setup execution controller: %v", err)
@@ -227,7 +227,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 		RESTMapper:           mgr.GetRESTMapper(),
 		ObjectWatcher:        objectWatcher,
 		PredicateFunc:        helper.NewExecutionPredicate(mgr),
-		ClusterClientSetFunc: util.NewClusterDynamicClientSet,
+		InformerManager:      informermanager.GetInstance(),
 	}
 	if err := executionController.SetupWithManager(mgr); err != nil {
 		klog.Fatalf("Failed to setup execution controller: %v", err)
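Both the agent and the controller-manager wire the same informermanager.GetInstance() into the controller, so the execution controller shares per-cluster informers (and their caches) with the other controllers in the process instead of opening its own watches. The sketch below shows the sync.Once singleton shape that accessor name implies; it is an assumption about the pattern, not the karmada implementation:

// Assumption about the pattern behind informermanager.GetInstance(),
// not the karmada implementation: a sync.Once-guarded, process-wide singleton.
package sketch

import "sync"

type MultiClusterInformerManager struct {
	// per-cluster informer factories would live here, keyed by cluster name.
}

var (
	instance *MultiClusterInformerManager
	once     sync.Once
)

// GetInstance hands every caller the same manager, so all controllers in the
// process reuse one set of per-cluster informers and caches.
func GetInstance() *MultiClusterInformerManager {
	once.Do(func() { instance = &MultiClusterInformerManager{} })
	return instance
}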
@@ -20,9 +20,10 @@ import (
 	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/helper"
+	"github.com/karmada-io/karmada/pkg/util/informermanager"
+	"github.com/karmada-io/karmada/pkg/util/informermanager/keys"
 	"github.com/karmada-io/karmada/pkg/util/names"
 	"github.com/karmada-io/karmada/pkg/util/objectwatcher"
-	"github.com/karmada-io/karmada/pkg/util/restmapper"
 )

 const (
@@ -37,7 +38,7 @@ type Controller struct {
 	RESTMapper           meta.RESTMapper
 	ObjectWatcher        objectwatcher.ObjectWatcher
 	PredicateFunc        predicate.Predicate
-	ClusterClientSetFunc func(c *clusterv1alpha1.Cluster, client client.Client) (*util.DynamicClusterClient, error)
+	InformerManager      informermanager.MultiClusterInformerManager
 }

 // Reconcile performs a full reconciliation for the object referred to by the Request.
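The struct change trades a per-cluster client factory (ClusterClientSetFunc) for a handle on the shared informers. For this controller's read path, such a manager only needs to expose cache-backed listers per member cluster; the method names below are assumptions, and the real informermanager interface is richer:

// Assumed method shapes only; the real informermanager interface is richer.
package sketch

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/tools/cache"
)

// SingleClusterInformerManager hands out listers backed by one member
// cluster's local informer stores.
type SingleClusterInformerManager interface {
	Lister(gvr schema.GroupVersionResource) cache.GenericLister
}

// MultiClusterInformerManager fans out to the per-cluster managers.
type MultiClusterInformerManager interface {
	GetSingleClusterManager(cluster string) SingleClusterInformerManager
}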
@@ -150,11 +151,6 @@ func (c *Controller) removeFinalizer(work *workv1alpha1.Work) (controllerruntime

 // syncToClusters ensures that the state of the given object is synchronized to member clusters.
 func (c *Controller) syncToClusters(cluster *clusterv1alpha1.Cluster, work *workv1alpha1.Work) error {
-	clusterDynamicClient, err := c.ClusterClientSetFunc(cluster, c.Client)
-	if err != nil {
-		return err
-	}
-
 	var errs []error
 	syncSucceedNum := 0
 	for _, manifest := range work.Spec.Workload.Manifests {
@@ -168,7 +164,7 @@ func (c *Controller) syncToClusters(cluster *clusterv1alpha1.Cluster, work *work

 		applied := helper.IsResourceApplied(&work.Status)
 		if applied {
-			err = c.tryUpdateWorkload(cluster, workload, clusterDynamicClient)
+			err = c.tryUpdateWorkload(cluster, workload)
 			if err != nil {
 				klog.Errorf("Failed to update resource(%v/%v) in the given member cluster %s, err is %v", workload.GetNamespace(), workload.GetName(), cluster.Name, err)
 				errs = append(errs, err)
@@ -188,7 +184,7 @@ func (c *Controller) syncToClusters(cluster *clusterv1alpha1.Cluster, work *work
 	if len(errs) > 0 {
 		total := len(work.Spec.Workload.Manifests)
 		message := fmt.Sprintf("Failed to apply all manifests (%v/%v): %v", syncSucceedNum, total, errors.NewAggregate(errs).Error())
-		err = c.updateAppliedCondition(work, metav1.ConditionFalse, "AppliedFailed", message)
+		err := c.updateAppliedCondition(work, metav1.ConditionFalse, "AppliedFailed", message)
 		if err != nil {
 			klog.Errorf("Failed to update applied status for given work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
 			errs = append(errs, err)
|
@ -196,7 +192,7 @@ func (c *Controller) syncToClusters(cluster *clusterv1alpha1.Cluster, work *work
|
|||
return errors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
err = c.updateAppliedCondition(work, metav1.ConditionTrue, "AppliedSuccessful", "Manifest has been successfully applied")
|
||||
err := c.updateAppliedCondition(work, metav1.ConditionTrue, "AppliedSuccessful", "Manifest has been successfully applied")
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to update applied status for given work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
|
||||
return err
|
||||
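The two err = to err := flips in the hunks above are forced by Go scoping rather than any behavior change: the only function-scope declaration of err in syncToClusters was the deleted clusterDynamicClient, err := ... line (the err = inside the manifest loop evidently binds to a loop-local err), so the two calls after the loop must now declare their own. In miniature:

// Why the "=" to ":=" flips are needed: deleting the line that declared err
// leaves plain assignments without a declaration, which does not compile.
package sketch

func doWork() error { return nil }

func example() error {
	// Previously an earlier line (clusterDynamicClient, err := ...) declared
	// err in this scope. With it gone, "err = doWork()" would fail with
	// "undefined: err", so the call site becomes a short variable declaration:
	err := doWork()
	return err
}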
|
@ -205,15 +201,14 @@ func (c *Controller) syncToClusters(cluster *clusterv1alpha1.Cluster, work *work
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) tryUpdateWorkload(cluster *clusterv1alpha1.Cluster, workload *unstructured.Unstructured, clusterDynamicClient *util.DynamicClusterClient) error {
|
||||
// todo: get clusterObj from cache
|
||||
dynamicResource, err := restmapper.GetGroupVersionResource(c.RESTMapper, workload.GroupVersionKind())
|
||||
func (c *Controller) tryUpdateWorkload(cluster *clusterv1alpha1.Cluster, workload *unstructured.Unstructured) error {
|
||||
fedKey, err := keys.FederatedKeyFunc(cluster.Name, workload)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get resource(%s/%s) as mapping GVK to GVR failed: %v", workload.GetNamespace(), workload.GetName(), err)
|
||||
klog.Errorf("Failed to get FederatedKey %s, error: %v", workload.GetName(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
clusterObj, err := clusterDynamicClient.DynamicClientSet.Resource(dynamicResource).Namespace(workload.GetNamespace()).Get(context.TODO(), workload.GetName(), metav1.GetOptions{})
|
||||
clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
|
||||
if err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("Failed to get resource %v from member cluster, err is %v ", workload.GetName(), err)
|
||||
|
|
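This hunk is the heart of the PR: tryUpdateWorkload no longer needs a per-cluster dynamic client, because it derives a cluster-scoped key for the workload and reads the object from the shared cache, which also resolves the old "todo: get clusterObj from cache". keys.FederatedKeyFunc and helper.GetObjectFromCache are the real karmada helpers; the sketch below is only an illustration, on plain client-go, of what such a lookup has to do (the field names and the factory map are assumed):

// Illustration on plain client-go of what a cache lookup keyed by
// (cluster, GVK, namespace, name) has to do; field and map layouts are assumed.
package sketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic/dynamicinformer"
)

// federatedKey scopes an object reference to one member cluster.
type federatedKey struct {
	Cluster   string
	GVK       schema.GroupVersionKind
	Namespace string
	Name      string
}

// getFromCache maps GVK to GVR via the RESTMapper, picks the informer factory
// for the key's cluster, and serves the read from its local store.
func getFromCache(mapper meta.RESTMapper, factories map[string]dynamicinformer.DynamicSharedInformerFactory, key federatedKey) (runtime.Object, error) {
	mapping, err := mapper.RESTMapping(key.GVK.GroupKind(), key.GVK.Version)
	if err != nil {
		return nil, err // unknown GVK
	}
	factory, ok := factories[key.Cluster]
	if !ok {
		return nil, fmt.Errorf("no informer started for cluster %q", key.Cluster)
	}
	return factory.ForResource(mapping.Resource).Lister().ByNamespace(key.Namespace).Get(key.Name)
}

A convenient property of lister-backed reads is that a cache miss surfaces as a standard NotFound API error, which is why the apierrors.IsNotFound(err) branch in the hunk above keeps working unchanged.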