Do not start an informer to sync status back when applying the Kubernetes resource to the member cluster failed
Signed-off-by: pengli <justdoit.pli@gmail.com>
Parent: 402d5884c9
Commit: b602aaf868
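In short: the work status controller now returns early, before any informer is registered, when the Work's resource has not been successfully applied to the member cluster, and the condition check moves from the execution controller into a shared helper. The following is a minimal sketch of that guard, mirroring the hunks below; the package name, the reconcileWorkStatus function, and the surrounding reconcile plumbing are illustrative stand-ins, not the actual controller code.

package controllers

import (
	controllerruntime "sigs.k8s.io/controller-runtime"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	"github.com/karmada-io/karmada/pkg/util/helper"
)

// reconcileWorkStatus is a hypothetical, stripped-down stand-in for
// WorkStatusController.Reconcile that shows only the new guard: if the Work's
// Applied condition is not true, return early so no informer is started to
// sync status back from the member cluster.
func reconcileWorkStatus(work *workv1alpha1.Work) (controllerruntime.Result, error) {
	if !helper.IsResourceApplied(&work.Status) {
		// The resource was never applied (or the apply failed), so there is
		// no status in the member cluster worth watching yet.
		return controllerruntime.Result{}, nil
	}

	// ... the real controller continues: it derives the cluster name from the
	// Work's namespace and sets up the informer that syncs status back.
	return controllerruntime.Result{}, nil
}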
@@ -18,6 +18,7 @@ import (
 	"github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 	"github.com/karmada-io/karmada/pkg/util"
+	"github.com/karmada-io/karmada/pkg/util/helper"
 	"github.com/karmada-io/karmada/pkg/util/names"
 	"github.com/karmada-io/karmada/pkg/util/objectwatcher"
 	"github.com/karmada-io/karmada/pkg/util/restmapper"
@@ -67,7 +68,7 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques
 	}
 
 	if !work.DeletionTimestamp.IsZero() {
-		applied := c.isResourceApplied(&work.Status)
+		applied := helper.IsResourceApplied(&work.Status)
 		if applied {
 			err := c.tryDeleteWorkload(cluster, work)
 			if err != nil {
@@ -105,18 +106,6 @@ func (c *Controller) syncWork(cluster *v1alpha1.Cluster, work *workv1alpha1.Work
 	return controllerruntime.Result{}, nil
 }
 
-// isResourceApplied checking weather resource has been dispatched to member cluster or not
-func (c *Controller) isResourceApplied(workStatus *workv1alpha1.WorkStatus) bool {
-	for _, condition := range workStatus.Conditions {
-		if condition.Type == workv1alpha1.WorkApplied {
-			if condition.Status == metav1.ConditionTrue {
-				return true
-			}
-		}
-	}
-	return false
-}
-
 // tryDeleteWorkload tries to delete resource in the given member cluster.
 // Abort deleting when the member cluster is unready, otherwise we can't unjoin the member cluster when the member cluster is unready
 func (c *Controller) tryDeleteWorkload(cluster *v1alpha1.Cluster, work *workv1alpha1.Work) error {
@@ -173,7 +162,7 @@ func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, work *workv1alpha
 		return err
 	}
 
-	applied := c.isResourceApplied(&work.Status)
+	applied := helper.IsResourceApplied(&work.Status)
 	if applied {
 		err = c.tryUpdateWorkload(cluster, workload, clusterDynamicClient)
 		if err != nil {
@@ -68,6 +68,10 @@ func (c *WorkStatusController) Reconcile(ctx context.Context, req controllerrunt
 		return controllerruntime.Result{}, nil
 	}
 
+	if !helper.IsResourceApplied(&work.Status) {
+		return controllerruntime.Result{}, nil
+	}
+
 	clusterName, err := names.GetClusterName(work.GetNamespace())
 	if err != nil {
 		klog.Errorf("Failed to get member cluster name by %s. Error: %v.", work.GetNamespace(), err)
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"reflect"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -146,3 +147,15 @@ func equalIdentifier(targetIdentifier *workv1alpha1.ResourceIdentifier, ordinal
 
 	return false, nil
 }
+
+// IsResourceApplied checks whether the resource has been dispatched to the member cluster or not
+func IsResourceApplied(workStatus *workv1alpha1.WorkStatus) bool {
+	for _, condition := range workStatus.Conditions {
+		if condition.Type == workv1alpha1.WorkApplied {
+			if condition.Status == metav1.ConditionTrue {
+				return true
+			}
+		}
+	}
+	return false
+}
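For completeness, a minimal table-driven test sketch for the new helper. It assumes, as the added metav1 import and the condition comparison in the hunk above suggest, that WorkStatus.Conditions is a []metav1.Condition; the file placement and case names are illustrative only.

package helper

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
)

// TestIsResourceApplied exercises the three interesting shapes of WorkStatus:
// no conditions at all, an Applied condition that is true, and an Applied
// condition that is false.
func TestIsResourceApplied(t *testing.T) {
	tests := []struct {
		name   string
		status workv1alpha1.WorkStatus
		want   bool
	}{
		{
			name:   "no conditions means not applied",
			status: workv1alpha1.WorkStatus{},
			want:   false,
		},
		{
			name: "applied condition true",
			status: workv1alpha1.WorkStatus{
				Conditions: []metav1.Condition{
					{Type: workv1alpha1.WorkApplied, Status: metav1.ConditionTrue},
				},
			},
			want: true,
		},
		{
			name: "applied condition false",
			status: workv1alpha1.WorkStatus{
				Conditions: []metav1.Condition{
					{Type: workv1alpha1.WorkApplied, Status: metav1.ConditionFalse},
				},
			},
			want: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := IsResourceApplied(&tt.status); got != tt.want {
				t.Errorf("IsResourceApplied() = %v, want %v", got, tt.want)
			}
		})
	}
}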