fix: allow suspended work to get deleted
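
Previously, Reconcile evaluated the dispatch-suspension check before the deletion handling, so a Work with spec.suspendDispatching set to true returned early even when it carried a deletion timestamp, and its finalizer was never removed. Move the suspension check below the DeletionTimestamp branch so that suspended Work objects can still be finalized and deleted; suspension now only skips the regular sync path.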

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
Author: Amir Alavi <amiralavi7@gmail.com>
Date:   2024-08-14 23:55:55 -04:00
Parent: d18afa1579
Commit: b27e669f48
2 changed files with 40 additions and 14 deletions


@@ -101,16 +101,6 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques
         return controllerruntime.Result{}, err
     }
 
-    if err := c.updateWorkDispatchingConditionIfNeeded(ctx, work); err != nil {
-        klog.Errorf("Failed to update work condition type %s. err is %v", workv1alpha1.WorkDispatching, err)
-        return controllerruntime.Result{}, err
-    }
-
-    if helper.IsWorkSuspendDispatching(work) {
-        klog.V(4).Infof("Skip syncing work(%s/%s) for cluster(%s) as work dispatch is suspended.", work.Namespace, work.Name, cluster.Name)
-        return controllerruntime.Result{}, nil
-    }
-
     if !work.DeletionTimestamp.IsZero() {
         // Abort deleting workload if cluster is unready when unjoining cluster, otherwise the unjoin process will be failed.
         if util.IsClusterReady(&cluster.Status) {
@@ -126,6 +116,16 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques
         return c.removeFinalizer(ctx, work)
     }
 
+    if err := c.updateWorkDispatchingConditionIfNeeded(ctx, work); err != nil {
+        klog.Errorf("Failed to update work condition type %s. err is %v", workv1alpha1.WorkDispatching, err)
+        return controllerruntime.Result{}, err
+    }
+
+    if helper.IsWorkSuspendDispatching(work) {
+        klog.V(4).Infof("Skip syncing work(%s/%s) for cluster(%s) as work dispatch is suspended.", work.Namespace, work.Name, cluster.Name)
+        return controllerruntime.Result{}, nil
+    }
+
     if !util.IsClusterReady(&cluster.Status) {
         klog.Errorf("Stop syncing the work(%s/%s) for the cluster(%s) as cluster not ready.", work.Namespace, work.Name, cluster.Name)
         return controllerruntime.Result{}, fmt.Errorf("cluster(%s) not ready", cluster.Name)

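For reference, a minimal sketch of the suspension predicate consulted above. The real helper.IsWorkSuspendDispatching is not part of this diff; the sketch assumes it simply dereferences spec.suspendDispatching with a default of false, which is consistent with the new test case setting the field via ptr.To(true).

    // Hedged sketch: assumes helper.IsWorkSuspendDispatching is equivalent to
    // dereferencing the optional SuspendDispatching field, defaulting to false.
    package main

    import (
        "fmt"

        "k8s.io/utils/ptr"

        workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
    )

    func isWorkSuspendDispatching(work *workv1alpha1.Work) bool {
        return ptr.Deref(work.Spec.SuspendDispatching, false)
    }

    func main() {
        work := &workv1alpha1.Work{}
        work.Spec.SuspendDispatching = ptr.To(true)
        fmt.Println(isWorkSuspendDispatching(work)) // prints: true
    }

With the reordering above, this predicate is evaluated only after deletion has been handled, so a suspended Work that is being deleted no longer returns before its finalizer is removed.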

@@ -25,8 +25,11 @@ import (
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/uuid"
+    dynamicfake "k8s.io/client-go/dynamic/fake"
+    "k8s.io/client-go/kubernetes/scheme"
     "k8s.io/client-go/tools/record"
     "k8s.io/utils/ptr"
     controllerruntime "sigs.k8s.io/controller-runtime"
@@ -35,9 +38,10 @@ import (
     clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
     workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
     "github.com/karmada-io/karmada/pkg/events"
+    "github.com/karmada-io/karmada/pkg/util"
     "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
     "github.com/karmada-io/karmada/pkg/util/gclient"
-    "github.com/karmada-io/karmada/pkg/util/helper"
+    "github.com/karmada-io/karmada/pkg/util/objectwatcher"
     testhelper "github.com/karmada-io/karmada/test/helper"
 )
@@ -96,6 +100,18 @@ func TestExecutionController_Reconcile(t *testing.T) {
                })
            }),
        },
+        {
+            name:      "suspend work with deletion timestamp is deleted",
+            ns:        "karmada-es-cluster",
+            expectRes: controllerruntime.Result{},
+            existErr:  false,
+            work: newWork(func(work *workv1alpha1.Work) {
+                now := metav1.Now()
+                work.SetDeletionTimestamp(&now)
+                work.SetFinalizers([]string{util.ExecutionControllerFinalizer})
+                work.Spec.SuspendDispatching = ptr.To(true)
+            }),
+        },
     }
 
     for _, tt := range tests {
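The new table entry above covers the fixed behaviour: a Work in the karmada-es-cluster execution namespace that carries a deletion timestamp, the execution-controller finalizer, and SuspendDispatching set to true is expected to reconcile without error and with an empty Result, i.e. suspension no longer blocks its deletion.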
@@ -133,11 +149,21 @@ func TestExecutionController_Reconcile(t *testing.T) {
 func newController(work *workv1alpha1.Work, eventRecorder *record.FakeRecorder) Controller {
     cluster := newCluster("cluster", clusterv1alpha1.ClusterConditionReady, metav1.ConditionTrue)
+    pod := testhelper.NewPod("default", "test")
+    client := fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster, work, pod).WithStatusSubresource(work).Build()
+    restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion})
+    restMapper.Add(corev1.SchemeGroupVersion.WithKind(pod.Kind), meta.RESTScopeNamespace)
+    dynamicClientSet := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, pod)
+    informerManager := genericmanager.GetInstance()
+    informerManager.ForCluster(cluster.Name, dynamicClientSet, 0).Lister(corev1.SchemeGroupVersion.WithResource("pods"))
+    informerManager.Start(cluster.Name)
+    informerManager.WaitForCacheSync(cluster.Name)
     return Controller{
-        Client:          fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster, work).WithStatusSubresource(work).Build(),
-        InformerManager: genericmanager.GetInstance(),
-        PredicateFunc:   helper.NewClusterPredicateOnAgent("test"),
+        Client:          client,
+        InformerManager: informerManager,
         EventRecorder:   eventRecorder,
+        RESTMapper:      restMapper,
+        ObjectWatcher:   objectwatcher.NewObjectWatcher(client, restMapper, util.NewClusterDynamicClientSetForAgent, nil),
     }
 }
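A note on the newController changes above: since deletion of a suspended Work now reaches the workload-cleanup path instead of returning early, the test controller needs more plumbing, a fake client that also holds the pod backing the Work, a REST mapper and fake dynamic client for pods, a started informer manager for the cluster, and an ObjectWatcher, presumably so that the delete path can run end to end against the fakes.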