Merge pull request #4838 from a7i/work-suspend
feat: cluster-level resource propagation pause and resume capabilities
commit 920baceafd
@@ -19118,6 +19118,10 @@
         "schedulerName": {
           "description": "SchedulerName represents which scheduler to proceed the scheduling. If specified, the policy will be dispatched by specified scheduler. If not specified, the policy will be dispatched by default scheduler.",
           "type": "string"
         },
+        "suspension": {
+          "description": "Suspension declares the policy for suspending different aspects of propagation. nil means no suspension. no default values.",
+          "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.Suspension"
+        }
       }
     },
@@ -19256,6 +19260,34 @@
         }
       }
     },
+    "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.SuspendClusters": {
+      "description": "SuspendClusters represents a group of clusters that should be suspended from propagating. Note: No plan to introduce the label selector or field selector to select clusters yet, as it would make the system unpredictable.",
+      "type": "object",
+      "properties": {
+        "clusterNames": {
+          "description": "ClusterNames is the list of clusters to be selected.",
+          "type": "array",
+          "items": {
+            "type": "string",
+            "default": ""
+          }
+        }
+      }
+    },
+    "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.Suspension": {
+      "description": "Suspension defines the policy for suspending different aspects of propagation.",
+      "type": "object",
+      "properties": {
+        "dispatching": {
+          "description": "Dispatching controls whether dispatching should be suspended. nil means not suspend, no default value, only accepts 'true'. Note: true means stop propagating to all clusters. Can not co-exist with DispatchingOnClusters which is used to suspend particular clusters.",
+          "type": "boolean"
+        },
+        "dispatchingOnClusters": {
+          "description": "DispatchingOnClusters declares a list of clusters to which the dispatching should be suspended. Note: Can not co-exist with Dispatching which is used to suspend all.",
+          "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.SuspendClusters"
+        }
+      }
+    },
     "com.github.karmada-io.karmada.pkg.apis.remedy.v1alpha1.ClusterAffinity": {
       "description": "ClusterAffinity represents the filter to select clusters.",
       "type": "object",
@@ -19720,6 +19752,10 @@
       "description": "WorkSpec defines the desired state of Work.",
       "type": "object",
       "properties": {
+        "suspendDispatching": {
+          "description": "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to all clusters.",
+          "type": "boolean"
+        },
         "workload": {
           "description": "Workload represents the manifest workload to be deployed on managed cluster.",
           "default": {},
@@ -20171,6 +20207,10 @@
         "schedulerName": {
           "description": "SchedulerName represents which scheduler to proceed the scheduling. It inherits directly from the associated PropagationPolicy(or ClusterPropagationPolicy).",
           "type": "string"
         },
+        "suspension": {
+          "description": "Suspension declares the policy for suspending different aspects of propagation. nil means no suspension. no default values.",
+          "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.Suspension"
+        }
       }
     },

@@ -806,6 +806,31 @@ spec:
                 If specified, the policy will be dispatched by specified scheduler.
                 If not specified, the policy will be dispatched by default scheduler.
               type: string
+            suspension:
+              description: |-
+                Suspension declares the policy for suspending different aspects of propagation.
+                nil means no suspension. no default values.
+              properties:
+                dispatching:
+                  description: |-
+                    Dispatching controls whether dispatching should be suspended.
+                    nil means not suspend, no default value, only accepts 'true'.
+                    Note: true means stop propagating to all clusters. Can not co-exist
+                    with DispatchingOnClusters which is used to suspend particular clusters.
+                  type: boolean
+                dispatchingOnClusters:
+                  description: |-
+                    DispatchingOnClusters declares a list of clusters to which the dispatching
+                    should be suspended.
+                    Note: Can not co-exist with Dispatching which is used to suspend all.
+                  properties:
+                    clusterNames:
+                      description: ClusterNames is the list of clusters to be selected.
+                      items:
+                        type: string
+                      type: array
+                  type: object
+              type: object
           required:
           - resourceSelectors
           type: object

@@ -803,6 +803,31 @@ spec:
                 If specified, the policy will be dispatched by specified scheduler.
                 If not specified, the policy will be dispatched by default scheduler.
               type: string
+            suspension:
+              description: |-
+                Suspension declares the policy for suspending different aspects of propagation.
+                nil means no suspension. no default values.
+              properties:
+                dispatching:
+                  description: |-
+                    Dispatching controls whether dispatching should be suspended.
+                    nil means not suspend, no default value, only accepts 'true'.
+                    Note: true means stop propagating to all clusters. Can not co-exist
+                    with DispatchingOnClusters which is used to suspend particular clusters.
+                  type: boolean
+                dispatchingOnClusters:
+                  description: |-
+                    DispatchingOnClusters declares a list of clusters to which the dispatching
+                    should be suspended.
+                    Note: Can not co-exist with Dispatching which is used to suspend all.
+                  properties:
+                    clusterNames:
+                      description: ClusterNames is the list of clusters to be selected.
+                      items:
+                        type: string
+                      type: array
+                  type: object
+              type: object
           required:
           - resourceSelectors
           type: object

@@ -1187,6 +1187,31 @@ spec:
                 SchedulerName represents which scheduler to proceed the scheduling.
                 It inherits directly from the associated PropagationPolicy(or ClusterPropagationPolicy).
               type: string
+            suspension:
+              description: |-
+                Suspension declares the policy for suspending different aspects of propagation.
+                nil means no suspension. no default values.
+              properties:
+                dispatching:
+                  description: |-
+                    Dispatching controls whether dispatching should be suspended.
+                    nil means not suspend, no default value, only accepts 'true'.
+                    Note: true means stop propagating to all clusters. Can not co-exist
+                    with DispatchingOnClusters which is used to suspend particular clusters.
+                  type: boolean
+                dispatchingOnClusters:
+                  description: |-
+                    DispatchingOnClusters declares a list of clusters to which the dispatching
+                    should be suspended.
+                    Note: Can not co-exist with Dispatching which is used to suspend all.
+                  properties:
+                    clusterNames:
+                      description: ClusterNames is the list of clusters to be selected.
+                      items:
+                        type: string
+                      type: array
+                  type: object
+              type: object
           required:
           - resource
           type: object

@@ -1187,6 +1187,31 @@ spec:
                 SchedulerName represents which scheduler to proceed the scheduling.
                 It inherits directly from the associated PropagationPolicy(or ClusterPropagationPolicy).
               type: string
+            suspension:
+              description: |-
+                Suspension declares the policy for suspending different aspects of propagation.
+                nil means no suspension. no default values.
+              properties:
+                dispatching:
+                  description: |-
+                    Dispatching controls whether dispatching should be suspended.
+                    nil means not suspend, no default value, only accepts 'true'.
+                    Note: true means stop propagating to all clusters. Can not co-exist
+                    with DispatchingOnClusters which is used to suspend particular clusters.
+                  type: boolean
+                dispatchingOnClusters:
+                  description: |-
+                    DispatchingOnClusters declares a list of clusters to which the dispatching
+                    should be suspended.
+                    Note: Can not co-exist with Dispatching which is used to suspend all.
+                  properties:
+                    clusterNames:
+                      description: ClusterNames is the list of clusters to be selected.
+                      items:
+                        type: string
+                      type: array
+                  type: object
+              type: object
           required:
           - resource
           type: object

@@ -54,6 +54,12 @@ spec:
       spec:
         description: Spec represents the desired behavior of Work.
         properties:
+          suspendDispatching:
+            description: |-
+              SuspendDispatching controls whether dispatching should
+              be suspended, nil means not suspend.
+              Note: true means stop propagating to all clusters.
+            type: boolean
           workload:
             description: Workload represents the manifest workload to be deployed
               on managed cluster.

@@ -176,6 +176,11 @@ type PropagationSpec struct {
 	// +kubebuilder:validation:Enum=Lazy
 	// +optional
 	ActivationPreference ActivationPreference `json:"activationPreference,omitempty"`
+
+	// Suspension declares the policy for suspending different aspects of propagation.
+	// nil means no suspension. no default values.
+	// +optional
+	Suspension *Suspension `json:"suspension,omitempty"`
 }

 // ResourceSelector the resources will be selected.
@@ -210,6 +215,31 @@ type FieldSelector struct {
 	MatchExpressions []corev1.NodeSelectorRequirement `json:"matchExpressions,omitempty"`
 }

+// Suspension defines the policy for suspending different aspects of propagation.
+type Suspension struct {
+	// Dispatching controls whether dispatching should be suspended.
+	// nil means not suspend, no default value, only accepts 'true'.
+	// Note: true means stop propagating to all clusters. Can not co-exist
+	// with DispatchingOnClusters which is used to suspend particular clusters.
+	// +optional
+	Dispatching *bool `json:"dispatching,omitempty"`
+
+	// DispatchingOnClusters declares a list of clusters to which the dispatching
+	// should be suspended.
+	// Note: Can not co-exist with Dispatching which is used to suspend all.
+	// +optional
+	DispatchingOnClusters *SuspendClusters `json:"dispatchingOnClusters,omitempty"`
+}
+
+// SuspendClusters represents a group of clusters that should be suspended from propagating.
+// Note: No plan to introduce the label selector or field selector to select clusters yet, as it
+// would make the system unpredictable.
+type SuspendClusters struct {
+	// ClusterNames is the list of clusters to be selected.
+	// +optional
+	ClusterNames []string `json:"clusterNames,omitempty"`
+}
+
 // PurgeMode represents that how to deal with the legacy applications on the
 // cluster from which the application is migrated.
 type PurgeMode string

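Taken together, the two fields above give a policy author an all-or-nothing switch (Dispatching) and a per-cluster list (DispatchingOnClusters) that cannot be combined. A minimal illustrative sketch of composing the new types in Go (not code from this commit; the cluster name is hypothetical):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

func main() {
	// Suspend dispatching to every cluster; only 'true' is accepted,
	// and nil means "not suspended".
	all := &policyv1alpha1.Suspension{Dispatching: ptr.To(true)}

	// Suspend dispatching only to the named clusters. Must not be
	// combined with Dispatching above; the two cannot co-exist.
	some := &policyv1alpha1.Suspension{
		DispatchingOnClusters: &policyv1alpha1.SuspendClusters{
			ClusterNames: []string{"member1"}, // hypothetical cluster name
		},
	}

	fmt.Println(all, some) // a PropagationSpec would carry exactly one of these
}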
@@ -838,6 +838,11 @@ func (in *PropagationSpec) DeepCopyInto(out *PropagationSpec) {
 		*out = new(FailoverBehavior)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.Suspension != nil {
+		in, out := &in.Suspension, &out.Suspension
+		*out = new(Suspension)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }

@@ -970,3 +975,50 @@ func (in *StaticClusterWeight) DeepCopy() *StaticClusterWeight {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SuspendClusters) DeepCopyInto(out *SuspendClusters) {
+	*out = *in
+	if in.ClusterNames != nil {
+		in, out := &in.ClusterNames, &out.ClusterNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuspendClusters.
+func (in *SuspendClusters) DeepCopy() *SuspendClusters {
+	if in == nil {
+		return nil
+	}
+	out := new(SuspendClusters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Suspension) DeepCopyInto(out *Suspension) {
+	*out = *in
+	if in.Dispatching != nil {
+		in, out := &in.Dispatching, &out.Dispatching
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DispatchingOnClusters != nil {
+		in, out := &in.DispatchingOnClusters, &out.DispatchingOnClusters
+		*out = new(SuspendClusters)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Suspension.
+func (in *Suspension) DeepCopy() *Suspension {
+	if in == nil {
+		return nil
+	}
+	out := new(Suspension)
+	in.DeepCopyInto(out)
+	return out
+}

@@ -57,6 +57,12 @@ type Work struct {
 type WorkSpec struct {
 	// Workload represents the manifest workload to be deployed on managed cluster.
 	Workload WorkloadTemplate `json:"workload,omitempty"`
+
+	// SuspendDispatching controls whether dispatching should
+	// be suspended, nil means not suspend.
+	// Note: true means stop propagating to all clusters.
+	// +optional
+	SuspendDispatching *bool `json:"suspendDispatching,omitempty"`
 }

 // WorkloadTemplate represents the manifest workload to be deployed on managed cluster.
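Because SuspendDispatching is a nilable pointer, external tooling can pause and later resume a single Work by toggling the field. A rough sketch, assuming the generated clientset under pkg/generated/clientset/versioned; the Get call mirrors the e2e tests later in this diff, the Update call is assumed to follow the usual generated-clientset shape, and the namespace and name are hypothetical:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

// toggleDispatch pauses (suspend=true) or resumes (suspend=false) one Work
// living in a member cluster's execution space.
func toggleDispatch(ctx context.Context, c karmadaclientset.Interface, suspend bool) error {
	work, err := c.WorkV1alpha1().Works("karmada-es-member1").Get(ctx, "demo-work", metav1.GetOptions{})
	if err != nil {
		return err
	}
	work.Spec.SuspendDispatching = ptr.To(suspend) // nil would also mean "not suspended"
	_, err = c.WorkV1alpha1().Works("karmada-es-member1").Update(ctx, work, metav1.UpdateOptions{})
	return err
}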
@@ -381,6 +381,11 @@ func (in *WorkList) DeepCopyObject() runtime.Object {
 func (in *WorkSpec) DeepCopyInto(out *WorkSpec) {
 	*out = *in
 	in.Workload.DeepCopyInto(&out.Workload)
+	if in.SuspendDispatching != nil {
+		in, out := &in.SuspendDispatching, &out.SuspendDispatching
+		*out = new(bool)
+		**out = **in
+	}
 	return
 }

@@ -146,6 +146,11 @@ type ResourceBindingSpec struct {
 	// It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC.
 	// +optional
 	RescheduleTriggeredAt *metav1.Time `json:"rescheduleTriggeredAt,omitempty"`
+
+	// Suspension declares the policy for suspending different aspects of propagation.
+	// nil means no suspension. no default values.
+	// +optional
+	Suspension *policyv1alpha1.Suspension `json:"suspension,omitempty"`
 }

 // ObjectReference contains enough information to locate the referenced object inside current cluster.
@@ -348,6 +348,11 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) {
 		in, out := &in.RescheduleTriggeredAt, &out.RescheduleTriggeredAt
 		*out = (*in).DeepCopy()
 	}
+	if in.Suspension != nil {
+		in, out := &in.Suspension, &out.Suspension
+		*out = new(v1alpha1.Suspension)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }

@@ -23,6 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
@@ -45,6 +46,7 @@ func ensureWork(
 	var requiredByBindingSnapshot []workv1alpha2.BindingSnapshot
 	var replicas int32
 	var conflictResolutionInBinding policyv1alpha1.ConflictResolution
+	var suspension *policyv1alpha1.Suspension
 	switch scope {
 	case apiextensionsv1.NamespaceScoped:
 		bindingObj := binding.(*workv1alpha2.ResourceBinding)
@@ -53,6 +55,7 @@ func ensureWork(
 		placement = bindingObj.Spec.Placement
 		replicas = bindingObj.Spec.Replicas
 		conflictResolutionInBinding = bindingObj.Spec.ConflictResolution
+		suspension = bindingObj.Spec.Suspension
 	case apiextensionsv1.ClusterScoped:
 		bindingObj := binding.(*workv1alpha2.ClusterResourceBinding)
 		targetClusters = bindingObj.Spec.Clusters
@@ -60,6 +63,7 @@ func ensureWork(
 		placement = bindingObj.Spec.Placement
 		replicas = bindingObj.Spec.Replicas
 		conflictResolutionInBinding = bindingObj.Spec.ConflictResolution
+		suspension = bindingObj.Spec.Suspension
 	}

 	targetClusters = mergeTargetClusters(targetClusters, requiredByBindingSnapshot)
@@ -128,7 +132,9 @@ func ensureWork(
 			Annotations: annotations,
 		}

-		if err = helper.CreateOrUpdateWork(c, workMeta, clonedWorkload); err != nil {
+		suspendDispatching := shouldSuspendDispatching(suspension, targetCluster)
+
+		if err = helper.CreateOrUpdateWork(c, workMeta, clonedWorkload, &suspendDispatching); err != nil {
 			return err
 		}
 	}
@@ -260,3 +266,21 @@ func divideReplicasByJobCompletions(workload *unstructured.Unstructured, cluster
 func needReviseReplicas(replicas int32, placement *policyv1alpha1.Placement) bool {
 	return replicas > 0 && placement != nil && placement.ReplicaSchedulingType() == policyv1alpha1.ReplicaSchedulingTypeDivided
 }
+
+func shouldSuspendDispatching(suspension *policyv1alpha1.Suspension, targetCluster workv1alpha2.TargetCluster) bool {
+	if suspension == nil {
+		return false
+	}
+
+	suspendDispatching := ptr.Deref(suspension.Dispatching, false)
+
+	if !suspendDispatching && suspension.DispatchingOnClusters != nil {
+		for _, cluster := range suspension.DispatchingOnClusters.ClusterNames {
+			if cluster == targetCluster.Name {
+				suspendDispatching = true
+				break
+			}
+		}
+	}
+	return suspendDispatching
+}

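shouldSuspendDispatching is evaluated once per target cluster, so a DispatchingOnClusters entry suspends only that cluster's Work while Works for other clusters keep flowing. A self-contained restatement of the rule for illustration (this is not the controller's unexported function, and the cluster names are hypothetical):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// suspended mirrors the decision rule above: Dispatching=true suspends every
// cluster; otherwise a cluster is suspended only if it is listed explicitly.
func suspended(s *policyv1alpha1.Suspension, cluster string) bool {
	if s == nil {
		return false
	}
	if ptr.Deref(s.Dispatching, false) {
		return true
	}
	if s.DispatchingOnClusters != nil {
		for _, name := range s.DispatchingOnClusters.ClusterNames {
			if name == cluster {
				return true
			}
		}
	}
	return false
}

func main() {
	s := &policyv1alpha1.Suspension{
		DispatchingOnClusters: &policyv1alpha1.SuspendClusters{ClusterNames: []string{"member1"}},
	}
	fmt.Println(suspended(s, "member1"), suspended(s, "member2")) // true false
}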
@@ -23,6 +23,7 @@ import (
 	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/utils/ptr"

 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
@@ -316,3 +317,65 @@ func Test_mergeConflictResolution(t *testing.T) {
 		})
 	}
 }
+
+func Test_shouldSuspendDispatching(t *testing.T) {
+	type args struct {
+		suspension    *policyv1alpha1.Suspension
+		targetCluster workv1alpha2.TargetCluster
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "false for nil suspension",
+			args: args{},
+			want: false,
+		},
+		{
+			name: "false for nil dispatching",
+			args: args{
+				suspension: &policyv1alpha1.Suspension{Dispatching: nil},
+			},
+			want: false,
+		},
+		{
+			name: "false for not suspension",
+			args: args{
+				suspension: &policyv1alpha1.Suspension{Dispatching: ptr.To(false)},
+			},
+			want: false,
+		},
+		{
+			name: "true for suspension",
+			args: args{
+				suspension: &policyv1alpha1.Suspension{Dispatching: ptr.To(true)},
+			},
+			want: true,
+		},
+		{
+			name: "true for matching cluster",
+			args: args{
+				suspension:    &policyv1alpha1.Suspension{DispatchingOnClusters: &policyv1alpha1.SuspendClusters{ClusterNames: []string{"clusterA"}}},
+				targetCluster: workv1alpha2.TargetCluster{Name: "clusterA"},
+			},
+			want: true,
+		},
+		{
+			name: "false for mismatched cluster",
+			args: args{
+				suspension:    &policyv1alpha1.Suspension{DispatchingOnClusters: &policyv1alpha1.SuspendClusters{ClusterNames: []string{"clusterB"}}},
+				targetCluster: workv1alpha2.TargetCluster{Name: "clusterA"},
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := shouldSuspendDispatching(tt.args.suspension, tt.args.targetCluster); got != tt.want {
+				t.Errorf("shouldSuspendDispatching() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

@@ -30,6 +30,7 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/ptr"
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -93,6 +94,11 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques
 		return controllerruntime.Result{}, err
 	}

+	if ptr.Deref(work.Spec.SuspendDispatching, false) {
+		klog.V(4).Infof("Skip syncing work(%s/%s) for cluster(%s) as work dispatch is suspended.", work.Namespace, work.Name, cluster.Name)
+		return controllerruntime.Result{}, nil
+	}
+
 	if !work.DeletionTimestamp.IsZero() {
 		// Abort deleting workload if cluster is unready when unjoining cluster, otherwise the unjoin process will be failed.
 		if util.IsClusterReady(&cluster.Status) {

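The early-return guard leans on ptr.Deref's default: a nil SuspendDispatching reads as false, so Works created before this change keep syncing unchanged. A minimal demonstration of that semantics:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	var unset *bool                             // field absent on pre-existing Work objects
	fmt.Println(ptr.Deref(unset, false))        // false: reconciliation proceeds
	fmt.Println(ptr.Deref(ptr.To(true), false)) // true: reconcile returns early, nothing is applied
}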
@@ -0,0 +1,119 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package execution
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/ptr"
+	controllerruntime "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
+	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+	"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
+	"github.com/karmada-io/karmada/pkg/util/gclient"
+	"github.com/karmada-io/karmada/pkg/util/helper"
+)
+
+func TestExecutionController_Reconcile(t *testing.T) {
+	tests := []struct {
+		name      string
+		c         Controller
+		work      *workv1alpha1.Work
+		ns        string
+		expectRes controllerruntime.Result
+		existErr  bool
+	}{
+		{
+			name: "work dispatching is suspended, no error, no apply",
+			c:    newController(newCluster("cluster", clusterv1alpha1.ClusterConditionReady, metav1.ConditionTrue)),
+			work: &workv1alpha1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "work",
+					Namespace: "karmada-es-cluster",
+				},
+				Spec: workv1alpha1.WorkSpec{
+					SuspendDispatching: ptr.To(true),
+				},
+				Status: workv1alpha1.WorkStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type:   workv1alpha1.WorkApplied,
+							Status: metav1.ConditionTrue,
+						},
+					},
+				},
+			},
+			ns:        "karmada-es-cluster",
+			expectRes: controllerruntime.Result{},
+			existErr:  false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			req := controllerruntime.Request{
+				NamespacedName: types.NamespacedName{
+					Name:      "work",
+					Namespace: tt.ns,
+				},
+			}
+
+			if err := tt.c.Client.Create(context.Background(), tt.work); err != nil {
+				t.Fatalf("Failed to create work: %v", err)
+			}
+
+			res, err := tt.c.Reconcile(context.Background(), req)
+			assert.Equal(t, tt.expectRes, res)
+			if tt.existErr {
+				assert.NotEmpty(t, err)
+			} else {
+				assert.Empty(t, err)
+			}
+		})
+	}
+}
+
+func newController(objects ...client.Object) Controller {
+	return Controller{
+		Client:          fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(objects...).Build(),
+		InformerManager: genericmanager.GetInstance(),
+		PredicateFunc:   helper.NewClusterPredicateOnAgent("test"),
+	}
+}
+
+func newCluster(name string, clusterType string, clusterStatus metav1.ConditionStatus) *clusterv1alpha1.Cluster {
+	return &clusterv1alpha1.Cluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec: clusterv1alpha1.ClusterSpec{},
+		Status: clusterv1alpha1.ClusterStatus{
+			Conditions: []metav1.Condition{
+				{
+					Type:   clusterType,
+					Status: clusterStatus,
+				},
+			},
+		},
+	}
+}

@@ -183,7 +183,7 @@ func (c *SyncController) buildWorks(quota *policyv1alpha1.FederatedResourceQuota
 		},
 	}

-	err = helper.CreateOrUpdateWork(c.Client, objectMeta, resourceQuotaObj)
+	err = helper.CreateOrUpdateWork(c.Client, objectMeta, resourceQuotaObj, nil)
 	if err != nil {
 		errs = append(errs, err)
 	}

@@ -493,7 +493,7 @@ func reportEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructur
 		return err
 	}

-	if err := helper.CreateOrUpdateWork(c, workMeta, endpointSlice); err != nil {
+	if err := helper.CreateOrUpdateWork(c, workMeta, endpointSlice, nil); err != nil {
 		return err
 	}

@@ -380,7 +380,7 @@ func reportEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructur
 		return err
 	}

-	if err := helper.CreateOrUpdateWork(c, workMeta, endpointSlice); err != nil {
+	if err := helper.CreateOrUpdateWork(c, workMeta, endpointSlice, nil); err != nil {
 		klog.Errorf("Failed to create or update work(%s/%s), Error: %v", workMeta.Namespace, workMeta.Name, err)
 		return err
 	}

@@ -393,7 +393,7 @@ func (c *EndpointsliceDispatchController) ensureEndpointSliceWork(mcs *networkin
 		klog.Errorf("Failed to convert typed object to unstructured object, error is: %v", err)
 		return err
 	}
-	if err := helper.CreateOrUpdateWork(c.Client, workMeta, unstructuredEPS); err != nil {
+	if err := helper.CreateOrUpdateWork(c.Client, workMeta, unstructuredEPS, nil); err != nil {
 		klog.Errorf("Failed to dispatch EndpointSlice %s/%s from %s to cluster %s:%v",
 			work.GetNamespace(), work.GetName(), providerCluster, consumerCluster, err)
 		return err
@@ -256,7 +256,7 @@ func (c *MCSController) handleMultiClusterServiceCreateOrUpdate(mcs *networkingv
 	// 5. make sure service exist
 	svc := &corev1.Service{}
 	err = c.Client.Get(context.Background(), types.NamespacedName{Namespace: mcs.Namespace, Name: mcs.Name}, svc)
-	// If the Service are deleted, the Service's ResourceBinding will be cleaned by GC
+	// If the Service is deleted, the Service's ResourceBinding will be cleaned by GC
 	if err != nil {
 		klog.Errorf("Failed to get service(%s/%s):%v", mcs.Namespace, mcs.Name, err)
 		return err
@@ -309,7 +309,7 @@ func (c *MCSController) propagateMultiClusterService(mcs *networkingv1alpha1.Mul
 		klog.Errorf("Failed to convert MultiClusterService(%s/%s) to unstructured object, err is %v", mcs.Namespace, mcs.Name, err)
 		return err
 	}
-	if err = helper.CreateOrUpdateWork(c, workMeta, mcsObj); err != nil {
+	if err = helper.CreateOrUpdateWork(c, workMeta, mcsObj, nil); err != nil {
 		klog.Errorf("Failed to create or update MultiClusterService(%s/%s) work in the given member cluster %s, err is %v",
 			mcs.Namespace, mcs.Name, clusterName, err)
 		return err
@@ -403,6 +403,7 @@ func (c *MCSController) propagateService(ctx context.Context, mcs *networkingv1a
 		bindingCopy.Spec.Placement = binding.Spec.Placement
 		bindingCopy.Spec.Resource = binding.Spec.Resource
 		bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution
+		bindingCopy.Spec.Suspension = binding.Spec.Suspension
 		return nil
 	})
 	if err != nil {

@@ -157,7 +157,7 @@ func (c *Controller) buildWorks(namespace *corev1.Namespace, clusters []clusterv
 			Annotations: annotations,
 		}

-		if err = helper.CreateOrUpdateWork(c.Client, objectMeta, clonedNamespaced); err != nil {
+		if err = helper.CreateOrUpdateWork(c.Client, objectMeta, clonedNamespaced, nil); err != nil {
 			ch <- fmt.Errorf("sync namespace(%s) to cluster(%s) failed due to: %v", clonedNamespaced.GetName(), cluster.GetName(), err)
 			return
 		}

@@ -237,7 +237,7 @@ func (c *Controller) buildWorks(cluster *clusterv1alpha1.Cluster, obj *unstructu
 		},
 	}

-	if err := helper.CreateOrUpdateWork(c.Client, objectMeta, obj); err != nil {
+	if err := helper.CreateOrUpdateWork(c.Client, objectMeta, obj, nil); err != nil {
 		return err
 	}

@@ -500,6 +500,7 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object
 		bindingCopy.Spec.Placement = binding.Spec.Placement
 		bindingCopy.Spec.Failover = binding.Spec.Failover
 		bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution
+		bindingCopy.Spec.Suspension = binding.Spec.Suspension
 		excludeClusterPolicy(bindingCopy.Labels)
 		return nil
 	})
@@ -594,6 +595,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
 		bindingCopy.Spec.Placement = binding.Spec.Placement
 		bindingCopy.Spec.Failover = binding.Spec.Failover
 		bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution
+		bindingCopy.Spec.Suspension = binding.Spec.Suspension
 		return nil
 	})
 	return err
@@ -639,6 +641,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
 		bindingCopy.Spec.Placement = binding.Spec.Placement
 		bindingCopy.Spec.Failover = binding.Spec.Failover
 		bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution
+		bindingCopy.Spec.Suspension = binding.Spec.Suspension
 		return nil
 	})
 	return err
@@ -765,6 +768,7 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure
 			Placement:          &policySpec.Placement,
 			Failover:           policySpec.Failover,
 			ConflictResolution: policySpec.ConflictResolution,
+			Suspension:         policySpec.Suspension,
 			Resource: workv1alpha2.ObjectReference{
 				APIVersion: object.GetAPIVersion(),
 				Kind:       object.GetKind(),
@@ -809,6 +813,7 @@ func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unst
 			Placement:          &policySpec.Placement,
 			Failover:           policySpec.Failover,
 			ConflictResolution: policySpec.ConflictResolution,
+			Suspension:         policySpec.Suspension,
 			Resource: workv1alpha2.ObjectReference{
 				APIVersion: object.GetAPIVersion(),
 				Kind:       object.GetKind(),

@@ -128,6 +128,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
 		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.SpreadConstraint":            schema_pkg_apis_policy_v1alpha1_SpreadConstraint(ref),
 		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.StaticClusterAssignment":     schema_pkg_apis_policy_v1alpha1_StaticClusterAssignment(ref),
 		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.StaticClusterWeight":         schema_pkg_apis_policy_v1alpha1_StaticClusterWeight(ref),
+		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.SuspendClusters":             schema_pkg_apis_policy_v1alpha1_SuspendClusters(ref),
+		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension":                  schema_pkg_apis_policy_v1alpha1_Suspension(ref),
 		"github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.ClusterAffinity":             schema_pkg_apis_remedy_v1alpha1_ClusterAffinity(ref),
 		"github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.ClusterConditionRequirement": schema_pkg_apis_remedy_v1alpha1_ClusterConditionRequirement(ref),
 		"github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.DecisionMatch":               schema_pkg_apis_remedy_v1alpha1_DecisionMatch(ref),
@@ -4870,12 +4872,18 @@ func schema_pkg_apis_policy_v1alpha1_PropagationSpec(ref common.ReferenceCallbac
 						Format: "",
 					},
 				},
+				"suspension": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Suspension declares the policy for suspending different aspects of propagation. nil means no suspension. no default values.",
+						Ref:         ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension"),
+					},
+				},
 			},
 			Required: []string{"resourceSelectors"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FailoverBehavior", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Placement", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ResourceSelector"},
+		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FailoverBehavior", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Placement", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ResourceSelector", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension"},
 }
@@ -5105,6 +5113,62 @@ func schema_pkg_apis_policy_v1alpha1_StaticClusterWeight(ref common.ReferenceCal
 	}
 }

+func schema_pkg_apis_policy_v1alpha1_SuspendClusters(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "SuspendClusters represents a group of clusters that should be suspended from propagating. Note: No plan to introduce the label selector or field selector to select clusters yet, as it would make the system unpredictable.",
+				Type:        []string{"object"},
+				Properties: map[string]spec.Schema{
+					"clusterNames": {
+						SchemaProps: spec.SchemaProps{
+							Description: "ClusterNames is the list of clusters to be selected.",
+							Type:        []string{"array"},
+							Items: &spec.SchemaOrArray{
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type:    []string{"string"},
+										Format:  "",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func schema_pkg_apis_policy_v1alpha1_Suspension(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "Suspension defines the policy for suspending different aspects of propagation.",
+				Type:        []string{"object"},
+				Properties: map[string]spec.Schema{
+					"dispatching": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Dispatching controls whether dispatching should be suspended. nil means not suspend, no default value, only accepts 'true'. Note: true means stop propagating to all clusters. Can not co-exist with DispatchingOnClusters which is used to suspend particular clusters.",
+							Type:        []string{"boolean"},
+							Format:      "",
+						},
+					},
+					"dispatchingOnClusters": {
+						SchemaProps: spec.SchemaProps{
+							Description: "DispatchingOnClusters declares a list of clusters to which the dispatching should be suspended. Note: Can not co-exist with Dispatching which is used to suspend all.",
+							Ref:         ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.SuspendClusters"),
+						},
+					},
+				},
+			},
+		},
+		Dependencies: []string{
+			"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.SuspendClusters"},
+	}
+}
+
 func schema_pkg_apis_remedy_v1alpha1_ClusterAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -6312,6 +6376,13 @@ func schema_pkg_apis_work_v1alpha1_WorkSpec(ref common.ReferenceCallback) common
 						Ref: ref("github.com/karmada-io/karmada/pkg/apis/work/v1alpha1.WorkloadTemplate"),
 					},
 				},
+				"suspendDispatching": {
+					SchemaProps: spec.SchemaProps{
+						Description: "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to all clusters.",
+						Type:        []string{"boolean"},
+						Format:      "",
+					},
+				},
 			},
 		},
@@ -7028,12 +7099,18 @@ func schema_pkg_apis_work_v1alpha2_ResourceBindingSpec(ref common.ReferenceCallb
 						Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 					},
 				},
+				"suspension": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Suspension declares the policy for suspending different aspects of propagation. nil means no suspension. no default values.",
+						Ref:         ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension"),
+					},
+				},
 			},
 			Required: []string{"resource"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FailoverBehavior", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Placement", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.BindingSnapshot", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.GracefulEvictionTask", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ObjectReference", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ReplicaRequirements", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.TargetCluster", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+		"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FailoverBehavior", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Placement", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.BindingSnapshot", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.GracefulEvictionTask", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ObjectReference", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ReplicaRequirements", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.TargetCluster", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
 }

@@ -38,7 +38,7 @@ import (
 )

 // CreateOrUpdateWork creates a Work object if not exist, or updates if it already exists.
-func CreateOrUpdateWork(client client.Client, workMeta metav1.ObjectMeta, resource *unstructured.Unstructured) error {
+func CreateOrUpdateWork(client client.Client, workMeta metav1.ObjectMeta, resource *unstructured.Unstructured, suspendDispatching *bool) error {
 	if workMeta.Labels[util.PropagationInstruction] != util.PropagationInstructionSuppressed {
 		resource = resource.DeepCopy()
 		// set labels
@@ -61,6 +61,7 @@ func CreateOrUpdateWork(client client.Client, workMeta metav1.ObjectMeta, resour
 	work := &workv1alpha1.Work{
 		ObjectMeta: workMeta,
 		Spec: workv1alpha1.WorkSpec{
+			SuspendDispatching: suspendDispatching,
 			Workload: workv1alpha1.WorkloadTemplate{
 				Manifests: []workv1alpha1.Manifest{
 					{

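Every pre-existing caller above passes nil for the new parameter, which together with the omitempty tag keeps suspendDispatching entirely absent from the serialized Work; only the binding controller supplies a concrete value. A quick illustrative check of that serialization behavior, based on the json tags shown in this diff:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/utils/ptr"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
)

func main() {
	// nil is omitted entirely; &false and &true are kept explicitly.
	for _, v := range []*bool{nil, ptr.To(false), ptr.To(true)} {
		b, _ := json.Marshal(workv1alpha1.WorkSpec{SuspendDispatching: v})
		fmt.Println(string(b))
	}
}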
@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/ptr"

 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
@@ -1018,3 +1019,68 @@ var _ = ginkgo.Describe("[Delete] clusterPropagation testing", func() {
 		})
 	})
 })
+
+// Suspend dispatching of ClusterPropagationPolicy
+var _ = ginkgo.Describe("[Suspend] clusterPropagation testing", func() {
+	var policy *policyv1alpha1.ClusterPropagationPolicy
+	var clusterRole *rbacv1.ClusterRole
+	var targetMember string
+	var resourceBindingName string
+	var workName string
+
+	ginkgo.BeforeEach(func() {
+		targetMember = framework.ClusterNames()[0]
+		policyName := clusterRoleNamePrefix + rand.String(RandomStrLength)
+		clusterRoleName := fmt.Sprintf("system:test-%s", policyName)
+
+		clusterRole = testhelper.NewClusterRole(clusterRoleName, nil)
+		resourceBindingName = names.GenerateBindingName(clusterRole.Kind, clusterRole.Name)
+		workName = names.GenerateWorkName(clusterRole.Kind, clusterRole.Name, clusterRole.Namespace)
+		policy = testhelper.NewClusterPropagationPolicy(policyName, []policyv1alpha1.ResourceSelector{
+			{
+				APIVersion: clusterRole.APIVersion,
+				Kind:       clusterRole.Kind,
+				Name:       clusterRole.Name,
+			}}, policyv1alpha1.Placement{
+			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+				ClusterNames: []string{targetMember},
+			},
+		})
+	})
+
+	ginkgo.BeforeEach(func() {
+		framework.CreateClusterRole(kubeClient, clusterRole)
+		ginkgo.DeferCleanup(func() {
+			framework.RemoveClusterRole(kubeClient, clusterRole.Name)
+		})
+	})
+
+	ginkgo.Context("suspend the ClusterPropagationPolicy dispatching", func() {
+		ginkgo.BeforeEach(func() {
+			policy.Spec.Suspension = &policyv1alpha1.Suspension{
+				Dispatching: ptr.To(true),
+			}
+			framework.CreateClusterPropagationPolicy(karmadaClient, policy)
+			ginkgo.DeferCleanup(func() {
+				framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name)
+			})
+		})
+
+		ginkgo.It("suspends ClusterResourceBinding", func() {
+			framework.WaitClusterResourceBindingFitWith(karmadaClient, resourceBindingName, func(binding *workv1alpha2.ClusterResourceBinding) bool {
+				return binding.Spec.Suspension != nil && ptr.Deref(binding.Spec.Suspension.Dispatching, false)
+			})
+		})
+
+		ginkgo.It("suspends Work", func() {
+			esName := names.GenerateExecutionSpaceName(targetMember)
+			gomega.Eventually(func() bool {
+				work, err := karmadaClient.WorkV1alpha1().Works(esName).Get(context.TODO(), workName, metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
+				return work != nil && ptr.Deref(work.Spec.SuspendDispatching, false)
+			}, pollTimeout, pollInterval).Should(gomega.Equal(true))
+		})
+	})
+})

@@ -1110,3 +1110,61 @@ var _ = ginkgo.Describe("[AdvancedPropagation] propagation testing", func() {
 		})
 	})
 })
+
+var _ = ginkgo.Describe("[Suspend] PropagationPolicy testing", func() {
+	var policy *policyv1alpha1.PropagationPolicy
+	var deployment *appsv1.Deployment
+	var targetMember string
+
+	ginkgo.BeforeEach(func() {
+		targetMember = framework.ClusterNames()[0]
+		policyNamespace := testNamespace
+		policyName := deploymentNamePrefix + rand.String(RandomStrLength)
+		deployment = testhelper.NewDeployment(testNamespace, policyName+"01")
+		policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+			{
+				APIVersion: deployment.APIVersion,
+				Kind:       deployment.Kind,
+				Name:       deployment.Name,
+			}}, policyv1alpha1.Placement{
+			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+				ClusterNames: []string{targetMember},
+			},
+		})
+	})
+
+	ginkgo.BeforeEach(func() {
+		framework.CreateDeployment(kubeClient, deployment)
+		ginkgo.DeferCleanup(func() {
+			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+		})
+	})
+
+	ginkgo.Context("suspend the PropagationPolicy dispatching", func() {
+		ginkgo.BeforeEach(func() {
+			policy.Spec.Suspension = &policyv1alpha1.Suspension{
+				Dispatching: ptr.To(true),
+			}
+
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+		})
+
+		ginkgo.It("suspends ResourceBinding", func() {
+			framework.WaitResourceBindingFitWith(karmadaClient, deployment.Namespace, names.GenerateBindingName(deployment.Kind, deployment.Name), func(binding *workv1alpha2.ResourceBinding) bool {
+				return binding.Spec.Suspension != nil && ptr.Deref(binding.Spec.Suspension.Dispatching, false)
+			})
+		})
+
+		ginkgo.It("suspends Work", func() {
+			workName := names.GenerateWorkName(deployment.Kind, deployment.Name, deployment.Namespace)
+			esName := names.GenerateExecutionSpaceName(targetMember)
+			gomega.Eventually(func() bool {
+				work, err := karmadaClient.WorkV1alpha1().Works(esName).Get(context.TODO(), workName, metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
+				return work != nil && ptr.Deref(work.Spec.SuspendDispatching, false)
+			}, pollTimeout, pollInterval).Should(gomega.Equal(true))
+		})
+	})
+})