Merge pull request #4007 from jwcesign/add-uid-label
feature: Add the UID of the owner resource in labels and include the details in annotations.
Commit: b852552ed1
@@ -1,6 +1,21 @@
 package v1alpha1
 
 const (
+	// PropagationPolicyUIDLabel is the uid of PropagationPolicy object.
+	PropagationPolicyUIDLabel = "propagationpolicy.karmada.io/uid"
+
+	// PropagationPolicyNamespaceAnnotation is added to objects to specify associated PropagationPolicy namespace.
+	PropagationPolicyNamespaceAnnotation = "propagationpolicy.karmada.io/namespace"
+
+	// PropagationPolicyNameAnnotation is added to objects to specify associated PropagationPolicy name.
+	PropagationPolicyNameAnnotation = "propagationpolicy.karmada.io/name"
+
+	// ClusterPropagationPolicyUIDLabel is the uid of ClusterPropagationPolicy object.
+	ClusterPropagationPolicyUIDLabel = "clusterpropagationpolicy.karmada.io/uid"
+
+	// ClusterPropagationPolicyAnnotation is added to objects to specify associated ClusterPropagationPolicy name.
+	ClusterPropagationPolicyAnnotation = "clusterpropagationpolicy.karmada.io/name"
+
 	// PropagationPolicyNamespaceLabel is added to objects to specify associated PropagationPolicy namespace.
 	PropagationPolicyNamespaceLabel = "propagationpolicy.karmada.io/namespace"
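For orientation, a sketch of the metadata a resource template carries once ClaimPolicyForObject (changed later in this diff) has run with these keys. All values below are illustrative, and the snippet is not part of the change:

package main

import (
	"fmt"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

func main() {
	// Labels are machine-facing and selectable; the UID stays unique even if
	// a same-named policy is deleted and recreated, so stale claims show up.
	labels := map[string]string{
		policyv1alpha1.PropagationPolicyNamespaceLabel: "default",      // illustrative
		policyv1alpha1.PropagationPolicyNameLabel:      "nginx-policy", // illustrative
		policyv1alpha1.PropagationPolicyUIDLabel:       "93162d3c-ee8e-4995-9034-05f4d5d2c2b9",
	}
	// Annotations carry the human-readable details added by this commit.
	annotations := map[string]string{
		policyv1alpha1.PropagationPolicyNamespaceAnnotation: "default",
		policyv1alpha1.PropagationPolicyNameAnnotation:      "nginx-policy",
	}
	fmt.Println(labels, annotations)
}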
@@ -1,6 +1,21 @@
 package v1alpha2
 
 const (
+	// ResourceBindingUIDLabel is the UID of ResourceBinding object.
+	ResourceBindingUIDLabel = "resourcebinding.karmada.io/uid"
+
+	// ClusterResourceBindingUIDLabel is the uid of ClusterResourceBinding object.
+	ClusterResourceBindingUIDLabel = "clusterresourcebinding.karmada.io/uid"
+
+	// WorkNamespaceAnnotation is added to objects to specify associated Work's namespace.
+	WorkNamespaceAnnotation = "work.karmada.io/namespace"
+
+	// WorkNameAnnotation is added to objects to specify associated Work's name.
+	WorkNameAnnotation = "work.karmada.io/name"
+
+	// WorkUIDLabel is the uid of Work object.
+	WorkUIDLabel = "work.karmada.io/uid"
+
 	// ResourceBindingReferenceKey is the key of ResourceBinding object.
 	// It is usually a unique hash value of ResourceBinding object's namespace and name, intended to be added to the Work object.
 	// It will be used to retrieve all Works objects that derived from a specific ResourceBinding object.
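With the binding's UID stamped onto each derived Work, lookups by identity reduce to a plain label selector instead of recomputing the hashed reference key. A minimal sketch, assuming a controller-runtime client; the helper name is made up for illustration:

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// listWorksDerivedFrom is a hypothetical helper: it fetches every Work that
// carries the UID of one ResourceBinding, across all execution namespaces.
func listWorksDerivedFrom(ctx context.Context, c client.Client, bindingUID string) (*workv1alpha1.WorkList, error) {
	works := &workv1alpha1.WorkList{}
	err := c.List(ctx, works, client.MatchingLabels{
		workv1alpha2.ResourceBindingUIDLabel: bindingUID,
	})
	return works, err
}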
@@ -95,7 +95,7 @@ func ensureWork(
 	}
 	workLabel := mergeLabel(clonedWorkload, workNamespace, binding, scope)
 
-	annotations := mergeAnnotations(clonedWorkload, binding, scope)
+	annotations := mergeAnnotations(clonedWorkload, workNamespace, binding, scope)
 	annotations = mergeConflictResolution(clonedWorkload, conflictResolutionInBinding, annotations)
 	annotations, err = RecordAppliedOverrides(cops, ops, annotations)
 	if err != nil {
@@ -144,16 +144,23 @@ func mergeLabel(workload *unstructured.Unstructured, workNamespace string, bindi
 	util.MergeLabel(workload, util.ManagedByKarmadaLabel, util.ManagedByKarmadaLabelValue)
 	if scope == apiextensionsv1.NamespaceScoped {
 		util.MergeLabel(workload, workv1alpha2.ResourceBindingReferenceKey, names.GenerateBindingReferenceKey(binding.GetNamespace(), binding.GetName()))
+		util.MergeLabel(workload, workv1alpha2.ResourceBindingUIDLabel, string(binding.GetUID()))
 		workLabel[workv1alpha2.ResourceBindingReferenceKey] = names.GenerateBindingReferenceKey(binding.GetNamespace(), binding.GetName())
+		workLabel[workv1alpha2.ResourceBindingUIDLabel] = string(binding.GetUID())
 	} else {
 		util.MergeLabel(workload, workv1alpha2.ClusterResourceBindingReferenceKey, names.GenerateBindingReferenceKey("", binding.GetName()))
+		util.MergeLabel(workload, workv1alpha2.ClusterResourceBindingUIDLabel, string(binding.GetUID()))
 		workLabel[workv1alpha2.ClusterResourceBindingReferenceKey] = names.GenerateBindingReferenceKey("", binding.GetName())
+		workLabel[workv1alpha2.ClusterResourceBindingUIDLabel] = string(binding.GetUID())
 	}
 	return workLabel
 }
 
-func mergeAnnotations(workload *unstructured.Unstructured, binding metav1.Object, scope apiextensionsv1.ResourceScope) map[string]string {
+func mergeAnnotations(workload *unstructured.Unstructured, workNamespace string, binding metav1.Object, scope apiextensionsv1.ResourceScope) map[string]string {
 	annotations := make(map[string]string)
+	util.MergeAnnotation(workload, workv1alpha2.WorkNameAnnotation, names.GenerateWorkName(workload.GetKind(), workload.GetName(), workload.GetNamespace()))
+	util.MergeAnnotation(workload, workv1alpha2.WorkNamespaceAnnotation, workNamespace)
+
 	if scope == apiextensionsv1.NamespaceScoped {
 		util.MergeAnnotation(workload, workv1alpha2.ResourceBindingNamespaceAnnotationKey, binding.GetNamespace())
 		util.MergeAnnotation(workload, workv1alpha2.ResourceBindingNameAnnotationKey, binding.GetName())
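Because the Work's name and namespace now ride on the propagated manifest as annotations, a component holding only the manifest can map it back to its owning Work. A hypothetical helper (not part of this diff):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// workKeyOf reads the two annotations stamped by mergeAnnotations above and
// returns the namespace/name of the owning Work, if both are present.
func workKeyOf(obj metav1.Object) (namespace, name string, ok bool) {
	annotations := obj.GetAnnotations()
	namespace, nsOK := annotations[workv1alpha2.WorkNamespaceAnnotation]
	name, nameOK := annotations[workv1alpha2.WorkNameAnnotation]
	return namespace, name, nsOK && nameOK
}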
@@ -7,6 +7,7 @@ import (
 	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/types"
 
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
@@ -87,6 +88,7 @@ func Test_mergeTargetClusters(t *testing.T) {
 func Test_mergeLabel(t *testing.T) {
 	namespace := "fake-ns"
 	bindingName := "fake-bindingName"
+	rbUID := "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"
 
 	tests := []struct {
 		name string
@@ -113,10 +115,12 @@ func Test_mergeLabel(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      bindingName,
 					Namespace: namespace,
+					UID:       types.UID(rbUID),
 				},
 			},
 			scope: v1.NamespaceScoped,
 			want: map[string]string{
+				workv1alpha2.ResourceBindingUIDLabel:     rbUID,
 				workv1alpha2.ResourceBindingReferenceKey: names.GenerateBindingReferenceKey(namespace, bindingName),
 			},
 		},
@@ -134,10 +138,12 @@ func Test_mergeLabel(t *testing.T) {
 			binding: &workv1alpha2.ClusterResourceBinding{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: bindingName,
+					UID:  types.UID(rbUID),
 				},
 			},
 			scope: v1.ClusterScoped,
 			want: map[string]string{
+				workv1alpha2.ClusterResourceBindingUIDLabel:     rbUID,
 				workv1alpha2.ClusterResourceBindingReferenceKey: names.GenerateBindingReferenceKey("", bindingName),
 			},
 		},
@@ -156,14 +162,16 @@ func Test_mergeAnnotations(t *testing.T) {
 	bindingName := "fake-bindingName"
 
 	tests := []struct {
-		name     string
-		workload *unstructured.Unstructured
-		binding  metav1.Object
-		scope    v1.ResourceScope
-		want     map[string]string
+		name      string
+		namespace string
+		workload  *unstructured.Unstructured
+		binding   metav1.Object
+		scope     v1.ResourceScope
+		want      map[string]string
 	}{
 		{
-			name: "NamespaceScoped",
+			name:      "NamespaceScoped",
+			namespace: "test",
 			workload: &unstructured.Unstructured{
 				Object: map[string]interface{}{
 					"apiVersion": "apps/v1",
@@ -187,7 +195,8 @@ func Test_mergeAnnotations(t *testing.T) {
 			},
 		},
 		{
-			name: "ClusterScoped",
+			name:      "ClusterScoped",
+			namespace: "",
 			workload: &unstructured.Unstructured{
 				Object: map[string]interface{}{
 					"apiVersion": "v1",
@@ -210,7 +219,7 @@ func Test_mergeAnnotations(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := mergeAnnotations(tt.workload, tt.binding, tt.scope); !reflect.DeepEqual(got, tt.want) {
+			if got := mergeAnnotations(tt.workload, tt.namespace, tt.binding, tt.scope); !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("mergeAnnotations() = %v, want %v", got, tt.want)
 			}
 		})
@@ -25,6 +25,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 
 	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 	"github.com/karmada-io/karmada/pkg/events"
 	"github.com/karmada-io/karmada/pkg/metrics"
 	"github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
@@ -302,8 +303,8 @@ func (c *Controller) syncToClusters(clusterName string, work *workv1alpha1.Work)
 	syncSucceedNum := 0
 	for _, manifest := range work.Spec.Workload.Manifests {
 		workload := &unstructured.Unstructured{}
-		util.MergeLabel(workload, util.ManagedByKarmadaLabel, util.ManagedByKarmadaLabelValue)
 		err := workload.UnmarshalJSON(manifest.Raw)
+		util.MergeLabel(workload, workv1alpha2.WorkUIDLabel, string(work.UID))
 		if err != nil {
 			klog.Errorf("Failed to unmarshal workload, error is: %v", err)
 			errs = append(errs, err)
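Every manifest synced by the execution controller now carries the Work's UID, so all objects that one Work produced in a member cluster can be found with a single selector. An illustrative sketch against the dynamic client (the helper is an assumption, not repo code):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// listSyncedPods lists pods in a member cluster stamped with a given Work UID.
func listSyncedPods(ctx context.Context, client dynamic.Interface, workUID string) error {
	podGVR := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	pods, err := client.Resource(podGVR).Namespace(metav1.NamespaceAll).List(ctx, metav1.ListOptions{
		LabelSelector: workv1alpha2.WorkUIDLabel + "=" + workUID,
	})
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		fmt.Println(p.GetNamespace(), p.GetName())
	}
	return nil
}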
@@ -378,8 +378,9 @@ func newPod(name string, labels map[string]string) *corev1.Pod {
 	}
 }
 
-func newWorkLabels(workNs, workName string) map[string]string {
+func newWorkloadLabels(workNs, workName, workUID string) map[string]string {
 	labels := map[string]string{}
 
 	if workNs != "" {
 		labels[workv1alpha1.WorkNamespaceLabel] = workNs
 	}
@@ -388,6 +389,10 @@ func newWorkLabels(workNs, workName string) map[string]string {
 		labels[workv1alpha1.WorkNameLabel] = workName
 	}
 
+	if workUID != "" {
+		labels[workv1alpha2.WorkUIDLabel] = workUID
+	}
+
 	return labels
 }
 
@@ -428,6 +433,7 @@ func TestExecutionController_tryDeleteWorkload(t *testing.T) {
 	podName := "pod"
 	workName := "work-name"
 	workNs := "karmada-es-cluster"
+	workUID := "99f1f7c3-1f1f-4f1f-9f1f-7c3f1f1f9f1f"
 	clusterName := "cluster"
 	podGVR := corev1.SchemeGroupVersion.WithResource("pods")
 
@@ -441,8 +447,8 @@
 	}{
 		{
 			name:                      "failed to GetObjectFromCache, wrong InformerManager in ExecutionController",
-			pod:                       newPod(podName, newWorkLabels(workNs, workName)),
-			work:                      testhelper.NewWork(workName, workNs, raw),
+			pod:                       newPod(podName, newWorkloadLabels(workNs, workName, workUID)),
+			work:                      testhelper.NewWork(workName, workNs, workUID, raw),
 			controllerWithoutInformer: false,
 			expectedError:             false,
 			objectNeedDelete:          false,
@@ -450,23 +456,23 @@
 		{
 			name:                      "workload is not managed by karmada, without work-related labels",
 			pod:                       newPod(podName, nil),
-			work:                      testhelper.NewWork(workName, workNs, raw),
+			work:                      testhelper.NewWork(workName, workNs, workUID, raw),
 			controllerWithoutInformer: true,
 			expectedError:             false,
 			objectNeedDelete:          false,
 		},
 		{
 			name:                      "workload is not related to current work",
-			pod:                       newPod(podName, newWorkLabels(workNs, "wrong-work")),
-			work:                      testhelper.NewWork(workName, workNs, raw),
+			pod:                       newPod(podName, newWorkloadLabels(workNs, "wrong-work", workUID)),
+			work:                      testhelper.NewWork(workName, workNs, workUID, raw),
 			controllerWithoutInformer: true,
 			expectedError:             false,
 			objectNeedDelete:          false,
 		},
 		{
 			name:                      "normal case",
-			pod:                       newPod(podName, newWorkLabels(workNs, workName)),
-			work:                      testhelper.NewWork(workName, workNs, raw),
+			pod:                       newPod(podName, newWorkloadLabels(workNs, workName, workUID)),
+			work:                      testhelper.NewWork(workName, workNs, workUID, raw),
 			controllerWithoutInformer: true,
 			expectedError:             false,
 			objectNeedDelete:          true,
@@ -516,6 +522,7 @@ func TestExecutionController_tryCreateOrUpdateWorkload(t *testing.T) {
 	podName := "pod"
 	workName := "work-name"
 	workNs := "karmada-es-cluster"
+	workUID := "99f1f7c3-1f1f-4f1f-9f1f-7c3f1f1f9f1f"
 	clusterName := "cluster"
 	podGVR := corev1.SchemeGroupVersion.WithResource("pods")
 	annotations := map[string]string{
@@ -534,7 +541,7 @@
 		{
 			name:           "created workload",
 			pod:            newPod("wrong-pod", nil),
-			obj:            newPodObj(podName, newWorkLabels(workNs, workName)),
+			obj:            newPodObj(podName, newWorkloadLabels(workNs, workName, workUID)),
 			withAnnotation: false,
 			expectedError:  false,
 			objectExist:    true,
@@ -543,7 +550,7 @@
 		{
 			name:           "failed to update object, overwrite conflict resolusion not set",
 			pod:            newPod(podName, nil),
-			obj:            newPodObj(podName, newWorkLabels(workNs, workName)),
+			obj:            newPodObj(podName, newWorkloadLabels(workNs, workName, workUID)),
 			withAnnotation: false,
 			expectedError:  true,
 			objectExist:    true,
@@ -552,7 +559,7 @@
 		{
 			name:           "updated object",
 			pod:            newPod(podName, nil),
-			obj:            newPodObj(podName, newWorkLabels(workNs, workName)),
+			obj:            newPodObj(podName, newWorkloadLabels(workNs, workName, workUID)),
 			withAnnotation: true,
 			expectedError:  false,
 			objectExist:    true,
@@ -590,7 +597,11 @@
 				return
 			}
 
-			labels := map[string]string{workv1alpha1.WorkNamespaceLabel: workNs, workv1alpha1.WorkNameLabel: workName}
+			labels := map[string]string{
+				workv1alpha1.WorkNamespaceLabel: workNs,
+				workv1alpha1.WorkNameLabel:      workName,
+				workv1alpha2.WorkUIDLabel:       workUID,
+			}
 			if tt.labelMatch {
 				assert.Equal(t, resource.GetLabels(), labels)
 			} else {
@@ -706,6 +717,7 @@ func TestExecutionController_syncWork(t *testing.T) {
 	basePod := newPod("pod", nil)
 	workName := "work"
 	workNs := "karmada-es-cluster"
+	workUID := "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"
 	podGVR := corev1.SchemeGroupVersion.WithResource("pods")
 	podRaw := []byte(`
 {
@@ -789,7 +801,7 @@
 				o.dynamicClientSet = dynamicClientSet
 			}
 
-			work := testhelper.NewWork(workName, tt.workNamespace, podRaw)
+			work := testhelper.NewWork(workName, tt.workNamespace, workUID, podRaw)
 			o.objects = append(o.objects, work)
 			o.objectsWithStatus = append(o.objectsWithStatus, &workv1alpha1.Work{})
 
@@ -810,7 +822,7 @@
 				t.Fatalf("Failed to get pod: %v", err)
 			}
 
-			expectedLabels := newWorkLabels(workNs, workName)
+			expectedLabels := newWorkloadLabels(workNs, workName, workUID)
 			assert.Equal(t, resource.GetLabels(), expectedLabels)
 		}
 	})
@@ -496,6 +496,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
 	cluster := testhelper.NewClusterWithTypeAndStatus("cluster", clusterv1alpha1.ClusterConditionReady, metav1.ConditionFalse)
 	workName := "work"
 	workNs := "karmada-es-cluster"
+	workUID := "92345678-1234-5678-1234-567812345678"
 
 	tests := []struct {
 		name string
@@ -576,9 +577,9 @@
 
 			var work *workv1alpha1.Work
 			if tt.workWithRigntNS {
-				work = testhelper.NewWork(workName, workNs, tt.raw)
+				work = testhelper.NewWork(workName, workNs, workUID, tt.raw)
 			} else {
-				work = testhelper.NewWork(workName, fmt.Sprintf("%v-test", workNs), tt.raw)
+				work = testhelper.NewWork(workName, fmt.Sprintf("%v-test", workNs), workUID, tt.raw)
 			}
 
 			key, _ := generateKey(tt.obj)
@@ -388,7 +388,7 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object
 		}
 	}()
 
-	if err := d.ClaimPolicyForObject(object, policy.Namespace, policy.Name); err != nil {
+	if err := d.ClaimPolicyForObject(object, policy.Namespace, policy.Name, string(policy.UID)); err != nil {
 		klog.Errorf("Failed to claim policy(%s) for object: %s", policy.Name, object)
 		return err
 	}
@@ -396,9 +396,14 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object
 	policyLabels := map[string]string{
 		policyv1alpha1.PropagationPolicyNamespaceLabel: policy.GetNamespace(),
 		policyv1alpha1.PropagationPolicyNameLabel:      policy.GetName(),
+		policyv1alpha1.PropagationPolicyUIDLabel:       string(policy.UID),
 	}
+	policyAnnotations := map[string]string{
+		policyv1alpha1.PropagationPolicyNamespaceAnnotation: policy.GetNamespace(),
+		policyv1alpha1.PropagationPolicyNameAnnotation:      policy.GetName(),
+	}
 
-	binding, err := d.BuildResourceBinding(object, objectKey, policyLabels, &policy.Spec)
+	binding, err := d.BuildResourceBinding(object, objectKey, policyLabels, policyAnnotations, &policy.Spec)
 	if err != nil {
 		klog.Errorf("Failed to build resourceBinding for object: %s. error: %v", objectKey, err)
 		return err
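Note the split: the UID lands in policyLabels, which are selectable and stay unique across delete-and-recreate cycles of a same-named policy, while the human-readable namespace/name details move to policyAnnotations. A sketch of the selection this enables, assuming a controller-runtime client (the helper is illustrative):

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// deploymentsClaimedBy lists Deployments claimed by the policy with this UID;
// a recreated policy of the same name gets a fresh UID and matches nothing.
func deploymentsClaimedBy(ctx context.Context, c client.Client, policyUID string) (*appsv1.DeploymentList, error) {
	deployments := &appsv1.DeploymentList{}
	err := c.List(ctx, deployments, client.MatchingLabels{
		policyv1alpha1.PropagationPolicyUIDLabel: policyUID,
	})
	return deployments, err
}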
@@ -413,6 +418,7 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object
 					"try again later after binding is garbage collected, see https://github.com/karmada-io/karmada/issues/2090")
 			}
 			// Just update necessary fields, especially avoid modifying Spec.Clusters which is scheduling result, if already exists.
+			bindingCopy.Annotations = util.DedupeAndMergeAnnotations(bindingCopy.Annotations, binding.Annotations)
 			bindingCopy.Labels = util.DedupeAndMergeLabels(bindingCopy.Labels, binding.Labels)
 			bindingCopy.OwnerReferences = binding.OwnerReferences
 			bindingCopy.Finalizers = binding.Finalizers
@@ -463,20 +469,24 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
 		}
 	}()
 
-	if err := d.ClaimClusterPolicyForObject(object, policy.Name); err != nil {
+	if err := d.ClaimClusterPolicyForObject(object, policy.Name, string(policy.UID)); err != nil {
 		klog.Errorf("Failed to claim cluster policy(%s) for object: %s", policy.Name, object)
 		return err
 	}
 
 	policyLabels := map[string]string{
-		policyv1alpha1.ClusterPropagationPolicyLabel: policy.GetName(),
+		policyv1alpha1.ClusterPropagationPolicyLabel:    policy.GetName(),
+		policyv1alpha1.ClusterPropagationPolicyUIDLabel: string(policy.UID),
 	}
+	policyAnnotations := map[string]string{
+		policyv1alpha1.ClusterPropagationPolicyAnnotation: policy.GetName(),
+	}
 
 	// Build `ResourceBinding` or `ClusterResourceBinding` according to the resource template's scope.
 	// For namespace-scoped resources, which namespace is not empty, building `ResourceBinding`.
 	// For cluster-scoped resources, which namespace is empty, building `ClusterResourceBinding`.
 	if object.GetNamespace() != "" {
-		binding, err := d.BuildResourceBinding(object, objectKey, policyLabels, &policy.Spec)
+		binding, err := d.BuildResourceBinding(object, objectKey, policyLabels, policyAnnotations, &policy.Spec)
 		if err != nil {
 			klog.Errorf("Failed to build resourceBinding for object: %s. error: %v", objectKey, err)
 			return err
@@ -491,6 +501,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
 					"try again later after binding is garbage collected, see https://github.com/karmada-io/karmada/issues/2090")
 			}
 			// Just update necessary fields, especially avoid modifying Spec.Clusters which is scheduling result, if already exists.
+			bindingCopy.Annotations = util.DedupeAndMergeAnnotations(bindingCopy.Annotations, binding.Annotations)
 			bindingCopy.Labels = util.DedupeAndMergeLabels(bindingCopy.Labels, binding.Labels)
 			bindingCopy.OwnerReferences = binding.OwnerReferences
 			bindingCopy.Finalizers = binding.Finalizers
@@ -523,7 +534,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
 			klog.V(2).Infof("ResourceBinding(%s) is up to date.", binding.GetName())
 		}
 	} else {
-		binding, err := d.BuildClusterResourceBinding(object, objectKey, policyLabels, &policy.Spec)
+		binding, err := d.BuildClusterResourceBinding(object, objectKey, policyLabels, policyAnnotations, &policy.Spec)
 		if err != nil {
 			klog.Errorf("Failed to build clusterResourceBinding for object: %s. error: %v", objectKey, err)
 			return err
@@ -537,6 +548,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
 					"try again later after binding is garbage collected, see https://github.com/karmada-io/karmada/issues/2090")
 			}
 			// Just update necessary fields, especially avoid modifying Spec.Clusters which is scheduling result, if already exists.
+			bindingCopy.Annotations = util.DedupeAndMergeAnnotations(bindingCopy.Annotations, binding.Annotations)
 			bindingCopy.Labels = util.DedupeAndMergeLabels(bindingCopy.Labels, binding.Labels)
 			bindingCopy.OwnerReferences = binding.OwnerReferences
 			bindingCopy.Finalizers = binding.Finalizers
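Each bindingCopy.Annotations assignment above runs inside a create-or-update callback, so annotations that other controllers put on the live binding survive the merge instead of being overwritten. Schematically, and simplified (this sketch assumes controller-runtime's CreateOrUpdate; karmada's actual update plumbing may differ):

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/util"
)

// upsertBinding creates or updates a ResourceBinding, merging rather than
// replacing the metadata this controller does not exclusively own.
func upsertBinding(ctx context.Context, c client.Client, desired *workv1alpha2.ResourceBinding) error {
	bindingCopy := desired.DeepCopy()
	_, err := controllerutil.CreateOrUpdate(ctx, c, bindingCopy, func() error {
		// Only touch necessary fields; Spec.Clusters (the scheduling result)
		// is deliberately left alone on update.
		bindingCopy.Annotations = util.DedupeAndMergeAnnotations(bindingCopy.Annotations, desired.Annotations)
		bindingCopy.Labels = util.DedupeAndMergeLabels(bindingCopy.Labels, desired.Labels)
		bindingCopy.OwnerReferences = desired.OwnerReferences
		bindingCopy.Finalizers = desired.Finalizers
		return nil
	})
	return err
}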
@@ -603,7 +615,7 @@ func (d *ResourceDetector) GetUnstructuredObject(objectKey keys.ClusterWideKey)
 }
 
 // ClaimPolicyForObject set policy identifier which the object associated with.
-func (d *ResourceDetector) ClaimPolicyForObject(object *unstructured.Unstructured, policyNamespace string, policyName string) error {
+func (d *ResourceDetector) ClaimPolicyForObject(object *unstructured.Unstructured, policyNamespace, policyName, policyUID string) error {
 	objLabels := object.GetLabels()
 	if objLabels == nil {
 		objLabels = make(map[string]string)
@@ -618,15 +630,24 @@ func (d *ResourceDetector) ClaimPolicyForObject(object *unstructured.Unstructure
 
 	objLabels[policyv1alpha1.PropagationPolicyNamespaceLabel] = policyNamespace
 	objLabels[policyv1alpha1.PropagationPolicyNameLabel] = policyName
+	objLabels[policyv1alpha1.PropagationPolicyUIDLabel] = policyUID
+
+	objectAnnotations := object.GetAnnotations()
+	if objectAnnotations == nil {
+		objectAnnotations = make(map[string]string)
+	}
+	objectAnnotations[policyv1alpha1.PropagationPolicyNamespaceAnnotation] = policyNamespace
+	objectAnnotations[policyv1alpha1.PropagationPolicyNameAnnotation] = policyName
 
 	objectCopy := object.DeepCopy()
 	objectCopy.SetLabels(objLabels)
+	objectCopy.SetAnnotations(objectAnnotations)
 	return d.Client.Update(context.TODO(), objectCopy)
 }
 
 // ClaimClusterPolicyForObject set cluster identifier which the object associated with.
-func (d *ResourceDetector) ClaimClusterPolicyForObject(object *unstructured.Unstructured, policyName string) error {
+func (d *ResourceDetector) ClaimClusterPolicyForObject(object *unstructured.Unstructured, policyName, policyUID string) error {
 	claimedName := util.GetLabelValue(object.GetLabels(), policyv1alpha1.ClusterPropagationPolicyLabel)
 
 	// object has been claimed, don't need to claim again
 	if claimedName == policyName {
 		return nil
@@ -634,11 +655,15 @@ func (d *ResourceDetector) ClaimClusterPolicyForObject(object *unstructured.Unst
 
 	objectCopy := object.DeepCopy()
 	util.MergeLabel(objectCopy, policyv1alpha1.ClusterPropagationPolicyLabel, policyName)
+	util.MergeLabel(objectCopy, policyv1alpha1.ClusterPropagationPolicyUIDLabel, policyUID)
+
+	util.MergeAnnotation(objectCopy, policyv1alpha1.ClusterPropagationPolicyAnnotation, policyName)
 	return d.Client.Update(context.TODO(), objectCopy)
 }
 
 // BuildResourceBinding builds a desired ResourceBinding for object.
-func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, objectKey keys.ClusterWideKey, labels map[string]string, policySpec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ResourceBinding, error) {
+func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, objectKey keys.ClusterWideKey,
+	labels, annotations map[string]string, policySpec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ResourceBinding, error) {
 	bindingName := names.GenerateBindingName(object.GetKind(), object.GetName())
 	propagationBinding := &workv1alpha2.ResourceBinding{
 		ObjectMeta: metav1.ObjectMeta{
@@ -647,8 +672,9 @@
 			OwnerReferences: []metav1.OwnerReference{
 				*metav1.NewControllerRef(object, objectKey.GroupVersionKind()),
 			},
-			Labels:     labels,
-			Finalizers: []string{util.BindingControllerFinalizer},
+			Annotations: annotations,
+			Labels:      labels,
+			Finalizers:  []string{util.BindingControllerFinalizer},
 		},
 		Spec: workv1alpha2.ResourceBindingSpec{
 			PropagateDeps: policySpec.PropagateDeps,
@@ -681,7 +707,8 @@
 }
 
 // BuildClusterResourceBinding builds a desired ClusterResourceBinding for object.
-func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unstructured, objectKey keys.ClusterWideKey, labels map[string]string, policySpec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ClusterResourceBinding, error) {
+func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unstructured, objectKey keys.ClusterWideKey,
+	labels, annotations map[string]string, policySpec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ClusterResourceBinding, error) {
 	bindingName := names.GenerateBindingName(object.GetKind(), object.GetName())
 	binding := &workv1alpha2.ClusterResourceBinding{
 		ObjectMeta: metav1.ObjectMeta{
@@ -689,8 +716,9 @@
 			OwnerReferences: []metav1.OwnerReference{
 				*metav1.NewControllerRef(object, objectKey.GroupVersionKind()),
 			},
-			Labels:     labels,
-			Finalizers: []string{util.ClusterResourceBindingControllerFinalizer},
+			Annotations: annotations,
+			Labels:      labels,
+			Finalizers:  []string{util.ClusterResourceBindingControllerFinalizer},
 		},
 		Spec: workv1alpha2.ResourceBindingSpec{
 			PropagateDeps: policySpec.PropagateDeps,
@@ -119,7 +119,7 @@ func (d *ResourceDetector) preemptPropagationPolicy(resourceTemplate *unstructur
 			"Propagation policy(%s/%s) preempted propagation policy(%s/%s) successfully", policy.Namespace, policy.Name, claimedPolicyNamespace, claimedPolicyName)
 	}()
 
-	if err = d.ClaimPolicyForObject(resourceTemplate, policy.Namespace, policy.Name); err != nil {
+	if err = d.ClaimPolicyForObject(resourceTemplate, policy.Namespace, policy.Name, string(policy.UID)); err != nil {
 		klog.Errorf("Failed to claim new propagation policy(%s/%s) on resource template(%s, kind=%s, %s): %v.", policy.Namespace, policy.Name,
 			resourceTemplate.GetAPIVersion(), resourceTemplate.GetKind(), names.NamespacedKey(resourceTemplate.GetNamespace(), resourceTemplate.GetName()), err)
 		return err
@@ -147,7 +147,7 @@ func (d *ResourceDetector) preemptClusterPropagationPolicyDirectly(resourceTempl
 			"Propagation policy(%s/%s) preempted cluster propagation policy(%s) successfully", policy.Namespace, policy.Name, claimedPolicyName)
 	}()
 
-	if err = d.ClaimPolicyForObject(resourceTemplate, policy.Namespace, policy.Name); err != nil {
+	if err = d.ClaimPolicyForObject(resourceTemplate, policy.Namespace, policy.Name, string(policy.UID)); err != nil {
 		klog.Errorf("Failed to claim new propagation policy(%s/%s) on resource template(%s, kind=%s, %s) directly: %v.", policy.Namespace, policy.Name,
 			resourceTemplate.GetAPIVersion(), resourceTemplate.GetKind(), names.NamespacedKey(resourceTemplate.GetNamespace(), resourceTemplate.GetName()), err)
 		return err
@@ -197,7 +197,7 @@ func (d *ResourceDetector) preemptClusterPropagationPolicy(resourceTemplate *uns
 			"Cluster propagation policy(%s) preempted cluster propagation policy(%s) successfully", policy.Name, claimedPolicyName)
 	}()
 
-	if err = d.ClaimClusterPolicyForObject(resourceTemplate, policy.Name); err != nil {
+	if err = d.ClaimClusterPolicyForObject(resourceTemplate, policy.Name, string(policy.UID)); err != nil {
 		klog.Errorf("Failed to claim new cluster propagation policy(%s) on resource template(%s, kind=%s, %s): %v.", policy.Name,
 			resourceTemplate.GetAPIVersion(), resourceTemplate.GetKind(), names.NamespacedKey(resourceTemplate.GetNamespace(), resourceTemplate.GetName()), err)
 		return err
@@ -97,3 +97,15 @@ func RecordManagedAnnotations(object *unstructured.Unstructured) {
 	annotations[workv1alpha2.ManagedAnnotation] = strings.Join(managedKeys, ",")
 	object.SetAnnotations(annotations)
 }
+
+// DedupeAndMergeAnnotations merges the new annotations into exist annotations.
+func DedupeAndMergeAnnotations(existAnnotation, newAnnotation map[string]string) map[string]string {
+	if existAnnotation == nil {
+		return newAnnotation
+	}
+
+	for k, v := range newAnnotation {
+		existAnnotation[k] = v
+	}
+	return existAnnotation
+}
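A usage sketch for the new helper (values are illustrative): it mutates and returns the existing map, incoming values win on key conflicts, and a nil existing map is returned as the new map unchanged.

package main

import (
	"fmt"

	"github.com/karmada-io/karmada/pkg/util"
)

func main() {
	exist := map[string]string{"a": "1", "b": "2"}
	incoming := map[string]string{"b": "20", "c": "3"}

	merged := util.DedupeAndMergeAnnotations(exist, incoming)
	fmt.Println(merged) // map[a:1 b:20 c:3]; the incoming "b" overwrote the old value
}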
@@ -118,6 +118,7 @@ func Test_getSingleClusterManager(t *testing.T) {
 }
 
 func Test_registerInformersAndStart(t *testing.T) {
+	workUID := "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"
 	clusterName := "cluster"
 	cluster := testhelper.NewClusterWithTypeAndStatus(clusterName, clusterv1alpha1.ClusterConditionReady, metav1.ConditionTrue)
 
@@ -128,7 +129,7 @@ func Test_registerInformersAndStart(t *testing.T) {
 	c := newMemberClusterInformer(cluster)
 
 	raw := []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`)
-	work := testhelper.NewWork("work", "default", raw)
+	work := testhelper.NewWork("work", "default", workUID, raw)
 
 	eventHandler := fedinformer.NewHandlerOnEvents(nil, nil, nil)
 
@@ -890,11 +890,12 @@ func NewPodDisruptionBudget(namespace, name string, maxUnAvailable intstr.IntOrS
 }
 
 // NewWork will build a new Work object.
-func NewWork(workName, workNs string, raw []byte) *workv1alpha1.Work {
+func NewWork(workName, workNs, workUID string, raw []byte) *workv1alpha1.Work {
 	work := &workv1alpha1.Work{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      workName,
 			Namespace: workNs,
+			UID:       types.UID(workUID),
 		},
 		Spec: workv1alpha1.WorkSpec{
 			Workload: workv1alpha1.WorkloadTemplate{
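Every NewWork call site now threads a UID through as the third argument. A usage sketch mirroring the updated tests (the import path is assumed to be the repo's test helper package; the UID value is illustrative):

package main

import (
	"fmt"

	testhelper "github.com/karmada-io/karmada/test/helper"
)

func main() {
	raw := []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`)
	work := testhelper.NewWork("work", "karmada-es-cluster", "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", raw)
	fmt.Println(work.Namespace, work.Name, work.UID)
}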