Deprecate name/namespace labels of work

Signed-off-by: whitewindmills <jayfantasyhjh@gmail.com>
whitewindmills 2024-04-28 15:10:33 +08:00
parent 8f9a624620
commit 8328054439
11 changed files with 92 additions and 110 deletions
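This change replaces the per-object labels work.karmada.io/namespace and work.karmada.io/name (workv1alpha1.WorkNamespaceLabel/WorkNameLabel) with the single workv1alpha2.WorkPermanentIDLabel; the owning Work's namespace and name move into the WorkNamespaceAnnotation and WorkNameAnnotation annotations. A minimal sketch of the resulting lookup on a propagated object; the constant string values below are assumptions (only the Go identifiers appear in this diff):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// Assumed values following the existing work.karmada.io naming.
const (
	workPermanentIDLabel    = "work.karmada.io/permanent-id"
	workNamespaceAnnotation = "work.karmada.io/namespace"
	workNameAnnotation      = "work.karmada.io/name"
)

// resolveOwningWork resolves the owning Work of an object propagated to a
// member cluster: the permanent-id label marks it as managed by Karmada,
// while the Work's namespace/name now come from annotations, not labels.
func resolveOwningWork(obj *unstructured.Unstructured) (namespace, name string, ok bool) {
	if _, managed := obj.GetLabels()[workPermanentIDLabel]; !managed {
		return "", "", false
	}
	annotations := obj.GetAnnotations()
	namespace, nsOK := annotations[workNamespaceAnnotation]
	name, nameOK := annotations[workNameAnnotation]
	return namespace, name, nsOK && nameOK
}

func main() {
	obj := &unstructured.Unstructured{}
	obj.SetLabels(map[string]string{workPermanentIDLabel: "d5f40a3b"})
	obj.SetAnnotations(map[string]string{
		workNamespaceAnnotation: "karmada-es-member1",
		workNameAnnotation:      "demo-work",
	})
	fmt.Println(resolveOwningWork(obj))
}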

View File

@@ -1,25 +0,0 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
// WorkNamespaceLabel is added to objects to specify associated Work's namespace.
WorkNamespaceLabel = "work.karmada.io/namespace"
// WorkNameLabel is added to objects to specify associated Work's name.
WorkNameLabel = "work.karmada.io/name"
)

View File

@@ -58,12 +58,6 @@ const (
// - Manifest in Work object: describes the name of ClusterResourceBinding which the manifest derived from.
ClusterResourceBindingAnnotationKey = "clusterresourcebinding.karmada.io/name"
// WorkNamespaceLabel is added to objects to specify associated Work's namespace.
WorkNamespaceLabel = "work.karmada.io/namespace"
// WorkNameLabel is added to objects to specify associated Work's name.
WorkNameLabel = "work.karmada.io/name"
// BindingManagedByLabel is added to ResourceBinding to represent what kind of resource manages this Binding.
BindingManagedByLabel = "binding.karmada.io/managed-by"
)

View File

@@ -110,7 +110,7 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques
}
if !util.IsClusterReady(&cluster.Status) {
klog.Errorf("Stop sync work(%s/%s) for cluster(%s) as cluster not ready.", work.Namespace, work.Name, cluster.Name)
klog.Errorf("Stop syncing the work(%s/%s) for the cluster(%s) as cluster not ready.", work.Namespace, work.Name, cluster.Name)
return controllerruntime.Result{}, fmt.Errorf("cluster(%s) not ready", cluster.Name)
}
@@ -133,18 +133,18 @@ func (c *Controller) syncWork(clusterName string, work *workv1alpha1.Work) (cont
err := c.syncToClusters(clusterName, work)
metrics.ObserveSyncWorkloadLatency(err, start)
if err != nil {
msg := fmt.Sprintf("Failed to sync work(%s) to cluster(%s): %v", work.Name, clusterName, err)
msg := fmt.Sprintf("Failed to sync work(%s/%s) to cluster(%s), err: %v", work.Namespace, work.Name, clusterName, err)
klog.Errorf(msg)
c.EventRecorder.Event(work, corev1.EventTypeWarning, events.EventReasonSyncWorkloadFailed, msg)
return controllerruntime.Result{}, err
}
msg := fmt.Sprintf("Sync work (%s) to cluster(%s) successful.", work.Name, clusterName)
msg := fmt.Sprintf("Sync work(%s/%s) to cluster(%s) successful.", work.Namespace, work.Name, clusterName)
klog.V(4).Infof(msg)
c.EventRecorder.Event(work, corev1.EventTypeNormal, events.EventReasonSyncWorkloadSucceed, msg)
return controllerruntime.Result{}, nil
}
// tryDeleteWorkload tries to delete resource in the given member cluster.
// tryDeleteWorkload tries to delete resources in the given member cluster.
func (c *Controller) tryDeleteWorkload(clusterName string, work *workv1alpha1.Work) error {
for _, manifest := range work.Spec.Workload.Manifests {
workload := &unstructured.Unstructured{}
@@ -174,6 +174,7 @@ func (c *Controller) tryDeleteWorkload(clusterName string, work *workv1alpha1.Wo
klog.Infof("Abort deleting the resource(kind=%s, %s/%s) exists in cluster %v but not managed by karmada", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), clusterName)
return nil
}
util.MergeLabel(workload, workv1alpha2.WorkPermanentIDLabel, util.GetLabelValue(work.Labels, workv1alpha2.WorkPermanentIDLabel))
err = c.ObjectWatcher.Delete(clusterName, workload)
if err != nil {
@@ -207,7 +208,7 @@ func (c *Controller) syncToClusters(clusterName string, work *workv1alpha1.Work)
workload := &unstructured.Unstructured{}
err := workload.UnmarshalJSON(manifest.Raw)
if err != nil {
klog.Errorf("Failed to unmarshal workload, error is: %v", err)
klog.Errorf("Failed to unmarshal workload of the work(%s/%s), error is: %v", work.GetNamespace(), work.GetName(), err)
errs = append(errs, err)
continue
}
@@ -253,12 +254,11 @@ func (c *Controller) tryCreateOrUpdateWorkload(clusterName string, workload *uns
clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
if err != nil {
if !apierrors.IsNotFound(err) {
klog.Errorf("Failed to get resource %v from member cluster, err is %v ", workload.GetName(), err)
klog.Errorf("Failed to get the resource(kind=%s, %s/%s) from member cluster(%s), err is %v ", workload.GetKind(), workload.GetNamespace(), workload.GetName(), err)
return err
}
err = c.ObjectWatcher.Create(clusterName, workload)
if err != nil {
klog.Errorf("Failed to create resource(%v/%v) in the given member cluster %s, err is %v", workload.GetNamespace(), workload.GetName(), clusterName, err)
return err
}
return nil
@@ -266,13 +266,12 @@ func (c *Controller) tryCreateOrUpdateWorkload(clusterName string, workload *uns
err = c.ObjectWatcher.Update(clusterName, workload, clusterObj)
if err != nil {
klog.Errorf("Failed to update resource in the given member cluster %s, err is %v", clusterName, err)
return err
}
return nil
}
// updateAppliedCondition update the Applied condition for the given Work
// updateAppliedCondition updates the applied condition for the given Work.
func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work, status metav1.ConditionStatus, reason, message string) error {
newWorkAppliedCondition := metav1.Condition{
Type: workv1alpha1.WorkApplied,
@@ -292,7 +291,7 @@ func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work, status meta
if err = c.Get(context.TODO(), client.ObjectKey{Namespace: work.Namespace, Name: work.Name}, updated); err == nil {
work = updated
} else {
klog.Errorf("Failed to get updated work %s/%s: %v", work.Namespace, work.Name, err)
klog.Errorf("Failed to get the updated work(%s/%s), err: %v", work.Namespace, work.Name, err)
}
return updateErr
})
@@ -301,7 +300,7 @@ func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work, status meta
func (c *Controller) eventf(object *unstructured.Unstructured, eventType, reason, messageFmt string, args ...interface{}) {
ref, err := helper.GenEventRef(object)
if err != nil {
klog.Errorf("ignore event(%s) as failed to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
klog.Errorf("Ignore event(%s) as failed to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
return
}
c.EventRecorder.Eventf(ref, eventType, reason, messageFmt, args...)
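Note the util.MergeLabel call added to tryDeleteWorkload above (the work-status controller below gains a parallel one): since member-cluster copies are now matched via the permanent-id label instead of the namespace/name labels, the workload rebuilt from the manifest must be stamped with the same label before it is handed to the ObjectWatcher. A sketch of the assumed util.MergeLabel semantics (set or overwrite a single label on an unstructured object):

import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

// mergeLabel mirrors the assumed semantics of util.MergeLabel.
func mergeLabel(obj *unstructured.Unstructured, key, value string) {
	labels := obj.GetLabels()
	if labels == nil {
		labels = make(map[string]string, 1)
	}
	labels[key] = value
	obj.SetLabels(labels)
}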

View File

@@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"
@@ -34,6 +35,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/names"
@@ -55,24 +57,34 @@ func (c *EndpointSliceController) Reconcile(ctx context.Context, req controllerr
work := &workv1alpha1.Work{}
if err := c.Client.Get(ctx, req.NamespacedName, work); err != nil {
if apierrors.IsNotFound(err) {
// Cleanup derived EndpointSlices after work has been removed.
err = helper.DeleteEndpointSlice(c.Client, labels.Set{
workv1alpha1.WorkNamespaceLabel: req.Namespace,
workv1alpha1.WorkNameLabel: req.Name,
})
if err == nil {
return controllerruntime.Result{}, nil
// Clean up derived EndpointSlices after work has been removed.
endpointSlices := &discoveryv1.EndpointSliceList{}
if err = c.List(context.TODO(), endpointSlices, client.HasLabels{workv1alpha2.WorkPermanentIDLabel}); err != nil {
return controllerruntime.Result{}, err
}
var errs []error
for i, es := range endpointSlices.Items {
if es.Annotations[workv1alpha2.WorkNamespaceAnnotation] == req.Namespace &&
es.Annotations[workv1alpha2.WorkNameAnnotation] == req.Name {
if err := c.Delete(context.TODO(), &endpointSlices.Items[i]); err != nil && !apierrors.IsNotFound(err) {
klog.Errorf("Failed to delete endpointslice(%s/%s) after the work(%s/%s) has been removed, err: %v",
es.Namespace, es.Name, req.Namespace, req.Name, err)
errs = append(errs, err)
}
}
}
return controllerruntime.Result{}, utilerrors.NewAggregate(errs)
}
return controllerruntime.Result{}, err
}
if !work.DeletionTimestamp.IsZero() {
err := helper.DeleteEndpointSlice(c.Client, labels.Set{
workv1alpha1.WorkNamespaceLabel: req.Namespace,
workv1alpha1.WorkNameLabel: req.Name,
})
if err != nil {
// Clean up derived EndpointSlices when deleting the work.
if err := helper.DeleteEndpointSlice(c.Client, labels.Set{
workv1alpha2.WorkPermanentIDLabel: work.Labels[workv1alpha2.WorkPermanentIDLabel],
}); err != nil {
klog.Errorf("Failed to delete endpointslice of the work(%s/%s) when deleting the work, err is %v", work.Namespace, work.Name, err)
return controllerruntime.Result{}, err
}
return controllerruntime.Result{}, c.removeFinalizer(work.DeepCopy())
@@ -83,10 +95,10 @@ func (c *EndpointSliceController) Reconcile(ctx context.Context, req controllerr
// Once the conflict between service_export_controller.go and endpointslice_collect_controller.go is fixed, the following code should be deleted.
if serviceName := util.GetLabelValue(work.Labels, util.ServiceNameLabel); serviceName == "" {
err := helper.DeleteEndpointSlice(c.Client, labels.Set{
workv1alpha1.WorkNamespaceLabel: req.Namespace,
workv1alpha1.WorkNameLabel: req.Name,
workv1alpha2.WorkPermanentIDLabel: work.Labels[workv1alpha2.WorkPermanentIDLabel],
})
if err != nil {
klog.Errorf("Failed to delete endpointslice of the work(%s/%s) when the serviceexport is deleted, err is %v", work.Namespace, work.Name, err)
return controllerruntime.Result{}, err
}
return controllerruntime.Result{}, c.removeFinalizer(work.DeepCopy())
@@ -146,12 +158,11 @@ func (c *EndpointSliceController) collectEndpointSliceFromWork(work *workv1alpha
}
desiredEndpointSlice := deriveEndpointSlice(endpointSlice, clusterName)
desiredEndpointSlice.Labels = map[string]string{
workv1alpha1.WorkNamespaceLabel: work.Namespace,
workv1alpha1.WorkNameLabel: work.Name,
discoveryv1.LabelServiceName: names.GenerateDerivedServiceName(work.Labels[util.ServiceNameLabel]),
util.ManagedByKarmadaLabel: util.ManagedByKarmadaLabelValue,
}
desiredEndpointSlice.Labels = util.DedupeAndMergeLabels(desiredEndpointSlice.Labels, map[string]string{
workv1alpha2.WorkPermanentIDLabel: work.Labels[workv1alpha2.WorkPermanentIDLabel],
discoveryv1.LabelServiceName: names.GenerateDerivedServiceName(work.Labels[util.ServiceNameLabel]),
util.ManagedByKarmadaLabel: util.ManagedByKarmadaLabelValue,
})
if err = helper.CreateOrUpdateEndpointSlice(c.Client, desiredEndpointSlice); err != nil {
return err

View File

@@ -112,7 +112,7 @@ func (c *WorkStatusController) Reconcile(ctx context.Context, req controllerrunt
}
if !util.IsClusterReady(&cluster.Status) {
klog.Errorf("Stop sync work(%s/%s) for cluster(%s) as cluster not ready.", work.Namespace, work.Name, cluster.Name)
klog.Errorf("Stop syncing the Work(%s/%s) to the cluster(%s) as not ready.", work.Namespace, work.Name, cluster.Name)
return controllerruntime.Result{}, fmt.Errorf("cluster(%s) not ready", cluster.Name)
}
@@ -168,7 +168,7 @@ func generateKey(obj interface{}) (util.QueueKey, error) {
func getClusterNameFromAnnotation(resource *unstructured.Unstructured) (string, error) {
workNamespace, exist := resource.GetAnnotations()[workv1alpha2.WorkNamespaceAnnotation]
if !exist {
klog.V(4).Infof("Ignore resource(%s/%s/%s) which not managed by karmada", resource.GetKind(), resource.GetNamespace(), resource.GetName())
klog.V(4).Infof("Ignore resource(kind=%s, %s/%s) which is not managed by Karmada.", resource.GetKind(), resource.GetNamespace(), resource.GetName())
return "", nil
}
@@ -196,16 +196,17 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return err
}
workNamespace, nsExist := observedObj.GetAnnotations()[workv1alpha2.WorkNamespaceAnnotation]
workName, nameExist := observedObj.GetAnnotations()[workv1alpha2.WorkNameAnnotation]
observedAnnotations := observedObj.GetAnnotations()
workNamespace, nsExist := observedAnnotations[workv1alpha2.WorkNamespaceAnnotation]
workName, nameExist := observedAnnotations[workv1alpha2.WorkNameAnnotation]
if !nsExist || !nameExist {
klog.Infof("Ignore object(%s) which not managed by karmada.", fedKey.String())
klog.Infof("Ignore object(%s) which not managed by Karmada.", fedKey.String())
return nil
}
workObject := &workv1alpha1.Work{}
if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: workNamespace, Name: workName}, workObject); err != nil {
// Stop processing if resource no longer exist.
// Stop processing if the resource no longer exists.
if apierrors.IsNotFound(err) {
return nil
}
@@ -214,7 +215,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return err
}
// stop update status if Work object in terminating state.
// Stop updating the status if the Work object is in a terminating state.
if !workObject.DeletionTimestamp.IsZero() {
return nil
}
@@ -230,6 +231,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return err
}
util.MergeLabel(desiredObj, workv1alpha2.WorkPermanentIDLabel, util.GetLabelValue(workObject.Labels, workv1alpha2.WorkPermanentIDLabel))
// we should check if the observed status is consistent with the declaration to prevent accidental changes made
// in member clusters.
needUpdate, err := c.ObjectWatcher.NeedsUpdate(clusterName, desiredObj, observedObj)
@@ -239,7 +241,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
if needUpdate {
if err := c.ObjectWatcher.Update(clusterName, desiredObj, observedObj); err != nil {
klog.Errorf("Update %s failed: %v", fedKey.String(), err)
klog.Errorf("Updating %s failed: %v", fedKey.String(), err)
return err
}
// We can't return even after a successful update, because that might lose the chance to collect status.
@@ -251,7 +253,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
// status changes.
}
klog.Infof("reflecting %s(%s/%s) status to Work(%s/%s)", observedObj.GetKind(), observedObj.GetNamespace(), observedObj.GetName(), workNamespace, workName)
klog.Infof("Reflecting the resource(kind=%s, %s/%s) status to the Work(%s/%s).", observedObj.GetKind(), observedObj.GetNamespace(), observedObj.GetName(), workNamespace, workName)
return c.reflectStatus(workObject, observedObj)
}
@@ -301,7 +303,7 @@ func (c *WorkStatusController) recreateResourceIfNeeded(work *workv1alpha1.Work,
if reflect.DeepEqual(desiredGVK, workloadKey.GroupVersionKind()) &&
manifest.GetNamespace() == workloadKey.Namespace &&
manifest.GetName() == workloadKey.Name {
klog.Infof("recreating %s", workloadKey.String())
klog.Infof("Recreating resource(%s).", workloadKey.String())
err := c.ObjectWatcher.Create(workloadKey.Cluster, manifest)
if err != nil {
c.eventf(manifest, corev1.EventTypeWarning, events.EventReasonSyncWorkloadFailed, "Failed to create or update resource(%s/%s) in member cluster(%s): %v", manifest.GetNamespace(), manifest.GetName(), workloadKey.Cluster, err)
@@ -550,7 +552,7 @@ func (c *WorkStatusController) SetupWithManager(mgr controllerruntime.Manager) e
func (c *WorkStatusController) eventf(object *unstructured.Unstructured, eventType, reason, messageFmt string, args ...interface{}) {
ref, err := helper.GenEventRef(object)
if err != nil {
klog.Errorf("ignore event(%s) as failed to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
klog.Errorf("Ignore event(%s) as failing to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
return
}
c.EventRecorder.Eventf(ref, eventType, reason, messageFmt, args...)

View File

@@ -408,7 +408,7 @@ func TestGenerateKey(t *testing.T) {
"metadata": map[string]interface{}{
"name": "test",
"namespace": "default",
"labels": map[string]interface{}{
"annotations": map[string]interface{}{
"test": "karmada-es-cluster",
},
},
@@ -471,7 +471,7 @@ func TestGetClusterNameFromAnnotation(t *testing.T) {
"metadata": map[string]interface{}{
"name": "test",
"namespace": "default",
"labels": map[string]interface{}{
"annotations": map[string]interface{}{
"foo": "karmada-es-cluster",
},
},

View File

@@ -41,7 +41,6 @@ import (
configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/karmadactl/get"
@@ -317,8 +316,7 @@ func (o *CommandPromoteOption) revertPromotedDeps(memberClusterFactory cmdutil.F
return
}
// remove the karmada label of dependency resource in member cluster to avoid deleting it when deleting the resource in control plane
u.RemoveLabels(depObj, workv1alpha1.WorkNamespaceLabel)
u.RemoveLabels(depObj, workv1alpha1.WorkNameLabel)
u.RemoveLabels(depObj, workv1alpha2.WorkPermanentIDLabel, u.ManagedByKarmadaLabel)
if len(depObj.GetNamespace()) != 0 {
// update the dependency resource in member cluster
_, err := memberDynamicClient.Resource(depGvr).Namespace(depObj.GetNamespace()).Update(context.Background(), depObj, metav1.UpdateOptions{})
@@ -707,7 +705,7 @@ func (o *CommandPromoteOption) createClusterPropagationPolicy(karmadaClient *kar
// preprocessResource delete redundant fields to convert resource as template
func preprocessResource(obj *unstructured.Unstructured) error {
// remove fields that generated by kube-apiserver and no need(or can't) propagate to member clusters.
if err := prune.RemoveIrrelevantField(obj); err != nil {
if err := prune.RemoveIrrelevantFields(obj); err != nil {
return err
}

View File

@@ -38,9 +38,9 @@ var kindIrrelevantFieldPruners = map[string]irrelevantFieldPruneFunc{
util.ServiceKind: removeServiceIrrelevantField,
}
// RemoveIrrelevantField used to remove fields that generated by kube-apiserver and no need(or can't) propagate to
// RemoveIrrelevantFields is used to remove fields that are generated by kube-apiserver and need not (or can't) be propagated to
// member clusters.
func RemoveIrrelevantField(workload *unstructured.Unstructured, extraHooks ...func(*unstructured.Unstructured)) error {
func RemoveIrrelevantFields(workload *unstructured.Unstructured, extraHooks ...func(*unstructured.Unstructured)) error {
// populated by the kubernetes.
unstructured.RemoveNestedField(workload.Object, "metadata", "creationTimestamp")
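The rename to RemoveIrrelevantFields matches what the function actually does; its variadic extraHooks parameter lets callers append kind-specific pruning on top of the generic pass, as the mutating webhook below does with prune.RemoveJobTTLSeconds. A hedged usage sketch (import paths omitted; both prune identifiers are taken from this diff, while the caller name is hypothetical):

// pruneForPropagation strips kube-apiserver-generated fields plus
// Job's TTL field before the workload is propagated.
func pruneForPropagation(workload *unstructured.Unstructured) error {
	return prune.RemoveIrrelevantFields(workload, prune.RemoveJobTTLSeconds)
}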

View File

@@ -226,7 +226,7 @@ func TestRemoveIrrelevantField(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := RemoveIrrelevantField(tt.workload, tt.extraHooks...); err != nil {
if err := RemoveIrrelevantFields(tt.workload, tt.extraHooks...); err != nil {
t.Fatalf("RemoveIrrelevantField() expects no error but got: %v", err)
return
}

View File

@@ -75,7 +75,7 @@ func NewObjectWatcher(kubeClientSet client.Client, restMapper meta.RESTMapper, c
func (o *objectWatcherImpl) Create(clusterName string, desireObj *unstructured.Unstructured) error {
dynamicClusterClient, err := o.ClusterClientSetFunc(clusterName, o.KubeClientSet)
if err != nil {
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", clusterName)
klog.Errorf("Failed to build dynamic cluster client for cluster %s, err: %v.", clusterName, err)
return err
}
@@ -87,23 +87,23 @@ func (o *objectWatcherImpl) Create(clusterName string, desireObj *unstructured.U
fedKey, err := keys.FederatedKeyFunc(clusterName, desireObj)
if err != nil {
klog.Errorf("Failed to get FederatedKey %s, error: %v", desireObj.GetName(), err)
klog.Errorf("Failed to get FederatedKey %s, error: %v.", desireObj.GetName(), err)
return err
}
_, err = helper.GetObjectFromCache(o.RESTMapper, o.InformerManager, fedKey)
if err != nil {
if !apierrors.IsNotFound(err) {
klog.Errorf("Failed to get resource %v from member cluster, err is %v ", desireObj.GetName(), err)
klog.Errorf("Failed to get resource(kind=%s, %s/%s) from member cluster, err is %v.", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), err)
return err
}
clusterObj, err := dynamicClusterClient.DynamicClientSet.Resource(gvr).Namespace(desireObj.GetNamespace()).Create(context.TODO(), desireObj, metav1.CreateOptions{})
if err != nil {
klog.Errorf("Failed to create resource(kind=%s, %s/%s) in cluster %s, err is %v ", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
klog.Errorf("Failed to create resource(kind=%s, %s/%s) in cluster %s, err is %v.", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
return err
}
klog.Infof("Created resource(kind=%s, %s/%s) on cluster: %s", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName)
klog.Infof("Created the resource(kind=%s, %s/%s) on cluster(%s).", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName)
// record version
o.recordVersion(clusterObj, dynamicClusterClient.ClusterName)
}
@@ -142,20 +142,19 @@ func (o *objectWatcherImpl) Update(clusterName string, desireObj, clusterObj *un
updateAllowed := o.allowUpdate(clusterName, desireObj, clusterObj)
if !updateAllowed {
// The existing resource is not managed by Karmada, and no conflict resolution found, avoid updating the existing resource by default.
return fmt.Errorf("resource(kind=%s, %s/%s) already exist in cluster %v and the %s strategy value is empty, karmada will not manage this resource",
desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, workv1alpha2.ResourceConflictResolutionAnnotation,
)
return fmt.Errorf("resource(kind=%s, %s/%s) already exists in the cluster %v and the %s strategy value is empty, Karmada will not manage this resource",
desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, workv1alpha2.ResourceConflictResolutionAnnotation)
}
dynamicClusterClient, err := o.ClusterClientSetFunc(clusterName, o.KubeClientSet)
if err != nil {
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", clusterName)
klog.Errorf("Failed to build dynamic cluster client for cluster %s, err: %v.", clusterName, err)
return err
}
gvr, err := restmapper.GetGroupVersionResource(o.RESTMapper, desireObj.GroupVersionKind())
if err != nil {
klog.Errorf("Failed to update resource(kind=%s, %s/%s) in cluster %s as mapping GVK to GVR failed: %v", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
klog.Errorf("Failed to update the resource(kind=%s, %s/%s) in the cluster %s as mapping GVK to GVR failed: %v", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
return err
}
@@ -167,11 +166,11 @@ func (o *objectWatcherImpl) Update(clusterName string, desireObj, clusterObj *un
resource, err := dynamicClusterClient.DynamicClientSet.Resource(gvr).Namespace(desireObj.GetNamespace()).Update(context.TODO(), desireObj, metav1.UpdateOptions{})
if err != nil {
klog.Errorf("Failed to update resource(kind=%s, %s/%s) in cluster %s, err is %v ", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
klog.Errorf("Failed to update resource(kind=%s, %s/%s) in cluster %s, err: %v.", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
return err
}
klog.Infof("Updated resource(kind=%s, %s/%s) on cluster: %s", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName)
klog.Infof("Updated the resource(kind=%s, %s/%s) on cluster(%s).", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName)
// record version
o.recordVersion(resource, clusterName)
@@ -181,13 +180,13 @@ func (o *objectWatcherImpl) Delete(clusterName string, desireObj *unstructured.U
func (o *objectWatcherImpl) Delete(clusterName string, desireObj *unstructured.Unstructured) error {
dynamicClusterClient, err := o.ClusterClientSetFunc(clusterName, o.KubeClientSet)
if err != nil {
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", clusterName)
klog.Errorf("Failed to build dynamic cluster client for cluster %s, err: %v.", clusterName, err)
return err
}
gvr, err := restmapper.GetGroupVersionResource(o.RESTMapper, desireObj.GroupVersionKind())
if err != nil {
klog.Errorf("Failed to delete resource(kind=%s, %s/%s) in cluster %s as mapping GVK to GVR failed: %v", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
klog.Errorf("Failed to delete the resource(kind=%s, %s/%s) in the cluster %s as mapping GVK to GVR failed: %v", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
return err
}
@@ -203,10 +202,10 @@ func (o *objectWatcherImpl) Delete(clusterName string, desireObj *unstructured.U
err = dynamicClusterClient.DynamicClientSet.Resource(gvr).Namespace(desireObj.GetNamespace()).Delete(context.TODO(), desireObj.GetName(), deleteOption)
if err != nil && !apierrors.IsNotFound(err) {
klog.Errorf("Failed to delete resource %v in cluster %s, err is %v ", desireObj.GetName(), clusterName, err)
klog.Errorf("Failed to delete the resource(kind=%s, %s/%s) in the cluster %s, err: %v.", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName, err)
return err
}
klog.Infof("Deleted resource(kind=%s, %s/%s) on cluster: %s", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName)
klog.Infof("Deleted the resource(kind=%s, %s/%s) on cluster(%s).", desireObj.GetKind(), desireObj.GetNamespace(), desireObj.GetName(), clusterName)
objectKey := o.genObjectKey(desireObj)
o.deleteVersionRecord(dynamicClusterClient.ClusterName, objectKey)
@@ -273,28 +272,32 @@ func (o *objectWatcherImpl) NeedsUpdate(clusterName string, desiredObj, clusterO
// get resource version
version, exist := o.getVersionRecord(clusterName, desiredObj.GroupVersionKind().String()+"/"+desiredObj.GetNamespace()+"/"+desiredObj.GetName())
if !exist {
klog.Errorf("Failed to update resource(kind=%s, %s/%s) in cluster %s for the version record does not exist", desiredObj.GetKind(), desiredObj.GetNamespace(), desiredObj.GetName(), clusterName)
klog.Errorf("Failed to update the resource(kind=%s, %s/%s) in the cluster %s because the version record does not exist.", desiredObj.GetKind(), desiredObj.GetNamespace(), desiredObj.GetName(), clusterName)
return false, fmt.Errorf("failed to update resource(kind=%s, %s/%s) in cluster %s for the version record does not exist", desiredObj.GetKind(), desiredObj.GetNamespace(), desiredObj.GetName(), clusterName)
}
return lifted.ObjectNeedsUpdate(desiredObj, clusterObj, version), nil
}
func (o *objectWatcherImpl) isManagedResource(clusterObj *unstructured.Unstructured) bool {
return util.GetLabelValue(clusterObj.GetLabels(), util.ManagedByKarmadaLabel) == util.ManagedByKarmadaLabelValue
}
func (o *objectWatcherImpl) allowUpdate(clusterName string, desiredObj, clusterObj *unstructured.Unstructured) bool {
// If the existing resource is managed by Karmada, then the updating is allowed.
if util.GetLabelValue(clusterObj.GetLabels(), util.ManagedByKarmadaLabel) == util.ManagedByKarmadaLabelValue {
if o.isManagedResource(clusterObj) {
return true
}
klog.Warningf("The existing resource(kind=%s, %s/%s) in the cluster(%s) is not managed by Karmada.",
clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), clusterName)
// This happens when promoting workload to the Karmada control plane
// This happens when promoting workload to the Karmada control plane.
conflictResolution := util.GetAnnotationValue(desiredObj.GetAnnotations(), workv1alpha2.ResourceConflictResolutionAnnotation)
if conflictResolution == workv1alpha2.ResourceConflictResolutionOverwrite {
klog.Infof("Force overwriting of the resource(kind=%s, %s/%s) in the cluster(%s).",
desiredObj.GetKind(), desiredObj.GetNamespace(), desiredObj.GetName(), clusterName)
return true
}
// The existing resource is not managed by Karmada, and no conflict resolution found, avoid updating the existing resource by default.
klog.Warningf("resource(kind=%s, %s/%s) already exist in cluster %v and the %s strategy value is empty, karmada will not manage this resource",
desiredObj.GetKind(), desiredObj.GetNamespace(), desiredObj.GetName(), clusterName, workv1alpha2.ResourceConflictResolutionAnnotation,
)
return false
}
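The refactored allowUpdate now reads as a three-step decision: a resource already labeled managed-by Karmada can always be updated; otherwise the ResourceConflictResolutionAnnotation may force an overwrite (the promote scenario); failing both, the update is refused. A hedged sketch of how a caller opts a pre-existing member-cluster resource into overwrite, using the constants referenced in this diff (markForOverwrite is a hypothetical helper; imports as in the file above):

// markForOverwrite sets the conflict-resolution annotation so that
// allowUpdate permits Karmada to take over the existing resource.
func markForOverwrite(desiredObj *unstructured.Unstructured) {
	annotations := desiredObj.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[workv1alpha2.ResourceConflictResolutionAnnotation] = workv1alpha2.ResourceConflictResolutionOverwrite
	desiredObj.SetAnnotations(annotations)
}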

View File

@@ -46,7 +46,7 @@ func (a *MutatingAdmission) Handle(_ context.Context, req admission.Request) adm
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
klog.V(2).Infof("Mutating work(%s) for request: %s", work.Name, req.Operation)
klog.V(2).Infof("Mutating the work(%s/%s) for request: %s", work.Namespace, work.Name, req.Operation)
var manifests []workv1alpha1.Manifest
@@ -54,19 +54,19 @@ func (a *MutatingAdmission) Handle(_ context.Context, req admission.Request) adm
workloadObj := &unstructured.Unstructured{}
err := json.Unmarshal(manifest.Raw, workloadObj)
if err != nil {
klog.Errorf("Failed to unmarshal work(%s) manifest to Unstructured", work.Name)
klog.Errorf("Failed to unmarshal the work(%s/%s) manifest to Unstructured, err: %v", work.Namespace, work.Name, err)
return admission.Errored(http.StatusInternalServerError, err)
}
err = prune.RemoveIrrelevantField(workloadObj, prune.RemoveJobTTLSeconds)
err = prune.RemoveIrrelevantFields(workloadObj, prune.RemoveJobTTLSeconds)
if err != nil {
klog.Errorf("Failed to remove irrelevant field for work(%s): %v", work.Name, err)
klog.Errorf("Failed to remove irrelevant fields for the work(%s/%s), err: %v", work.Namespace, work.Name, err)
return admission.Errored(http.StatusInternalServerError, err)
}
workloadJSON, err := workloadObj.MarshalJSON()
if err != nil {
klog.Errorf("Failed to marshal workload of work(%s)", work.Name)
klog.Errorf("Failed to marshal workload of the work(%s/%s), err: %s", work.Namespace, work.Name, err)
return admission.Errored(http.StatusInternalServerError, err)
}
manifests = append(manifests, workv1alpha1.Manifest{RawExtension: runtime.RawExtension{Raw: workloadJSON}})