Merge pull request #471 from mrlihanbo/sync
Abort applying a manifest to the member cluster when a resource with the same name and kind already exists
commit b9f6c0f7c2
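For context, the diff below replaces fail-fast handling in syncToClusters with per-manifest error collection: failures are gathered, aggregated with k8s.io/apimachinery/pkg/util/errors, and reported through a single Applied condition that is written with meta.SetStatusCondition instead of being appended as a duplicate entry. What follows is a minimal, self-contained sketch of that pattern, not the Karmada code itself; applyAll, the apply callback, and the "Applied" condition type string are hypothetical stand-ins for syncToClusters, the per-manifest apply step, and workv1alpha1.WorkApplied.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// applyAll is a hypothetical stand-in for syncToClusters: it applies every
// manifest, remembers each failure instead of returning on the first one, and
// records a single aggregated Applied condition at the end.
func applyAll(manifests []string, apply func(string) error, conditions *[]metav1.Condition) error {
    var errs []error
    succeeded := 0
    for _, m := range manifests {
        if err := apply(m); err != nil {
            errs = append(errs, err)
            continue
        }
        succeeded++
    }

    cond := metav1.Condition{
        Type:               "Applied",
        Status:             metav1.ConditionTrue,
        Reason:             "AppliedSuccessful",
        Message:            "Manifest has been successfully applied",
        LastTransitionTime: metav1.Now(),
    }
    if len(errs) > 0 {
        cond.Status = metav1.ConditionFalse
        cond.Reason = "AppliedFailed"
        cond.Message = fmt.Sprintf("Failed to apply all manifests (%v/%v): %v",
            succeeded, len(manifests), utilerrors.NewAggregate(errs).Error())
    }

    // SetStatusCondition replaces an existing condition of the same Type in place
    // (keeping LastTransitionTime when the status is unchanged) rather than
    // appending a duplicate, which is why the diff switches away from append.
    meta.SetStatusCondition(conditions, cond)

    return utilerrors.NewAggregate(errs) // nil when errs is empty
}

func main() {
    var conditions []metav1.Condition
    apply := func(m string) error {
        if m == "conflicting" {
            return fmt.Errorf("resource %q already exists in member cluster", m)
        }
        return nil
    }
    err := applyAll([]string{"ok", "conflicting"}, apply, &conditions)
    fmt.Println(err)                  // aggregated per-manifest errors
    fmt.Println(conditions[0].Reason) // AppliedFailed
}

Aggregating lets one bad manifest surface in the Work status without blocking the remaining manifests, while SetStatusCondition keeps exactly one Applied condition per Work.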
@@ -4,10 +4,11 @@ import (
     "context"
     "fmt"

-    "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/client-go/tools/record"
     "k8s.io/klog/v2"
     controllerruntime "sigs.k8s.io/controller-runtime"
@@ -48,7 +49,7 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques
     work := &workv1alpha1.Work{}
     if err := c.Client.Get(context.TODO(), req.NamespacedName, work); err != nil {
         // The resource may no longer exist, in which case we stop processing.
-        if errors.IsNotFound(err) {
+        if apierrors.IsNotFound(err) {
             return controllerruntime.Result{}, nil
         }

@@ -154,31 +155,48 @@ func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, work *workv1alpha
         return err
     }

+    var errs []error
+    syncSucceedNum := 0
     for _, manifest := range work.Spec.Workload.Manifests {
         workload := &unstructured.Unstructured{}
         err := workload.UnmarshalJSON(manifest.Raw)
         if err != nil {
             klog.Errorf("failed to unmarshal workload, error is: %v", err)
-            return err
+            errs = append(errs, err)
+            continue
         }

         applied := helper.IsResourceApplied(&work.Status)
         if applied {
             err = c.tryUpdateWorkload(cluster, workload, clusterDynamicClient)
             if err != nil {
-                klog.Errorf("Failed to update resource in the given member cluster %s, err is %v", cluster.Name, err)
-                return err
+                klog.Errorf("Failed to update resource(%v/%v) in the given member cluster %s, err is %v", workload.GetNamespace(), workload.GetName(), cluster.Name, err)
+                errs = append(errs, err)
+                continue
             }
         } else {
             err = c.tryCreateWorkload(cluster, workload)
             if err != nil {
-                klog.Errorf("Failed to create resource in the given member cluster %s, err is %v", cluster.Name, err)
-                return err
+                klog.Errorf("Failed to create resource(%v/%v) in the given member cluster %s, err is %v", workload.GetNamespace(), workload.GetName(), cluster.Name, err)
+                errs = append(errs, err)
+                continue
             }
         }
+        syncSucceedNum++
     }

-    err = c.updateAppliedCondition(work)
+    if len(errs) > 0 {
+        total := len(work.Spec.Workload.Manifests)
+        message := fmt.Sprintf("Failed to apply all manifests (%v/%v): %v", syncSucceedNum, total, errors.NewAggregate(errs).Error())
+        err = c.updateAppliedCondition(work, metav1.ConditionFalse, "AppliedFailed", message)
+        if err != nil {
+            klog.Errorf("Failed to update applied status for given work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
+            errs = append(errs, err)
+        }
+        return errors.NewAggregate(errs)
+    }
+
+    err = c.updateAppliedCondition(work, metav1.ConditionTrue, "AppliedSuccessful", "Manifest has been successfully applied")
     if err != nil {
         klog.Errorf("Failed to update applied status for given work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
         return err
@@ -197,7 +215,7 @@ func (c *Controller) tryUpdateWorkload(cluster *v1alpha1.Cluster, workload *unst

     clusterObj, err := clusterDynamicClient.DynamicClientSet.Resource(dynamicResource).Namespace(workload.GetNamespace()).Get(context.TODO(), workload.GetName(), metav1.GetOptions{})
     if err != nil {
-        if !errors.IsNotFound(err) {
+        if !apierrors.IsNotFound(err) {
             klog.Errorf("Failed to get resource %v from member cluster, err is %v ", workload.GetName(), err)
             return err
         }
@@ -223,15 +241,16 @@ func (c *Controller) tryCreateWorkload(cluster *v1alpha1.Cluster, workload *unst
 }

 // updateAppliedCondition update the Applied condition for the given Work
-func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work) error {
+func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work, status metav1.ConditionStatus, reason, message string) error {
     newWorkAppliedCondition := metav1.Condition{
         Type:               workv1alpha1.WorkApplied,
-        Status:             metav1.ConditionTrue,
-        Reason:             "AppliedSuccessful",
-        Message:            "Manifest has been successfully applied",
+        Status:             status,
+        Reason:             reason,
+        Message:            message,
         LastTransitionTime: metav1.Now(),
     }
-    work.Status.Conditions = append(work.Status.Conditions, newWorkAppliedCondition)
+
+    meta.SetStatusCondition(&work.Status.Conditions, newWorkAppliedCondition)
     err := c.Client.Status().Update(context.TODO(), work)
     return err
 }
@@ -66,11 +66,10 @@ func (o *objectWatcherImpl) Create(cluster *v1alpha1.Cluster, desireObj *unstruc
         return err
     }

+    // Karmada will abort creating the resource due to an existing resource in the member cluster, because we don't want to force update or delete a resource created by users.
+    // Users should resolve the conflict themselves.
     clusterObj, err := dynamicClusterClient.DynamicClientSet.Resource(gvr).Namespace(desireObj.GetNamespace()).Create(context.TODO(), desireObj, metav1.CreateOptions{})
     if err != nil {
-        if apierrors.IsAlreadyExists(err) {
-            return nil
-        }
         klog.Errorf("Failed to create resource %v, err is %v ", desireObj.GetName(), err)
         return err
     }