Merge pull request #579 from pigletfly/fix-rb-status
Retry conflict errors when aggregating binding status
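A status update can fail with a "Conflict" error when another writer bumps the binding's resourceVersion between the read and the write. This change wraps the aggregated-status update in client-go's retry.RetryOnConflict, so the helper re-reads the object and retries the write a few times before giving up. A minimal sketch of the pattern (assuming a controller-runtime client c and a precomputed aggregatedStatuses; see the actual hunks below):

	// Retry on "Conflict": re-fetch the binding on each attempt so the update
	// carries the latest resourceVersion, then reapply the aggregated status.
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		if err := c.Get(context.TODO(), client.ObjectKey{Namespace: binding.Namespace, Name: binding.Name}, binding); err != nil {
			return err
		}
		binding.Status.AggregatedStatus = aggregatedStatuses
		return c.Status().Update(context.TODO(), binding)
	})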
Commit 2b615642ca
@@ -105,6 +105,7 @@ func (c *ResourceBindingController) syncBinding(binding *workv1alpha1.ResourceBi
 			binding.GetNamespace(), binding.GetName(), err)
 		return controllerruntime.Result{Requeue: true}, err
 	}
+	klog.V(4).Infof("Update resourceBinding(%s/%s) with AggregatedStatus successfully.", binding.Namespace, binding.Name)
 
 	return controllerruntime.Result{}, nil
 }
@@ -99,6 +99,7 @@ func (c *ClusterResourceBindingController) syncBinding(binding *workv1alpha1.Clu
 		klog.Errorf("Failed to aggregate workStatuses to clusterResourceBinding(%s). Error: %v.", binding.GetName(), err)
 		return controllerruntime.Result{Requeue: true}, err
 	}
+	klog.V(4).Infof("Update clusterResourceBinding(%s) with AggregatedStatus successfully.", binding.Name)
 
 	return controllerruntime.Result{}, nil
 }
@@ -10,6 +10,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -33,16 +34,13 @@ func AggregateResourceBindingWorkStatus(c client.Client, binding *workv1alpha1.R
 			binding.Namespace, binding.Name)
 		return nil
 	}
-
-	binding.Status.AggregatedStatus = aggregatedStatuses
-	err = c.Status().Update(context.TODO(), binding)
-	if err != nil {
-		klog.Errorf("Failed update resourceBinding(%s/%s). Error: %v.", binding.Namespace, binding.Name, err)
-		return err
-	}
-	klog.Infof("Update resourceBinding(%s/%s) with AggregatedStatus successfully.", binding.Namespace, binding.Name)
-
-	return nil
+	return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
+		if err = c.Get(context.TODO(), client.ObjectKey{Namespace: binding.Namespace, Name: binding.Name}, binding); err != nil {
+			return err
+		}
+		binding.Status.AggregatedStatus = aggregatedStatuses
+		return c.Status().Update(context.TODO(), binding)
+	})
 }
 
 // AggregateClusterResourceBindingWorkStatus will collect all work statuses with current ClusterResourceBinding objects,
@@ -60,15 +58,13 @@ func AggregateClusterResourceBindingWorkStatus(c client.Client, binding *workv1a
 		return nil
 	}
 
-	binding.Status.AggregatedStatus = aggregatedStatuses
-	err = c.Status().Update(context.TODO(), binding)
-	if err != nil {
-		klog.Errorf("Failed update clusterResourceBinding(%s). Error: %v.", binding.Name, err)
-		return err
-	}
-	klog.Infof("Update clusterResourceBinding(%s) with AggregatedStatus successfully.", binding.Name)
-
-	return nil
+	return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
+		if err = c.Get(context.TODO(), client.ObjectKey{Name: binding.Name}, binding); err != nil {
+			return err
+		}
+		binding.Status.AggregatedStatus = aggregatedStatuses
+		return c.Status().Update(context.TODO(), binding)
+	})
 }
 
 // assemble workStatuses from workList which list by selector and match with workload.
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- caesarxuchao
@@ -0,0 +1,105 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package retry
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+// DefaultRetry is the recommended retry for a conflict where multiple clients
+// are making changes to the same resource.
+var DefaultRetry = wait.Backoff{
+	Steps:    5,
+	Duration: 10 * time.Millisecond,
+	Factor:   1.0,
+	Jitter:   0.1,
+}
+
+// DefaultBackoff is the recommended backoff for a conflict where a client
+// may be attempting to make an unrelated modification to a resource under
+// active management by one or more controllers.
+var DefaultBackoff = wait.Backoff{
+	Steps:    4,
+	Duration: 10 * time.Millisecond,
+	Factor:   5.0,
+	Jitter:   0.1,
+}
+
+// OnError allows the caller to retry fn in case the error returned by fn is retriable
+// according to the provided function. backoff defines the maximum retries and the wait
+// interval between two retries.
+func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error {
+	var lastErr error
+	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := fn()
+		switch {
+		case err == nil:
+			return true, nil
+		case retriable(err):
+			lastErr = err
+			return false, nil
+		default:
+			return false, err
+		}
+	})
+	if err == wait.ErrWaitTimeout {
+		err = lastErr
+	}
+	return err
+}
+
+// RetryOnConflict is used to make an update to a resource when you have to worry about
+// conflicts caused by other code making unrelated updates to the resource at the same
+// time. fn should fetch the resource to be modified, make appropriate changes to it, try
+// to update it, and return (unmodified) the error from the update function. On a
+// successful update, RetryOnConflict will return nil. If the update function returns a
+// "Conflict" error, RetryOnConflict will wait some amount of time as described by
+// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times
+// and gives up, RetryOnConflict will return an error to the caller.
+//
+//     err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+//         // Fetch the resource here; you need to refetch it on every try, since
+//         // if you got a conflict on the last update attempt then you need to get
+//         // the current version before making your own changes.
+//         pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{})
+//         if err != nil {
+//             return err
+//         }
+//
+//         // Make whatever updates to the resource are needed
+//         pod.Status.Phase = v1.PodFailed
+//
+//         // Try to update
+//         _, err = c.Pods("mynamespace").UpdateStatus(pod)
+//         // You have to return err itself here (not wrapped inside another error)
+//         // so that RetryOnConflict can identify it correctly.
+//         return err
+//     })
+//     if err != nil {
+//         // May be conflict if max retries were hit, or may be something unrelated
+//         // like permissions or a network error
+//         return err
+//     }
+//     ...
+//
+// TODO: Make Backoff an interface?
func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
	return OnError(backoff, errors.IsConflict, fn)
}
@@ -816,6 +816,7 @@ k8s.io/client-go/util/connrotation
 k8s.io/client-go/util/flowcontrol
 k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
+k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
 # k8s.io/code-generator v0.21.3 => k8s.io/code-generator v0.21.3
 ## explicit