update FullyApplied condition
Signed-off-by: lihanbo <lihanbo2@huawei.com>
parent 763a50ef3c
commit c07b6ce2dd
@@ -8,12 +8,10 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -144,17 +142,6 @@ func (c *ResourceBindingController) syncBinding(binding *workv1alpha2.ResourceBinding
 		return controllerruntime.Result{Requeue: true}, errors.NewAggregate(errs)
 	}
 
-	fullyAppliedCondition := meta.FindStatusCondition(binding.Status.Conditions, workv1alpha2.FullyApplied)
-	if fullyAppliedCondition == nil {
-		if len(binding.Spec.Clusters) == len(binding.Status.AggregatedStatus) && areWorksFullyApplied(binding.Status.AggregatedStatus) {
-			err := c.updateFullyAppliedCondition(binding)
-			if err != nil {
-				klog.Errorf("Failed to update FullyApplied status for given resourceBinding(%s/%s). Error: %v.", binding.Namespace, binding.Name, err)
-				return controllerruntime.Result{Requeue: true}, err
-			}
-		}
-	}
-
 	return controllerruntime.Result{}, nil
 }
@@ -252,21 +239,3 @@ func (c *ResourceBindingController) newReplicaSchedulingPolicyFunc() handler.MapFunc
 		return requests
 	}
 }
-
-// updateFullyAppliedCondition update the FullyApplied condition for the given ResourceBinding
-func (c *ResourceBindingController) updateFullyAppliedCondition(binding *workv1alpha2.ResourceBinding) error {
-	newBindingFullyAppliedCondition := metav1.Condition{
-		Type:    workv1alpha2.FullyApplied,
-		Status:  metav1.ConditionTrue,
-		Reason:  FullyAppliedSuccessReason,
-		Message: FullyAppliedSuccessMessage,
-	}
-
-	return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
-		if err = c.Get(context.TODO(), client.ObjectKey{Namespace: binding.Namespace, Name: binding.Name}, binding); err != nil {
-			return err
-		}
-		meta.SetStatusCondition(&binding.Status.Conditions, newBindingFullyAppliedCondition)
-		return c.Status().Update(context.TODO(), binding)
-	})
-}
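Note: the hunks below repeat the same removal for the ClusterResourceBindingController; the FullyApplied condition is now set while aggregating work statuses in the helper code (last file in this diff). For context, a minimal sketch of the conflict-retry status write that the deleted updateFullyAppliedCondition performed and that the helper keeps. setStatusCondition and conditionsOf are illustrative names, not Karmada APIs; the apimachinery and controller-runtime calls are real.

package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// setStatusCondition re-reads the object on every attempt so the write is
// based on the latest resourceVersion, letting RetryOnConflict absorb 409s.
func setStatusCondition(ctx context.Context, c client.Client, obj client.Object, conditionsOf func() *[]metav1.Condition, cond metav1.Condition) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Fetch the latest copy before each attempt.
		if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
			return err
		}
		meta.SetStatusCondition(conditionsOf(), cond) // upsert keyed by condition Type
		return c.Status().Update(ctx, obj)
	})
}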
@@ -8,12 +8,10 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -139,17 +137,6 @@ func (c *ClusterResourceBindingController) syncBinding(binding *workv1alpha2.ClusterResourceBinding
 		return controllerruntime.Result{Requeue: true}, errors.NewAggregate(errs)
 	}
 
-	fullyAppliedCondition := meta.FindStatusCondition(binding.Status.Conditions, workv1alpha2.FullyApplied)
-	if fullyAppliedCondition == nil {
-		if len(binding.Spec.Clusters) == len(binding.Status.AggregatedStatus) && areWorksFullyApplied(binding.Status.AggregatedStatus) {
-			err := c.updateFullyAppliedCondition(binding)
-			if err != nil {
-				klog.Errorf("Failed to update FullyApplied status for given clusterResourceBinding(%s), Error: %v", binding.Name, err)
-				return controllerruntime.Result{Requeue: true}, err
-			}
-		}
-	}
-
 	return controllerruntime.Result{}, nil
 }
@@ -245,21 +232,3 @@ func (c *ClusterResourceBindingController) newReplicaSchedulingPolicyFunc() handler.MapFunc
 		return requests
 	}
 }
-
-// updateFullyAppliedCondition update the FullyApplied condition for the given ClusterResourceBinding
-func (c *ClusterResourceBindingController) updateFullyAppliedCondition(binding *workv1alpha2.ClusterResourceBinding) error {
-	newBindingFullyAppliedCondition := metav1.Condition{
-		Type:    workv1alpha2.FullyApplied,
-		Status:  metav1.ConditionTrue,
-		Reason:  FullyAppliedSuccessReason,
-		Message: FullyAppliedSuccessMessage,
-	}
-
-	return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
-		if err = c.Get(context.TODO(), client.ObjectKey{Namespace: binding.Namespace, Name: binding.Name}, binding); err != nil {
-			return err
-		}
-		meta.SetStatusCondition(&binding.Status.Conditions, newBindingFullyAppliedCondition)
-		return c.Status().Update(context.TODO(), binding)
-	})
-}
@@ -24,13 +24,6 @@ import (
 	"github.com/karmada-io/karmada/pkg/util/overridemanager"
 )
 
-const (
-	// FullyAppliedSuccessReason defines the name for the FullyAppliedSuccess condition.
-	FullyAppliedSuccessReason = "FullyAppliedSuccess"
-	// FullyAppliedSuccessMessage defines the message for the FullyAppliedSuccess condition.
-	FullyAppliedSuccessMessage = "All works have been successfully applied"
-)
-
 var workPredicateFn = builder.WithPredicates(predicate.Funcs{
 	CreateFunc: func(e event.CreateEvent) bool {
 		return false
@@ -364,13 +357,3 @@ func calculateReplicas(c client.Client, policy *policyv1alpha1.ReplicaSchedulingPolicy
 
 	return desireReplicaInfos, nil
 }
-
-// areWorksFullyApplied checks if all works are applied for a Binding
-func areWorksFullyApplied(aggregatedStatuses []workv1alpha2.AggregatedStatusItem) bool {
-	for _, aggregatedSatusItem := range aggregatedStatuses {
-		if !aggregatedSatusItem.Applied {
-			return false
-		}
-	}
-	return true
-}
@@ -16,14 +16,24 @@ import (
 
 	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/names"
 )
 
+const (
+	// FullyAppliedSuccessReason defines the success reason for the FullyApplied condition.
+	FullyAppliedSuccessReason = "FullyAppliedSuccess"
+	// FullyAppliedFailedReason defines the failure reason for the FullyApplied condition.
+	FullyAppliedFailedReason = "FullyAppliedFailed"
+	// FullyAppliedSuccessMessage defines the success message for the FullyApplied condition.
+	FullyAppliedSuccessMessage = "All works have been successfully applied"
+	// FullyAppliedFailedMessage defines the failure message for the FullyApplied condition.
+	FullyAppliedFailedMessage = "Failed to apply all works, see status.aggregatedStatus for details"
+)
+
 // AggregateResourceBindingWorkStatus will collect all work statuses with current ResourceBinding objects,
 // then aggregate status info to current ResourceBinding status.
 func AggregateResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.ResourceBinding, workload *unstructured.Unstructured) error {
-	var targetWorks []workv1alpha1.Work
-
 	workList, err := GetWorksByLabelSelector(c, labels.SelectorFromSet(
 		labels.Set{
 			workv1alpha2.ResourceBindingReferenceKey: names.GenerateBindingReferenceKey(binding.Namespace, binding.Name),
@@ -33,23 +43,27 @@ func AggregateResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.ResourceBinding
 		klog.Errorf("Failed to get works by ResourceBinding(%s/%s): %v", binding.Namespace, binding.Name, err)
 		return err
 	}
-	targetWorks = append(targetWorks, workList.Items...)
 
-	aggregatedStatuses, err := assembleWorkStatus(targetWorks, workload)
+	aggregatedStatuses, err := assembleWorkStatus(workList.Items, workload)
 	if err != nil {
 		return err
 	}
 
-	if reflect.DeepEqual(binding.Status.AggregatedStatus, aggregatedStatuses) {
+	currentBindingStatus := binding.Status.DeepCopy()
+	currentBindingStatus.AggregatedStatus = aggregatedStatuses
+	meta.SetStatusCondition(&currentBindingStatus.Conditions, generateFullyAppliedCondition(binding.Spec.Clusters, aggregatedStatuses))
+
+	if reflect.DeepEqual(binding.Status, currentBindingStatus) {
 		klog.V(4).Infof("New aggregatedStatuses are equal with old resourceBinding(%s/%s) AggregatedStatus, no update required.",
 			binding.Namespace, binding.Name)
 		return nil
 	}
 
 	return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
 		if err = c.Get(context.TODO(), client.ObjectKey{Namespace: binding.Namespace, Name: binding.Name}, binding); err != nil {
 			return err
 		}
-		binding.Status.AggregatedStatus = aggregatedStatuses
+		binding.Status = *currentBindingStatus
 		return c.Status().Update(context.TODO(), binding)
 	})
 }
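The hunk above changes the aggregation flow: build the desired status on a deep copy, stamp the FullyApplied condition, and write only when the result differs from what is already stored. A minimal sketch of that compare-before-update pattern; Status is an illustrative stand-in for the binding status type, while the apimachinery calls are real.

package sketch

import (
	"reflect"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Status stands in for ResourceBindingStatus: conditions plus an
// aggregated-status field.
type Status struct {
	Conditions       []metav1.Condition
	AggregatedStatus []string
}

// desiredStatus builds the new status on a copy and reports whether an
// API write is needed at all.
func desiredStatus(current Status, aggregated []string, cond metav1.Condition) (Status, bool) {
	desired := Status{
		Conditions:       append([]metav1.Condition(nil), current.Conditions...), // cheap copy for the sketch
		AggregatedStatus: aggregated,
	}
	// SetStatusCondition upserts by Type and keeps the existing
	// LastTransitionTime when the condition status is unchanged, so a
	// no-op pass compares deep-equal below and skips the update.
	meta.SetStatusCondition(&desired.Conditions, cond)
	return desired, !reflect.DeepEqual(current, desired)
}

That LastTransitionTime behavior of meta.SetStatusCondition is what makes the reflect.DeepEqual no-op check in the diff reliable.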
@@ -57,8 +71,6 @@ func AggregateResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.ResourceBinding
 // AggregateClusterResourceBindingWorkStatus will collect all work statuses with current ClusterResourceBinding objects,
 // then aggregate status info to current ClusterResourceBinding status.
 func AggregateClusterResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.ClusterResourceBinding, workload *unstructured.Unstructured) error {
-	var targetWorks []workv1alpha1.Work
-
 	workList, err := GetWorksByLabelSelector(c, labels.SelectorFromSet(
 		labels.Set{
 			workv1alpha2.ClusterResourceBindingReferenceKey: names.GenerateBindingReferenceKey("", binding.Name),
@@ -68,14 +80,17 @@ func AggregateClusterResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.ClusterResourceBinding
 		klog.Errorf("Failed to get works by ClusterResourceBinding(%s): %v", binding.Name, err)
 		return err
 	}
-	targetWorks = append(targetWorks, workList.Items...)
 
-	aggregatedStatuses, err := assembleWorkStatus(targetWorks, workload)
+	aggregatedStatuses, err := assembleWorkStatus(workList.Items, workload)
 	if err != nil {
 		return err
 	}
 
-	if reflect.DeepEqual(binding.Status.AggregatedStatus, aggregatedStatuses) {
+	currentBindingStatus := binding.Status.DeepCopy()
+	currentBindingStatus.AggregatedStatus = aggregatedStatuses
+	meta.SetStatusCondition(&currentBindingStatus.Conditions, generateFullyAppliedCondition(binding.Spec.Clusters, aggregatedStatuses))
+
+	if reflect.DeepEqual(binding.Status, currentBindingStatus) {
 		klog.Infof("New aggregatedStatuses are equal with old clusterResourceBinding(%s) AggregatedStatus, no update required.", binding.Name)
 		return nil
 	}
@@ -84,15 +99,26 @@ func AggregateClusterResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.ClusterResourceBinding
 		if err = c.Get(context.TODO(), client.ObjectKey{Name: binding.Name}, binding); err != nil {
 			return err
 		}
-		binding.Status.AggregatedStatus = aggregatedStatuses
+		binding.Status = *currentBindingStatus
 		return c.Status().Update(context.TODO(), binding)
 	})
 }
 
+func generateFullyAppliedCondition(targetClusters []workv1alpha2.TargetCluster, aggregatedStatuses []workv1alpha2.AggregatedStatusItem) metav1.Condition {
+	if len(targetClusters) == len(aggregatedStatuses) && areWorksFullyApplied(aggregatedStatuses) {
+		return util.NewCondition(workv1alpha2.FullyApplied, FullyAppliedSuccessReason, FullyAppliedSuccessMessage, metav1.ConditionTrue)
+	}
+	return util.NewCondition(workv1alpha2.FullyApplied, FullyAppliedFailedReason, FullyAppliedFailedMessage, metav1.ConditionFalse)
+}
+
 // assemble workStatuses from workList which list by selector and match with workload.
 func assembleWorkStatus(works []workv1alpha1.Work, workload *unstructured.Unstructured) ([]workv1alpha2.AggregatedStatusItem, error) {
 	statuses := make([]workv1alpha2.AggregatedStatusItem, 0)
 	for _, work := range works {
+		if !work.DeletionTimestamp.IsZero() {
+			continue
+		}
+
 		identifierIndex, err := GetManifestIndex(work.Spec.Workload.Manifests, workload)
 		if err != nil {
 			klog.Errorf("Failed to get manifestIndex of workload in work.Spec.Workload.Manifests. Error: %v.", err)
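The hunk above adds generateFullyAppliedCondition: FullyApplied is True only when every scheduled target cluster has reported an aggregated status and all of them are applied; anything less yields False with the failure reason. A minimal sketch of that predicate with stand-in types (the real helper wraps the verdict in util.NewCondition):

package sketch

// TargetCluster and AggregatedStatusItem are illustrative stand-ins for the
// workv1alpha2 types; only the fields used by the predicate are kept.
type TargetCluster struct{ Name string }

type AggregatedStatusItem struct {
	ClusterName string
	Applied     bool
}

// fullyApplied mirrors the predicate: every scheduled cluster must have
// reported a status, and every reported status must be Applied.
func fullyApplied(targets []TargetCluster, statuses []AggregatedStatusItem) bool {
	if len(targets) != len(statuses) {
		return false // some cluster has not reported yet
	}
	for _, s := range statuses {
		if !s.Applied {
			return false
		}
	}
	return true
}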
@@ -127,7 +153,7 @@ func assembleWorkStatus(works []workv1alpha1.Work, workload *unstructured.Unstructured
 				AppliedMessage: appliedMsg,
 			}
 			statuses = append(statuses, aggregatedStatus)
-			return statuses, nil
+			continue
 		}
 
 		for _, manifestStatus := range work.Status.ManifestStatuses {
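Note the behavior change hidden in the hunk above: the old loop returned statuses as soon as it appended an item for a work that was not applied, so any works later in the list never contributed an AggregatedStatusItem. Replacing the early return with continue lets every work report its status, which the new FullyApplied condition depends on.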
@@ -187,6 +213,16 @@ func equalIdentifier(targetIdentifier *workv1alpha1.ResourceIdentifier, ordinal
 	return false, nil
 }
 
+// areWorksFullyApplied checks if all works are applied for a Binding
+func areWorksFullyApplied(aggregatedStatuses []workv1alpha2.AggregatedStatusItem) bool {
+	for _, aggregatedSatusItem := range aggregatedStatuses {
+		if !aggregatedSatusItem.Applied {
+			return false
+		}
+	}
+	return true
+}
+
 // IsResourceApplied checks whether resource has been dispatched to member cluster or not
 func IsResourceApplied(workStatus *workv1alpha1.WorkStatus) bool {
 	return meta.IsStatusConditionTrue(workStatus.Conditions, workv1alpha1.WorkApplied)
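areWorksFullyApplied now lives beside IsResourceApplied, which answers the per-work question with the standard apimachinery condition helper. A small usage sketch of the same helper style applied to the new binding-level condition; the "FullyApplied" literal stands in for workv1alpha2.FullyApplied:

package sketch

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isFullyApplied reports whether a FullyApplied condition is present and True,
// the same check style IsResourceApplied uses for works.
func isFullyApplied(conditions []metav1.Condition) bool {
	return meta.IsStatusConditionTrue(conditions, "FullyApplied")
}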