add scheduled condition for binding

Signed-off-by: dddddai <dddwq@foxmail.com>
This commit is contained in:
dddddai 2021-10-18 11:43:40 +08:00
parent a9250c5977
commit eaea5c608b
5 changed files with 72 additions and 16 deletions

View File

@ -135,6 +135,12 @@ type AggregatedStatusItem struct {
AppliedMessage string `json:"appliedMessage,omitempty"`
}
// Condition types recorded in the status of ResourceBinding and ClusterResourceBinding.
const (
	// Scheduled represents the condition that the ResourceBinding or ClusterResourceBinding has been scheduled.
	// The scheduler sets this condition to True after it has assigned target clusters.
	Scheduled string = "Scheduled"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceBindingList contains a list of ResourceBinding.

View File

@ -65,7 +65,7 @@ func (c *ResourceBindingController) Reconcile(ctx context.Context, req controlle
return c.removeFinalizer(binding)
}
isReady := helper.IsBindingReady(binding.Spec.Clusters)
isReady := helper.IsBindingReady(&binding.Status)
if !isReady {
klog.Infof("ResourceBinding(%s/%s) is not ready to sync", binding.GetNamespace(), binding.GetName())
return controllerruntime.Result{}, nil

View File

@ -65,7 +65,7 @@ func (c *ClusterResourceBindingController) Reconcile(ctx context.Context, req co
return c.removeFinalizer(clusterResourceBinding)
}
isReady := helper.IsBindingReady(clusterResourceBinding.Spec.Clusters)
isReady := helper.IsBindingReady(&clusterResourceBinding.Status)
if !isReady {
klog.Infof("ClusterResourceBinding %s is not ready to sync", clusterResourceBinding.GetName())
return controllerruntime.Result{}, nil

View File

@ -61,6 +61,12 @@ const (
Unknown ScheduleType = "Unknown"
)
const (
	// scheduleSuccessReason is the machine-readable Reason set on the Scheduled condition after a successful scheduling.
	scheduleSuccessReason = "BindingScheduled"
	// scheduleSuccessMessage is the human-readable Message set on the Scheduled condition after a successful scheduling.
	scheduleSuccessMessage = "the binding has been scheduled"
)
// Failover indicates if the scheduler should perform re-scheduling in case of cluster failure.
// TODO(RainbowMango): Remove the temporary solution by introducing feature flag
var Failover bool
@ -472,11 +478,11 @@ func (s *Scheduler) scheduleResourceBinding(resourceBinding *workv1alpha2.Resour
}
binding.Annotations[util.PolicyPlacementAnnotation] = placementStr
_, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(binding.Namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
binding, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(binding.Namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
return s.updateBindingStatusIfNeeded(binding)
}
func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv1alpha2.ClusterResourceBinding, policy *policyv1alpha1.ClusterPropagationPolicy) (err error) {
@ -501,11 +507,11 @@ func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv
}
binding.Annotations[util.PolicyPlacementAnnotation] = string(placement)
_, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), binding, metav1.UpdateOptions{})
binding, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), binding, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
return s.updateClusterBindingStatusIfNeeded(binding)
}
func (s *Scheduler) handleErr(err error, key interface{}) {
@ -674,11 +680,11 @@ func (s *Scheduler) rescheduleClusterResourceBinding(clusterResourceBinding *wor
clusterResourceBinding.Spec.Clusters = reScheduleResult.SuggestedClusters
klog.Infof("The final binding.Spec.Cluster values are: %v\n", clusterResourceBinding.Spec.Clusters)
_, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), clusterResourceBinding, metav1.UpdateOptions{})
clusterResourceBinding, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), clusterResourceBinding, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
return s.updateClusterBindingStatusIfNeeded(clusterResourceBinding)
}
func (s *Scheduler) rescheduleResourceBinding(resourceBinding *workv1alpha2.ResourceBinding) error {
@ -699,11 +705,11 @@ func (s *Scheduler) rescheduleResourceBinding(resourceBinding *workv1alpha2.Reso
resourceBinding.Spec.Clusters = reScheduleResult.SuggestedClusters
klog.Infof("The final binding.Spec.Cluster values are: %v\n", resourceBinding.Spec.Clusters)
_, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(resourceBinding.Namespace).Update(context.TODO(), resourceBinding, metav1.UpdateOptions{})
resourceBinding, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(resourceBinding.Namespace).Update(context.TODO(), resourceBinding, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
return s.updateBindingStatusIfNeeded(resourceBinding)
}
func (s *Scheduler) scaleScheduleOne(key string) (err error) {
@ -761,11 +767,11 @@ func (s *Scheduler) scaleScheduleResourceBinding(resourceBinding *workv1alpha2.R
}
binding.Annotations[util.PolicyPlacementAnnotation] = placementStr
_, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(binding.Namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
binding, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(binding.Namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
return s.updateBindingStatusIfNeeded(binding)
}
func (s *Scheduler) scaleScheduleClusterResourceBinding(clusterResourceBinding *workv1alpha2.ClusterResourceBinding,
@ -792,11 +798,11 @@ func (s *Scheduler) scaleScheduleClusterResourceBinding(clusterResourceBinding *
}
binding.Annotations[util.PolicyPlacementAnnotation] = string(placement)
_, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), binding, metav1.UpdateOptions{})
binding, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), binding, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
return s.updateClusterBindingStatusIfNeeded(binding)
}
func (s *Scheduler) getTypeFromResourceBindings(ns, name string) ScheduleType {
@ -903,3 +909,47 @@ func (s *Scheduler) establishEstimatorConnections() {
}
}
}
// updateBindingStatusIfNeeded sets the Scheduled condition of the given
// ResourceBinding to True and persists it via UpdateStatus. If an equivalent
// condition is already present, the update is skipped to avoid a no-op API call.
func (s *Scheduler) updateBindingStatusIfNeeded(rb *workv1alpha2.ResourceBinding) error {
	oldScheduledCondition := meta.FindStatusCondition(rb.Status.Conditions, workv1alpha2.Scheduled)
	newScheduledCondition := metav1.Condition{
		Type:    workv1alpha2.Scheduled,
		Status:  metav1.ConditionTrue,
		Reason:  scheduleSuccessReason,
		Message: scheduleSuccessMessage,
	}
	// BUGFIX: FindStatusCondition returns *metav1.Condition, so comparing it
	// with a metav1.Condition value via DeepEqual was always false (mismatched
	// types; LastTransitionTime differs as well), meaning the short-circuit
	// never fired and every schedule issued a redundant UpdateStatus call.
	// Compare a dereferenced copy with the volatile fields normalized instead.
	if oldScheduledCondition != nil {
		curr := *oldScheduledCondition
		curr.LastTransitionTime = newScheduledCondition.LastTransitionTime
		curr.ObservedGeneration = newScheduledCondition.ObservedGeneration
		if equality.Semantic.DeepEqual(curr, newScheduledCondition) {
			// Condition already up to date; nothing to persist.
			return nil
		}
	}
	meta.SetStatusCondition(&rb.Status.Conditions, newScheduledCondition)
	_, err := s.KarmadaClient.WorkV1alpha2().ResourceBindings(rb.Namespace).UpdateStatus(context.TODO(), rb, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("Failed to update ResourceBinding status(%s/%s): %v", rb.Namespace, rb.Name, err)
		return err
	}
	return nil
}
// updateClusterBindingStatusIfNeeded sets the Scheduled condition of the given
// ClusterResourceBinding to True and persists it via UpdateStatus. If an
// equivalent condition is already present, the update is skipped to avoid a
// no-op API call.
func (s *Scheduler) updateClusterBindingStatusIfNeeded(crb *workv1alpha2.ClusterResourceBinding) error {
	oldScheduledCondition := meta.FindStatusCondition(crb.Status.Conditions, workv1alpha2.Scheduled)
	newScheduledCondition := metav1.Condition{
		Type:    workv1alpha2.Scheduled,
		Status:  metav1.ConditionTrue,
		Reason:  scheduleSuccessReason,
		Message: scheduleSuccessMessage,
	}
	// BUGFIX: FindStatusCondition returns *metav1.Condition, so comparing it
	// with a metav1.Condition value via DeepEqual was always false (mismatched
	// types; LastTransitionTime differs as well), meaning the short-circuit
	// never fired and every schedule issued a redundant UpdateStatus call.
	// Compare a dereferenced copy with the volatile fields normalized instead.
	if oldScheduledCondition != nil {
		curr := *oldScheduledCondition
		curr.LastTransitionTime = newScheduledCondition.LastTransitionTime
		curr.ObservedGeneration = newScheduledCondition.ObservedGeneration
		if equality.Semantic.DeepEqual(curr, newScheduledCondition) {
			// Condition already up to date; nothing to persist.
			return nil
		}
	}
	meta.SetStatusCondition(&crb.Status.Conditions, newScheduledCondition)
	_, err := s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().UpdateStatus(context.TODO(), crb, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("Failed to update ClusterResourceBinding status(%s): %v", crb.Name, err)
		return err
	}
	return nil
}

View File

@ -55,8 +55,8 @@ func SortClusterByWeight(m map[string]int64) ClusterWeightInfoList {
}
// IsBindingReady will check if resourceBinding/clusterResourceBinding is ready to build Work.
func IsBindingReady(targetClusters []workv1alpha2.TargetCluster) bool {
return len(targetClusters) != 0
func IsBindingReady(status *workv1alpha2.ResourceBindingStatus) bool {
return meta.IsStatusConditionTrue(status.Conditions, workv1alpha2.Scheduled)
}
// HasScheduledReplica checks if the scheduler has assigned replicas for each cluster.