Merge pull request #503 from qianjun1993/scheduler

scheduler with Duplicated ReplicaSchedulingStrategy
karmada-bot 2021-07-08 10:00:25 +08:00 committed by GitHub
commit 6db6baf2ac
3 changed files with 43 additions and 28 deletions
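
Review note: the core change is that ScheduleResult.SuggestedClusters switches from []string (bare cluster names) to []workv1alpha1.TargetCluster, which pairs each selected cluster with a replica count. Under the Duplicated replica scheduling strategy, every selected cluster receives the workload's full replica count. A minimal sketch of the new result shape, using a local stand-in for the API type and hypothetical cluster names:

package main

import "fmt"

// TargetCluster is a local stand-in for workv1alpha1.TargetCluster,
// shown only to illustrate the new result shape.
type TargetCluster struct {
    Name     string
    Replicas int32
}

func main() {
    // With ReplicaSchedulingTypeDuplicated and replicas=3, each selected
    // cluster receives the full replica count (names are hypothetical).
    suggested := []TargetCluster{
        {Name: "member1", Replicas: 3},
        {Name: "member2", Replicas: 3},
    }
    fmt.Println(suggested) // [{member1 3} {member2 3}]
}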

View File

@@ -23,7 +23,7 @@ type ScheduleAlgorithm interface {
 // ScheduleResult includes the clusters selected.
 type ScheduleResult struct {
-    SuggestedClusters []string
+    SuggestedClusters []workv1alpha1.TargetCluster
 }
 
 type genericScheduler struct {
@@ -68,7 +68,12 @@ func (g *genericScheduler) Schedule(ctx context.Context, placement *policyv1alph
     klog.V(4).Infof("feasible clusters scores: %v", clustersScore)
 
     clusters := g.selectClusters(clustersScore, placement.SpreadConstraints, feasibleClusters)
-    result.SuggestedClusters = clusters
+    clustersWithReplicas, err := g.assignReplicas(clusters, placement.ReplicaScheduling, resource)
+    if err != nil {
+        return result, fmt.Errorf("failed to assignReplicas: %v", err)
+    }
+    result.SuggestedClusters = clustersWithReplicas
     return result, nil
 }
@@ -117,19 +122,15 @@ func (g *genericScheduler) prioritizeClusters(
     return result, nil
 }
 
-func (g *genericScheduler) selectClusters(clustersScore framework.ClusterScoreList, spreadConstraints []policyv1alpha1.SpreadConstraint, clusters []*clusterv1alpha1.Cluster) []string {
+func (g *genericScheduler) selectClusters(clustersScore framework.ClusterScoreList, spreadConstraints []policyv1alpha1.SpreadConstraint, clusters []*clusterv1alpha1.Cluster) []*clusterv1alpha1.Cluster {
     if len(spreadConstraints) != 0 {
         return g.matchSpreadConstraints(clusters, spreadConstraints)
     }
-    out := make([]string, len(clustersScore))
-    for i := range clustersScore {
-        out[i] = clustersScore[i].Name
-    }
-    return out
+    return clusters
 }
 
-func (g *genericScheduler) matchSpreadConstraints(clusters []*clusterv1alpha1.Cluster, spreadConstraints []policyv1alpha1.SpreadConstraint) []string {
+func (g *genericScheduler) matchSpreadConstraints(clusters []*clusterv1alpha1.Cluster, spreadConstraints []policyv1alpha1.SpreadConstraint) []*clusterv1alpha1.Cluster {
     state := util.NewSpreadGroup()
     g.runSpreadConstraintsFilter(clusters, spreadConstraints, state)
     return g.calSpreadResult(state)
@@ -148,11 +149,11 @@ func (g *genericScheduler) runSpreadConstraintsFilter(clusters []*clusterv1alpha
 
 func (g *genericScheduler) groupByFieldCluster(clusters []*clusterv1alpha1.Cluster, spreadConstraint policyv1alpha1.SpreadConstraint, spreadGroup *util.SpreadGroup) {
     for _, cluster := range clusters {
         clusterGroup := cluster.Name
-        spreadGroup.GroupRecord[spreadConstraint][clusterGroup] = append(spreadGroup.GroupRecord[spreadConstraint][clusterGroup], cluster.Name)
+        spreadGroup.GroupRecord[spreadConstraint][clusterGroup] = append(spreadGroup.GroupRecord[spreadConstraint][clusterGroup], cluster)
     }
 }
 
-func (g *genericScheduler) calSpreadResult(spreadGroup *util.SpreadGroup) []string {
+func (g *genericScheduler) calSpreadResult(spreadGroup *util.SpreadGroup) []*clusterv1alpha1.Cluster {
     // TODO: now support single spread constraint
     if len(spreadGroup.GroupRecord) > 1 {
         return nil
@@ -161,8 +162,8 @@ func (g *genericScheduler) calSpreadResult(spreadGroup *util.SpreadGroup) []stri
     return g.chooseSpreadGroup(spreadGroup)
 }
 
-func (g *genericScheduler) chooseSpreadGroup(spreadGroup *util.SpreadGroup) []string {
-    var feasibleClusters []string
+func (g *genericScheduler) chooseSpreadGroup(spreadGroup *util.SpreadGroup) []*clusterv1alpha1.Cluster {
+    var feasibleClusters []*clusterv1alpha1.Cluster
     for spreadConstraint, clusterGroups := range spreadGroup.GroupRecord {
         if spreadConstraint.SpreadByField == policyv1alpha1.SpreadByFieldCluster {
             if len(clusterGroups) < spreadConstraint.MinGroups {
@@ -190,3 +191,24 @@ func (g *genericScheduler) chooseSpreadGroup(spreadGroup *util.SpreadGroup) []st
     }
     return feasibleClusters
 }
+
+func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, replicaSchedulingStrategy *policyv1alpha1.ReplicaSchedulingStrategy, object *workv1alpha1.ObjectReference) ([]workv1alpha1.TargetCluster, error) {
+    if len(clusters) == 0 {
+        return nil, fmt.Errorf("no clusters available to schedule")
+    }
+    targetClusters := make([]workv1alpha1.TargetCluster, len(clusters))
+    if object.Replicas > 0 && replicaSchedulingStrategy != nil {
+        if replicaSchedulingStrategy.ReplicaSchedulingType == policyv1alpha1.ReplicaSchedulingTypeDuplicated {
+            for i, cluster := range clusters {
+                targetClusters[i] = workv1alpha1.TargetCluster{Name: cluster.Name, Replicas: object.Replicas}
+            }
+            return targetClusters, nil
+        }
+    }
+    for i, cluster := range clusters {
+        targetClusters[i] = workv1alpha1.TargetCluster{Name: cluster.Name}
+    }
+    return targetClusters, nil
+}
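
Review note: the new assignReplicas has two paths — with the Duplicated strategy and a positive replica count, every selected cluster is assigned the full replica count; otherwise targets carry names only and Replicas stays at its zero value. A self-contained sketch of that logic using simplified stand-in types (the real ones live in workv1alpha1 and policyv1alpha1):

package main

import (
    "errors"
    "fmt"
)

// TargetCluster is a stand-in for workv1alpha1.TargetCluster.
type TargetCluster struct {
    Name     string
    Replicas int32
}

// duplicated mirrors policyv1alpha1.ReplicaSchedulingTypeDuplicated.
const duplicated = "Duplicated"

// assignReplicas mirrors the function added above: with the Duplicated
// strategy every cluster gets all replicas; otherwise names only.
func assignReplicas(clusters []string, strategy string, replicas int32) ([]TargetCluster, error) {
    if len(clusters) == 0 {
        return nil, errors.New("no clusters available to schedule")
    }
    targets := make([]TargetCluster, len(clusters))
    if replicas > 0 && strategy == duplicated {
        for i, name := range clusters {
            targets[i] = TargetCluster{Name: name, Replicas: replicas}
        }
        return targets, nil
    }
    for i, name := range clusters {
        targets[i] = TargetCluster{Name: name}
    }
    return targets, nil
}

func main() {
    got, _ := assignReplicas([]string{"member1", "member2"}, duplicated, 3)
    fmt.Println(got) // [{member1 3} {member2 3}]
}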

View File

@@ -485,11 +485,7 @@ func (s *Scheduler) scheduleResourceBinding(resourceBinding *workv1alpha1.Resour
     klog.V(4).Infof("ResourceBinding %s/%s scheduled to clusters %v", resourceBinding.Namespace, resourceBinding.Name, scheduleResult.SuggestedClusters)
 
     binding := resourceBinding.DeepCopy()
-    targetClusters := make([]workv1alpha1.TargetCluster, len(scheduleResult.SuggestedClusters))
-    for i, cluster := range scheduleResult.SuggestedClusters {
-        targetClusters[i] = workv1alpha1.TargetCluster{Name: cluster}
-    }
-    binding.Spec.Clusters = targetClusters
+    binding.Spec.Clusters = scheduleResult.SuggestedClusters
 
     if binding.Annotations == nil {
         binding.Annotations = make(map[string]string)
@@ -512,11 +508,7 @@ func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv
     klog.V(4).Infof("ClusterResourceBinding %s scheduled to clusters %v", clusterResourceBinding.Name, scheduleResult.SuggestedClusters)
 
     binding := clusterResourceBinding.DeepCopy()
-    targetClusters := make([]workv1alpha1.TargetCluster, len(scheduleResult.SuggestedClusters))
-    for i, cluster := range scheduleResult.SuggestedClusters {
-        targetClusters[i] = workv1alpha1.TargetCluster{Name: cluster}
-    }
-    binding.Spec.Clusters = targetClusters
+    binding.Spec.Clusters = scheduleResult.SuggestedClusters
 
     placement, err := json.Marshal(policy.Spec.Placement)
     if err != nil {
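
Review note: with SuggestedClusters already typed as []workv1alpha1.TargetCluster, both binding handlers above can drop their name-to-TargetCluster conversion loops and assign the schedule result directly. A sketch of the simplification, with stand-in types and hypothetical values:

package main

import "fmt"

// Stand-ins for the workv1alpha1 types used above.
type TargetCluster struct {
    Name     string
    Replicas int32
}

type ScheduleResult struct{ SuggestedClusters []TargetCluster }

type BindingSpec struct{ Clusters []TargetCluster }

func main() {
    result := ScheduleResult{SuggestedClusters: []TargetCluster{
        {Name: "member1", Replicas: 3}, // hypothetical values
        {Name: "member2", Replicas: 3},
    }}
    var spec BindingSpec
    // Direct assignment replaces the old loop that rebuilt TargetCluster
    // entries from bare cluster names (and would drop replica info).
    spec.Clusters = result.SuggestedClusters
    fmt.Println(spec.Clusters)
}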

View File

@@ -3,27 +3,28 @@ package util
 
 import (
     "sync"
 
-    "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
+    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 )
 
 // SpreadGroup stores the cluster group info for given spread constraints
 type SpreadGroup struct {
     // The outer map's keys are SpreadConstraint. The values (inner map) of the outer map are maps with string
     // keys and []string values. The inner map's key should specify the cluster group name.
-    GroupRecord map[v1alpha1.SpreadConstraint]map[string][]string
+    GroupRecord map[policyv1alpha1.SpreadConstraint]map[string][]*clusterv1alpha1.Cluster
     sync.RWMutex
 }
 
 // NewSpreadGroup initializes a SpreadGroup
 func NewSpreadGroup() *SpreadGroup {
     return &SpreadGroup{
-        GroupRecord: make(map[v1alpha1.SpreadConstraint]map[string][]string),
+        GroupRecord: make(map[policyv1alpha1.SpreadConstraint]map[string][]*clusterv1alpha1.Cluster),
     }
 }
 
 // InitialGroupRecord initials a spread state record
-func (ss *SpreadGroup) InitialGroupRecord(constraint v1alpha1.SpreadConstraint) {
+func (ss *SpreadGroup) InitialGroupRecord(constraint policyv1alpha1.SpreadConstraint) {
     ss.Lock()
     defer ss.Unlock()
-    ss.GroupRecord[constraint] = make(map[string][]string)
+    ss.GroupRecord[constraint] = make(map[string][]*clusterv1alpha1.Cluster)
 }
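
Review note: SpreadGroup now records cluster objects instead of bare names, so the spread-constraint selection in the scheduler can return []*clusterv1alpha1.Cluster without a second lookup by name. A runnable sketch of the updated structure, using simplified stand-ins for the karmada policy and cluster types:

package main

import (
    "fmt"
    "sync"
)

// Simplified stand-ins for the karmada policy and cluster API types.
type SpreadConstraint struct{ SpreadByField string }

type Cluster struct{ Name string }

// SpreadGroup mirrors util.SpreadGroup after this change: inner map
// values are cluster objects rather than name strings.
type SpreadGroup struct {
    GroupRecord map[SpreadConstraint]map[string][]*Cluster
    sync.RWMutex
}

func NewSpreadGroup() *SpreadGroup {
    return &SpreadGroup{GroupRecord: make(map[SpreadConstraint]map[string][]*Cluster)}
}

func (ss *SpreadGroup) InitialGroupRecord(constraint SpreadConstraint) {
    ss.Lock()
    defer ss.Unlock()
    ss.GroupRecord[constraint] = make(map[string][]*Cluster)
}

func main() {
    sg := NewSpreadGroup()
    byCluster := SpreadConstraint{SpreadByField: "cluster"}
    sg.InitialGroupRecord(byCluster)
    // When spreading by cluster, each cluster forms its own group keyed
    // by its name, matching groupByFieldCluster in the scheduler diff.
    m1 := &Cluster{Name: "member1"}
    sg.GroupRecord[byCluster][m1.Name] = append(sg.GroupRecord[byCluster][m1.Name], m1)
    fmt.Printf("groups: %d, members of member1: %d\n",
        len(sg.GroupRecord[byCluster]), len(sg.GroupRecord[byCluster]["member1"]))
}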