Merge pull request #5706 from kisieland/skipped-reasons-abtract-max-resources

Add a dedicated struct for the MaxResourceLimitReached Reasons
Authored by Kubernetes Prow Robot on 2023-04-26 10:34:14 -07:00; committed via GitHub
commit 12b1dbd751
3 changed files with 25 additions and 6 deletions
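
Note on the new return type: the orchestrator now reports this skip reason through the status.Reasons interface rather than the concrete *SkippedReasons type, so the richer MaxResourceLimitReached struct can travel through the same code paths. A rough sketch of the contract the struct has to satisfy (an assumed minimal shape for illustration; the actual interface lives in the autoscaler's processors/status package and may differ):

// Assumed shape of status.Reasons, for illustration only.
type Reasons interface {
    // Reasons returns human-readable explanations for the decision.
    Reasons() []string
}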


@@ -513,7 +513,7 @@ func (o *ScaleUpOrchestrator) IsNodeGroupReadyToScaleUp(nodeGroup cloudprovider.
 }
 
 // IsNodeGroupResourceExceeded returns nil if node group resource limits are not exceeded, otherwise a reason is provided.
-func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo) *SkippedReasons {
+func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo) status.Reasons {
     resourcesDelta, err := o.resourceManager.DeltaForNode(o.autoscalingContext, nodeInfo, nodeGroup)
     if err != nil {
         klog.Errorf("Skipping node group %s; error getting node group resources: %v", nodeGroup.Id(), err)
@@ -533,7 +533,7 @@ func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource
                 continue
             }
         }
-        return MaxResourceLimitReached(checkResult.ExceededResources)
+        return NewMaxResourceLimitReached(checkResult.ExceededResources)
     }
     return nil
 }
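
With the interface return type, existing callers that only log or surface the messages keep working, while consumers that need to know which limits were hit can type-assert back to the concrete struct. A minimal, hypothetical sketch of such a caller (not part of this diff):

// Hypothetical caller in the same package as the orchestrator.
if reason := o.IsNodeGroupResourceExceeded(resourcesLeft, nodeGroup, nodeInfo); reason != nil {
    klog.V(4).Infof("Skipping node group %s: %v", nodeGroup.Id(), reason.Reasons())
    // Consumers interested in the structured data can recover it:
    if limit, ok := reason.(*MaxResourceLimitReached); ok {
        _ = limit.Resources() // e.g. []string{"cpu", "memory"}
    }
}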


@@ -45,7 +45,26 @@ var (
     NotReadyReason = NewSkippedReasons("not ready for scale-up")
 )
 
-// MaxResourceLimitReached returns a reason describing which cluster wide resource limits were reached.
-func MaxResourceLimitReached(resources []string) *SkippedReasons {
-    return NewSkippedReasons(fmt.Sprintf("max cluster %s limit reached", strings.Join(resources, ", ")))
-}
+// MaxResourceLimitReached contains information why given node group was skipped.
+type MaxResourceLimitReached struct {
+    messages  []string
+    resources []string
+}
+
+// Reasons returns a slice of reasons why the node group was not considered for scale up.
+func (sr *MaxResourceLimitReached) Reasons() []string {
+    return sr.messages
+}
+
+// Resources returns a slice of resources which were missing in the node group.
+func (sr *MaxResourceLimitReached) Resources() []string {
+    return sr.resources
+}
+
+// NewMaxResourceLimitReached returns a reason describing which cluster wide resource limits were reached.
+func NewMaxResourceLimitReached(resources []string) *MaxResourceLimitReached {
+    return &MaxResourceLimitReached{
+        messages:  []string{fmt.Sprintf("max cluster %s limit reached", strings.Join(resources, ", "))},
+        resources: resources,
+    }
+}
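
For reference, a quick usage sketch of the new constructor (input values are illustrative):

reason := NewMaxResourceLimitReached([]string{"cpu", "memory"})
// reason.Reasons()   -> []string{"max cluster cpu, memory limit reached"}
// reason.Resources() -> []string{"cpu", "memory"}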


@@ -44,7 +44,7 @@ func TestMaxResourceLimitReached(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            if got := MaxResourceLimitReached(tt.resources); !reflect.DeepEqual(got.Reasons(), tt.wantReasons) {
+            if got := NewMaxResourceLimitReached(tt.resources); !reflect.DeepEqual(got.Reasons(), tt.wantReasons) {
                 t.Errorf("MaxResourceLimitReached(%v) = %v, want %v", tt.resources, got.Reasons(), tt.wantReasons)
             }
         })