Merge pull request #5706 from kisieland/skipped-reasons-abtract-max-resources
Add a dedicated struct for the MaxResourceLimitReached Reasons
This commit is contained in:
commit 12b1dbd751
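Before this change, MaxResourceLimitReached was a helper that built a generic *SkippedReasons, so the names of the exceeded resources survived only inside the formatted message. With this change it becomes a dedicated struct that keeps both the formatted message and the raw resource names, and IsNodeGroupResourceExceeded hands it back through the status.Reasons interface. As a minimal sketch of that interface (it lives in the processors/status package and is not part of this diff; the shape shown here is an assumption based on how Reasons() is used below):

// Assumed shape of the status.Reasons interface (defined outside this diff);
// any type exposing Reasons() []string satisfies it.
type Reasons interface {
	Reasons() []string
}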
@@ -513,7 +513,7 @@ func (o *ScaleUpOrchestrator) IsNodeGroupReadyToScaleUp(nodeGroup cloudprovider.
 }
 
 // IsNodeGroupResourceExceeded returns nil if node group resource limits are not exceeded, otherwise a reason is provided.
-func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo) *SkippedReasons {
+func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo) status.Reasons {
 	resourcesDelta, err := o.resourceManager.DeltaForNode(o.autoscalingContext, nodeInfo, nodeGroup)
 	if err != nil {
 		klog.Errorf("Skipping node group %s; error getting node group resources: %v", nodeGroup.Id(), err)
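Returning the status.Reasons interface instead of the concrete *SkippedReasons keeps message-only consumers working, while consumers that need the structured data can recover it with a type assertion. A hypothetical caller sketch, not part of this PR (the surrounding variables are assumed to be in scope inside the orchestrator):

// Hypothetical consumer: log the generic reasons, then type-assert to reach the
// structured resource list introduced by this PR.
if reason := o.IsNodeGroupResourceExceeded(resourcesLeft, nodeGroup, nodeInfo); reason != nil {
	klog.V(4).Infof("Skipping node group %s: %v", nodeGroup.Id(), reason.Reasons())
	if limitReached, ok := reason.(*MaxResourceLimitReached); ok {
		klog.V(4).Infof("Exceeded resources: %v", limitReached.Resources())
	}
}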
@@ -533,7 +533,7 @@ func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource
 				continue
 			}
 		}
-		return MaxResourceLimitReached(checkResult.ExceededResources)
+		return NewMaxResourceLimitReached(checkResult.ExceededResources)
 	}
 	return nil
 }
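One design detail worth noting about the unchanged `return nil` on the success path: now that the return type is an interface, the untyped nil literal is the right thing to return. Returning a nil *MaxResourceLimitReached would produce a non-nil interface value and break `reason != nil` checks in callers. A tiny illustration of that general Go behaviour (not code from this PR):

// General Go pitfall the diff avoids: a typed nil stored in an interface is not nil.
var typedNil *MaxResourceLimitReached
var asReasons status.Reasons = typedNil
fmt.Println(asReasons == nil) // prints "false"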
@@ -45,7 +45,26 @@ var (
 	NotReadyReason = NewSkippedReasons("not ready for scale-up")
 )
 
-// MaxResourceLimitReached returns a reason describing which cluster wide resource limits were reached.
-func MaxResourceLimitReached(resources []string) *SkippedReasons {
-	return NewSkippedReasons(fmt.Sprintf("max cluster %s limit reached", strings.Join(resources, ", ")))
+// MaxResourceLimitReached contains information why given node group was skipped.
+type MaxResourceLimitReached struct {
+	messages  []string
+	resources []string
+}
+
+// Reasons returns a slice of reasons why the node group was not considered for scale up.
+func (sr *MaxResourceLimitReached) Reasons() []string {
+	return sr.messages
+}
+
+// Resources returns a slice of resources which were missing in the node group.
+func (sr *MaxResourceLimitReached) Resources() []string {
+	return sr.resources
+}
+
+// NewMaxResourceLimitReached returns a reason describing which cluster wide resource limits were reached.
+func NewMaxResourceLimitReached(resources []string) *MaxResourceLimitReached {
+	return &MaxResourceLimitReached{
+		messages:  []string{fmt.Sprintf("max cluster %s limit reached", strings.Join(resources, ", "))},
+		resources: resources,
+	}
 }
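As a usage sketch of the new constructor (the expected values follow directly from the fmt.Sprintf/strings.Join call above; the snippet assumes it runs inside the same package as the code in this diff):

// Usage sketch: both the human-readable message and the raw resource names are kept.
reason := NewMaxResourceLimitReached([]string{"cpu", "memory"})
fmt.Println(reason.Reasons())   // [max cluster cpu, memory limit reached]
fmt.Println(reason.Resources()) // [cpu memory]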
@@ -44,7 +44,7 @@ func TestMaxResourceLimitReached(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := MaxResourceLimitReached(tt.resources); !reflect.DeepEqual(got.Reasons(), tt.wantReasons) {
+			if got := NewMaxResourceLimitReached(tt.resources); !reflect.DeepEqual(got.Reasons(), tt.wantReasons) {
				t.Errorf("MaxResourceLimitReached(%v) = %v, want %v", tt.resources, got.Reasons(), tt.wantReasons)
 			}
 		})
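The existing table-driven test only exercises Reasons(). A companion test for the new Resources() accessor could look like the sketch below; it is hypothetical and not part of this PR (the test name is mine, and it uses only the reflect and testing imports the file above already needs):

// Hypothetical follow-up test for the Resources() accessor added in this PR.
func TestMaxResourceLimitReachedResources(t *testing.T) {
	resources := []string{"cpu", "memory"}
	got := NewMaxResourceLimitReached(resources)
	if !reflect.DeepEqual(got.Resources(), resources) {
		t.Errorf("NewMaxResourceLimitReached(%v).Resources() = %v, want %v", resources, got.Resources(), resources)
	}
}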