Cluster-Autoscaler: update scale down status
parent 46d2c66473
commit 5d2c675c8e
@@ -147,6 +147,7 @@ func (sd *ScaleDown) UpdateUnneededNodes(
 		sd.unneededNodesList = make([]*apiv1.Node, 0)
 		sd.unneededNodes = make(map[string]time.Time)
 		sd.nodeUtilizationMap = make(map[string]float64)
+		sd.context.ClusterStateRegistry.UpdateScaleDownCandidates(sd.unneededNodesList, timestamp)

 		return fmt.Errorf("error while simulating node drains: %v", err)
 	}
@@ -168,6 +169,7 @@ func (sd *ScaleDown) UpdateUnneededNodes(
 	sd.unneededNodes = result
 	sd.podLocationHints = newHints
 	sd.nodeUtilizationMap = utilizationMap
+	sd.context.ClusterStateRegistry.UpdateScaleDownCandidates(sd.unneededNodesList, timestamp)
 	return nil
 }

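Both hunks above add the same call: each time UpdateUnneededNodes finishes an evaluation, it now reports the current unneeded-node list together with the evaluation timestamp to the cluster state registry via UpdateScaleDownCandidates, passing a freshly reset, empty list on the drain-simulation error path and the recomputed list on the success path. The following is a minimal sketch of the state such a registry has to keep; the UpdateScaleDownCandidates signature is taken from the diff, while the candidateRegistry type, its fields, and the CandidateCount helper are hypothetical and are not the real clusterstate.ClusterStateRegistry.

package sketch

import (
	"sync"
	"time"

	apiv1 "k8s.io/api/core/v1"
)

// candidateRegistry is a hypothetical, minimal stand-in for the piece of
// state that UpdateScaleDownCandidates feeds: the nodes currently considered
// removable and the time of the last evaluation.
type candidateRegistry struct {
	mu                  sync.Mutex
	scaleDownCandidates []*apiv1.Node
	lastUpdate          time.Time
}

// UpdateScaleDownCandidates mirrors the call added in this commit: each
// evaluation replaces the previous candidate list wholesale, so an error
// path that reports an empty slice also clears any stale candidates.
func (r *candidateRegistry) UpdateScaleDownCandidates(nodes []*apiv1.Node, timestamp time.Time) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.scaleDownCandidates = nodes
	r.lastUpdate = timestamp
}

// CandidateCount is an illustrative read-side helper, not part of the diff.
func (r *candidateRegistry) CandidateCount() int {
	r.mu.Lock()
	defer r.mu.Unlock()
	return len(r.scaleDownCandidates)
}

Replacing the list on every call, rather than merging, is what makes the error-path call in the first hunk meaningful: a failed drain simulation clears previously reported candidates instead of leaving them stale.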
@@ -73,12 +73,21 @@ func TestFindUnneededNodes(t *testing.T) {
 	fakeClient := &fake.Clientset{}
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
+
+	provider := testprovider.NewTestCloudProvider(nil, nil)
+	provider.AddNodeGroup("ng1", 1, 10, 2)
+	provider.AddNode("ng1", n1)
+	provider.AddNode("ng1", n2)
+	provider.AddNode("ng1", n3)
+	provider.AddNode("ng1", n4)
+
 	context := AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.35,
 		},
-		PredicateChecker: simulator.NewTestPredicateChecker(),
-		LogRecorder:      fakeLogRecorder,
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		PredicateChecker:     simulator.NewTestPredicateChecker(),
+		LogRecorder:          fakeLogRecorder,
 	}
 	sd := NewScaleDown(&context)
 	sd.UpdateUnneededNodes([]*apiv1.Node{n1, n2, n3, n4}, []*apiv1.Pod{p1, p2, p3, p4}, time.Now(), nil)
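The test hunk extends TestFindUnneededNodes with a test cloud provider and a real clusterstate.ClusterStateRegistry, so the new UpdateScaleDownCandidates call in UpdateUnneededNodes has a registry to report into (before this commit the test context contained no registry). As a hedged, standalone illustration of the call pattern the test now exercises, the sketch below uses a hand-rolled fake sink; fakeCandidateSink and TestCandidateSinkIsUpdated are hypothetical names and are not part of this commit or of the autoscaler test suite.

package sketch

import (
	"testing"
	"time"

	apiv1 "k8s.io/api/core/v1"
)

// fakeCandidateSink is a hypothetical test double standing in for whatever
// receives UpdateScaleDownCandidates; the real test above wires up the
// actual clusterstate.ClusterStateRegistry instead.
type fakeCandidateSink struct {
	calls          int
	lastCandidates []*apiv1.Node
	lastTimestamp  time.Time
}

func (f *fakeCandidateSink) UpdateScaleDownCandidates(nodes []*apiv1.Node, timestamp time.Time) {
	f.calls++
	f.lastCandidates = nodes
	f.lastTimestamp = timestamp
}

// TestCandidateSinkIsUpdated sketches the property the new calls establish:
// every evaluation publishes a candidate list, even an empty one on errors.
func TestCandidateSinkIsUpdated(t *testing.T) {
	sink := &fakeCandidateSink{}
	now := time.Now()

	// Error path: state is reset, and the (empty) list is still reported.
	sink.UpdateScaleDownCandidates([]*apiv1.Node{}, now)
	if sink.calls != 1 || len(sink.lastCandidates) != 0 {
		t.Fatalf("expected one call with an empty candidate list, got %d calls and %d candidates",
			sink.calls, len(sink.lastCandidates))
	}

	// Success path: the recomputed list replaces the previous report.
	sink.UpdateScaleDownCandidates([]*apiv1.Node{{}, {}}, now.Add(time.Minute))
	if len(sink.lastCandidates) != 2 {
		t.Fatalf("expected 2 candidates after the second update, got %d", len(sink.lastCandidates))
	}
}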