Test balancing autoprovisioned node groups
parent b9719350c3
commit 3614d4ec33
@@ -176,6 +176,23 @@ func (tcp *TestCloudProvider) NewNodeGroup(machineType string, labels map[string
 	}, nil
 }
 
+// NewNodeGroupWithId creates a new node group with custom ID suffix.
+func (tcp *TestCloudProvider) NewNodeGroupWithId(machineType string, labels map[string]string, systemLabels map[string]string,
+	taints []apiv1.Taint, extraResources map[string]resource.Quantity, id string) (cloudprovider.NodeGroup, error) {
+	return &TestNodeGroup{
+		cloudProvider:   tcp,
+		id:              "autoprovisioned-" + machineType + "-" + id,
+		minSize:         0,
+		maxSize:         1000,
+		targetSize:      0,
+		exist:           false,
+		autoprovisioned: true,
+		machineType:     machineType,
+		labels:          labels,
+		taints:          taints,
+	}, nil
+}
+
 // InsertNodeGroup adds already created node group to test cloud provider.
 func (tcp *TestCloudProvider) InsertNodeGroup(nodeGroup cloudprovider.NodeGroup) {
 	tcp.Lock()
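As a quick illustration of the new helper (a sketch, not part of the diff above): the custom ID suffix is what lets a caller create several distinct but similar groups for the same machine type. The snippet assumes the package's existing NewTestCloudProvider constructor with nil scale callbacks and the standard apiv1/resource packages.

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
)

func main() {
	// Assumed constructor: NewTestCloudProvider(onScaleUp, onScaleDown); nil callbacks suffice here.
	tcp := testprovider.NewTestCloudProvider(nil, nil)

	ng, err := tcp.NewNodeGroupWithId(
		"T1",                           // machineType
		map[string]string{},            // labels
		map[string]string{},            // systemLabels
		[]apiv1.Taint{},                // taints
		map[string]resource.Quantity{}, // extraResources
		"2",                            // custom ID suffix
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(ng.Id())    // autoprovisioned-T1-2
	fmt.Println(ng.Exist()) // false: the group does not exist until Create() is called
}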
@@ -443,3 +460,8 @@ func (tng *TestNodeGroup) Labels() map[string]string {
 func (tng *TestNodeGroup) Taints() []apiv1.Taint {
 	return tng.taints
 }
+
+// MachineType returns machine type passed to the test node group when it was created.
+func (tng *TestNodeGroup) MachineType() string {
+	return tng.machineType
+}
@@ -17,10 +17,13 @@ limitations under the License.
 package core
 
 import (
+	"fmt"
+	"reflect"
 	"testing"
 
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	testcloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
@@ -167,15 +170,41 @@ func NewScaleTestAutoscalingContext(
 }
 
 type mockAutoprovisioningNodeGroupManager struct {
-	t *testing.T
+	t           *testing.T
+	extraGroups int
 }
 
 func (p *mockAutoprovisioningNodeGroupManager) CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (nodegroups.CreateNodeGroupResult, errors.AutoscalerError) {
 	newNodeGroup, err := nodeGroup.Create()
 	assert.NoError(p.t, err)
 	metrics.RegisterNodeGroupCreation()
+	extraGroups := []cloudprovider.NodeGroup{}
+	testGroup, ok := nodeGroup.(*testcloudprovider.TestNodeGroup)
+	if !ok {
+		return nodegroups.CreateNodeGroupResult{}, errors.ToAutoscalerError(errors.InternalError, fmt.Errorf("expected test node group, found %v", reflect.TypeOf(nodeGroup)))
+	}
+	testCloudProvider, ok := context.CloudProvider.(*testcloudprovider.TestCloudProvider)
+	if !ok {
+		return nodegroups.CreateNodeGroupResult{}, errors.ToAutoscalerError(errors.InternalError, fmt.Errorf("expected test CloudProvider, found %v", reflect.TypeOf(context.CloudProvider)))
+	}
+	for i := 0; i < p.extraGroups; i++ {
+		extraNodeGroup, err := testCloudProvider.NewNodeGroupWithId(
+			testGroup.MachineType(),
+			testGroup.Labels(),
+			map[string]string{},
+			[]apiv1.Taint{},
+			map[string]resource.Quantity{},
+			fmt.Sprintf("%d", i+1),
+		)
+		assert.NoError(p.t, err)
+		extraGroup, err := extraNodeGroup.Create()
+		assert.NoError(p.t, err)
+		metrics.RegisterNodeGroupCreation()
+		extraGroups = append(extraGroups, extraGroup)
+	}
 	result := nodegroups.CreateNodeGroupResult{
-		MainCreatedNodeGroup: newNodeGroup,
+		MainCreatedNodeGroup:   newNodeGroup,
+		ExtraCreatedNodeGroups: extraGroups,
 	}
 	return result, nil
 }
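To make the mock's behaviour concrete (a sketch, not part of the diff): with extraGroups set to 2 and a requested group of machine type T1, CreateNodeGroup is expected to return the requested group plus two similar groups whose IDs follow the NewNodeGroupWithId naming scheme. A minimal standalone illustration of the resulting names:

package main

import "fmt"

func main() {
	// Naming scheme from the test cloud provider: "autoprovisioned-<machineType>[-<suffix>]".
	machineType := "T1"
	extraGroups := 2

	fmt.Println("MainCreatedNodeGroup:      autoprovisioned-" + machineType)
	for i := 0; i < extraGroups; i++ {
		fmt.Printf("ExtraCreatedNodeGroups[%d]: autoprovisioned-%s-%d\n", i, machineType, i+1)
	}
}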
@@ -851,7 +851,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 
 	processors := NewTestProcessors()
 	processors.NodeGroupListProcessor = &mockAutoprovisioningNodeGroupListProcessor{t}
-	processors.NodeGroupManager = &mockAutoprovisioningNodeGroupManager{t}
+	processors.NodeGroupManager = &mockAutoprovisioningNodeGroupManager{t, 0}
 
 	nodes := []*apiv1.Node{}
 	nodeInfos, _ := utils.GetNodeInfosForGroups(nodes, nil, provider, context.ListerRegistry, []*appsv1.DaemonSet{}, context.PredicateChecker, nil)
@@ -863,6 +863,65 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 	assert.Equal(t, "autoprovisioned-T1-1", utils.GetStringFromChan(expandedGroups))
 }
 
+func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
+	createdGroups := make(chan string, 10)
+	expandedGroups := make(chan string, 10)
+
+	p1 := BuildTestPod("p1", 80, 0)
+	p2 := BuildTestPod("p2", 80, 0)
+	p3 := BuildTestPod("p3", 80, 0)
+
+	fakeClient := &fake.Clientset{}
+
+	t1 := BuildTestNode("t1", 100, 1000000)
+	SetNodeReadyState(t1, true, time.Time{})
+	ti1 := schedulernodeinfo.NewNodeInfo()
+	ti1.SetNode(t1)
+
+	provider := testprovider.NewTestAutoprovisioningCloudProvider(
+		func(nodeGroup string, increase int) error {
+			expandedGroups <- fmt.Sprintf("%s-%d", nodeGroup, increase)
+			return nil
+		}, nil, func(nodeGroup string) error {
+			createdGroups <- nodeGroup
+			return nil
+		}, nil, []string{"T1"}, map[string]*schedulernodeinfo.NodeInfo{"T1": ti1})
+
+	options := config.AutoscalingOptions{
+		BalanceSimilarNodeGroups:         true,
+		EstimatorName:                    estimator.BinpackingEstimatorName,
+		MaxCoresTotal:                    5000 * 64,
+		MaxMemoryTotal:                   5000 * 64 * 20,
+		NodeAutoprovisioningEnabled:      true,
+		MaxAutoprovisionedNodeGroupCount: 10,
+	}
+	podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
+	listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil, nil)
+	context, err := NewScaleTestAutoscalingContext(options, fakeClient, listers, provider, nil)
+	assert.NoError(t, err)
+
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
+
+	processors := NewTestProcessors()
+	processors.NodeGroupListProcessor = &mockAutoprovisioningNodeGroupListProcessor{t}
+	processors.NodeGroupManager = &mockAutoprovisioningNodeGroupManager{t, 2}
+
+	nodes := []*apiv1.Node{}
+	nodeInfos, _ := utils.GetNodeInfosForGroups(nodes, nil, provider, context.ListerRegistry, []*appsv1.DaemonSet{}, context.PredicateChecker, nil)
+
+	scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p1, p2, p3}, nodes, []*appsv1.DaemonSet{}, nodeInfos, nil)
+	assert.NoError(t, err)
+	assert.True(t, scaleUpStatus.WasSuccessful())
+	assert.Equal(t, "autoprovisioned-T1", utils.GetStringFromChan(createdGroups))
+	expandedGroupMap := map[string]bool{}
+	for i := 0; i < 3; i++ {
+		expandedGroupMap[utils.GetStringFromChan(expandedGroups)] = true
+	}
+	assert.True(t, expandedGroupMap["autoprovisioned-T1-1"])
+	assert.True(t, expandedGroupMap["autoprovisioned-T1-1-1"])
+	assert.True(t, expandedGroupMap["autoprovisioned-T1-2-1"])
+}
+
 func TestCheckScaleUpDeltaWithinLimits(t *testing.T) {
 	type testcase struct {
 		limits scaleUpResourcesLimits
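Reading the new test's assertions (an illustration, not part of the diff): each pod requests 80m CPU and the template node offers 100m, so the three pending pods need three nodes, and with BalanceSimilarNodeGroups enabled the scale-up should be spread one node per group across the main group and the two extra similar groups created by the mock manager. The expand callback records values of the form "<group>-<increase>", which the sketch below decodes.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Values the test expects on the expandedGroups channel, formatted as "<group>-<increase>".
	expected := []string{
		"autoprovisioned-T1-1",   // main group, scaled up by 1
		"autoprovisioned-T1-1-1", // first extra group, scaled up by 1
		"autoprovisioned-T1-2-1", // second extra group, scaled up by 1
	}
	for _, v := range expected {
		i := strings.LastIndex(v, "-")
		fmt.Printf("group=%s increase=%s\n", v[:i], v[i+1:])
	}
}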
@@ -309,7 +309,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
 	onScaleDownMock := &onScaleDownMock{}
 	onNodeGroupCreateMock := &onNodeGroupCreateMock{}
 	onNodeGroupDeleteMock := &onNodeGroupDeleteMock{}
-	nodeGroupManager := &mockAutoprovisioningNodeGroupManager{t}
+	nodeGroupManager := &mockAutoprovisioningNodeGroupManager{t, 0}
 	nodeGroupListProcessor := &mockAutoprovisioningNodeGroupListProcessor{t}
 
 	n1 := BuildTestNode("n1", 100, 1000)