CA: refactor utils related to NodeInfos
simulator.BuildNodeInfoForNode, core_utils.GetNodeInfoFromTemplate, and scheduler_utils.DeepCopyTemplateNode all had very similar logic for sanitizing and copying NodeInfos. They're all consolidated to one file in simulator, sharing common logic. DeepCopyNodeInfo is changed to be a framework.NodeInfo method. MixedTemplateNodeInfoProvider now correctly uses ClusterSnapshot to correlate Nodes to scheduled pods, instead of using a live Pod lister. This means that the snapshot now has to be properly initialized in a bunch of tests.
commit: eb26816ce9
parent: bc16a6f55b
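The refactor consolidates three near-duplicate helpers into the simulator package. For orientation, the sketch below lines the new entry points up against the old ones. The helper names and rough signatures are taken from the call sites in the diff below; the wrapper functions, package name, and exact return types are illustrative assumptions, not code from the commit.

// Sketch only: signatures inferred from call sites in this diff; the wrappers are illustrative.
package sketch

import (
    appsv1 "k8s.io/api/apps/v1"

    "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
    "k8s.io/autoscaler/cluster-autoscaler/simulator"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
    "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
)

// templateFromNodeGroup replaces core_utils.GetNodeInfoFromTemplate: it builds a sanitized
// template NodeInfo from a node group's template, with daemon set pods attached.
func templateFromNodeGroup(ng cloudprovider.NodeGroup, daemonSets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) {
    return simulator.SanitizedTemplateNodeInfoFromNodeGroup(ng, daemonSets, taintConfig)
}

// freshTemplateCopy replaces scheduler_utils.DeepCopyTemplateNode: it deep-copies a template
// NodeInfo and gives the copy (and its pods) a fresh, unique node name.
func freshTemplateCopy(template *framework.NodeInfo, suffix string) *framework.NodeInfo {
    return simulator.NodeInfoSanitizedDeepCopy(template, suffix)
}

// cachedCopy uses the new framework.NodeInfo.DeepCopy method, which replaces the standalone
// core_utils.DeepCopyNodeInfo helper.
func cachedCopy(nodeInfo *framework.NodeInfo) *framework.NodeInfo {
    return nodeInfo.DeepCopy()
}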
@@ -25,10 +25,10 @@ import (
 
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     "k8s.io/autoscaler/cluster-autoscaler/context"
-    "k8s.io/autoscaler/cluster-autoscaler/core/utils"
     "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
     "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
     "k8s.io/autoscaler/cluster-autoscaler/processors/status"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
     "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
     "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
@@ -110,7 +110,7 @@ func (s *AsyncNodeGroupInitializer) InitializeNodeGroup(result nodegroups.AsyncN
     mainCreatedNodeGroup := result.CreationResult.MainCreatedNodeGroup
     // If possible replace candidate node-info with node info based on crated node group. The latter
     // one should be more in line with nodes which will be created by node group.
-    nodeInfo, aErr := utils.GetNodeInfoFromTemplate(mainCreatedNodeGroup, s.daemonSets, s.taintConfig)
+    nodeInfo, aErr := simulator.SanitizedTemplateNodeInfoFromNodeGroup(mainCreatedNodeGroup, s.daemonSets, s.taintConfig)
     if aErr != nil {
         klog.Warningf("Cannot build node info for newly created main node group %s. Using fallback. Error: %v", mainCreatedNodeGroup.Id(), aErr)
         nodeInfo = s.nodeInfo
@@ -27,7 +27,6 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/context"
     "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/equivalence"
     "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/resource"
-    "k8s.io/autoscaler/cluster-autoscaler/core/utils"
     "k8s.io/autoscaler/cluster-autoscaler/estimator"
     "k8s.io/autoscaler/cluster-autoscaler/expander"
     "k8s.io/autoscaler/cluster-autoscaler/metrics"
@@ -35,6 +34,7 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
     "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
     "k8s.io/autoscaler/cluster-autoscaler/processors/status"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
     "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
     "k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
@@ -527,7 +527,7 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup(
 
     // If possible replace candidate node-info with node info based on crated node group. The latter
     // one should be more in line with nodes which will be created by node group.
-    mainCreatedNodeInfo, aErr := utils.GetNodeInfoFromTemplate(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, o.taintConfig)
+    mainCreatedNodeInfo, aErr := simulator.SanitizedTemplateNodeInfoFromNodeGroup(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, o.taintConfig)
     if aErr == nil {
         nodeInfos[createNodeGroupResult.MainCreatedNodeGroup.Id()] = mainCreatedNodeInfo
         schedulablePodGroups[createNodeGroupResult.MainCreatedNodeGroup.Id()] = o.SchedulablePodGroups(podEquivalenceGroups, createNodeGroupResult.MainCreatedNodeGroup, mainCreatedNodeInfo)
@@ -542,7 +542,7 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup(
         delete(schedulablePodGroups, oldId)
     }
     for _, nodeGroup := range createNodeGroupResult.ExtraCreatedNodeGroups {
-        nodeInfo, aErr := utils.GetNodeInfoFromTemplate(nodeGroup, daemonSets, o.taintConfig)
+        nodeInfo, aErr := simulator.SanitizedTemplateNodeInfoFromNodeGroup(nodeGroup, daemonSets, o.taintConfig)
         if aErr != nil {
             klog.Warningf("Cannot build node info for newly created extra node group %v; balancing similar node groups will not work; err=%v", nodeGroup.Id(), aErr)
             continue
@@ -1049,6 +1049,8 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
     // build orchestrator
     context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
     assert.NoError(t, err)
+    err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods))
+    assert.NoError(t, err)
     nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
         Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
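The test hunks that follow all apply the same fix: MixedTemplateNodeInfoProvider now correlates nodes with their scheduled pods through the ClusterSnapshot instead of a live pod lister, so each test has to seed the snapshot before asking the provider for node infos. A minimal sketch of that ordering, assuming the scale-up test scaffolding used in these files and the repo's usual import paths:

// Sketch, not code from the commit: populate the snapshot first, then build template NodeInfos.
package sketch

import (
    "time"

    appsv1 "k8s.io/api/apps/v1"
    apiv1 "k8s.io/api/core/v1"

    "k8s.io/autoscaler/cluster-autoscaler/context"
    "k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
)

func buildTemplateNodeInfos(ctx *context.AutoscalingContext, nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, now time.Time) (map[string]*framework.NodeInfo, error) {
    // The provider reads each node's pods from ctx.ClusterSnapshot, so this must happen first.
    if err := ctx.ClusterSnapshot.SetClusterState(nodes, scheduledPods); err != nil {
        return nil, err
    }
    nodeInfos, autoscalerErr := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
        Process(ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
    if autoscalerErr != nil {
        return nil, autoscalerErr
    }
    return nodeInfos, nil
}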
@@ -1130,13 +1132,15 @@ func TestScaleUpUnhealthy(t *testing.T) {
     SetNodeReadyState(n1, true, someTimeAgo)
     n2 := BuildTestNode("n2", 1000, 1000)
     SetNodeReadyState(n2, true, someTimeAgo)
+    nodes := []*apiv1.Node{n1, n2}
 
     p1 := BuildTestPod("p1", 80, 0)
     p2 := BuildTestPod("p2", 800, 0)
     p1.Spec.NodeName = "n1"
     p2.Spec.NodeName = "n2"
+    pods := []*apiv1.Pod{p1, p2}
 
-    podLister := kube_util.NewTestPodLister([]*apiv1.Pod{p1, p2})
+    podLister := kube_util.NewTestPodLister(pods)
     listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
 
     provider := testprovider.NewTestCloudProvider(func(nodeGroup string, increase int) error {
@@ -1155,8 +1159,8 @@ func TestScaleUpUnhealthy(t *testing.T) {
     }
     context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
     assert.NoError(t, err)
-
-    nodes := []*apiv1.Node{n1, n2}
+    err = context.ClusterSnapshot.SetClusterState(nodes, pods)
+    assert.NoError(t, err)
     nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
@@ -1198,7 +1202,8 @@ func TestBinpackingLimiter(t *testing.T) {
 
     context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
     assert.NoError(t, err)
-
+    err = context.ClusterSnapshot.SetClusterState(nodes, nil)
+    assert.NoError(t, err)
     nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
         Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
@@ -1233,11 +1238,13 @@ func TestScaleUpNoHelp(t *testing.T) {
     n1 := BuildTestNode("n1", 100, 1000)
     now := time.Now()
     SetNodeReadyState(n1, true, now.Add(-2*time.Minute))
+    nodes := []*apiv1.Node{n1}
 
     p1 := BuildTestPod("p1", 80, 0)
     p1.Spec.NodeName = "n1"
+    pods := []*apiv1.Pod{p1}
 
-    podLister := kube_util.NewTestPodLister([]*apiv1.Pod{p1})
+    podLister := kube_util.NewTestPodLister(pods)
     listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
 
     provider := testprovider.NewTestCloudProvider(func(nodeGroup string, increase int) error {
@@ -1255,8 +1262,8 @@ func TestScaleUpNoHelp(t *testing.T) {
     }
     context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
     assert.NoError(t, err)
-
-    nodes := []*apiv1.Node{n1}
+    err = context.ClusterSnapshot.SetClusterState(nodes, pods)
+    assert.NoError(t, err)
     nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
@@ -1410,7 +1417,8 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
             listers := kube_util.NewListerRegistry(nil, nil, kube_util.NewTestPodLister(nil), nil, nil, nil, nil, nil, nil)
             ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{BalanceSimilarNodeGroups: tc.balancingEnabled}, &fake.Clientset{}, listers, provider, nil, nil)
             assert.NoError(t, err)
-
+            err = ctx.ClusterSnapshot.SetClusterState(nodes, nil)
+            assert.NoError(t, err)
             nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
             clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
             assert.NoError(t, clusterState.UpdateNodes(nodes, nodeInfos, time.Now()))
@@ -1474,7 +1482,8 @@ func TestScaleUpBalanceGroups(t *testing.T) {
     }
     context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
     assert.NoError(t, err)
-
+    err = context.ClusterSnapshot.SetClusterState(nodes, podList)
+    assert.NoError(t, err)
     nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
@@ -1650,6 +1659,8 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
     assert.NoError(t, err)
 
     nodes := []*apiv1.Node{n1, n2}
+    err = context.ClusterSnapshot.SetClusterState(nodes, nil)
+    assert.NoError(t, err)
     nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
     processors := processorstest.NewTestProcessors(&context)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -22,6 +22,7 @@ import (
     "time"
 
     "github.com/stretchr/testify/assert"
+
     appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
@@ -73,6 +74,8 @@ func TestDeltaForNode(t *testing.T) {
 
         ng := testCase.nodeGroupConfig
         group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
+        err := ctx.ClusterSnapshot.SetClusterState(nodes, nil)
+        assert.NoError(t, err)
         nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
         rm := NewManager(processors.CustomResourcesProcessor)
@@ -114,6 +117,8 @@ func TestResourcesLeft(t *testing.T) {
 
         ng := testCase.nodeGroupConfig
         _, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
+        err := ctx.ClusterSnapshot.SetClusterState(nodes, nil)
+        assert.NoError(t, err)
         nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
         rm := NewManager(processors.CustomResourcesProcessor)
@@ -165,6 +170,8 @@ func TestApplyLimits(t *testing.T) {
 
         ng := testCase.nodeGroupConfig
         group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
+        err := ctx.ClusterSnapshot.SetClusterState(nodes, nil)
+        assert.NoError(t, err)
         nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
         rm := NewManager(processors.CustomResourcesProcessor)
@@ -230,6 +237,8 @@ func TestResourceManagerWithGpuResource(t *testing.T) {
     assert.NoError(t, err)
 
     nodes := []*corev1.Node{n1}
+    err = context.ClusterSnapshot.SetClusterState(nodes, nil)
+    assert.NoError(t, err)
     nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
     rm := NewManager(processors.CustomResourcesProcessor)
@@ -52,7 +52,6 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
     caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
     kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
-    scheduler_utils "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
     "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
     "k8s.io/utils/integer"
 
@@ -1028,7 +1027,7 @@ func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*f
             // Ensure new nodes have different names because nodeName
             // will be used as a map key. Also deep copy pods (daemonsets &
             // any pods added by cloud provider on template).
-            nodes = append(nodes, scheduler_utils.DeepCopyTemplateNode(nodeTemplate, fmt.Sprintf("upcoming-%d", i)))
+            nodes = append(nodes, simulator.NodeInfoSanitizedDeepCopy(nodeTemplate, fmt.Sprintf("upcoming-%d", i)))
         }
         upcomingNodes[nodeGroup] = nodes
     }
@@ -64,6 +64,7 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
     . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
     kube_record "k8s.io/client-go/tools/record"
+    "k8s.io/klog/v2"
     schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"
 
     appsv1 "k8s.io/api/apps/v1"
@@ -78,7 +79,6 @@ import (
     "github.com/google/go-cmp/cmp/cmpopts"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/mock"
-    klog "k8s.io/klog/v2"
 )
 
 type podListerMock struct {
@@ -406,7 +406,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
     // MaxNodesTotal reached.
     readyNodeLister.SetNodes([]*apiv1.Node{n1})
     allNodeLister.SetNodes([]*apiv1.Node{n1})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
 
@@ -417,7 +417,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
     // Scale up.
     readyNodeLister.SetNodes([]*apiv1.Node{n1})
     allNodeLister.SetNodes([]*apiv1.Node{n1})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
     onScaleUpMock.On("ScaleUp", "ng1", 1).Return(nil).Once()
@@ -431,7 +431,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
     // Mark unneeded nodes.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
 
@@ -446,7 +446,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
     // Scale down.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Times(3)
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
     onScaleDownMock.On("ScaleDown", "ng1", "n2").Return(nil).Once()
@@ -460,7 +460,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
     // Mark unregistered nodes.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
 
@@ -475,7 +475,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
     // Remove unregistered nodes.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     onScaleDownMock.On("ScaleDown", "ng2", "n3").Return(nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
@@ -489,7 +489,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
     // Scale up to node group min size.
     readyNodeLister.SetNodes([]*apiv1.Node{n4})
     allNodeLister.SetNodes([]*apiv1.Node{n4})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil)
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil)
     onScaleUpMock.On("ScaleUp", "ng3", 2).Return(nil).Once() // 2 new nodes are supposed to be scaled up.
@@ -689,7 +689,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) {
     // Mark unneeded nodes.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
 
@@ -701,7 +701,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) {
     // Scale down nodegroup
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Times(3)
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil)
     onScaleDownMock.On("ScaleDown", tc.expectedScaleDownNG, tc.expectedScaleDownNode).Return(nil).Once()
@@ -828,7 +828,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
     // Scale up.
     readyNodeLister.SetNodes([]*apiv1.Node{n1})
     allNodeLister.SetNodes([]*apiv1.Node{n1})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     onNodeGroupCreateMock.On("Create", "autoprovisioned-TN2").Return(nil).Once()
@@ -845,7 +845,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
     // Remove autoprovisioned node group and mark unneeded nodes.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     onNodeGroupDeleteMock.On("Delete", "autoprovisioned-TN1").Return(nil).Once()
@@ -861,7 +861,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
     // Scale down.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Times(3)
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     onNodeGroupDeleteMock.On("Delete", "autoprovisioned-"+
@@ -984,7 +984,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {
     // Scale up.
     readyNodeLister.SetNodes(nodes)
     allNodeLister.SetNodes(nodes)
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
     onScaleUpMock.On("ScaleUp", "ng1", 1).Return(nil).Once()
@@ -1002,7 +1002,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {
     // Remove broken node
     readyNodeLister.SetNodes(nodes)
     allNodeLister.SetNodes(nodes)
-    allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Once()
     onScaleDownMock.On("ScaleDown", "ng1", "broken").Return(nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
@@ -1137,7 +1137,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
     // Scale up
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5, p6}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5, p6}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
     onScaleUpMock.On("ScaleUp", "ng2", 1).Return(nil).Once()
@@ -1150,7 +1150,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
     // Mark unneeded nodes.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
 
@@ -1164,7 +1164,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
     // Scale down.
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5}, nil).Times(3)
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5}, nil).Twice()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
     onScaleDownMock.On("ScaleDown", "ng1", "n1").Return(nil).Once()
@@ -1266,7 +1266,7 @@ func TestStaticAutoscalerRunOnceWithFilteringOnBinPackingEstimator(t *testing.T)
     // Scale up
     readyNodeLister.SetNodes([]*apiv1.Node{n1, n2})
     allNodeLister.SetNodes([]*apiv1.Node{n1, n2})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p3, p4}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p3, p4}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
 
@@ -1365,7 +1365,7 @@ func TestStaticAutoscalerRunOnceWithFilteringOnUpcomingNodesEnabledNoScaleUp(t *
     // Scale up
     readyNodeLister.SetNodes([]*apiv1.Node{n2, n3})
     allNodeLister.SetNodes([]*apiv1.Node{n2, n3})
-    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3}, nil).Twice()
+    allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3}, nil).Once()
     daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
     podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
 
@@ -1566,7 +1566,7 @@ func TestStaticAutoscalerRunOnceWithBypassedSchedulers(t *testing.T) {
 
             tc.setupConfig.mocks.readyNodeLister.SetNodes([]*apiv1.Node{n1})
             tc.setupConfig.mocks.allNodeLister.SetNodes([]*apiv1.Node{n1})
-            tc.setupConfig.mocks.allPodLister.On("List").Return(tc.pods, nil).Twice()
+            tc.setupConfig.mocks.allPodLister.On("List").Return(tc.pods, nil).Once()
             tc.setupConfig.mocks.daemonSetLister.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once()
             tc.setupConfig.mocks.podDisruptionBudgetLister.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once()
             if tc.expectedScaleUp != nil {
@@ -17,52 +17,17 @@ limitations under the License.
 package utils
 
 import (
-    "fmt"
-    "math/rand"
     "reflect"
     "time"
 
-    appsv1 "k8s.io/api/apps/v1"
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     "k8s.io/autoscaler/cluster-autoscaler/clusterstate"
     "k8s.io/autoscaler/cluster-autoscaler/metrics"
-    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
-    "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
     "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
     "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
-    "k8s.io/autoscaler/cluster-autoscaler/utils/labels"
-    "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
 )
 
-// GetNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo().
-func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) {
-    id := nodeGroup.Id()
-    baseNodeInfo, err := nodeGroup.TemplateNodeInfo()
-    if err != nil {
-        return nil, errors.ToAutoscalerError(errors.CloudProviderError, err)
-    }
-
-    labels.UpdateDeprecatedLabels(baseNodeInfo.Node().ObjectMeta.Labels)
-
-    sanitizedNode, typedErr := SanitizeNode(baseNodeInfo.Node(), id, taintConfig)
-    if err != nil {
-        return nil, typedErr
-    }
-    baseNodeInfo.SetNode(sanitizedNode)
-
-    pods, err := daemonset.GetDaemonSetPodsForNode(baseNodeInfo, daemonsets)
-    if err != nil {
-        return nil, errors.ToAutoscalerError(errors.InternalError, err)
-    }
-    for _, podInfo := range baseNodeInfo.Pods() {
-        pods = append(pods, &framework.PodInfo{Pod: podInfo.Pod})
-    }
-
-    sanitizedNodeInfo := framework.NewNodeInfo(sanitizedNode, nil, SanitizePods(pods, sanitizedNode)...)
-    return sanitizedNodeInfo, nil
-}
-
 // isVirtualNode determines if the node is created by virtual kubelet
 func isVirtualNode(node *apiv1.Node) bool {
     return node.ObjectMeta.Labels["type"] == "virtual-kubelet"
@@ -89,48 +54,6 @@ func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cl
     return result, nil
 }
 
-// DeepCopyNodeInfo clones the provided nodeInfo
-func DeepCopyNodeInfo(nodeInfo *framework.NodeInfo) *framework.NodeInfo {
-    newPods := make([]*framework.PodInfo, 0)
-    for _, podInfo := range nodeInfo.Pods() {
-        newPods = append(newPods, &framework.PodInfo{Pod: podInfo.Pod.DeepCopy()})
-    }
-
-    // Build a new node info.
-    newNodeInfo := framework.NewNodeInfo(nodeInfo.Node().DeepCopy(), nil, newPods...)
-    return newNodeInfo
-}
-
-// SanitizeNode cleans up nodes used for node group templates
-func SanitizeNode(node *apiv1.Node, nodeGroup string, taintConfig taints.TaintConfig) (*apiv1.Node, errors.AutoscalerError) {
-    newNode := node.DeepCopy()
-    nodeName := fmt.Sprintf("template-node-for-%s-%d", nodeGroup, rand.Int63())
-    newNode.Labels = make(map[string]string, len(node.Labels))
-    for k, v := range node.Labels {
-        if k != apiv1.LabelHostname {
-            newNode.Labels[k] = v
-        } else {
-            newNode.Labels[k] = nodeName
-        }
-    }
-    newNode.Name = nodeName
-    newNode.Spec.Taints = taints.SanitizeTaints(newNode.Spec.Taints, taintConfig)
-    return newNode, nil
-}
-
-// SanitizePods cleans up pods used for node group templates
-func SanitizePods(pods []*framework.PodInfo, sanitizedNode *apiv1.Node) []*framework.PodInfo {
-    // Update node name in pods.
-    sanitizedPods := make([]*framework.PodInfo, 0)
-    for _, pod := range pods {
-        sanitizedPod := pod.Pod.DeepCopy()
-        sanitizedPod.Spec.NodeName = sanitizedNode.Name
-        sanitizedPods = append(sanitizedPods, &framework.PodInfo{Pod: sanitizedPod})
-    }
-
-    return sanitizedPods
-}
-
 func hasHardInterPodAffinity(affinity *apiv1.Affinity) bool {
     if affinity == nil {
         return false
@@ -20,8 +20,6 @@ import (
     "testing"
     "time"
 
-    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
-    "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
     . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 
     "github.com/stretchr/testify/assert"
@@ -29,33 +27,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-func TestSanitizePods(t *testing.T) {
-    pod := BuildTestPod("p1", 80, 0)
-    pod.Spec.NodeName = "n1"
-    pods := []*framework.PodInfo{{Pod: pod}}
-
-    node := BuildTestNode("node", 1000, 1000)
-
-    resNode, err := SanitizeNode(node, "test-group", taints.TaintConfig{})
-    assert.NoError(t, err)
-    res := SanitizePods(pods, resNode)
-    assert.Equal(t, 1, len(res))
-}
-
-func TestSanitizeLabels(t *testing.T) {
-    oldNode := BuildTestNode("ng1-1", 1000, 1000)
-    oldNode.Labels = map[string]string{
-        apiv1.LabelHostname: "abc",
-        "x": "y",
-    }
-    node, err := SanitizeNode(oldNode, "bzium", taints.TaintConfig{})
-    assert.NoError(t, err)
-    assert.NotEqual(t, node.Labels[apiv1.LabelHostname], "abc", nil)
-    assert.Equal(t, node.Labels["x"], "y")
-    assert.NotEqual(t, node.Name, oldNode.Name)
-    assert.Equal(t, node.Labels[apiv1.LabelHostname], node.Name)
-}
-
 func TestGetNodeResource(t *testing.T) {
     node := BuildTestNode("n1", 1000, 2*MiB)
 
@@ -21,10 +21,10 @@ import (
 
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+    core_utils "k8s.io/autoscaler/cluster-autoscaler/simulator"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
-    "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
     "k8s.io/klog/v2"
 )
 
@@ -210,7 +210,7 @@ func (e *BinpackingNodeEstimator) addNewNodeToSnapshot(
     estimationState *estimationState,
     template *framework.NodeInfo,
 ) error {
-    newNodeInfo := scheduler.DeepCopyTemplateNode(template, fmt.Sprintf("e-%d", estimationState.newNodeNameIndex))
+    newNodeInfo := core_utils.NodeInfoSanitizedDeepCopy(template, fmt.Sprintf("e-%d", estimationState.newNodeNameIndex))
     if err := e.clusterSnapshot.AddNodeInfo(newNodeInfo); err != nil {
         return err
     }
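The estimator change above is the same substitution made in getUpcomingNodeInfos earlier: each simulated node is a sanitized deep copy of the template with a unique name, added to the snapshot one by one. A hedged sketch of that loop shape, assuming the snapshot type exposes AddNodeInfo as the hunk above shows; the function and variable names here are illustrative, not from the commit.

// Illustrative only: add count uniquely-named copies of a template NodeInfo to a snapshot.
package sketch

import (
    "fmt"

    "k8s.io/autoscaler/cluster-autoscaler/simulator"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

func addTemplateCopies(snapshot clustersnapshot.ClusterSnapshot, template *framework.NodeInfo, count int) error {
    for i := 0; i < count; i++ {
        // Each copy gets a distinct sanitized name so it can be keyed independently in the snapshot.
        copyInfo := simulator.NodeInfoSanitizedDeepCopy(template, fmt.Sprintf("e-%d", i))
        if err := snapshot.AddNodeInfo(copyInfo); err != nil {
            return err
        }
    }
    return nil
}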
@ -17,6 +17,7 @@ limitations under the License.
|
||||||
package nodeinfosprovider
|
package nodeinfosprovider
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"reflect"
|
"reflect"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -24,14 +25,13 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/core/utils"
|
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
caerror "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||||
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
|
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
||||||
|
|
||||||
klog "k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
const stabilizationDelay = 1 * time.Minute
|
const stabilizationDelay = 1 * time.Minute
|
||||||
|
@@ -72,44 +72,32 @@ func (p *MixedTemplateNodeInfoProvider) CleanUp() {
 }
 
 // Process returns the nodeInfos set for this cluster
-func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, now time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) {
+func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, now time.Time) (map[string]*framework.NodeInfo, caerror.AutoscalerError) {
     // TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
     // TODO(mwielgus): Review error policy - sometimes we may continue with partial errors.
     result := make(map[string]*framework.NodeInfo)
     seenGroups := make(map[string]bool)
 
-    podsForNodes, err := getPodsForNodes(ctx.ListerRegistry)
-    if err != nil {
-        return map[string]*framework.NodeInfo{}, err
-    }
-
     // processNode returns information whether the nodeTemplate was generated and if there was an error.
-    processNode := func(node *apiv1.Node) (bool, string, errors.AutoscalerError) {
+    processNode := func(node *apiv1.Node) (bool, string, caerror.AutoscalerError) {
         nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(node)
         if err != nil {
-            return false, "", errors.ToAutoscalerError(errors.CloudProviderError, err)
+            return false, "", caerror.ToAutoscalerError(caerror.CloudProviderError, err)
         }
         if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
             return false, "", nil
         }
         id := nodeGroup.Id()
         if _, found := result[id]; !found {
-            // Build nodeInfo.
-            sanitizedNode, err := utils.SanitizeNode(node, id, taintConfig)
+            nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(node.Name)
             if err != nil {
-                return false, "", err
+                return false, "", caerror.NewAutoscalerError(caerror.InternalError, "error while retrieving node %s from cluster snapshot - this shouldn't happen: %v", node.Name, err)
             }
-            nodeInfo, err := simulator.BuildNodeInfoForNode(sanitizedNode, podsForNodes[node.Name], daemonsets, p.forceDaemonSets)
-            if err != nil {
-                return false, "", err
-            }
-
-            var pods []*apiv1.Pod
-            for _, podInfo := range nodeInfo.Pods() {
-                pods = append(pods, podInfo.Pod)
-            }
-            sanitizedNodeInfo := framework.NewNodeInfo(sanitizedNode, nil, utils.SanitizePods(nodeInfo.Pods(), sanitizedNode)...)
-            result[id] = sanitizedNodeInfo
+            templateNodeInfo, caErr := simulator.SanitizedTemplateNodeInfoFromNodeInfo(nodeInfo, id, daemonsets, p.forceDaemonSets, taintConfig)
+            if caErr != nil {
+                return false, "", caErr
+            }
+            result[id] = templateNodeInfo
             return true, id, nil
         }
         return false, "", nil
@@ -125,7 +113,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext,
             return map[string]*framework.NodeInfo{}, typedErr
         }
         if added && p.nodeInfoCache != nil {
-            nodeInfoCopy := utils.DeepCopyNodeInfo(result[id])
+            nodeInfoCopy := result[id].DeepCopy()
             p.nodeInfoCache[id] = cacheItem{NodeInfo: nodeInfoCopy, added: time.Now()}
         }
     }
@@ -142,7 +130,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext,
             if p.isCacheItemExpired(cacheItem.added) {
                 delete(p.nodeInfoCache, id)
             } else {
-                result[id] = utils.DeepCopyNodeInfo(cacheItem.NodeInfo)
+                result[id] = cacheItem.NodeInfo.DeepCopy()
                 continue
             }
         }
@@ -150,13 +138,13 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext,
 
         // No good template, trying to generate one. This is called only if there are no
         // working nodes in the node groups. By default CA tries to use a real-world example.
-        nodeInfo, err := utils.GetNodeInfoFromTemplate(nodeGroup, daemonsets, taintConfig)
+        nodeInfo, err := simulator.SanitizedTemplateNodeInfoFromNodeGroup(nodeGroup, daemonsets, taintConfig)
         if err != nil {
-            if err == cloudprovider.ErrNotImplemented {
+            if errors.Is(err, cloudprovider.ErrNotImplemented) {
                 continue
             } else {
                 klog.Errorf("Unable to build proper template node for %s: %v", id, err)
-                return map[string]*framework.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err)
+                return map[string]*framework.NodeInfo{}, caerror.ToAutoscalerError(caerror.CloudProviderError, err)
             }
         }
         result[id] = nodeInfo
@@ -181,8 +169,8 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext,
         }
         nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(node)
         if err != nil {
-            return map[string]*framework.NodeInfo{}, errors.ToAutoscalerError(
-                errors.CloudProviderError, err)
+            return map[string]*framework.NodeInfo{}, caerror.ToAutoscalerError(
+                caerror.CloudProviderError, err)
         }
         if added {
             klog.Warningf("Built template for %s based on unready/unschedulable node %s", nodeGroup.Id(), node.Name)
@@ -192,19 +180,6 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext,
     return result, nil
 }
 
-func getPodsForNodes(listers kube_util.ListerRegistry) (map[string][]*apiv1.Pod, errors.AutoscalerError) {
-    pods, err := listers.AllPodLister().List()
-    if err != nil {
-        return nil, errors.ToAutoscalerError(errors.ApiCallError, err)
-    }
-    scheduledPods := kube_util.ScheduledPods(pods)
-    podsForNodes := map[string][]*apiv1.Pod{}
-    for _, p := range scheduledPods {
-        podsForNodes[p.Spec.NodeName] = append(podsForNodes[p.Spec.NodeName], p)
-    }
-    return podsForNodes, nil
-}
-
 func isNodeGoodTemplateCandidate(node *apiv1.Node, now time.Time) bool {
     ready, lastTransitionTime, _ := kube_util.GetReadinessState(node)
     stable := lastTransitionTime.Add(stabilizationDelay).Before(now)
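A condensed sketch of the new per-node flow, where the NodeInfo and its scheduled pods come from the cluster snapshot instead of a live pod lister. Only the called identifiers (GetNodeInfo, SanitizedTemplateNodeInfoFromNodeInfo, the caerror helpers) appear in the diff above; the wrapper function and package below are illustrative, and assume the snapshot value satisfies clustersnapshot.ClusterSnapshot.

package example

import (
    appsv1 "k8s.io/api/apps/v1"
    apiv1 "k8s.io/api/core/v1"

    "k8s.io/autoscaler/cluster-autoscaler/simulator"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    caerror "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
    "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
)

// templateForNode reads the node's NodeInfo (including its scheduled pods) from
// the snapshot and turns it into a sanitized template for the given node group.
// Illustrative sketch only - not part of the commit.
func templateForNode(snapshot clustersnapshot.ClusterSnapshot, node *apiv1.Node, nodeGroupId string, daemonsets []*appsv1.DaemonSet, forceDS bool, taintConfig taints.TaintConfig) (*framework.NodeInfo, caerror.AutoscalerError) {
    nodeInfo, err := snapshot.GetNodeInfo(node.Name)
    if err != nil {
        return nil, caerror.NewAutoscalerError(caerror.InternalError, "node %s not found in cluster snapshot: %v", node.Name, err)
    }
    return simulator.SanitizedTemplateNodeInfoFromNodeInfo(nodeInfo, nodeGroupId, daemonsets, forceDS, taintConfig)
}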
@@ -22,6 +22,7 @@ import (
 
     testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
     "k8s.io/autoscaler/cluster-autoscaler/context"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
     kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -30,6 +31,7 @@ import (
     schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"
 
     "github.com/stretchr/testify/assert"
+
     appsv1 "k8s.io/api/apps/v1"
     apiv1 "k8s.io/api/core/v1"
 )
@@ -81,14 +83,20 @@ func TestGetNodeInfosForGroups(t *testing.T) {
     predicateChecker, err := predicatechecker.NewTestPredicateChecker()
     assert.NoError(t, err)
 
+    nodes := []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1}
+    snapshot := clustersnapshot.NewBasicClusterSnapshot()
+    err = snapshot.SetClusterState(nodes, nil)
+    assert.NoError(t, err)
+
     ctx := context.AutoscalingContext{
         CloudProvider: provider1,
+        ClusterSnapshot: snapshot,
         PredicateChecker: predicateChecker,
         AutoscalingKubeClients: context.AutoscalingKubeClients{
             ListerRegistry: registry,
         },
     }
-    res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
     assert.Equal(t, 5, len(res))
     info, found := res["ng1"]
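The updated tests all seed the snapshot the same way before building the AutoscalingContext. A hedged sketch of that pattern as a standalone helper (no such helper exists in the commit; the function and package names are illustrative):

package example

import (
    "testing"

    apiv1 "k8s.io/api/core/v1"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
)

// newSeededSnapshot builds a BasicClusterSnapshot and loads the test nodes into
// it. Passing nil pods is enough when a test only needs nodes; tests that care
// about scheduled pods would pass them as the second argument.
func newSeededSnapshot(t *testing.T, nodes []*apiv1.Node) clustersnapshot.ClusterSnapshot {
    t.Helper()
    snapshot := clustersnapshot.NewBasicClusterSnapshot()
    if err := snapshot.SetClusterState(nodes, nil); err != nil {
        t.Fatalf("failed to set cluster state: %v", err)
    }
    return snapshot
}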
@@ -110,6 +118,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
     // Test for a nodegroup without nodes and TemplateNodeInfo not implemented by cloud proivder
     ctx = context.AutoscalingContext{
         CloudProvider: provider2,
+        ClusterSnapshot: clustersnapshot.NewBasicClusterSnapshot(),
         PredicateChecker: predicateChecker,
         AutoscalingKubeClients: context.AutoscalingKubeClients{
             ListerRegistry: registry,
@@ -165,16 +174,22 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
     predicateChecker, err := predicatechecker.NewTestPredicateChecker()
     assert.NoError(t, err)
 
+    nodes := []*apiv1.Node{unready4, unready3, ready2, ready1}
+    snapshot := clustersnapshot.NewBasicClusterSnapshot()
+    err = snapshot.SetClusterState(nodes, nil)
+    assert.NoError(t, err)
+
     // Fill cache
     ctx := context.AutoscalingContext{
         CloudProvider: provider1,
+        ClusterSnapshot: snapshot,
         PredicateChecker: predicateChecker,
         AutoscalingKubeClients: context.AutoscalingKubeClients{
             ListerRegistry: registry,
         },
     }
     niProcessor := NewMixedTemplateNodeInfoProvider(&cacheTtl, false)
-    res, err := niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    res, err := niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
     // Check results
     assert.Equal(t, 4, len(res))
@@ -208,7 +223,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
     assert.Equal(t, "ng3", lastDeletedGroup)
 
     // Check cache with all nodes removed
-    res, err = niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    res, err = niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
     // Check results
     assert.Equal(t, 2, len(res))
|
||||||
// Fill cache manually
|
// Fill cache manually
|
||||||
infoNg4Node6 := framework.NewTestNodeInfo(ready6.DeepCopy())
|
infoNg4Node6 := framework.NewTestNodeInfo(ready6.DeepCopy())
|
||||||
niProcessor.nodeInfoCache = map[string]cacheItem{"ng4": {NodeInfo: infoNg4Node6, added: now}}
|
niProcessor.nodeInfoCache = map[string]cacheItem{"ng4": {NodeInfo: infoNg4Node6, added: now}}
|
||||||
res, err = niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
|
res, err = niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
|
||||||
// Check if cache was used
|
// Check if cache was used
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, 2, len(res))
|
assert.Equal(t, 2, len(res))
|
||||||
|
@ -253,8 +268,14 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
|
||||||
predicateChecker, err := predicatechecker.NewTestPredicateChecker()
|
predicateChecker, err := predicatechecker.NewTestPredicateChecker()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
nodes := []*apiv1.Node{ready1}
|
||||||
|
snapshot := clustersnapshot.NewBasicClusterSnapshot()
|
||||||
|
err = snapshot.SetClusterState(nodes, nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
ctx := context.AutoscalingContext{
|
ctx := context.AutoscalingContext{
|
||||||
CloudProvider: provider,
|
CloudProvider: provider,
|
||||||
|
ClusterSnapshot: snapshot,
|
||||||
PredicateChecker: predicateChecker,
|
PredicateChecker: predicateChecker,
|
||||||
AutoscalingKubeClients: context.AutoscalingKubeClients{
|
AutoscalingKubeClients: context.AutoscalingKubeClients{
|
||||||
ListerRegistry: registry,
|
ListerRegistry: registry,
|
||||||
|
@@ -272,7 +293,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
     provider.AddNode("ng1", ready1)
 
     assert.Equal(t, 2, len(niProcessor1.nodeInfoCache))
-    _, err = niProcessor1.Process(&ctx, []*apiv1.Node{ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    _, err = niProcessor1.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
     assert.Equal(t, 1, len(niProcessor1.nodeInfoCache))
 
@@ -283,7 +304,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
         "ng2": {NodeInfo: tni, added: now.Add(-2 * time.Second)},
     }
     assert.Equal(t, 2, len(niProcessor2.nodeInfoCache))
-    _, err = niProcessor1.Process(&ctx, []*apiv1.Node{ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    _, err = niProcessor1.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
     assert.Equal(t, 2, len(niProcessor2.nodeInfoCache))
 
@@ -93,6 +93,24 @@ func (n *NodeInfo) ToScheduler() *schedulerframework.NodeInfo {
     return n.schedNodeInfo
 }
 
+// DeepCopy clones the NodeInfo.
+func (n *NodeInfo) DeepCopy() *NodeInfo {
+    var newPods []*PodInfo
+    for _, podInfo := range n.Pods() {
+        var newClaims []*resourceapi.ResourceClaim
+        for _, claim := range podInfo.NeededResourceClaims {
+            newClaims = append(newClaims, claim.DeepCopy())
+        }
+        newPods = append(newPods, &PodInfo{Pod: podInfo.Pod.DeepCopy(), NeededResourceClaims: newClaims})
+    }
+    var newSlices []*resourceapi.ResourceSlice
+    for _, slice := range n.LocalResourceSlices {
+        newSlices = append(newSlices, slice.DeepCopy())
+    }
+    // Node() can be nil, but DeepCopy() handles nil receivers gracefully.
+    return NewNodeInfo(n.Node().DeepCopy(), newSlices, newPods...)
+}
+
 // NewNodeInfo returns a new internal NodeInfo from the provided data.
 func NewNodeInfo(node *apiv1.Node, slices []*resourceapi.ResourceSlice, pods ...*PodInfo) *NodeInfo {
     result := &NodeInfo{
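A brief, hypothetical illustration of how the new NodeInfo.DeepCopy method is meant to be used by callers that cache NodeInfos; the cache map, function, and test-builder values are made up for the example.

package example

import (
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

// cacheAndServe keeps a private copy in the cache and hands another copy to the
// caller, so neither side can mutate the other's NodeInfo through shared
// pointers (the Node, Pods, ResourceClaims, and ResourceSlices are all cloned).
// Illustrative sketch only - not part of the commit.
func cacheAndServe(cache map[string]*framework.NodeInfo, id string) *framework.NodeInfo {
    original := framework.NewNodeInfo(BuildTestNode("n1", 1000, 1000), nil,
        &framework.PodInfo{Pod: BuildTestPod("p1", 100, 0)})
    cache[id] = original.DeepCopy()
    return original.DeepCopy()
}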
@@ -208,6 +208,91 @@ func TestNodeInfo(t *testing.T) {
     }
 }
 
+func TestDeepCopyNodeInfo(t *testing.T) {
+    node := test.BuildTestNode("node", 1000, 1000)
+    pods := []*PodInfo{
+        {Pod: test.BuildTestPod("p1", 80, 0, test.WithNodeName(node.Name))},
+        {
+            Pod: test.BuildTestPod("p2", 80, 0, test.WithNodeName(node.Name)),
+            NeededResourceClaims: []*resourceapi.ResourceClaim{
+                {ObjectMeta: v1.ObjectMeta{Name: "claim1"}, Spec: resourceapi.ResourceClaimSpec{Devices: resourceapi.DeviceClaim{Requests: []resourceapi.DeviceRequest{{Name: "req1"}}}}},
+                {ObjectMeta: v1.ObjectMeta{Name: "claim2"}, Spec: resourceapi.ResourceClaimSpec{Devices: resourceapi.DeviceClaim{Requests: []resourceapi.DeviceRequest{{Name: "req2"}}}}},
+            },
+        },
+    }
+    slices := []*resourceapi.ResourceSlice{
+        {ObjectMeta: v1.ObjectMeta{Name: "slice1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node"}},
+        {ObjectMeta: v1.ObjectMeta{Name: "slice2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node"}},
+    }
+
+    for _, tc := range []struct {
+        testName string
+        nodeInfo *NodeInfo
+    }{
+        {
+            testName: "empty NodeInfo",
+            nodeInfo: NewNodeInfo(nil, nil),
+        },
+        {
+            testName: "NodeInfo with only Node set",
+            nodeInfo: NewNodeInfo(node, nil),
+        },
+        {
+            testName: "NodeInfo with only Pods set",
+            nodeInfo: NewNodeInfo(nil, nil, pods...),
+        },
+        {
+            testName: "NodeInfo with both Node and Pods set",
+            nodeInfo: NewNodeInfo(node, nil, pods...),
+        },
+        {
+            testName: "NodeInfo with Node, ResourceSlices, and Pods set",
+            nodeInfo: NewNodeInfo(node, slices, pods...),
+        },
+    } {
+        t.Run(tc.testName, func(t *testing.T) {
+            // Verify that the contents are identical after copying.
+            nodeInfoCopy := tc.nodeInfo.DeepCopy()
+            if diff := cmp.Diff(tc.nodeInfo, nodeInfoCopy,
+                cmp.AllowUnexported(schedulerframework.NodeInfo{}, NodeInfo{}, PodInfo{}, podExtraInfo{}),
+                // We don't care about this field staying the same, and it differs because it's a global counter bumped
+                // on every AddPod.
+                cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
+            ); diff != "" {
+                t.Errorf("nodeInfo differs after DeepCopyNodeInfo, diff (-want +got): %s", diff)
+            }
+
+            // Verify that the object addresses changed in the copy.
+            if tc.nodeInfo == nodeInfoCopy {
+                t.Error("nodeInfo address identical after DeepCopyNodeInfo")
+            }
+            if tc.nodeInfo.ToScheduler() == nodeInfoCopy.ToScheduler() {
+                t.Error("schedulerframework.NodeInfo address identical after DeepCopyNodeInfo")
+            }
+            for i := range len(tc.nodeInfo.LocalResourceSlices) {
+                if tc.nodeInfo.LocalResourceSlices[i] == nodeInfoCopy.LocalResourceSlices[i] {
+                    t.Errorf("%d-th LocalResourceSlice address identical after DeepCopyNodeInfo", i)
+                }
+            }
+            for podIndex := range len(tc.nodeInfo.Pods()) {
+                oldPodInfo := tc.nodeInfo.Pods()[podIndex]
+                newPodInfo := nodeInfoCopy.Pods()[podIndex]
+                if oldPodInfo == newPodInfo {
+                    t.Errorf("%d-th PodInfo address identical after DeepCopyNodeInfo", podIndex)
+                }
+                if oldPodInfo.Pod == newPodInfo.Pod {
+                    t.Errorf("%d-th PodInfo.Pod address identical after DeepCopyNodeInfo", podIndex)
+                }
+                for claimIndex := range len(newPodInfo.NeededResourceClaims) {
+                    if oldPodInfo.NeededResourceClaims[podIndex] == newPodInfo.NeededResourceClaims[podIndex] {
+                        t.Errorf("%d-th PodInfo - %d-th NeededResourceClaim address identical after DeepCopyNodeInfo", podIndex, claimIndex)
+                    }
+                }
+            }
+        })
+    }
+}
+
 func testPodInfos(pods []*apiv1.Pod, addClaims bool) []*PodInfo {
     var result []*PodInfo
     for _, pod := range pods {
@@ -0,0 +1,152 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package simulator
+
+import (
+    "fmt"
+    "math/rand"
+
+    appsv1 "k8s.io/api/apps/v1"
+    apiv1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/uuid"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
+    "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
+    "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+    "k8s.io/autoscaler/cluster-autoscaler/utils/labels"
+    pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
+    "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
+)
+
+type nodeGroupTemplateNodeInfoGetter interface {
+    Id() string
+    TemplateNodeInfo() (*framework.NodeInfo, error)
+}
+
+// SanitizedTemplateNodeInfoFromNodeGroup returns a template NodeInfo object based on NodeGroup.TemplateNodeInfo(). The template is sanitized, and only
+// contains the pods that should appear on a new Node from the same node group (e.g. DaemonSet pods).
+func SanitizedTemplateNodeInfoFromNodeGroup(nodeGroup nodeGroupTemplateNodeInfoGetter, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) {
+    baseNodeInfo, err := nodeGroup.TemplateNodeInfo()
+    if err != nil {
+        return nil, errors.ToAutoscalerError(errors.CloudProviderError, err).AddPrefix("failed to obtain template NodeInfo from node group %q: ", nodeGroup.Id())
+    }
+    labels.UpdateDeprecatedLabels(baseNodeInfo.Node().ObjectMeta.Labels)
+
+    return SanitizedTemplateNodeInfoFromNodeInfo(baseNodeInfo, nodeGroup.Id(), daemonsets, true, taintConfig)
+}
+
+// SanitizedTemplateNodeInfoFromNodeInfo returns a template NodeInfo object based on a real example NodeInfo from the cluster. The template is sanitized, and only
+// contains the pods that should appear on a new Node from the same node group (e.g. DaemonSet pods).
+func SanitizedTemplateNodeInfoFromNodeInfo(example *framework.NodeInfo, nodeGroupId string, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) {
+    randSuffix := fmt.Sprintf("%d", rand.Int63())
+    newNodeNameBase := fmt.Sprintf("template-node-for-%s", nodeGroupId)
+
+    // We need to sanitize the example before determining the DS pods, since taints are checked there, and
+    // we might need to filter some out during sanitization.
+    sanitizedExample := sanitizeNodeInfo(example, newNodeNameBase, randSuffix, &taintConfig)
+    expectedPods, err := podsExpectedOnFreshNode(sanitizedExample, daemonsets, forceDaemonSets, randSuffix)
+    if err != nil {
+        return nil, err
+    }
+    // No need to sanitize the expected pods again - they either come from sanitizedExample and were sanitized above,
+    // or were added by podsExpectedOnFreshNode and sanitized there.
+    return framework.NewNodeInfo(sanitizedExample.Node(), nil, expectedPods...), nil
+}
+
+// NodeInfoSanitizedDeepCopy duplicates the provided template NodeInfo, returning a fresh NodeInfo that can be injected into the cluster snapshot.
+// The NodeInfo is sanitized (names, UIDs are changed, etc.), so that it can be injected along other copies created from the same template.
+func NodeInfoSanitizedDeepCopy(template *framework.NodeInfo, suffix string) *framework.NodeInfo {
+    // Template node infos should already have taints and pods filtered, so not setting these parameters.
+    return sanitizeNodeInfo(template, template.Node().Name, suffix, nil)
+}
+
+func sanitizeNodeInfo(nodeInfo *framework.NodeInfo, newNodeNameBase string, namesSuffix string, taintConfig *taints.TaintConfig) *framework.NodeInfo {
+    freshNodeName := fmt.Sprintf("%s-%s", newNodeNameBase, namesSuffix)
+    freshNode := sanitizeNode(nodeInfo.Node(), freshNodeName, taintConfig)
+    result := framework.NewNodeInfo(freshNode, nil)
+
+    for _, podInfo := range nodeInfo.Pods() {
+        freshPod := sanitizePod(podInfo.Pod, freshNode.Name, namesSuffix)
+        result.AddPod(&framework.PodInfo{Pod: freshPod})
+    }
+    return result
+}
+
+func sanitizeNode(node *apiv1.Node, newName string, taintConfig *taints.TaintConfig) *apiv1.Node {
+    newNode := node.DeepCopy()
+    newNode.UID = uuid.NewUUID()
+
+    newNode.Name = newName
+    if newNode.Labels == nil {
+        newNode.Labels = make(map[string]string)
+    }
+    newNode.Labels[apiv1.LabelHostname] = newName
+
+    if taintConfig != nil {
+        newNode.Spec.Taints = taints.SanitizeTaints(newNode.Spec.Taints, *taintConfig)
+    }
+    return newNode
+}
+
+func sanitizePod(pod *apiv1.Pod, nodeName, nameSuffix string) *apiv1.Pod {
+    sanitizedPod := pod.DeepCopy()
+    sanitizedPod.UID = uuid.NewUUID()
+    sanitizedPod.Name = fmt.Sprintf("%s-%s", pod.Name, nameSuffix)
+    sanitizedPod.Spec.NodeName = nodeName
+    return sanitizedPod
+}
+
+func podsExpectedOnFreshNode(sanitizedExampleNodeInfo *framework.NodeInfo, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool, nameSuffix string) ([]*framework.PodInfo, errors.AutoscalerError) {
+    var result []*framework.PodInfo
+    runningDS := make(map[types.UID]bool)
+    for _, pod := range sanitizedExampleNodeInfo.Pods() {
+        // Ignore scheduled pods in deletion phase
+        if pod.DeletionTimestamp != nil {
+            continue
+        }
+        // Add scheduled mirror and DS pods
+        if pod_util.IsMirrorPod(pod.Pod) || pod_util.IsDaemonSetPod(pod.Pod) {
+            result = append(result, pod)
+        }
+        // Mark DS pods as running
+        controllerRef := metav1.GetControllerOf(pod)
+        if controllerRef != nil && controllerRef.Kind == "DaemonSet" {
+            runningDS[controllerRef.UID] = true
+        }
+    }
+    // Add all pending DS pods if force scheduling DS
+    if forceDaemonSets {
+        var pendingDS []*appsv1.DaemonSet
+        for _, ds := range daemonsets {
+            if !runningDS[ds.UID] {
+                pendingDS = append(pendingDS, ds)
+            }
+        }
+        // The provided nodeInfo has to have taints properly sanitized, or this won't work correctly.
+        daemonPods, err := daemonset.GetDaemonSetPodsForNode(sanitizedExampleNodeInfo, pendingDS)
+        if err != nil {
+            return nil, errors.ToAutoscalerError(errors.InternalError, err)
+        }
+        for _, pod := range daemonPods {
+            // There's technically no need to sanitize these pods since they're created from scratch, but
+            // it's nice to have the same suffix for all names in one sanitized NodeInfo when debugging.
+            result = append(result, &framework.PodInfo{Pod: sanitizePod(pod.Pod, sanitizedExampleNodeInfo.Node().Name, nameSuffix)})
+        }
+    }
+    return result, nil
+}
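The exported template builders above all funnel through sanitizeNodeInfo and podsExpectedOnFreshNode, so DaemonSet pods and taints are handled the same way whether the template comes from a node group or from a live node's NodeInfo. A minimal usage sketch, assuming the caller already holds a cloudprovider.NodeGroup (which provides the Id and TemplateNodeInfo methods the helper expects); the wrapper function and package are illustrative only.

package example

import (
    appsv1 "k8s.io/api/apps/v1"

    "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
    "k8s.io/autoscaler/cluster-autoscaler/simulator"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    caerror "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
    "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
)

// groupTemplate builds a sanitized template NodeInfo for a node group; the
// returned NodeInfo carries only the pods expected on a fresh node (DaemonSet
// and mirror pods), with names, UIDs, and the hostname label rewritten.
// Illustrative sketch only - not part of the commit.
func groupTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, caerror.AutoscalerError) {
    return simulator.SanitizedTemplateNodeInfoFromNodeGroup(nodeGroup, daemonsets, taintConfig)
}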
@@ -0,0 +1,511 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package simulator
+
+import (
+    "fmt"
+    "math/rand"
+    "strings"
+    "testing"
+    "time"
+
+    "github.com/google/go-cmp/cmp"
+    "github.com/google/go-cmp/cmp/cmpopts"
+
+    appsv1 "k8s.io/api/apps/v1"
+    apiv1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/autoscaler/cluster-autoscaler/config"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
+    "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+    "k8s.io/autoscaler/cluster-autoscaler/utils/taints"
+    . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
+    "k8s.io/kubernetes/pkg/controller/daemon"
+)
+
+var (
+    ds1 = &appsv1.DaemonSet{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "ds1",
+            Namespace: "ds1-namespace",
+            UID: types.UID("ds1"),
+        },
+    }
+    ds2 = &appsv1.DaemonSet{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "ds2",
+            Namespace: "ds2-namespace",
+            UID: types.UID("ds2"),
+        },
+    }
+    ds3 = &appsv1.DaemonSet{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "ds3",
+            Namespace: "ds3-namespace",
+            UID: types.UID("ds3"),
+        },
+        Spec: appsv1.DaemonSetSpec{
+            Template: apiv1.PodTemplateSpec{
+                Spec: apiv1.PodSpec{
+                    NodeSelector: map[string]string{"key": "value"},
+                },
+            },
+        },
+    }
+    testDaemonSets = []*appsv1.DaemonSet{ds1, ds2, ds3}
+)
+
+func TestSanitizedTemplateNodeInfoFromNodeGroup(t *testing.T) {
+    exampleNode := BuildTestNode("n", 1000, 10)
+    exampleNode.Spec.Taints = []apiv1.Taint{
+        {Key: taints.ToBeDeletedTaint, Value: "2312532423", Effect: apiv1.TaintEffectNoSchedule},
+    }
+
+    for _, tc := range []struct {
+        testName string
+        nodeGroup *fakeNodeGroup
+
+        wantPods []*apiv1.Pod
+        wantCpError bool
+    }{
+        {
+            testName: "node group error results in an error",
+            nodeGroup: &fakeNodeGroup{templateNodeInfoErr: fmt.Errorf("test error")},
+            wantCpError: true,
+        },
+        {
+            testName: "simple template with no pods",
+            nodeGroup: &fakeNodeGroup{
+                templateNodeInfoResult: framework.NewNodeInfo(exampleNode, nil),
+            },
+            wantPods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+                buildDSPod(ds2, "n"),
+            },
+        },
+        {
+            testName: "template with all kinds of pods",
+            nodeGroup: &fakeNodeGroup{
+                templateNodeInfoResult: framework.NewNodeInfo(exampleNode, nil,
+                    &framework.PodInfo{Pod: BuildScheduledTestPod("p1", 100, 1, "n")},
+                    &framework.PodInfo{Pod: BuildScheduledTestPod("p2", 100, 1, "n")},
+                    &framework.PodInfo{Pod: SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n"))},
+                    &framework.PodInfo{Pod: setDeletionTimestamp(SetMirrorPodSpec(BuildScheduledTestPod("p4", 100, 1, "n")))},
+                    &framework.PodInfo{Pod: buildDSPod(ds1, "n")},
+                    &framework.PodInfo{Pod: setDeletionTimestamp(buildDSPod(ds2, "n"))},
+                ),
+            },
+            wantPods: []*apiv1.Pod{
+                SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")),
+                buildDSPod(ds1, "n"),
+                buildDSPod(ds2, "n"),
+            },
+        },
+    } {
+        t.Run(tc.testName, func(t *testing.T) {
+            templateNodeInfo, err := SanitizedTemplateNodeInfoFromNodeGroup(tc.nodeGroup, testDaemonSets, taints.TaintConfig{})
+            if tc.wantCpError {
+                if err == nil || err.Type() != errors.CloudProviderError {
+                    t.Fatalf("TemplateNodeInfoFromNodeGroupTemplate(): want CloudProviderError, but got: %v (%T)", err, err)
+                } else {
+                    return
+                }
+            }
+            if err != nil {
+                t.Fatalf("TemplateNodeInfoFromNodeGroupTemplate(): expected no error, but got %v", err)
+            }
+
+            // Verify that the taints are correctly sanitized.
+            // Verify that the NodeInfo is sanitized using the node group id as base.
+            // Pass empty string as nameSuffix so that it's auto-determined from the sanitized templateNodeInfo, because
+            // TemplateNodeInfoFromNodeGroupTemplate randomizes the suffix.
+            // Pass non-empty expectedPods to verify that the set of pods is changed as expected (e.g. DS pods added, non-DS/deleted pods removed).
+            if err := verifyNodeInfoSanitization(tc.nodeGroup.templateNodeInfoResult, templateNodeInfo, tc.wantPods, "template-node-for-"+tc.nodeGroup.id, "", nil); err != nil {
+                t.Fatalf("TemplateNodeInfoFromExampleNodeInfo(): NodeInfo wasn't properly sanitized: %v", err)
+            }
+        })
+    }
+}
+
+func TestSanitizedTemplateNodeInfoFromNodeInfo(t *testing.T) {
+    exampleNode := BuildTestNode("n", 1000, 10)
+    exampleNode.Spec.Taints = []apiv1.Taint{
+        {Key: taints.ToBeDeletedTaint, Value: "2312532423", Effect: apiv1.TaintEffectNoSchedule},
+    }
+
+    testCases := []struct {
+        name string
+        pods []*apiv1.Pod
+        daemonSets []*appsv1.DaemonSet
+        forceDS bool
+
+        wantPods []*apiv1.Pod
+        wantError bool
+    }{
+        {
+            name: "node without any pods",
+        },
+        {
+            name: "node with non-DS/mirror pods",
+            pods: []*apiv1.Pod{
+                BuildScheduledTestPod("p1", 100, 1, "n"),
+                BuildScheduledTestPod("p2", 100, 1, "n"),
+            },
+            wantPods: []*apiv1.Pod{},
+        },
+        {
+            name: "node with a mirror pod",
+            pods: []*apiv1.Pod{
+                SetMirrorPodSpec(BuildScheduledTestPod("p1", 100, 1, "n")),
+            },
+            wantPods: []*apiv1.Pod{
+                SetMirrorPodSpec(BuildScheduledTestPod("p1", 100, 1, "n")),
+            },
+        },
+        {
+            name: "node with a deleted mirror pod",
+            pods: []*apiv1.Pod{
+                SetMirrorPodSpec(BuildScheduledTestPod("p1", 100, 1, "n")),
+                setDeletionTimestamp(SetMirrorPodSpec(BuildScheduledTestPod("p2", 100, 1, "n"))),
+            },
+            wantPods: []*apiv1.Pod{
+                SetMirrorPodSpec(BuildScheduledTestPod("p1", 100, 1, "n")),
+            },
+        },
+        {
+            name: "node with DS pods [forceDS=false, no daemon sets]",
+            pods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+                setDeletionTimestamp(buildDSPod(ds2, "n")),
+            },
+            wantPods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+            },
+        },
+        {
+            name: "node with DS pods [forceDS=false, some daemon sets]",
+            pods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+                setDeletionTimestamp(buildDSPod(ds2, "n")),
+            },
+            daemonSets: testDaemonSets,
+            wantPods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+            },
+        },
+        {
+            name: "node with a DS pod [forceDS=true, no daemon sets]",
+            pods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+                setDeletionTimestamp(buildDSPod(ds2, "n")),
+            },
+            wantPods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+            },
+            forceDS: true,
+        },
+        {
+            name: "node with a DS pod [forceDS=true, some daemon sets]",
+            pods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+                setDeletionTimestamp(buildDSPod(ds2, "n")),
+            },
+            daemonSets: testDaemonSets,
+            forceDS: true,
+            wantPods: []*apiv1.Pod{
+                buildDSPod(ds1, "n"),
+                buildDSPod(ds2, "n"),
+            },
+        },
+        {
+            name: "everything together [forceDS=false]",
+            pods: []*apiv1.Pod{
+                BuildScheduledTestPod("p1", 100, 1, "n"),
+                BuildScheduledTestPod("p2", 100, 1, "n"),
+                SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")),
+                setDeletionTimestamp(SetMirrorPodSpec(BuildScheduledTestPod("p4", 100, 1, "n"))),
+                buildDSPod(ds1, "n"),
+                setDeletionTimestamp(buildDSPod(ds2, "n")),
+            },
+            daemonSets: testDaemonSets,
+            wantPods: []*apiv1.Pod{
+                SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")),
+                buildDSPod(ds1, "n"),
+            },
+        },
+        {
+            name: "everything together [forceDS=true]",
+            pods: []*apiv1.Pod{
+                BuildScheduledTestPod("p1", 100, 1, "n"),
+                BuildScheduledTestPod("p2", 100, 1, "n"),
+                SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")),
+                setDeletionTimestamp(SetMirrorPodSpec(BuildScheduledTestPod("p4", 100, 1, "n"))),
+                buildDSPod(ds1, "n"),
+                setDeletionTimestamp(buildDSPod(ds2, "n")),
+            },
+            daemonSets: testDaemonSets,
+            forceDS: true,
+            wantPods: []*apiv1.Pod{
+                SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")),
+                buildDSPod(ds1, "n"),
+                buildDSPod(ds2, "n"),
+            },
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            nodeGroupId := "nodeGroupId"
+            exampleNodeInfo := framework.NewNodeInfo(exampleNode, nil)
+            for _, pod := range tc.pods {
+                exampleNodeInfo.AddPod(&framework.PodInfo{Pod: pod})
+            }
+
+            templateNodeInfo, err := SanitizedTemplateNodeInfoFromNodeInfo(exampleNodeInfo, nodeGroupId, tc.daemonSets, tc.forceDS, taints.TaintConfig{})
+            if tc.wantError {
+                if err == nil {
+                    t.Fatal("TemplateNodeInfoFromExampleNodeInfo(): want error, but got nil")
+                } else {
+                    return
+                }
+            }
+            if err != nil {
+                t.Fatalf("TemplateNodeInfoFromExampleNodeInfo(): expected no error, but got %v", err)
+            }
+
+            // Verify that the taints are correctly sanitized.
+            // Verify that the NodeInfo is sanitized using the node group id as base.
+            // Pass empty string as nameSuffix so that it's auto-determined from the sanitized templateNodeInfo, because
+            // TemplateNodeInfoFromExampleNodeInfo randomizes the suffix.
+            // Pass non-empty expectedPods to verify that the set of pods is changed as expected (e.g. DS pods added, non-DS/deleted pods removed).
+            if err := verifyNodeInfoSanitization(exampleNodeInfo, templateNodeInfo, tc.wantPods, "template-node-for-"+nodeGroupId, "", nil); err != nil {
+                t.Fatalf("TemplateNodeInfoFromExampleNodeInfo(): NodeInfo wasn't properly sanitized: %v", err)
+            }
+        })
+    }
+}
+
+func TestNodeInfoSanitizedDeepCopy(t *testing.T) {
+    nodeName := "template-node"
+    templateNode := BuildTestNode(nodeName, 1000, 1000)
+    templateNode.Spec.Taints = []apiv1.Taint{
+        {Key: "startup-taint", Value: "true", Effect: apiv1.TaintEffectNoSchedule},
+        {Key: taints.ToBeDeletedTaint, Value: "2312532423", Effect: apiv1.TaintEffectNoSchedule},
+        {Key: "a", Value: "b", Effect: apiv1.TaintEffectNoSchedule},
+    }
+    pods := []*framework.PodInfo{
+        {Pod: BuildTestPod("p1", 80, 0, WithNodeName(nodeName))},
+        {Pod: BuildTestPod("p2", 80, 0, WithNodeName(nodeName))},
+    }
+    templateNodeInfo := framework.NewNodeInfo(templateNode, nil, pods...)
+
+    suffix := "abc"
+    freshNodeInfo := NodeInfoSanitizedDeepCopy(templateNodeInfo, suffix)
+    // Verify that the taints are not sanitized (they should be sanitized in the template already).
+    // Verify that the NodeInfo is sanitized using the template Node name as base.
+    initialTaints := templateNodeInfo.Node().Spec.Taints
+    if err := verifyNodeInfoSanitization(templateNodeInfo, freshNodeInfo, nil, templateNodeInfo.Node().Name, suffix, initialTaints); err != nil {
+        t.Fatalf("FreshNodeInfoFromTemplateNodeInfo(): NodeInfo wasn't properly sanitized: %v", err)
+    }
+}
+
+func TestSanitizeNodeInfo(t *testing.T) {
+    oldNodeName := "old-node"
+    basicNode := BuildTestNode(oldNodeName, 1000, 1000)
+
+    labelsNode := basicNode.DeepCopy()
+    labelsNode.Labels = map[string]string{
+        apiv1.LabelHostname: oldNodeName,
+        "a": "b",
+        "x": "y",
+    }
+
+    taintsNode := basicNode.DeepCopy()
+    taintsNode.Spec.Taints = []apiv1.Taint{
+        {Key: "startup-taint", Value: "true", Effect: apiv1.TaintEffectNoSchedule},
+        {Key: taints.ToBeDeletedTaint, Value: "2312532423", Effect: apiv1.TaintEffectNoSchedule},
+        {Key: "a", Value: "b", Effect: apiv1.TaintEffectNoSchedule},
+    }
+    taintConfig := taints.NewTaintConfig(config.AutoscalingOptions{StartupTaints: []string{"startup-taint"}})
+
+    taintsLabelsNode := labelsNode.DeepCopy()
+    taintsLabelsNode.Spec.Taints = taintsNode.Spec.Taints
+
+    pods := []*framework.PodInfo{
+        {Pod: BuildTestPod("p1", 80, 0, WithNodeName(oldNodeName))},
+        {Pod: BuildTestPod("p2", 80, 0, WithNodeName(oldNodeName))},
+    }
+
+    for _, tc := range []struct {
+        testName string
+
+        nodeInfo *framework.NodeInfo
+        taintConfig *taints.TaintConfig
+
+        wantTaints []apiv1.Taint
+    }{
+        {
+            testName: "sanitize node",
+            nodeInfo: framework.NewTestNodeInfo(basicNode),
+        },
+        {
+            testName: "sanitize node labels",
+            nodeInfo: framework.NewTestNodeInfo(labelsNode),
+        },
+        {
+            testName: "sanitize node taints - disabled",
+            nodeInfo: framework.NewTestNodeInfo(taintsNode),
+            taintConfig: nil,
+            wantTaints: taintsNode.Spec.Taints,
+        },
+        {
+            testName: "sanitize node taints - enabled",
+            nodeInfo: framework.NewTestNodeInfo(taintsNode),
+            taintConfig: &taintConfig,
+            wantTaints: []apiv1.Taint{{Key: "a", Value: "b", Effect: apiv1.TaintEffectNoSchedule}},
+        },
+        {
+            testName: "sanitize pods",
+            nodeInfo: framework.NewNodeInfo(basicNode, nil, pods...),
+        },
+        {
+            testName: "sanitize everything",
+            nodeInfo: framework.NewNodeInfo(taintsLabelsNode, nil, pods...),
+            taintConfig: &taintConfig,
+            wantTaints: []apiv1.Taint{{Key: "a", Value: "b", Effect: apiv1.TaintEffectNoSchedule}},
+        },
+    } {
+        t.Run(tc.testName, func(t *testing.T) {
+            newNameBase := "node"
+            suffix := "abc"
+            sanitizedNodeInfo := sanitizeNodeInfo(tc.nodeInfo, newNameBase, suffix, tc.taintConfig)
+            if err := verifyNodeInfoSanitization(tc.nodeInfo, sanitizedNodeInfo, nil, newNameBase, suffix, tc.wantTaints); err != nil {
+                t.Fatalf("sanitizeNodeInfo(): NodeInfo wasn't properly sanitized: %v", err)
+            }
+        })
+    }
+}
+
+// verifyNodeInfoSanitization verifies whether sanitizedNodeInfo was correctly sanitized starting from initialNodeInfo, with the provided
+// nameBase and nameSuffix. The expected taints aren't auto-determined, so wantTaints should always be provided.
+//
+// If nameSuffix is an empty string, the suffix will be determined from sanitizedNodeInfo. This is useful if
+// the test doesn't know/control the name suffix (e.g. because it's randomized by the tested function).
+//
+// If expectedPods is nil, the set of pods is expected not to change between initialNodeInfo and sanitizedNodeInfo. If the sanitization is
+// expected to change the set of pods, the expected set should be passed to expectedPods.
+func verifyNodeInfoSanitization(initialNodeInfo, sanitizedNodeInfo *framework.NodeInfo, expectedPods []*apiv1.Pod, nameBase, nameSuffix string, wantTaints []apiv1.Taint) error {
+    if nameSuffix == "" {
+        // Determine the suffix from the provided sanitized NodeInfo - it should be the last part of a dash-separated name.
+        nameParts := strings.Split(sanitizedNodeInfo.Node().Name, "-")
+        if len(nameParts) < 2 {
+            return fmt.Errorf("sanitized NodeInfo name unexpected: want format <prefix>-<suffix>, got %q", sanitizedNodeInfo.Node().Name)
+        }
+        nameSuffix = nameParts[len(nameParts)-1]
+    }
+    if expectedPods != nil {
+        // If the sanitization is expected to change the set of pods, hack the initial NodeInfo to have the expected pods.
+        // Then we can just compare things pod-by-pod as if the set didn't change.
+        initialNodeInfo = framework.NewNodeInfo(initialNodeInfo.Node(), nil)
+        for _, pod := range expectedPods {
+            initialNodeInfo.AddPod(&framework.PodInfo{Pod: pod})
+        }
+    }
+
+    // Verification below assumes the same set of pods between initialNodeInfo and sanitizedNodeInfo.
+    wantNodeName := fmt.Sprintf("%s-%s", nameBase, nameSuffix)
+    if gotName := sanitizedNodeInfo.Node().Name; gotName != wantNodeName {
+        return fmt.Errorf("want sanitized Node name %q, got %q", wantNodeName, gotName)
+    }
+    if gotUid, oldUid := sanitizedNodeInfo.Node().UID, initialNodeInfo.Node().UID; gotUid == "" || gotUid == oldUid {
+        return fmt.Errorf("sanitized Node UID wasn't randomized - got %q, old UID was %q", gotUid, oldUid)
+    }
+    wantLabels := make(map[string]string)
+    for k, v := range initialNodeInfo.Node().Labels {
+        wantLabels[k] = v
+    }
+    wantLabels[apiv1.LabelHostname] = wantNodeName
+    if diff := cmp.Diff(wantLabels, sanitizedNodeInfo.Node().Labels); diff != "" {
+        return fmt.Errorf("sanitized Node labels unexpected, diff (-want +got): %s", diff)
+    }
+    if diff := cmp.Diff(wantTaints, sanitizedNodeInfo.Node().Spec.Taints); diff != "" {
+        return fmt.Errorf("sanitized Node taints unexpected, diff (-want +got): %s", diff)
+    }
+    if diff := cmp.Diff(initialNodeInfo.Node(), sanitizedNodeInfo.Node(),
+        cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name", "Labels", "UID"),
+        cmpopts.IgnoreFields(apiv1.NodeSpec{}, "Taints"),
+    ); diff != "" {
+        return fmt.Errorf("sanitized Node unexpected diff (-want +got): %s", diff)
+    }
+
+    oldPods := initialNodeInfo.Pods()
+    newPods := sanitizedNodeInfo.Pods()
+    if len(oldPods) != len(newPods) {
+        return fmt.Errorf("want %d pods in sanitized NodeInfo, got %d", len(oldPods), len(newPods))
+    }
+    for i, newPod := range newPods {
+        oldPod := oldPods[i]
+
+        if newPod.Name == oldPod.Name || !strings.HasSuffix(newPod.Name, nameSuffix) {
+            return fmt.Errorf("sanitized Pod name unexpected: want (different than %q, ending in %q), got %q", oldPod.Name, nameSuffix, newPod.Name)
+        }
+        if gotUid, oldUid := newPod.UID, oldPod.UID; gotUid == "" || gotUid == oldUid {
+            return fmt.Errorf("sanitized Pod UID wasn't randomized - got %q, old UID was %q", gotUid, oldUid)
+        }
+        if gotNodeName := newPod.Spec.NodeName; gotNodeName != wantNodeName {
+            return fmt.Errorf("want sanitized Pod.Spec.NodeName %q, got %q", wantNodeName, gotNodeName)
+        }
+        if diff := cmp.Diff(oldPod, newPod,
+            cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name", "UID"),
+            cmpopts.IgnoreFields(apiv1.PodSpec{}, "NodeName"),
+        ); diff != "" {
+            return fmt.Errorf("sanitized Pod unexpected diff (-want +got): %s", diff)
+        }
+    }
+    return nil
+}
+
+func buildDSPod(ds *appsv1.DaemonSet, nodeName string) *apiv1.Pod {
+    pod := daemon.NewPod(ds, nodeName)
+    pod.Name = fmt.Sprintf("%s-pod-%d", ds.Name, rand.Int63())
+    ptrVal := true
+    pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
+        {Kind: "DaemonSet", UID: ds.UID, Name: ds.Name, Controller: &ptrVal},
+    }
+    return pod
+}
+
+func setDeletionTimestamp(pod *apiv1.Pod) *apiv1.Pod {
+    now := metav1.NewTime(time.Now())
+    pod.DeletionTimestamp = &now
+    return pod
+}
+
+type fakeNodeGroup struct {
+    id string
+    templateNodeInfoResult *framework.NodeInfo
+    templateNodeInfoErr error
+}
+
+func (f *fakeNodeGroup) Id() string {
+    return f.id
+}
+
+func (f *fakeNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
+    return f.templateNodeInfoResult, f.templateNodeInfoErr
+}
@@ -1,71 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package simulator
-
-import (
-    appsv1 "k8s.io/api/apps/v1"
-    apiv1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
-    "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
-    "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
-
-    pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
-)
-
-// BuildNodeInfoForNode build a NodeInfo structure for the given node as if the node was just created.
-func BuildNodeInfoForNode(node *apiv1.Node, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*framework.NodeInfo, errors.AutoscalerError) {
-    nodeInfo := framework.NewNodeInfo(node, nil)
-    return addExpectedPods(nodeInfo, scheduledPods, daemonsets, forceDaemonSets)
-}
-
-func addExpectedPods(nodeInfo *framework.NodeInfo, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*framework.NodeInfo, errors.AutoscalerError) {
-    runningDS := make(map[types.UID]bool)
-    for _, pod := range scheduledPods {
-        // Ignore scheduled pods in deletion phase
-        if pod.DeletionTimestamp != nil {
-            continue
-        }
-        // Add scheduled mirror and DS pods
-        if pod_util.IsMirrorPod(pod) || pod_util.IsDaemonSetPod(pod) {
-            nodeInfo.AddPod(&framework.PodInfo{Pod: pod})
-        }
-        // Mark DS pods as running
-        controllerRef := metav1.GetControllerOf(pod)
-        if controllerRef != nil && controllerRef.Kind == "DaemonSet" {
-            runningDS[controllerRef.UID] = true
-        }
-    }
-    // Add all pending DS pods if force scheduling DS
-    if forceDaemonSets {
-        var pendingDS []*appsv1.DaemonSet
-        for _, ds := range daemonsets {
-            if !runningDS[ds.UID] {
-                pendingDS = append(pendingDS, ds)
-            }
-        }
-        daemonPods, err := daemonset.GetDaemonSetPodsForNode(nodeInfo, pendingDS)
-        if err != nil {
-            return nil, errors.ToAutoscalerError(errors.InternalError, err)
-        }
-        for _, pod := range daemonPods {
-            nodeInfo.AddPod(pod)
-        }
-    }
-    return nodeInfo, nil
-}
@@ -1,239 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package simulator

import (
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
	"k8s.io/kubernetes/pkg/controller/daemon"
)

func TestBuildNodeInfoForNode(t *testing.T) {
	ds1 := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "ds1",
			Namespace: "ds1-namespace",
			UID:       types.UID("ds1"),
		},
	}

	ds2 := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "ds2",
			Namespace: "ds2-namespace",
			UID:       types.UID("ds2"),
		},
	}

	ds3 := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "ds3",
			Namespace: "ds3-namespace",
			UID:       types.UID("ds3"),
		},
		Spec: appsv1.DaemonSetSpec{
			Template: apiv1.PodTemplateSpec{
				Spec: apiv1.PodSpec{
					NodeSelector: map[string]string{"key": "value"},
				},
			},
		},
	}

	testCases := []struct {
		name       string
		node       *apiv1.Node
		pods       []*apiv1.Pod
		daemonSets []*appsv1.DaemonSet
		forceDS    bool

		wantPods  []*apiv1.Pod
		wantError bool
	}{
		{
			name: "node without any pods",
			node: test.BuildTestNode("n", 1000, 10),
		},
		{
			name: "node with non-DS/mirror pods",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				test.BuildScheduledTestPod("p1", 100, 1, "n"),
				test.BuildScheduledTestPod("p2", 100, 1, "n"),
			},
		},
		{
			name: "node with a mirror pod",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				test.SetMirrorPodSpec(test.BuildScheduledTestPod("p1", 100, 1, "n")),
			},
			wantPods: []*apiv1.Pod{
				test.SetMirrorPodSpec(test.BuildScheduledTestPod("p1", 100, 1, "n")),
			},
		},
		{
			name: "node with a deleted mirror pod",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				test.SetMirrorPodSpec(test.BuildScheduledTestPod("p1", 100, 1, "n")),
				setDeletionTimestamp(test.SetMirrorPodSpec(test.BuildScheduledTestPod("p2", 100, 1, "n"))),
			},
			wantPods: []*apiv1.Pod{
				test.SetMirrorPodSpec(test.BuildScheduledTestPod("p1", 100, 1, "n")),
			},
		},
		{
			name: "node with DS pods [forceDS=false, no daemon sets]",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				buildDSPod(ds1, "n"),
				setDeletionTimestamp(buildDSPod(ds2, "n")),
			},
			wantPods: []*apiv1.Pod{
				buildDSPod(ds1, "n"),
			},
		},
		{
			name: "node with DS pods [forceDS=false, some daemon sets]",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				buildDSPod(ds1, "n"),
				setDeletionTimestamp(buildDSPod(ds2, "n")),
			},
			daemonSets: []*appsv1.DaemonSet{ds1, ds2, ds3},
			wantPods: []*apiv1.Pod{
				buildDSPod(ds1, "n"),
			},
		},
		{
			name: "node with a DS pod [forceDS=true, no daemon sets]",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				buildDSPod(ds1, "n"),
				setDeletionTimestamp(buildDSPod(ds2, "n")),
			},
			wantPods: []*apiv1.Pod{
				buildDSPod(ds1, "n"),
			},
			forceDS: true,
		},
		{
			name: "node with a DS pod [forceDS=true, some daemon sets]",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				buildDSPod(ds1, "n"),
				setDeletionTimestamp(buildDSPod(ds2, "n")),
			},
			daemonSets: []*appsv1.DaemonSet{ds1, ds2, ds3},
			forceDS:    true,
			wantPods: []*apiv1.Pod{
				buildDSPod(ds1, "n"),
				buildDSPod(ds2, "n"),
			},
		},
		{
			name: "everything together [forceDS=false]",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				test.BuildScheduledTestPod("p1", 100, 1, "n"),
				test.BuildScheduledTestPod("p2", 100, 1, "n"),
				test.SetMirrorPodSpec(test.BuildScheduledTestPod("p3", 100, 1, "n")),
				setDeletionTimestamp(test.SetMirrorPodSpec(test.BuildScheduledTestPod("p4", 100, 1, "n"))),
				buildDSPod(ds1, "n"),
				setDeletionTimestamp(buildDSPod(ds2, "n")),
			},
			daemonSets: []*appsv1.DaemonSet{ds1, ds2, ds3},
			wantPods: []*apiv1.Pod{
				test.SetMirrorPodSpec(test.BuildScheduledTestPod("p3", 100, 1, "n")),
				buildDSPod(ds1, "n"),
			},
		},
		{
			name: "everything together [forceDS=true]",
			node: test.BuildTestNode("n", 1000, 10),
			pods: []*apiv1.Pod{
				test.BuildScheduledTestPod("p1", 100, 1, "n"),
				test.BuildScheduledTestPod("p2", 100, 1, "n"),
				test.SetMirrorPodSpec(test.BuildScheduledTestPod("p3", 100, 1, "n")),
				setDeletionTimestamp(test.SetMirrorPodSpec(test.BuildScheduledTestPod("p4", 100, 1, "n"))),
				buildDSPod(ds1, "n"),
				setDeletionTimestamp(buildDSPod(ds2, "n")),
			},
			daemonSets: []*appsv1.DaemonSet{ds1, ds2, ds3},
			forceDS:    true,
			wantPods: []*apiv1.Pod{
				test.SetMirrorPodSpec(test.BuildScheduledTestPod("p3", 100, 1, "n")),
				buildDSPod(ds1, "n"),
				buildDSPod(ds2, "n"),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			nodeInfo, err := BuildNodeInfoForNode(tc.node, tc.pods, tc.daemonSets, tc.forceDS)

			if tc.wantError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, nodeInfo.Node(), tc.node)

				// Clean pod metadata for comparison purposes.
				var wantPods, pods []*apiv1.Pod
				for _, pod := range tc.wantPods {
					wantPods = append(wantPods, cleanPodMetadata(pod))
				}
				for _, podInfo := range nodeInfo.Pods() {
					pods = append(pods, cleanPodMetadata(podInfo.Pod))
				}
				assert.ElementsMatch(t, wantPods, pods)
			}
		})
	}
}

func cleanPodMetadata(pod *apiv1.Pod) *apiv1.Pod {
	pod.Name = strings.Split(pod.Name, "-")[0]
	pod.OwnerReferences = nil
	return pod
}

func buildDSPod(ds *appsv1.DaemonSet, nodeName string) *apiv1.Pod {
	pod := daemon.NewPod(ds, nodeName)
	pod.Name = ds.Name
	ptrVal := true
	pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
		{Kind: "DaemonSet", UID: ds.UID, Controller: &ptrVal},
	}
	return pod
}

func setDeletionTimestamp(pod *apiv1.Pod) *apiv1.Pod {
	now := metav1.NewTime(time.Now())
	pod.DeletionTimestamp = &now
	return pod
}
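One non-obvious detail of the comparison above: pods generated by GetDaemonSetPodsForNode get randomized names of the form "<ds>-pod-<n>", while buildDSPod names its pods after the DaemonSet itself, so cleanPodMetadata reduces both to the DaemonSet name before ElementsMatch runs. A standalone illustration (it works because the test DaemonSet names contain no dashes); the values below are made up:

package main

import (
	"fmt"
	"strings"
)

// normalize mirrors what cleanPodMetadata does to pod names in the test above.
func normalize(name string) string {
	return strings.Split(name, "-")[0]
}

func main() {
	generated := "ds1-pod-5577006791947779410" // name shape produced by GetDaemonSetPodsForNode
	expected := "ds1"                          // name produced by buildDSPod
	fmt.Println(normalize(generated) == normalize(expected)) // prints: true
}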
@@ -22,6 +22,7 @@ import (
 
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/kubernetes/pkg/controller/daemon"
 )
@@ -40,6 +41,10 @@ func GetDaemonSetPodsForNode(nodeInfo *framework.NodeInfo, daemonsets []*appsv1.
 		if shouldRun {
 			pod := daemon.NewPod(ds, nodeInfo.Node().Name)
 			pod.Name = fmt.Sprintf("%s-pod-%d", ds.Name, rand.Int63())
+			ptrVal := true
+			pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
+				{Kind: "DaemonSet", UID: ds.UID, Name: ds.Name, Controller: &ptrVal},
+			}
 			result = append(result, &framework.PodInfo{Pod: pod})
 		}
 	}
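The added owner reference matters because downstream code (addExpectedPods above) identifies DaemonSet pods through their controller reference rather than their name, and metav1.GetControllerOf only returns references whose Controller field is set. A small standalone illustration with made-up names, not part of the change:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "node-exporter", UID: types.UID("ds-uid")}}

	controller := true
	pod := &apiv1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "node-exporter-pod-1",
		OwnerReferences: []metav1.OwnerReference{
			// Mirrors the reference now attached by GetDaemonSetPodsForNode.
			{Kind: "DaemonSet", Name: ds.Name, UID: ds.UID, Controller: &controller},
		},
	}}

	if ref := metav1.GetControllerOf(pod); ref != nil && ref.Kind == "DaemonSet" {
		fmt.Printf("pod %s is controlled by DaemonSet %s\n", pod.Name, ref.Name)
	}
}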
@@ -23,7 +23,6 @@ import (
 
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	scheduler_config "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	scheduler_scheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
@@ -79,27 +78,6 @@ func isHugePageResourceName(name apiv1.ResourceName) bool {
 	return strings.HasPrefix(string(name), apiv1.ResourceHugePagesPrefix)
 }
-
-// DeepCopyTemplateNode copies a NodeInfo object used as a template. It changes
-// the names and UIDs of both the node and the pods running on it, so that the
-// copies can be used to represent multiple nodes.
-func DeepCopyTemplateNode(nodeTemplate *framework.NodeInfo, suffix string) *framework.NodeInfo {
-	node := nodeTemplate.Node().DeepCopy()
-	node.Name = fmt.Sprintf("%s-%s", node.Name, suffix)
-	node.UID = uuid.NewUUID()
-	if node.Labels == nil {
-		node.Labels = make(map[string]string)
-	}
-	node.Labels["kubernetes.io/hostname"] = node.Name
-	nodeInfo := framework.NewNodeInfo(node, nil)
-	for _, podInfo := range nodeTemplate.Pods() {
-		pod := podInfo.Pod.DeepCopy()
-		pod.Name = fmt.Sprintf("%s-%s", podInfo.Pod.Name, suffix)
-		pod.UID = uuid.NewUUID()
-		nodeInfo.AddPod(&framework.PodInfo{Pod: pod})
-	}
-	return nodeInfo
-}
 
 // ResourceToResourceList returns a resource list of the resource.
 func ResourceToResourceList(r *schedulerframework.Resource) apiv1.ResourceList {
 	result := apiv1.ResourceList{
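For reference, a hypothetical sketch of how the removed DeepCopyTemplateNode was typically used: one template NodeInfo is fanned out into several distinct simulated nodes, each with a unique name, UID, and hostname label. The surrounding function and package clause are invented for illustration and assumed to live alongside the helper.

package utils // assumption: the package that hosted DeepCopyTemplateNode

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// expandTemplate is an illustrative helper, not part of the change.
func expandTemplate(template *framework.NodeInfo, count int) []*framework.NodeInfo {
	var infos []*framework.NodeInfo
	for i := 0; i < count; i++ {
		// Each copy gets a fresh name/UID and a matching hostname label,
		// so the copies can coexist in one simulated cluster snapshot.
		infos = append(infos, DeepCopyTemplateNode(template, fmt.Sprintf("upcoming-%d", i)))
	}
	return infos
}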