Add filtering out DS pods from scale-up, refactor default pod list processor

This commit is contained in:
Bartłomiej Wróblewski 2023-01-23 17:14:03 +00:00
parent 69de789049
commit d4b812e936
5 changed files with 152 additions and 20 deletions

View File

@ -0,0 +1,54 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podlistprocessor
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/context"
podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
klog "k8s.io/klog/v2"
)
// filterOutDaemonSetPodListProcessor removes daemon set pods from the list of
// unschedulable pods before it reaches the scale-up logic. It is stateless,
// hence the empty struct.
type filterOutDaemonSetPodListProcessor struct {
}
// NewFilterOutDaemonSetPodListProcessor creates a PodListProcessor filtering out daemon set pods
func NewFilterOutDaemonSetPodListProcessor() *filterOutDaemonSetPodListProcessor {
	processor := filterOutDaemonSetPodListProcessor{}
	return &processor
}
// Process filters out pods which are daemon set pods.
func (p *filterOutDaemonSetPodListProcessor) Process(context *context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
	// Daemon set pods are pinned to specific nodes, so adding new nodes via
	// scale-up cannot make them schedulable. Drop them here so there is no
	// point wasted passing them on to the scale-up logic.
	klog.V(4).Infof("Filtering out daemon set pods")
	var remaining []*apiv1.Pod
	for _, pod := range unschedulablePods {
		if podutils.IsDaemonSetPod(pod) {
			continue
		}
		remaining = append(remaining, pod)
	}
	klog.V(4).Infof("Filtered out %v daemon set pods, %v unschedulable pods left", len(unschedulablePods)-len(remaining), len(remaining))
	return remaining, nil
}
// CleanUp is a no-op: the processor holds no state between loop iterations.
func (p *filterOutDaemonSetPodListProcessor) CleanUp() {
}

View File

@ -0,0 +1,77 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podlistprocessor
import (
"testing"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
)
// TestFilterOutDaemonSetPodListProcessor verifies that Process keeps non-DS
// pods, drops DS pods, and preserves the relative order of the survivors.
func TestFilterOutDaemonSetPodListProcessor(t *testing.T) {
	// Helpers building a regular pod and a daemon-set pod with identical resources.
	regularPod := func(name string) *apiv1.Pod {
		return test.BuildTestPod(name, 1000, 1)
	}
	dsPod := func(name string) *apiv1.Pod {
		return test.SetDSPodSpec(test.BuildTestPod(name, 1000, 1))
	}
	for _, tt := range []struct {
		name     string
		pods     []*apiv1.Pod
		wantPods []*apiv1.Pod
	}{
		{
			name: "no pods",
		},
		{
			name:     "single non-DS pod",
			pods:     []*apiv1.Pod{regularPod("p")},
			wantPods: []*apiv1.Pod{regularPod("p")},
		},
		{
			name: "single DS pod",
			pods: []*apiv1.Pod{dsPod("p")},
		},
		{
			name: "mixed DS and non-DS pods",
			pods: []*apiv1.Pod{
				regularPod("p1"),
				dsPod("p2"),
				dsPod("p3"),
				regularPod("p4"),
				regularPod("p5"),
				dsPod("p6"),
			},
			wantPods: []*apiv1.Pod{
				regularPod("p1"),
				regularPod("p4"),
				regularPod("p5"),
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			got, err := NewFilterOutDaemonSetPodListProcessor().Process(nil, tt.pods)
			assert.NoError(t, err)
			assert.Equal(t, tt.wantPods, got)
		})
	}
}

View File

@ -19,33 +19,40 @@ package podlistprocessor
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/processors/pods"
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
)
type defaultPodListProcessor struct {
currentlyDrainedNodes *currentlyDrainedNodesPodListProcessor
filterOutSchedulable *filterOutSchedulablePodListProcessor
processors []pods.PodListProcessor
}
// NewDefaultPodListProcessor returns a default implementation of the pod list
// processor, which wraps and sequentially runs other sub-processors.
func NewDefaultPodListProcessor(currentlyDrainedNodes *currentlyDrainedNodesPodListProcessor, filterOutSchedulable *filterOutSchedulablePodListProcessor) *defaultPodListProcessor {
func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker) *defaultPodListProcessor {
return &defaultPodListProcessor{
currentlyDrainedNodes: currentlyDrainedNodes,
filterOutSchedulable: filterOutSchedulable,
processors: []pods.PodListProcessor{
NewCurrentlyDrainedNodesPodListProcessor(),
NewFilterOutSchedulablePodListProcessor(predicateChecker),
NewFilterOutDaemonSetPodListProcessor(),
},
}
}
// Process runs sub-processors sequentially
func (p *defaultPodListProcessor) Process(ctx *context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
unschedulablePods, err := p.currentlyDrainedNodes.Process(ctx, unschedulablePods)
if err != nil {
return nil, err
var err error
for _, processor := range p.processors {
unschedulablePods, err = processor.Process(ctx, unschedulablePods)
if err != nil {
return nil, err
}
}
return p.filterOutSchedulable.Process(ctx, unschedulablePods)
return unschedulablePods, nil
}
func (p *defaultPodListProcessor) CleanUp() {
p.currentlyDrainedNodes.CleanUp()
p.filterOutSchedulable.CleanUp()
for _, processor := range p.processors {
processor.CleanUp()
}
}

View File

@ -136,10 +136,7 @@ func ExtractPodNames(pods []*apiv1.Pod) []string {
// NewTestProcessors returns a set of simple processors for use in tests.
func NewTestProcessors(context *context.AutoscalingContext) *processors.AutoscalingProcessors {
return &processors.AutoscalingProcessors{
PodListProcessor: podlistprocessor.NewDefaultPodListProcessor(
podlistprocessor.NewCurrentlyDrainedNodesPodListProcessor(),
podlistprocessor.NewFilterOutSchedulablePodListProcessor(context.PredicateChecker),
),
PodListProcessor: podlistprocessor.NewDefaultPodListProcessor(context.PredicateChecker),
NodeGroupListProcessor: &nodegroups.NoOpNodeGroupListProcessor{},
NodeGroupSetProcessor: nodegroupset.NewDefaultNodeGroupSetProcessor([]string{}, config.NodeGroupDifferenceRatios{}),
ScaleDownSetProcessor: nodes.NewPostFilteringScaleDownNodeProcessor(),

View File

@ -400,10 +400,7 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
opts.Processors = ca_processors.DefaultProcessors()
opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nodeInfoCacheExpireTime)
opts.Processors.PodListProcessor = podlistprocessor.NewDefaultPodListProcessor(
podlistprocessor.NewCurrentlyDrainedNodesPodListProcessor(),
podlistprocessor.NewFilterOutSchedulablePodListProcessor(opts.PredicateChecker),
)
opts.Processors.PodListProcessor = podlistprocessor.NewDefaultPodListProcessor(opts.PredicateChecker)
if autoscalingOptions.ParallelDrain {
sdProcessor := nodes.NewScaleDownCandidatesSortingProcessor()
opts.Processors.ScaleDownNodeProcessor = sdProcessor