Clean up estimators
commit bf6ff4be8e (parent 0c49666eac)
@@ -21,6 +21,7 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     "k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
     "k8s.io/autoscaler/cluster-autoscaler/config"
+    "k8s.io/autoscaler/cluster-autoscaler/estimator"
     "k8s.io/autoscaler/cluster-autoscaler/expander"
     "k8s.io/autoscaler/cluster-autoscaler/simulator"
     kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -42,6 +43,8 @@ type AutoscalingContext struct {
     PredicateChecker *simulator.PredicateChecker
     // ExpanderStrategy is the strategy used to choose which node group to expand when scaling up
     ExpanderStrategy expander.Strategy
+    // EstimatorBuilder is the builder function for node count estimator to be used.
+    EstimatorBuilder estimator.EstimatorBuilder
 }
 
 // AutoscalingKubeClients contains all Kubernetes API clients,
@@ -78,13 +81,14 @@ func NewResourceLimiterFromAutoscalingOptions(options config.AutoscalingOptions)
 
 // NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments
 func NewAutoscalingContext(options config.AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
-    autoscalingKubeClients *AutoscalingKubeClients, cloudProvider cloudprovider.CloudProvider, expanderStrategy expander.Strategy) *AutoscalingContext {
+    autoscalingKubeClients *AutoscalingKubeClients, cloudProvider cloudprovider.CloudProvider, expanderStrategy expander.Strategy, estimatorBuilder estimator.EstimatorBuilder) *AutoscalingContext {
     return &AutoscalingContext{
         AutoscalingOptions:     options,
         CloudProvider:          cloudProvider,
         AutoscalingKubeClients: *autoscalingKubeClients,
         PredicateChecker:       predicateChecker,
         ExpanderStrategy:       expanderStrategy,
+        EstimatorBuilder:       estimatorBuilder,
     }
 }
 
@@ -23,6 +23,7 @@ import (
     cloudBuilder "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder"
     "k8s.io/autoscaler/cluster-autoscaler/config"
     "k8s.io/autoscaler/cluster-autoscaler/context"
+    "k8s.io/autoscaler/cluster-autoscaler/estimator"
     "k8s.io/autoscaler/cluster-autoscaler/expander"
     "k8s.io/autoscaler/cluster-autoscaler/expander/factory"
     ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
@@ -39,6 +40,7 @@ type AutoscalerOptions struct {
     CloudProvider    cloudprovider.CloudProvider
     PredicateChecker *simulator.PredicateChecker
     ExpanderStrategy expander.Strategy
+    EstimatorBuilder estimator.EstimatorBuilder
     Processors       *ca_processors.AutoscalingProcessors
 }
 
@@ -57,7 +59,7 @@ func NewAutoscaler(opts AutoscalerOptions) (Autoscaler, errors.AutoscalerError)
     if err != nil {
         return nil, errors.ToAutoscalerError(errors.InternalError, err)
     }
-    return NewStaticAutoscaler(opts.AutoscalingOptions, opts.PredicateChecker, opts.AutoscalingKubeClients, opts.Processors, opts.CloudProvider, opts.ExpanderStrategy), nil
+    return NewStaticAutoscaler(opts.AutoscalingOptions, opts.PredicateChecker, opts.AutoscalingKubeClients, opts.Processors, opts.CloudProvider, opts.ExpanderStrategy, opts.EstimatorBuilder), nil
 }
 
 // Initialize default options if not provided.
@@ -87,6 +89,13 @@ func initializeDefaultOptions(opts *AutoscalerOptions) error {
         }
         opts.ExpanderStrategy = expanderStrategy
     }
+    if opts.EstimatorBuilder == nil {
+        estimatorBuilder, err := estimator.NewEstimatorBuilder(opts.EstimatorName)
+        if err != nil {
+            return err
+        }
+        opts.EstimatorBuilder = estimatorBuilder
+    }
 
     return nil
 }
 
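Note: callers that construct AutoscalerOptions by hand can either leave EstimatorBuilder nil, in which case initializeDefaultOptions resolves it from EstimatorName as shown above, or inject a builder explicitly. A minimal sketch of explicit injection, assuming the package paths used in this diff; the helper function itself is illustrative and not part of the commit:

    import (
        "k8s.io/autoscaler/cluster-autoscaler/core"
        "k8s.io/autoscaler/cluster-autoscaler/estimator"
    )

    // withBinpackingEstimator pre-populates the estimator builder so that
    // initializeDefaultOptions does not need to derive one from EstimatorName.
    func withBinpackingEstimator(opts core.AutoscalerOptions) (core.AutoscalerOptions, error) {
        builder, err := estimator.NewEstimatorBuilder(estimator.BinpackingEstimatorName)
        if err != nil {
            return opts, err
        }
        opts.EstimatorBuilder = builder
        return opts, nil
    }
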
@@ -24,6 +24,7 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
     "k8s.io/autoscaler/cluster-autoscaler/config"
     "k8s.io/autoscaler/cluster-autoscaler/context"
+    "k8s.io/autoscaler/cluster-autoscaler/estimator"
     "k8s.io/autoscaler/cluster-autoscaler/expander/random"
     "k8s.io/autoscaler/cluster-autoscaler/metrics"
     "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
@@ -76,6 +77,9 @@ type scaleTestConfig struct {
 func NewScaleTestAutoscalingContext(options config.AutoscalingOptions, fakeClient kube_client.Interface, provider cloudprovider.CloudProvider) context.AutoscalingContext {
     fakeRecorder := kube_record.NewFakeRecorder(5)
     fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+    // Ignoring error here is safe - if a test doesn't specify valid estimatorName,
+    // it either doesn't need one, or should fail when it turns out to be nil.
+    estimatorBuilder, _ := estimator.NewEstimatorBuilder(options.EstimatorName)
     return context.AutoscalingContext{
         AutoscalingOptions: options,
         AutoscalingKubeClients: context.AutoscalingKubeClients{
@@ -86,6 +90,7 @@ func NewScaleTestAutoscalingContext(options config.AutoscalingOptions, fakeClien
         CloudProvider:    provider,
         PredicateChecker: simulator.NewTestPredicateChecker(),
         ExpanderStrategy: random.NewStrategy(),
+        EstimatorBuilder: estimatorBuilder,
     }
 }
 
@@ -28,7 +28,6 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     "k8s.io/autoscaler/cluster-autoscaler/clusterstate"
     "k8s.io/autoscaler/cluster-autoscaler/context"
-    "k8s.io/autoscaler/cluster-autoscaler/estimator"
     "k8s.io/autoscaler/cluster-autoscaler/expander"
     "k8s.io/autoscaler/cluster-autoscaler/metrics"
     ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
@@ -405,18 +404,8 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
         }
 
         if len(option.Pods) > 0 {
-            if context.EstimatorName == estimator.BinpackingEstimatorName {
-                binpackingEstimator := estimator.NewBinpackingNodeEstimator(context.PredicateChecker)
-                option.NodeCount = binpackingEstimator.Estimate(option.Pods, nodeInfo, upcomingNodes)
-            } else if context.EstimatorName == estimator.BasicEstimatorName {
-                basicEstimator := estimator.NewBasicNodeEstimator()
-                for _, pod := range option.Pods {
-                    basicEstimator.Add(pod)
-                }
-                option.NodeCount, option.Debug = basicEstimator.Estimate(nodeInfo.Node(), upcomingNodes)
-            } else {
-                glog.Fatalf("Unrecognized estimator: %s", context.EstimatorName)
-            }
+            estimator := context.EstimatorBuilder(context.PredicateChecker)
+            option.NodeCount = estimator.Estimate(option.Pods, nodeInfo, upcomingNodes)
             if option.NodeCount > 0 {
                 expansionOptions = append(expansionOptions, option)
             } else {
 
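Note: ScaleUp no longer switches on EstimatorName; it simply invokes the EstimatorBuilder carried in the context. Anything that satisfies the new estimator.Estimator interface can therefore be plugged in through a builder. A sketch of a deliberately naive custom estimator, assuming the import paths used elsewhere in this diff (the type and constructor are illustrative, not part of the commit):

    import (
        apiv1 "k8s.io/api/core/v1"
        "k8s.io/autoscaler/cluster-autoscaler/estimator"
        "k8s.io/autoscaler/cluster-autoscaler/simulator"
        schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
    )

    // onePodPerNodeEstimator asks for one new node per pending pod.
    type onePodPerNodeEstimator struct{}

    func (e *onePodPerNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTemplate *schedulercache.NodeInfo,
        upcomingNodes []*schedulercache.NodeInfo) int {
        return len(pods)
    }

    // newOnePodPerNodeBuilder adapts the estimator to the EstimatorBuilder signature;
    // this particular estimator has no use for the predicate checker.
    func newOnePodPerNodeBuilder() estimator.EstimatorBuilder {
        return func(_ *simulator.PredicateChecker) estimator.Estimator {
            return &onePodPerNodeEstimator{}
        }
    }
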
@@ -24,6 +24,7 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
     "k8s.io/autoscaler/cluster-autoscaler/config"
     "k8s.io/autoscaler/cluster-autoscaler/context"
+    "k8s.io/autoscaler/cluster-autoscaler/estimator"
     "k8s.io/autoscaler/cluster-autoscaler/expander"
     "k8s.io/autoscaler/cluster-autoscaler/metrics"
     ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
@@ -66,8 +67,9 @@ type StaticAutoscaler struct {
 
 // NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters
 func NewStaticAutoscaler(opts config.AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
-    autoscalingKubeClients *context.AutoscalingKubeClients, processors *ca_processors.AutoscalingProcessors, cloudProvider cloudprovider.CloudProvider, expanderStrategy expander.Strategy) *StaticAutoscaler {
-    autoscalingContext := context.NewAutoscalingContext(opts, predicateChecker, autoscalingKubeClients, cloudProvider, expanderStrategy)
+    autoscalingKubeClients *context.AutoscalingKubeClients, processors *ca_processors.AutoscalingProcessors, cloudProvider cloudprovider.CloudProvider, expanderStrategy expander.Strategy, estimatorBuilder estimator.EstimatorBuilder) *StaticAutoscaler {
+    autoscalingContext := context.NewAutoscalingContext(opts, predicateChecker, autoscalingKubeClients, cloudProvider, expanderStrategy, estimatorBuilder)
 
     clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
         MaxTotalUnreadyPercentage: opts.MaxTotalUnreadyPercentage,
         OkTotalUnreadyCount:       opts.OkTotalUnreadyCount,
 
@@ -0,0 +1,146 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package estimator
+
+import (
+    "bytes"
+    "fmt"
+    "math"
+
+    apiv1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+)
+
+// BasicNodeEstimator estimates the number of needed nodes to handle the given amount of pods.
+// It will never overestimate the number of nodes but is quite likely to provide a number that
+// is too small.
+type BasicNodeEstimator struct {
+    cpuSum      resource.Quantity
+    memorySum   resource.Quantity
+    portSum     map[int32]int
+    FittingPods map[*apiv1.Pod]struct{}
+}
+
+// NewBasicNodeEstimator builds BasicNodeEstimator.
+func NewBasicNodeEstimator() *BasicNodeEstimator {
+    return &BasicNodeEstimator{
+        portSum:     make(map[int32]int),
+        FittingPods: make(map[*apiv1.Pod]struct{}),
+    }
+}
+
+// Add adds Pod to the estimation.
+func (basicEstimator *BasicNodeEstimator) Add(pod *apiv1.Pod) error {
+    ports := make(map[int32]struct{})
+    for _, container := range pod.Spec.Containers {
+        if request, ok := container.Resources.Requests[apiv1.ResourceCPU]; ok {
+            basicEstimator.cpuSum.Add(request)
+        }
+        if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok {
+            basicEstimator.memorySum.Add(request)
+        }
+        for _, port := range container.Ports {
+            if port.HostPort > 0 {
+                ports[port.HostPort] = struct{}{}
+            }
+        }
+    }
+    for port := range ports {
+        if sum, ok := basicEstimator.portSum[port]; ok {
+            basicEstimator.portSum[port] = sum + 1
+        } else {
+            basicEstimator.portSum[port] = 1
+        }
+    }
+    basicEstimator.FittingPods[pod] = struct{}{}
+    return nil
+}
+
+func maxInt(a, b int) int {
+    if a > b {
+        return a
+    }
+    return b
+}
+
+// GetDebug returns debug information about the current state of BasicNodeEstimator
+func (basicEstimator *BasicNodeEstimator) GetDebug() string {
+    var buffer bytes.Buffer
+    buffer.WriteString("Resources needed:\n")
+    buffer.WriteString(fmt.Sprintf("CPU: %s\n", basicEstimator.cpuSum.String()))
+    buffer.WriteString(fmt.Sprintf("Mem: %s\n", basicEstimator.memorySum.String()))
+    for port, count := range basicEstimator.portSum {
+        buffer.WriteString(fmt.Sprintf("Port %d: %d\n", port, count))
+    }
+    return buffer.String()
+}
+
+// Estimate estimates the number needed of nodes of the given shape.
+func (basicEstimator *BasicNodeEstimator) Estimate(pods []*apiv1.Pod, nodeInfo *schedulercache.NodeInfo, upcomingNodes []*schedulercache.NodeInfo) int {
+    for _, pod := range pods {
+        basicEstimator.Add(pod)
+    }
+
+    result := 0
+    resources := apiv1.ResourceList{}
+    for _, node := range upcomingNodes {
+        cpu := resources[apiv1.ResourceCPU]
+        cpu.Add(node.Node().Status.Capacity[apiv1.ResourceCPU])
+        resources[apiv1.ResourceCPU] = cpu
+
+        mem := resources[apiv1.ResourceMemory]
+        mem.Add(node.Node().Status.Capacity[apiv1.ResourceMemory])
+        resources[apiv1.ResourceMemory] = mem
+
+        pods := resources[apiv1.ResourcePods]
+        pods.Add(node.Node().Status.Capacity[apiv1.ResourcePods])
+        resources[apiv1.ResourcePods] = pods
+    }
+
+    node := nodeInfo.Node()
+    if cpuCapacity, ok := node.Status.Capacity[apiv1.ResourceCPU]; ok {
+        comingCpu := resources[apiv1.ResourceCPU]
+        prop := int(math.Ceil(float64(
+            basicEstimator.cpuSum.MilliValue()-comingCpu.MilliValue()) /
+            float64(cpuCapacity.MilliValue())))
+
+        result = maxInt(result, prop)
+    }
+    if memCapacity, ok := node.Status.Capacity[apiv1.ResourceMemory]; ok {
+        comingMem := resources[apiv1.ResourceMemory]
+        prop := int(math.Ceil(float64(
+            basicEstimator.memorySum.Value()-comingMem.Value()) /
+            float64(memCapacity.Value())))
+        result = maxInt(result, prop)
+    }
+    if podCapacity, ok := node.Status.Capacity[apiv1.ResourcePods]; ok {
+        comingPods := resources[apiv1.ResourcePods]
+        prop := int(math.Ceil(float64(basicEstimator.GetCount()-int(comingPods.Value())) /
+            float64(podCapacity.Value())))
+        result = maxInt(result, prop)
+    }
+    for _, count := range basicEstimator.portSum {
+        result = maxInt(result, count-len(upcomingNodes))
+    }
+    return result
+}
+
+// GetCount returns number of pods included in the estimation.
+func (basicEstimator *BasicNodeEstimator) GetCount() int {
+    return len(basicEstimator.FittingPods)
+}
 
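Note: the basic estimator takes, for each resource, the sum of the pod requests minus the capacity already arriving on upcoming nodes, divides by the capacity of a single template node, rounds up, and returns the maximum of those ratios (plus a host-port term). A rough usage sketch with illustrative numbers, not taken from the commit: 5 pods requesting 500m CPU and 1Gi of memory each, against a 1-CPU / 2Gi node template, need max(ceil(2500m/1000m), ceil(5Gi/2Gi)) = 3 nodes.

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
        "k8s.io/autoscaler/cluster-autoscaler/estimator"
        schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
    )

    // basicEstimateSketch is an illustrative helper, not part of the commit.
    func basicEstimateSketch() {
        pod := apiv1.Pod{
            Spec: apiv1.PodSpec{
                Containers: []apiv1.Container{{
                    Resources: apiv1.ResourceRequirements{
                        Requests: apiv1.ResourceList{
                            apiv1.ResourceCPU:    resource.MustParse("500m"),
                            apiv1.ResourceMemory: resource.MustParse("1Gi"),
                        },
                    },
                }},
            },
        }
        pods := []*apiv1.Pod{}
        for i := 0; i < 5; i++ {
            podCopy := pod
            pods = append(pods, &podCopy)
        }

        node := &apiv1.Node{
            Status: apiv1.NodeStatus{
                Capacity: apiv1.ResourceList{
                    apiv1.ResourceCPU:    resource.MustParse("1"),
                    apiv1.ResourceMemory: resource.MustParse("2Gi"),
                    apiv1.ResourcePods:   resource.MustParse("110"),
                },
            },
        }
        nodeInfo := schedulercache.NewNodeInfo()
        nodeInfo.SetNode(node)

        e := estimator.NewBasicNodeEstimator()
        fmt.Println(e.Estimate(pods, nodeInfo, nil)) // 3: both CPU and memory need 3 nodes
    }
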
@@ -48,16 +48,12 @@ func TestEstimate(t *testing.T) {
     memoryPerPod := int64(1000 * 1024 * 1024)
     pod := makePod(cpuPerPod, memoryPerPod)
 
-    estimator := NewBasicNodeEstimator()
+    pods := []*apiv1.Pod{}
     for i := 0; i < 5; i++ {
         podCopy := *pod
-        estimator.Add(&podCopy)
+        pods = append(pods, &podCopy)
     }
 
-    assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue())
-    assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value())
-    assert.Equal(t, 5, estimator.GetCount())
-
     node := &apiv1.Node{
         Status: apiv1.NodeStatus{
             Capacity: apiv1.ResourceList{
@@ -67,10 +63,20 @@ func TestEstimate(t *testing.T) {
             },
         },
     }
-    estimate, report := estimator.Estimate(node, []*schedulercache.NodeInfo{})
-    assert.Contains(t, estimator.GetDebug(), "CPU")
-    assert.Contains(t, report, "CPU")
+    nodeInfo := schedulercache.NewNodeInfo()
+    nodeInfo.SetNode(node)
+
+    estimator := NewBasicNodeEstimator()
+    estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{})
+
+    // Check result.
+    assert.Equal(t, 3, estimate)
+
+    // Check internal state of estimator.
+    assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue())
+    assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value())
+    assert.Equal(t, 5, estimator.GetCount())
+    assert.Contains(t, estimator.GetDebug(), "CPU")
 }
 
 func TestEstimateWithComing(t *testing.T) {
@@ -78,17 +84,12 @@ func TestEstimateWithComing(t *testing.T) {
     memoryPerPod := int64(1000 * 1024 * 1024)
 
     pod := makePod(cpuPerPod, memoryPerPod)
-    estimator := NewBasicNodeEstimator()
-
+    pods := []*apiv1.Pod{}
     for i := 0; i < 5; i++ {
         podCopy := *pod
-        estimator.Add(&podCopy)
+        pods = append(pods, &podCopy)
     }
 
-    assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue())
-    assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value())
-    assert.Equal(t, 5, estimator.GetCount())
-
     node := &apiv1.Node{
         Status: apiv1.NodeStatus{
             Capacity: apiv1.ResourceList{
@@ -102,10 +103,18 @@ func TestEstimateWithComing(t *testing.T) {
     nodeInfo := schedulercache.NewNodeInfo()
     nodeInfo.SetNode(node)
 
-    estimate, report := estimator.Estimate(node, []*schedulercache.NodeInfo{nodeInfo, nodeInfo})
-    assert.Contains(t, estimator.GetDebug(), "CPU")
-    assert.Contains(t, report, "CPU")
+    estimator := NewBasicNodeEstimator()
+    estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{nodeInfo, nodeInfo})
+
+    // Check result.
+    assert.Equal(t, 1, estimate)
+
+    // Check internal state of estimator.
+    assert.Contains(t, estimator.GetDebug(), "CPU")
+    assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue())
+    assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value())
+    assert.Equal(t, 5, estimator.GetCount())
 
 }
 
 func TestEstimateWithPorts(t *testing.T) {
@@ -119,9 +128,9 @@ func TestEstimateWithPorts(t *testing.T) {
         },
     }
 
-    estimator := NewBasicNodeEstimator()
+    pods := []*apiv1.Pod{}
     for i := 0; i < 5; i++ {
-        estimator.Add(pod)
+        pods = append(pods, pod)
     }
     node := &apiv1.Node{
         Status: apiv1.NodeStatus{
@@ -132,9 +141,11 @@ func TestEstimateWithPorts(t *testing.T) {
             },
         },
     }
+    nodeInfo := schedulercache.NewNodeInfo()
+    nodeInfo.SetNode(node)
 
-    estimate, report := estimator.Estimate(node, []*schedulercache.NodeInfo{})
+    estimator := NewBasicNodeEstimator()
+    estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{})
     assert.Contains(t, estimator.GetDebug(), "CPU")
-    assert.Contains(t, report, "CPU")
     assert.Equal(t, 5, estimate)
 }
 
@@ -51,7 +51,7 @@ func NewBinpackingNodeEstimator(predicateChecker *simulator.PredicateChecker) *B
 // It is assumed that all pods from the given list can fit to nodeTemplate.
 // Returns the number of nodes needed to accommodate all pods from the list.
 func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTemplate *schedulercache.NodeInfo,
-    comingNodes []*schedulercache.NodeInfo) int {
+    upcomingNodes []*schedulercache.NodeInfo) int {
 
     podInfos := calculatePodScore(pods, nodeTemplate)
     sort.Slice(podInfos, func(i, j int) bool { return podInfos[i].score > podInfos[j].score })
@@ -66,7 +66,7 @@ func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTempla
     }
 
     newNodes := make([]*schedulercache.NodeInfo, 0)
-    newNodes = append(newNodes, comingNodes...)
+    newNodes = append(newNodes, upcomingNodes...)
 
     for _, podInfo := range podInfos {
         found := false
@@ -81,7 +81,7 @@ func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTempla
             newNodes = append(newNodes, nodeWithPod(nodeTemplate, podInfo.pod))
         }
     }
-    return len(newNodes) - len(comingNodes)
+    return len(newNodes) - len(upcomingNodes)
 }
 
 // Calculates score for all pods and returns podInfo structure.
 
@@ -17,12 +17,10 @@ limitations under the License.
 package estimator
 
 import (
-    "bytes"
     "fmt"
-    "math"
 
     apiv1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator"
     schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 )
 
@@ -36,123 +34,22 @@ const (
 // AvailableEstimators is a list of available estimators.
 var AvailableEstimators = []string{BasicEstimatorName, BinpackingEstimatorName}
 
-// BasicNodeEstimator estimates the number of needed nodes to handle the given amount of pods.
-// It will never overestimate the number of nodes but is quite likely to provide a number that
-// is too small.
-type BasicNodeEstimator struct {
-    cpuSum      resource.Quantity
-    memorySum   resource.Quantity
-    portSum     map[int32]int
-    FittingPods map[*apiv1.Pod]struct{}
+type Estimator interface {
+    Estimate([]*apiv1.Pod, *schedulercache.NodeInfo, []*schedulercache.NodeInfo) int
 }
 
-// NewBasicNodeEstimator builds BasicNodeEstimator.
-func NewBasicNodeEstimator() *BasicNodeEstimator {
-    return &BasicNodeEstimator{
-        portSum:     make(map[int32]int),
-        FittingPods: make(map[*apiv1.Pod]struct{}),
+type EstimatorBuilder func(*simulator.PredicateChecker) Estimator
+
+func NewEstimatorBuilder(name string) (EstimatorBuilder, error) {
+    switch name {
+    case BasicEstimatorName:
+        return func(_ *simulator.PredicateChecker) Estimator {
+            return NewBasicNodeEstimator()
+        }, nil
+    case BinpackingEstimatorName:
+        return func(predicateChecker *simulator.PredicateChecker) Estimator {
+            return NewBinpackingNodeEstimator(predicateChecker)
+        }, nil
     }
-}
-
-// Add adds Pod to the estimation.
-func (basicEstimator *BasicNodeEstimator) Add(pod *apiv1.Pod) error {
-    ports := make(map[int32]struct{})
-    for _, container := range pod.Spec.Containers {
-        if request, ok := container.Resources.Requests[apiv1.ResourceCPU]; ok {
-            basicEstimator.cpuSum.Add(request)
-        }
-        if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok {
-            basicEstimator.memorySum.Add(request)
-        }
-        for _, port := range container.Ports {
-            if port.HostPort > 0 {
-                ports[port.HostPort] = struct{}{}
-            }
-        }
-    }
-    for port := range ports {
-        if sum, ok := basicEstimator.portSum[port]; ok {
-            basicEstimator.portSum[port] = sum + 1
-        } else {
-            basicEstimator.portSum[port] = 1
-        }
-    }
-    basicEstimator.FittingPods[pod] = struct{}{}
-    return nil
-}
-
-func maxInt(a, b int) int {
-    if a > b {
-        return a
-    }
-    return b
-}
-
-// GetDebug returns debug information about the current state of BasicNodeEstimator
-func (basicEstimator *BasicNodeEstimator) GetDebug() string {
-    var buffer bytes.Buffer
-    buffer.WriteString("Resources needed:\n")
-    buffer.WriteString(fmt.Sprintf("CPU: %s\n", basicEstimator.cpuSum.String()))
-    buffer.WriteString(fmt.Sprintf("Mem: %s\n", basicEstimator.memorySum.String()))
-    for port, count := range basicEstimator.portSum {
-        buffer.WriteString(fmt.Sprintf("Port %d: %d\n", port, count))
-    }
-    return buffer.String()
-}
-
-// Estimate estimates the number needed of nodes of the given shape.
-func (basicEstimator *BasicNodeEstimator) Estimate(node *apiv1.Node, comingNodes []*schedulercache.NodeInfo) (int, string) {
-    var buffer bytes.Buffer
-    buffer.WriteString("Needed nodes according to:\n")
-    result := 0
-
-    resources := apiv1.ResourceList{}
-    for _, node := range comingNodes {
-        cpu := resources[apiv1.ResourceCPU]
-        cpu.Add(node.Node().Status.Capacity[apiv1.ResourceCPU])
-        resources[apiv1.ResourceCPU] = cpu
-
-        mem := resources[apiv1.ResourceMemory]
-        mem.Add(node.Node().Status.Capacity[apiv1.ResourceMemory])
-        resources[apiv1.ResourceMemory] = mem
-
-        pods := resources[apiv1.ResourcePods]
-        pods.Add(node.Node().Status.Capacity[apiv1.ResourcePods])
-        resources[apiv1.ResourcePods] = pods
-    }
-
-    if cpuCapacity, ok := node.Status.Capacity[apiv1.ResourceCPU]; ok {
-        comingCpu := resources[apiv1.ResourceCPU]
-        prop := int(math.Ceil(float64(
-            basicEstimator.cpuSum.MilliValue()-comingCpu.MilliValue()) /
-            float64(cpuCapacity.MilliValue())))
-
-        buffer.WriteString(fmt.Sprintf("CPU: %d\n", prop))
-        result = maxInt(result, prop)
-    }
-    if memCapacity, ok := node.Status.Capacity[apiv1.ResourceMemory]; ok {
-        comingMem := resources[apiv1.ResourceMemory]
-        prop := int(math.Ceil(float64(
-            basicEstimator.memorySum.Value()-comingMem.Value()) /
-            float64(memCapacity.Value())))
-        buffer.WriteString(fmt.Sprintf("Mem: %d\n", prop))
-        result = maxInt(result, prop)
-    }
-    if podCapacity, ok := node.Status.Capacity[apiv1.ResourcePods]; ok {
-        comingPods := resources[apiv1.ResourcePods]
-        prop := int(math.Ceil(float64(basicEstimator.GetCount()-int(comingPods.Value())) /
-            float64(podCapacity.Value())))
-        buffer.WriteString(fmt.Sprintf("Pods: %d\n", prop))
-        result = maxInt(result, prop)
-    }
-    for port, count := range basicEstimator.portSum {
-        buffer.WriteString(fmt.Sprintf("Port %d: %d\n", port, count))
-        result = maxInt(result, count-len(comingNodes))
-    }
-    return result, buffer.String()
-}
-
-// GetCount returns number of pods included in the estimation.
-func (basicEstimator *BasicNodeEstimator) GetCount() int {
-    return len(basicEstimator.FittingPods)
+    return nil, fmt.Errorf("Unknown estimator: %s", name)
 }
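Note: the estimator choice is now resolved once into an EstimatorBuilder and only turned into a concrete Estimator where a PredicateChecker is available, instead of being re-checked by name at every scale-up. A minimal sketch of that two-step pattern, assuming the import paths used in this diff (the helper function is illustrative, not part of the commit):

    import (
        "k8s.io/autoscaler/cluster-autoscaler/estimator"
        "k8s.io/autoscaler/cluster-autoscaler/simulator"
    )

    // buildEstimator resolves an estimator name (e.g. estimator.BinpackingEstimatorName)
    // once and then builds a concrete estimator from the resulting builder.
    func buildEstimator(name string, predicateChecker *simulator.PredicateChecker) (estimator.Estimator, error) {
        builder, err := estimator.NewEstimatorBuilder(name)
        if err != nil {
            return nil, err
        }
        return builder(predicateChecker), nil
    }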
|
@@ -323,16 +323,6 @@ func main() {
 
     glog.V(1).Infof("Cluster Autoscaler %s", ClusterAutoscalerVersion)
 
-    correctEstimator := false
-    for _, availableEstimator := range estimator.AvailableEstimators {
-        if *estimatorFlag == availableEstimator {
-            correctEstimator = true
-        }
-    }
-    if !correctEstimator {
-        glog.Fatalf("Unrecognized estimator: %v", *estimatorFlag)
-    }
-
     go func() {
         http.Handle("/metrics", prometheus.Handler())
         http.Handle("/health-check", healthCheck)