Use bytes instead of MB for memory limits

Aleksandra Malinowska 2017-12-06 18:17:14 +01:00
parent d7dc3616f7
commit fcc3d004f5
9 changed files with 24 additions and 24 deletions


@@ -157,7 +157,7 @@ const (
 	// ResourceNameCores is string name for cores. It's used by ResourceLimiter.
 	ResourceNameCores = "cpu"
 	// ResourceNameMemory is string name for memory. It's used by ResourceLimiter.
-	// Memory should always be provided in megabytes.
+	// Memory should always be provided in bytes.
 	ResourceNameMemory = "memory"
 )
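Note: with this contract change, anything that hands a "memory" limit to a ResourceLimiter must now pass bytes. A minimal, self-contained sketch of the convention (plain maps stand in for the limiter's inputs; the values are illustrative, not from the commit):

package main

import "fmt"

const (
	// Mirrors of the resource names defined above.
	ResourceNameCores  = "cpu"
	ResourceNameMemory = "memory"
)

func main() {
	// Limits keyed by resource name; memory is now expressed in bytes.
	maxLimits := map[string]int64{
		ResourceNameCores:  320000,
		ResourceNameMemory: 100 * (1 << 30), // a 100 GB cap, as bytes
	}
	fmt.Println(maxLimits[ResourceNameMemory]) // 107374182400
}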


@@ -42,6 +42,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	provider_gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
@@ -1023,12 +1024,12 @@ func (m *gceManagerImpl) fetchResourceLimiter() error {
 		maxLimits[limit.Name] = limit.Maximum
 	}
-	// GKE API provides memory in GB, but ResourceLimiter expects them in MB
+	// GKE API provides memory in GB, but ResourceLimiter expects them in bytes
 	if _, found := minLimits[cloudprovider.ResourceNameMemory]; found {
-		minLimits[cloudprovider.ResourceNameMemory] = minLimits[cloudprovider.ResourceNameMemory] * 1024
+		minLimits[cloudprovider.ResourceNameMemory] = minLimits[cloudprovider.ResourceNameMemory] * config.Gigabyte
 	}
 	if _, found := maxLimits[cloudprovider.ResourceNameMemory]; found {
-		maxLimits[cloudprovider.ResourceNameMemory] = maxLimits[cloudprovider.ResourceNameMemory] * 1024
+		maxLimits[cloudprovider.ResourceNameMemory] = maxLimits[cloudprovider.ResourceNameMemory] * config.Gigabyte
 	}
 	resourceLimiter := cloudprovider.NewResourceLimiter(minLimits, maxLimits)
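Note: a runnable sketch of the conversion this hunk performs, with the GKE-provided limits reduced to plain maps and config.Gigabyte inlined as a local constant:

package main

import "fmt"

const gigabyte int64 = 1024 * 1024 * 1024 // mirrors config.Gigabyte

func main() {
	// Limits as returned by the GKE API, keyed by resource name; memory arrives in GB.
	minLimits := map[string]int64{"memory": 16}
	maxLimits := map[string]int64{"memory": 128}

	// Rescale memory to bytes before handing the maps to NewResourceLimiter.
	for _, limits := range []map[string]int64{minLimits, maxLimits} {
		if v, found := limits["memory"]; found {
			limits["memory"] = v * gigabyte
		}
	}
	fmt.Println(minLimits["memory"], maxLimits["memory"]) // 17179869184 137438953472
}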


@@ -17,8 +17,15 @@ limitations under the License.
 
 package config
 
 const (
 	// Defaults.
 	// DefaultMaxClusterCores is the default maximum number of cores in the cluster.
 	DefaultMaxClusterCores = 5000 * 64
 	// DefaultMaxClusterMemory is the default maximum number of gigabytes of memory in cluster.
 	DefaultMaxClusterMemory = 5000 * 64 * 20
+
+	// Useful universal constants.
+	// Gigabyte is 2^30 bytes.
+	Gigabyte = 1024 * 1024 * 1024
 )
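Note: the default maximum is still stored as a gigabyte count, so call sites multiply by the new constant when they need bytes (as the test fixtures below now do). A quick check of the arithmetic:

package main

import "fmt"

const (
	DefaultMaxClusterMemory = 5000 * 64 * 20 // gigabytes
	Gigabyte                = 1024 * 1024 * 1024
)

func main() {
	fmt.Println(DefaultMaxClusterMemory) // 6400000 GB
	var limitBytes int64 = DefaultMaxClusterMemory * Gigabyte
	fmt.Println(limitBytes) // 6871947673600000 bytes
}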


@@ -71,9 +71,9 @@ type AutoscalingOptions struct {
 	MaxCoresTotal int64
 	// MinCoresTotal sets the minimum number of cores in the whole cluster
 	MinCoresTotal int64
-	// MaxMemoryTotal sets the maximum memory (in megabytes) in the whole cluster
+	// MaxMemoryTotal sets the maximum memory (in bytes) in the whole cluster
 	MaxMemoryTotal int64
-	// MinMemoryTotal sets the maximum memory (in megabytes) in the whole cluster
+	// MinMemoryTotal sets the maximum memory (in bytes) in the whole cluster
 	MinMemoryTotal int64
 	// NodeGroupAutoDiscovery represents one or more definition(s) of node group auto-discovery
 	NodeGroupAutoDiscovery []string
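Note: a minimal sketch of filling these fields under the new unit; the struct below mirrors only the two memory fields rather than the full context.AutoscalingOptions:

package main

import "fmt"

const gigabyte int64 = 1 << 30

// memoryOptions mirrors just the fields touched by this hunk.
type memoryOptions struct {
	MaxMemoryTotal int64 // bytes
	MinMemoryTotal int64 // bytes
}

func main() {
	opts := memoryOptions{
		MinMemoryTotal: 0,
		MaxMemoryTotal: 64 * gigabyte, // a 64 GB cap, expressed in bytes
	}
	fmt.Println(opts.MaxMemoryTotal) // 68719476736
}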


@@ -863,7 +863,7 @@ var defaultScaleDownOptions = context.AutoscalingOptions{
 	MinCoresTotal:  0,
 	MinMemoryTotal: 0,
 	MaxCoresTotal:  config.DefaultMaxClusterCores,
-	MaxMemoryTotal: config.DefaultMaxClusterMemory,
+	MaxMemoryTotal: config.DefaultMaxClusterMemory * config.Gigabyte,
 }
 
 func TestScaleDownEmptyMultipleNodeGroups(t *testing.T) {
@@ -908,7 +908,7 @@ func TestScaleDownEmptyMinCoresLimitHit(t *testing.T) {
 func TestScaleDownEmptyMinMemoryLimitHit(t *testing.T) {
 	options := defaultScaleDownOptions
-	options.MinMemoryTotal = 4000
+	options.MinMemoryTotal = 4000 * MB
 	config := &scaleTestConfig{
 		nodes: []nodeConfig{
 			{"n1", 2000, 1000 * MB, 0, true, "ng1"},
@@ -1305,7 +1305,7 @@ func TestCalculateCoresAndMemoryTotal(t *testing.T) {
 	coresTotal, memoryTotal := calculateCoresAndMemoryTotal(nodes, time.Now())
 
 	assert.Equal(t, int64(42), coresTotal)
-	assert.Equal(t, int64(44000), memoryTotal)
+	assert.Equal(t, int64(44000*MB), memoryTotal)
 }
 
 func TestFilterOutMasters(t *testing.T) {


@@ -50,7 +50,7 @@ import (
 var defaultOptions = context.AutoscalingOptions{
 	EstimatorName:  estimator.BinpackingEstimatorName,
 	MaxCoresTotal:  config.DefaultMaxClusterCores,
-	MaxMemoryTotal: config.DefaultMaxClusterMemory,
+	MaxMemoryTotal: config.DefaultMaxClusterMemory * config.Gigabyte,
 	MinCoresTotal:  0,
 	MinMemoryTotal: 0,
 }
@@ -104,7 +104,7 @@ const MB = 1024 * 1024
 func TestScaleUpMaxMemoryLimitHit(t *testing.T) {
 	options := defaultOptions
-	options.MaxMemoryTotal = 1300 // set in mb
+	options.MaxMemoryTotal = 1300 * MB
 	config := &scaleTestConfig{
 		nodes: []nodeConfig{
 			{"n1", 2000, 100 * MB, 0, true, "ng1"},


@@ -18,7 +18,6 @@ package core
 
 import (
 	"fmt"
-	"math"
 	"math/rand"
 	"reflect"
 	"time"
@@ -458,12 +457,6 @@ func ConfigurePredicateCheckerForLoop(unschedulablePods []*apiv1.Pod, schedulabl
 	}
 }
 
 // Getting node cores/memory
-const (
-	// Megabyte is 2^20 bytes.
-	Megabyte float64 = 1024 * 1024
-)
-
 func getNodeCoresAndMemory(node *apiv1.Node) (int64, int64, error) {
 	cores, err := getNodeResource(node, apiv1.ResourceCPU)
 	if err != nil {
@@ -479,8 +472,7 @@ func getNodeCoresAndMemory(node *apiv1.Node) (int64, int64, error) {
 		return 0, 0, fmt.Errorf("Invalid node CPU/memory values - cpu %v, memory %v", cores, memory)
 	}
-	memoryMb := math.Ceil(float64(memory) / Megabyte)
-	return cores, int64(memoryMb), nil
+	return cores, memory, nil
 }
 
 func getNodeResource(node *apiv1.Node, resource apiv1.ResourceName) (int64, error) {
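Note: the rounding step was redundant because resource.Quantity.Value() already reports memory in bytes, so the raw value can be returned untouched. A sketch using the Kubernetes API types (assuming k8s.io/api and k8s.io/apimachinery are importable):

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	node := &apiv1.Node{}
	node.Status.Capacity = apiv1.ResourceList{
		apiv1.ResourceCPU:    *resource.NewQuantity(2, resource.DecimalSI),
		apiv1.ResourceMemory: *resource.NewQuantity(2048*1024*1024, resource.BinarySI),
	}

	// Quantity.Value() is in base units (bytes for memory), which is what
	// getNodeCoresAndMemory now passes through, matching the 2048*MB
	// expectation in the test hunk below.
	mem := node.Status.Capacity[apiv1.ResourceMemory]
	fmt.Println(mem.Value()) // 2147483648
}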


@@ -555,7 +555,7 @@ func TestGetNodeCoresAndMemory(t *testing.T) {
 	cores, memory, err := getNodeCoresAndMemory(node)
 	assert.NoError(t, err)
 	assert.Equal(t, int64(2), cores)
-	assert.Equal(t, int64(2048), memory)
+	assert.Equal(t, int64(2048*MB), memory)
 
 	node.Status.Capacity = apiv1.ResourceList{}


@@ -143,9 +143,9 @@ func createAutoscalingOptions() context.AutoscalingOptions {
 	if err != nil {
 		glog.Fatalf("Failed to parse flags: %v", err)
 	}
-	// Convert memory limits to megabytes.
-	minMemoryTotal = minMemoryTotal * 1024
-	maxMemoryTotal = maxMemoryTotal * 1024
+	// Convert memory limits to bytes.
+	minMemoryTotal = minMemoryTotal * config.Gigabyte
+	maxMemoryTotal = maxMemoryTotal * config.Gigabyte
 	return context.AutoscalingOptions{
 		CloudConfig: *cloudConfig,