From c967445ac6fe1e64d3d179075e05d6beec69b5c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9my=20Spriet?= Date: Thu, 7 Aug 2025 15:42:44 +0200 Subject: [PATCH 1/3] feat(recommender): add enforce cpu memory ratio --- vertical-pod-autoscaler/docs/flags.md | 1 + .../pkg/recommender/logic/recommender.go | 9 +- .../pkg/recommender/model/types.go | 34 +++++++ .../pkg/recommender/model/types_test.go | 97 +++++++++++++++++++ 4 files changed, 137 insertions(+), 4 deletions(-) diff --git a/vertical-pod-autoscaler/docs/flags.md b/vertical-pod-autoscaler/docs/flags.md index feb3dc32e3..dfa5ee92b1 100644 --- a/vertical-pod-autoscaler/docs/flags.md +++ b/vertical-pod-autoscaler/docs/flags.md @@ -66,6 +66,7 @@ This document is auto-generated from the flag definitions in the VPA recommender | `container-recommendation-max-allowed-memory` | | | quantity Maximum amount of memory that will be recommended for a container. VerticalPodAutoscaler-level maximum allowed takes precedence over the global maximum allowed. | | `cpu-histogram-decay-half-life` | | 24h0m0s | duration The amount of time it takes a historical CPU usage sample to lose half of its weight. | | `cpu-integer-post-processor-enabled` | | | Enable the cpu-integer recommendation post processor. The post processor will round up CPU recommendations to a whole CPU for pods which were opted in by setting an appropriate label on VPA object (experimental) | +| `enforce-cpu-memory-ratio` | float | | If > 0, enforce a fixed memory-per-CPU ratio expressed as bytes per millicores across all recommendations. | | `external-metrics-cpu-metric` | string | | ALPHA. Metric to use with external metrics provider for CPU usage. | | `external-metrics-memory-metric` | string | | ALPHA. Metric to use with external metrics provider for memory usage. | | `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
InPlaceOrRecreate=true\|false (BETA - default=true) | diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go b/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go index b7b6f8a75b..e8f3983b97 100644 --- a/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go +++ b/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go @@ -40,6 +40,7 @@ var ( humanizeMemory = flag.Bool("humanize-memory", false, "DEPRECATED: Convert memory values in recommendations to the highest appropriate SI unit with up to 2 decimal places for better readability. This flag is deprecated and will be removed in a future version. Use --round-memory-bytes instead.") roundCPUMillicores = flag.Int("round-cpu-millicores", 1, `CPU recommendation rounding factor in millicores. The CPU value will always be rounded up to the nearest multiple of this factor.`) roundMemoryBytes = flag.Int("round-memory-bytes", 1, `Memory recommendation rounding factor in bytes. The Memory value will always be rounded up to the nearest multiple of this factor.`) + enforceCPUMemoryRatio = flag.Float64("enforce-cpu-memory-ratio", 0, `If > 0, enforce a fixed memory-per-CPU ratio expressed as bytes per millicores across all recommendations.`) ) // PodResourceRecommender computes resource recommendation for a Vpa object. @@ -194,10 +195,10 @@ func MapToListOfRecommendedContainerResources(resources RecommendedPodResources) for _, name := range containerNames { containerResources = append(containerResources, vpa_types.RecommendedContainerResources{ ContainerName: name, - Target: model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), - LowerBound: model.ResourcesAsResourceList(resources[name].LowerBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), - UpperBound: model.ResourcesAsResourceList(resources[name].UpperBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), - UncappedTarget: model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), + Target: model.EnforceCPUMemoryRatio(model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), enforceCPUMemoryRatio), + LowerBound: model.EnforceCPUMemoryRatio(model.ResourcesAsResourceList(resources[name].LowerBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), enforceCPUMemoryRatio), + UpperBound: model.EnforceCPUMemoryRatio(model.ResourcesAsResourceList(resources[name].UpperBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), enforceCPUMemoryRatio), + UncappedTarget: model.EnforceCPUMemoryRatio(model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), enforceCPUMemoryRatio), }) } recommendation := &vpa_types.RecommendedPodResources{ diff --git a/vertical-pod-autoscaler/pkg/recommender/model/types.go b/vertical-pod-autoscaler/pkg/recommender/model/types.go index d4d817b8ef..9e8b823272 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/types.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/types.go @@ -81,6 +81,40 @@ func ScaleResource(amount ResourceAmount, factor float64) ResourceAmount { return resourceAmountFromFloat(float64(amount) * factor) } +// EnforceCPUMemoryRatio adjusts the CPU or Memory to maintain a fixed ratio in bytes per millicore. +// If the actual memory per millicore is too low, memory is increased. +// If it is too high, CPU is increased. 
+func EnforceCPUMemoryRatio(resources apiv1.ResourceList, ratioBytesPerMillicore *float64) apiv1.ResourceList { + if ratioBytesPerMillicore == nil || *ratioBytesPerMillicore <= 0 { + // No ratio specified or invalid ratio, nothing to do + return resources + } + + cpuQty, hasCPU := resources[apiv1.ResourceCPU] + memQty, hasMem := resources[apiv1.ResourceMemory] + + if !hasCPU || !hasMem || cpuQty.IsZero() || memQty.IsZero() { + return resources + } + + cpuMilli := float64(cpuQty.MilliValue()) + memBytes := float64(memQty.Value()) + + currentRatio := memBytes / cpuMilli + + if currentRatio < *ratioBytesPerMillicore { + // Not enough RAM for the given CPU → increase memory + desiredMem := cpuMilli * *ratioBytesPerMillicore + resources[apiv1.ResourceMemory] = *resource.NewQuantity(int64(desiredMem), resource.BinarySI) + } else if currentRatio > *ratioBytesPerMillicore { + // Too much RAM for the given CPU → increase CPU + desiredCPU := memBytes / *ratioBytesPerMillicore + resources[apiv1.ResourceCPU] = *resource.NewMilliQuantity(int64(desiredCPU), resource.DecimalSI) + } + + return resources +} + // ResourcesAsResourceList converts internal Resources representation to ResourcesList. func ResourcesAsResourceList(resources Resources, humanizeMemory bool, roundCPUMillicores, roundMemoryBytes int) apiv1.ResourceList { result := make(apiv1.ResourceList) diff --git a/vertical-pod-autoscaler/pkg/recommender/model/types_test.go b/vertical-pod-autoscaler/pkg/recommender/model/types_test.go index ab3501611d..85d548d66b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/types_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/types_test.go @@ -758,3 +758,100 @@ func TestResourceAmountFromFloat(t *testing.T) { }) } } + +type EnforceCPUMemoryRatioTestCase struct { + name string + input apiv1.ResourceList + ratio *float64 + expected apiv1.ResourceList +} + +func TestEnforceCPUMemoryRatio2(t *testing.T) { + // 1 CPU -> 4 GiB => bytes per millicore + ratio4GiBPerCore := float64(4*1024*1024*1024) / 1000.0 // 4_294_967.296 + + tc := []EnforceCPUMemoryRatioTestCase{ + { + name: "no ratio provided", + input: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(4*1024*1024*1024, resource.BinarySI), + }, + ratio: nil, + expected: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(4*1024*1024*1024, resource.BinarySI), + }, + }, + { + name: "valid ratio already respected", + input: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), // 2 cores + apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8Gi + }, + ratio: float64Ptr(ratio4GiBPerCore), + expected: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), + }, + }, + { + name: "too much RAM, should increase CPU", + input: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), // 1 core + apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8Gi + }, + ratio: float64Ptr(ratio4GiBPerCore), + expected: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), // 8Gi / 4 = 2 cores + apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), + }, + }, 
+ { + name: "not enough RAM, should increase RAM", + input: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(4000, resource.DecimalSI), // 4 cores + apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8Gi + }, + ratio: float64Ptr(ratio4GiBPerCore), + expected: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(4000, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI), // 4 cores * 4 = 16Gi + }, + }, + { + name: "missing memory, no-op", + input: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), + }, + ratio: float64Ptr(ratio4GiBPerCore), + expected: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), + }, + }, + { + name: "zero values, no-op", + input: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), + }, + ratio: float64Ptr(ratio4GiBPerCore), + expected: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), + }, + }, + } + + for _, tc := range tc { + t.Run(tc.name, func(t *testing.T) { + result := EnforceCPUMemoryRatio(tc.input.DeepCopy(), tc.ratio) + assert.Equal(t, tc.expected[apiv1.ResourceCPU], result[apiv1.ResourceCPU]) + assert.Equal(t, tc.expected[apiv1.ResourceMemory], result[apiv1.ResourceMemory]) + }) + } +} + +func float64Ptr(v float64) *float64 { + return &v +} From 0a85516c696b148bdd67d9e303b1afc4a7a01391 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9my=20Spriet?= Date: Tue, 12 Aug 2025 11:51:23 +0200 Subject: [PATCH 2/3] feat(recommender): enforce CPU/memory ratio via VPA API instead of flag --- .../deploy/vpa-v1-crd-gen.yaml | 12 ++ vertical-pod-autoscaler/docs/api.md | 1 + vertical-pod-autoscaler/docs/flags.md | 1 - .../pkg/apis/autoscaling.k8s.io/v1/types.go | 9 ++ .../pkg/recommender/logic/recommender.go | 9 +- .../pkg/recommender/main.go | 1 + .../pkg/recommender/model/types.go | 34 ---- .../pkg/recommender/model/types_test.go | 97 ------------ .../routines/memory_per_cpu_post_processor.go | 103 ++++++++++++ .../memory_per_cpu_post_processor_test.go | 147 ++++++++++++++++++ 10 files changed, 277 insertions(+), 137 deletions(-) create mode 100644 vertical-pod-autoscaler/pkg/recommender/routines/memory_per_cpu_post_processor.go create mode 100644 vertical-pod-autoscaler/pkg/recommender/routines/memory_per_cpu_post_processor_test.go diff --git a/vertical-pod-autoscaler/deploy/vpa-v1-crd-gen.yaml b/vertical-pod-autoscaler/deploy/vpa-v1-crd-gen.yaml index 70adb552bb..05f974e2bd 100644 --- a/vertical-pod-autoscaler/deploy/vpa-v1-crd-gen.yaml +++ b/vertical-pod-autoscaler/deploy/vpa-v1-crd-gen.yaml @@ -354,6 +354,18 @@ spec: Specifies the maximum amount of resources that will be recommended for the container. The default is no maximum. type: object + memoryPerCPU: + anyOf: + - type: integer + - type: string + description: |- + Enforce a fixed memory-per-CPU ratio for this container’s recommendations. + If set, the recommender will adjust memory or CPU so that: + memory_bytes = cpu_cores * memoryPerCPU (bytes per 1 core). + Applied to Target, LowerBound, UpperBound, and UncappedTarget. + Example: "4Gi" means 1 CPU -> 4 GiB. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true minAllowed: additionalProperties: anyOf: diff --git a/vertical-pod-autoscaler/docs/api.md b/vertical-pod-autoscaler/docs/api.md index f7e03b0611..b1b4846df3 100644 --- a/vertical-pod-autoscaler/docs/api.md +++ b/vertical-pod-autoscaler/docs/api.md @@ -48,6 +48,7 @@ _Appears in:_ | `maxAllowed` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcelist-v1-core)_ | Specifies the maximum amount of resources that will be recommended
for the container. The default is no maximum. | | | | `controlledResources` _[ResourceName](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcename-v1-core)_ | Specifies the type of recommendations that will be computed
(and possibly applied) by VPA.
If not specified, the default of [ResourceCPU, ResourceMemory] will be used. | | | | `controlledValues` _[ContainerControlledValues](#containercontrolledvalues)_ | Specifies which resource values should be controlled.
The default is "RequestsAndLimits". | | Enum: [RequestsAndLimits RequestsOnly]
| +| `memoryPerCPU` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#quantity-resource-api)_ | Enforce a fixed memory-per-CPU ratio for this container’s recommendations.
If set, the recommender will adjust memory or CPU so that:
memory_bytes = cpu_cores * memoryPerCPU (bytes per 1 core).
Applied to Target, LowerBound, UpperBound, and UncappedTarget.
Example: "4Gi" means 1 CPU -> 4 GiB. | | | #### ContainerScalingMode diff --git a/vertical-pod-autoscaler/docs/flags.md b/vertical-pod-autoscaler/docs/flags.md index dfa5ee92b1..feb3dc32e3 100644 --- a/vertical-pod-autoscaler/docs/flags.md +++ b/vertical-pod-autoscaler/docs/flags.md @@ -66,7 +66,6 @@ This document is auto-generated from the flag definitions in the VPA recommender | `container-recommendation-max-allowed-memory` | | | quantity Maximum amount of memory that will be recommended for a container. VerticalPodAutoscaler-level maximum allowed takes precedence over the global maximum allowed. | | `cpu-histogram-decay-half-life` | | 24h0m0s | duration The amount of time it takes a historical CPU usage sample to lose half of its weight. | | `cpu-integer-post-processor-enabled` | | | Enable the cpu-integer recommendation post processor. The post processor will round up CPU recommendations to a whole CPU for pods which were opted in by setting an appropriate label on VPA object (experimental) | -| `enforce-cpu-memory-ratio` | float | | If > 0, enforce a fixed memory-per-CPU ratio expressed as bytes per millicores across all recommendations. | | `external-metrics-cpu-metric` | string | | ALPHA. Metric to use with external metrics provider for CPU usage. | | `external-metrics-memory-metric` | string | | ALPHA. Metric to use with external metrics provider for memory usage. | | `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
InPlaceOrRecreate=true\|false (BETA - default=true) | diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go index 6ae164ce4c..fe70784745 100644 --- a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go +++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go @@ -19,6 +19,7 @@ package v1 import ( autoscaling "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -221,6 +222,14 @@ type ContainerResourcePolicy struct { // The default is "RequestsAndLimits". // +optional ControlledValues *ContainerControlledValues `json:"controlledValues,omitempty" protobuf:"bytes,6,rep,name=controlledValues"` + + // Enforce a fixed memory-per-CPU ratio for this container’s recommendations. + // If set, the recommender will adjust memory or CPU so that: + // memory_bytes = cpu_cores * memoryPerCPU (bytes per 1 core). + // Applied to Target, LowerBound, UpperBound, and UncappedTarget. + // Example: "4Gi" means 1 CPU -> 4 GiB. + // +optional + MemoryPerCPU *resource.Quantity `json:"memoryPerCPU,omitempty"` } const ( diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go b/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go index e8f3983b97..b7b6f8a75b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go +++ b/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go @@ -40,7 +40,6 @@ var ( humanizeMemory = flag.Bool("humanize-memory", false, "DEPRECATED: Convert memory values in recommendations to the highest appropriate SI unit with up to 2 decimal places for better readability. This flag is deprecated and will be removed in a future version. Use --round-memory-bytes instead.") roundCPUMillicores = flag.Int("round-cpu-millicores", 1, `CPU recommendation rounding factor in millicores. The CPU value will always be rounded up to the nearest multiple of this factor.`) roundMemoryBytes = flag.Int("round-memory-bytes", 1, `Memory recommendation rounding factor in bytes. The Memory value will always be rounded up to the nearest multiple of this factor.`) - enforceCPUMemoryRatio = flag.Float64("enforce-cpu-memory-ratio", 0, `If > 0, enforce a fixed memory-per-CPU ratio expressed as bytes per millicores across all recommendations.`) ) // PodResourceRecommender computes resource recommendation for a Vpa object. 
@@ -195,10 +194,10 @@ func MapToListOfRecommendedContainerResources(resources RecommendedPodResources) for _, name := range containerNames { containerResources = append(containerResources, vpa_types.RecommendedContainerResources{ ContainerName: name, - Target: model.EnforceCPUMemoryRatio(model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), enforceCPUMemoryRatio), - LowerBound: model.EnforceCPUMemoryRatio(model.ResourcesAsResourceList(resources[name].LowerBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), enforceCPUMemoryRatio), - UpperBound: model.EnforceCPUMemoryRatio(model.ResourcesAsResourceList(resources[name].UpperBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), enforceCPUMemoryRatio), - UncappedTarget: model.EnforceCPUMemoryRatio(model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), enforceCPUMemoryRatio), + Target: model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), + LowerBound: model.ResourcesAsResourceList(resources[name].LowerBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), + UpperBound: model.ResourcesAsResourceList(resources[name].UpperBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), + UncappedTarget: model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes), }) } recommendation := &vpa_types.RecommendedPodResources{ diff --git a/vertical-pod-autoscaler/pkg/recommender/main.go b/vertical-pod-autoscaler/pkg/recommender/main.go index 4f0f2f997e..b944aa14b1 100644 --- a/vertical-pod-autoscaler/pkg/recommender/main.go +++ b/vertical-pod-autoscaler/pkg/recommender/main.go @@ -268,6 +268,7 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm postProcessors = append(postProcessors, &routines.IntegerCPUPostProcessor{}) } + postProcessors = append(postProcessors, &routines.MemoryPerCPUPostProcessor{}) globalMaxAllowed := initGlobalMaxAllowed() // CappingPostProcessor, should always come in the last position for post-processing postProcessors = append(postProcessors, routines.NewCappingRecommendationProcessor(globalMaxAllowed)) diff --git a/vertical-pod-autoscaler/pkg/recommender/model/types.go b/vertical-pod-autoscaler/pkg/recommender/model/types.go index 9e8b823272..d4d817b8ef 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/types.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/types.go @@ -81,40 +81,6 @@ func ScaleResource(amount ResourceAmount, factor float64) ResourceAmount { return resourceAmountFromFloat(float64(amount) * factor) } -// EnforceCPUMemoryRatio adjusts the CPU or Memory to maintain a fixed ratio in bytes per millicore. -// If the actual memory per millicore is too low, memory is increased. -// If it is too high, CPU is increased. 
-func EnforceCPUMemoryRatio(resources apiv1.ResourceList, ratioBytesPerMillicore *float64) apiv1.ResourceList { - if ratioBytesPerMillicore == nil || *ratioBytesPerMillicore <= 0 { - // No ratio specified or invalid ratio, nothing to do - return resources - } - - cpuQty, hasCPU := resources[apiv1.ResourceCPU] - memQty, hasMem := resources[apiv1.ResourceMemory] - - if !hasCPU || !hasMem || cpuQty.IsZero() || memQty.IsZero() { - return resources - } - - cpuMilli := float64(cpuQty.MilliValue()) - memBytes := float64(memQty.Value()) - - currentRatio := memBytes / cpuMilli - - if currentRatio < *ratioBytesPerMillicore { - // Not enough RAM for the given CPU → increase memory - desiredMem := cpuMilli * *ratioBytesPerMillicore - resources[apiv1.ResourceMemory] = *resource.NewQuantity(int64(desiredMem), resource.BinarySI) - } else if currentRatio > *ratioBytesPerMillicore { - // Too much RAM for the given CPU → increase CPU - desiredCPU := memBytes / *ratioBytesPerMillicore - resources[apiv1.ResourceCPU] = *resource.NewMilliQuantity(int64(desiredCPU), resource.DecimalSI) - } - - return resources -} - // ResourcesAsResourceList converts internal Resources representation to ResourcesList. func ResourcesAsResourceList(resources Resources, humanizeMemory bool, roundCPUMillicores, roundMemoryBytes int) apiv1.ResourceList { result := make(apiv1.ResourceList) diff --git a/vertical-pod-autoscaler/pkg/recommender/model/types_test.go b/vertical-pod-autoscaler/pkg/recommender/model/types_test.go index 85d548d66b..ab3501611d 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/types_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/types_test.go @@ -758,100 +758,3 @@ func TestResourceAmountFromFloat(t *testing.T) { }) } } - -type EnforceCPUMemoryRatioTestCase struct { - name string - input apiv1.ResourceList - ratio *float64 - expected apiv1.ResourceList -} - -func TestEnforceCPUMemoryRatio2(t *testing.T) { - // 1 CPU -> 4 GiB => bytes per millicore - ratio4GiBPerCore := float64(4*1024*1024*1024) / 1000.0 // 4_294_967.296 - - tc := []EnforceCPUMemoryRatioTestCase{ - { - name: "no ratio provided", - input: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(4*1024*1024*1024, resource.BinarySI), - }, - ratio: nil, - expected: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(4*1024*1024*1024, resource.BinarySI), - }, - }, - { - name: "valid ratio already respected", - input: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), // 2 cores - apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8Gi - }, - ratio: float64Ptr(ratio4GiBPerCore), - expected: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), - }, - }, - { - name: "too much RAM, should increase CPU", - input: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), // 1 core - apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8Gi - }, - ratio: float64Ptr(ratio4GiBPerCore), - expected: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), // 8Gi / 4 = 2 cores - apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), - }, - }, 
- { - name: "not enough RAM, should increase RAM", - input: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(4000, resource.DecimalSI), // 4 cores - apiv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8Gi - }, - ratio: float64Ptr(ratio4GiBPerCore), - expected: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(4000, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI), // 4 cores * 4 = 16Gi - }, - }, - { - name: "missing memory, no-op", - input: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), - }, - ratio: float64Ptr(ratio4GiBPerCore), - expected: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), - }, - }, - { - name: "zero values, no-op", - input: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), - }, - ratio: float64Ptr(ratio4GiBPerCore), - expected: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), - }, - }, - } - - for _, tc := range tc { - t.Run(tc.name, func(t *testing.T) { - result := EnforceCPUMemoryRatio(tc.input.DeepCopy(), tc.ratio) - assert.Equal(t, tc.expected[apiv1.ResourceCPU], result[apiv1.ResourceCPU]) - assert.Equal(t, tc.expected[apiv1.ResourceMemory], result[apiv1.ResourceMemory]) - }) - } -} - -func float64Ptr(v float64) *float64 { - return &v -} diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/memory_per_cpu_post_processor.go b/vertical-pod-autoscaler/pkg/recommender/routines/memory_per_cpu_post_processor.go new file mode 100644 index 0000000000..7bfccc56b6 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/recommender/routines/memory_per_cpu_post_processor.go @@ -0,0 +1,103 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routines + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + vpa_utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" +) + +// MemoryPerCPUPostProcessor enforces a fixed memory-per-CPU ratio for each container's recommendation. +// The ratio is defined in the container's policy as MemoryPerCPU (bytes per 1 CPU core). +// Applied to Target, LowerBound, UpperBound, and UncappedTarget. +type MemoryPerCPUPostProcessor struct{} + +var _ RecommendationPostProcessor = &MemoryPerCPUPostProcessor{} + +// Process applies the memory-per-CPU enforcement to the recommendation if specified in the container policy. 
+func (p *MemoryPerCPUPostProcessor) Process( + vpa *vpa_types.VerticalPodAutoscaler, + recommendation *vpa_types.RecommendedPodResources, +) *vpa_types.RecommendedPodResources { + if vpa == nil || vpa.Spec.ResourcePolicy == nil || recommendation == nil { + return recommendation + } + + amendedRecommendation := recommendation.DeepCopy() + + for _, r := range amendedRecommendation.ContainerRecommendations { + pol := vpa_utils.GetContainerResourcePolicy(r.ContainerName, vpa.Spec.ResourcePolicy) + if pol != nil && pol.MemoryPerCPU != nil { + memPerCPUBytes := pol.MemoryPerCPU.Value() + r.Target = enforceMemoryPerCPU(r.Target, memPerCPUBytes) + r.LowerBound = enforceMemoryPerCPU(r.LowerBound, memPerCPUBytes) + r.UpperBound = enforceMemoryPerCPU(r.UpperBound, memPerCPUBytes) + r.UncappedTarget = enforceMemoryPerCPU(r.UncappedTarget, memPerCPUBytes) + } + } + + return amendedRecommendation +} + +// enforceMemoryPerCPU adjusts CPU or Memory to satisfy: +// +// memory_bytes = cpu_cores * memPerCPUBytes +// +// If memory is too low for the given CPU, increase memory. +// If memory is too high for the given CPU, increase CPU. +func enforceMemoryPerCPU(resources apiv1.ResourceList, bytesPerCore int64) apiv1.ResourceList { + if bytesPerCore <= 0 { + return resources + } + + cpuQty, hasCPU := resources[apiv1.ResourceCPU] + memQty, hasMem := resources[apiv1.ResourceMemory] + if !hasCPU || !hasMem || cpuQty.IsZero() || memQty.IsZero() { + return resources + } + + // cpuCores = milliCPU / 1000 + cpuMilli := cpuQty.MilliValue() + memBytes := memQty.Value() + + // Desired memory in bytes = CPU cores * bytes per core + desiredMem := divCeil(cpuMilli*bytesPerCore, 1000) + + if memBytes < desiredMem { + // Not enough RAM → increase memory + resources[apiv1.ResourceMemory] = *resource.NewQuantity(desiredMem, resource.BinarySI) + } else if memBytes > desiredMem { + // Too much RAM → increase CPU + desiredMilli := divCeil(memBytes*1000, bytesPerCore) + resources[apiv1.ResourceCPU] = *resource.NewMilliQuantity(desiredMilli, resource.DecimalSI) + } + + return resources +} + +func divCeil(a, b int64) int64 { + return (a + b - 1) / b +} diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/memory_per_cpu_post_processor_test.go b/vertical-pod-autoscaler/pkg/recommender/routines/memory_per_cpu_post_processor_test.go new file mode 100644 index 0000000000..f768423b62 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/recommender/routines/memory_per_cpu_post_processor_test.go @@ -0,0 +1,147 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package routines + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" +) + +func TestMemoryPerCPUPostProcessor_Process(t *testing.T) { + const Gi = int64(1024 * 1024 * 1024) + + tests := []struct { + name string + vpa *vpa_types.VerticalPodAutoscaler + recommendation *vpa_types.RecommendedPodResources + want *vpa_types.RecommendedPodResources + }{ + { + name: "No policy defined - no change", + vpa: &vpa_types.VerticalPodAutoscaler{}, + recommendation: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("c1").WithTarget("1", "4Gi").GetContainerResources(), + }, + }, + want: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("c1").WithTarget("1", "4Gi").GetContainerResources(), + }, + }, + }, + { + name: "Policy matches - too much RAM -> increase CPU", + vpa: &vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "c1", + MemoryPerCPU: resource.NewQuantity(4*Gi, resource.BinarySI), // 1 core -> 4Gi + }, + }, + }, + }, + }, + recommendation: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("c1").WithTarget("1", "8Gi").GetContainerResources(), + }, + }, + want: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("c1").WithTarget("2", "8Gi").GetContainerResources(), + }, + }, + }, + { + name: "Policy matches - not enough RAM -> increase Memory", + vpa: &vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "c1", + MemoryPerCPU: resource.NewQuantity(4*Gi, resource.BinarySI), + }, + }, + }, + }, + }, + recommendation: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("c1").WithTarget("4", "8Gi").GetContainerResources(), + }, + }, + want: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + test.Recommendation().WithContainer("c1").WithTarget("4", "16Gi").GetContainerResources(), + }, + }, + }, + { + name: "Missing CPU or Memory - no change", + vpa: &vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "c1", + MemoryPerCPU: resource.NewQuantity(4*Gi, resource.BinarySI), + }, + }, + }, + }, + }, + recommendation: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "c1", + Target: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), + }, + }, + }, + }, + want: &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "c1", + 
Target: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := MemoryPerCPUPostProcessor{} + got := c.Process(tt.vpa, tt.recommendation) + assert.True(t, equalRecommendedPodResources(tt.want, got), "Process(%v, %v)", tt.vpa, tt.recommendation) + }) + } +} From 691cb27872b0e3451452d339ec600560e5bf0d6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9my=20Spriet?= Date: Tue, 19 Aug 2025 15:43:59 +0200 Subject: [PATCH 3/3] feat(vpa): add MemoryPerCPURatio alpha feature gate --- vertical-pod-autoscaler/docs/features.md | 50 +++++++++++++++++++ vertical-pod-autoscaler/docs/flags.md | 6 +-- .../pkg/features/features.go | 6 +++ .../pkg/features/versioned_features.go | 3 ++ .../pkg/recommender/main.go | 5 +- 5 files changed, 66 insertions(+), 4 deletions(-) diff --git a/vertical-pod-autoscaler/docs/features.md b/vertical-pod-autoscaler/docs/features.md index 9b3c292d1b..772c4a7e67 100644 --- a/vertical-pod-autoscaler/docs/features.md +++ b/vertical-pod-autoscaler/docs/features.md @@ -7,6 +7,7 @@ - [CPU Recommendation Rounding](#cpu-recommendation-rounding) - [Memory Recommendation Rounding](#memory-recommendation-rounding) - [In-Place Updates](#in-place-updates-inplaceorrecreate) +- [MemoryPerCPU](#memorypercpu-memorypercpuratio) ## Limits control @@ -153,3 +154,52 @@ VPA provides metrics to track in-place update operations: * `vpa_vpas_with_in_place_updatable_pods_total`: Number of VPAs with pods eligible for in-place updates * `vpa_vpas_with_in_place_updated_pods_total`: Number of VPAs with successfully in-place updated pods * `vpa_updater_failed_in_place_update_attempts_total`: Number of failed attempts to update pods in-place. + +## MemoryPerCPU (`MemoryPerCPURatio`) + +> [!WARNING] +> FEATURE STATE: VPA v1.5.0 [alpha] + +VPA can enforce a fixed memory-per-CPU ratio in its recommendations. +When enabled, the recommender adjusts CPU or memory so that: +``` +memory_bytes = cpu_cores * memoryPerCPU +``` + +This applies to Target, LowerBound, UpperBound, and UncappedTarget recommendations. + +### Usage + +Enable the feature on the recommender with: +```bash +--feature-gates=MemoryPerCPURatio=true +``` + +Then configure the ratio in your VPA object using the memoryPerCPU field, for example: +```yaml +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: my-app +spec: + resourcePolicy: + containerPolicies: + - containerName: app + minAllowed: + cpu: 1 + memory: 4Gi + maxAllowed: + cpu: 4 + memory: 16Gi + controlledResources: ["cpu", "memory"] + controlledValues: RequestsAndLimits + memoryPerCPU: "4Gi" +``` + +### Behavior + +* If both CPU and Memory are controlled, VPA enforces the ratio. + +### Limitations + +* If `minAllowed` or `maxAllowed` constraints conflict with the ratio, the constraints take precedence and the ratio may not be respected. diff --git a/vertical-pod-autoscaler/docs/flags.md b/vertical-pod-autoscaler/docs/flags.md index feb3dc32e3..590be65f89 100644 --- a/vertical-pod-autoscaler/docs/flags.md +++ b/vertical-pod-autoscaler/docs/flags.md @@ -14,7 +14,7 @@ This document is auto-generated from the flag definitions in the VPA admission-c | `address` | string | ":8944" | The address to expose Prometheus metrics. | | `alsologtostderr` | | | log to standard error as well as files (no effect when -logtostderr=true) | | `client-ca-file` | string | "/etc/tls-certs/caCert.pem" | Path to CA PEM file. 
| -| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
InPlaceOrRecreate=true\|false (BETA - default=true) | +| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
InPlaceOrRecreate=true\|false (BETA - default=true)
MemoryPerCPURatio=true\|false (ALPHA - default=false) | | `ignored-vpa-object-namespaces` | string | | A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector. | | `kube-api-burst` | float | 100 | QPS burst limit when making requests to Kubernetes apiserver | | `kube-api-qps` | float | 50 | QPS limit when making requests to Kubernetes apiserver | @@ -68,7 +68,7 @@ This document is auto-generated from the flag definitions in the VPA recommender | `cpu-integer-post-processor-enabled` | | | Enable the cpu-integer recommendation post processor. The post processor will round up CPU recommendations to a whole CPU for pods which were opted in by setting an appropriate label on VPA object (experimental) | | `external-metrics-cpu-metric` | string | | ALPHA. Metric to use with external metrics provider for CPU usage. | | `external-metrics-memory-metric` | string | | ALPHA. Metric to use with external metrics provider for memory usage. | -| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
InPlaceOrRecreate=true\|false (BETA - default=true) | +| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
InPlaceOrRecreate=true\|false (BETA - default=true)
MemoryPerCPURatio=true\|false (ALPHA - default=false) | | `history-length` | string | "8d" | How much time back prometheus have to be queried to get historical metrics | | `history-resolution` | string | "1h" | Resolution at which Prometheus is queried for historical metrics | | `humanize-memory` | | | DEPRECATED: Convert memory values in recommendations to the highest appropriate SI unit with up to 2 decimal places for better readability. This flag is deprecated and will be removed in a future version. Use --round-memory-bytes instead. | @@ -144,7 +144,7 @@ This document is auto-generated from the flag definitions in the VPA updater cod | `eviction-rate-burst` | int | 1 | Burst of pods that can be evicted. | | `eviction-rate-limit` | float | | Number of pods that can be evicted per seconds. A rate limit set to 0 or -1 will disable
the rate limiter. (default -1) | | `eviction-tolerance` | float | 0.5 | Fraction of replica count that can be evicted for update, if more than one pod can be evicted. | -| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
InPlaceOrRecreate=true\|false (BETA - default=true) | +| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
InPlaceOrRecreate=true\|false (BETA - default=true)
MemoryPerCPURatio=true\|false (ALPHA - default=false) | | `ignored-vpa-object-namespaces` | string | | A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector. | | `in-recommendation-bounds-eviction-lifetime-threshold` | | 12h0m0s | duration Pods that live for at least that long can be evicted even if their request is within the [MinRecommended...MaxRecommended] range | | `kube-api-burst` | float | 100 | QPS burst limit when making requests to Kubernetes apiserver | diff --git a/vertical-pod-autoscaler/pkg/features/features.go b/vertical-pod-autoscaler/pkg/features/features.go index 2c34ac4001..af552069a9 100644 --- a/vertical-pod-autoscaler/pkg/features/features.go +++ b/vertical-pod-autoscaler/pkg/features/features.go @@ -40,6 +40,12 @@ const ( // In each feature gate description, you must specify "components". // The feature must be enabled by the --feature-gates argument on each listed component. + // alpha: v1.5.0 + // components: recommender + + // MemoryPerCPURatio enables enforcing a fixed memory-per-CPU ratio in recommendations. + MemoryPerCPURatio featuregate.Feature = "MemoryPerCPURatio" + // alpha: v1.4.0 // beta: v1.5.0 diff --git a/vertical-pod-autoscaler/pkg/features/versioned_features.go b/vertical-pod-autoscaler/pkg/features/versioned_features.go index e623061fff..2e25312e76 100644 --- a/vertical-pod-autoscaler/pkg/features/versioned_features.go +++ b/vertical-pod-autoscaler/pkg/features/versioned_features.go @@ -27,6 +27,9 @@ import ( // Entries are alphabetized. var defaultVersionedFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{ + MemoryPerCPURatio: { + {Version: version.MustParse("1.5"), Default: false, PreRelease: featuregate.Alpha}, + }, InPlaceOrRecreate: { {Version: version.MustParse("1.4"), Default: false, PreRelease: featuregate.Alpha}, {Version: version.MustParse("1.5"), Default: true, PreRelease: featuregate.Beta}, diff --git a/vertical-pod-autoscaler/pkg/recommender/main.go b/vertical-pod-autoscaler/pkg/recommender/main.go index b944aa14b1..5b830f7c30 100644 --- a/vertical-pod-autoscaler/pkg/recommender/main.go +++ b/vertical-pod-autoscaler/pkg/recommender/main.go @@ -268,7 +268,10 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm postProcessors = append(postProcessors, &routines.IntegerCPUPostProcessor{}) } - postProcessors = append(postProcessors, &routines.MemoryPerCPUPostProcessor{}) + if features.Enabled(features.MemoryPerCPURatio) { + postProcessors = append(postProcessors, &routines.MemoryPerCPUPostProcessor{}) + } + globalMaxAllowed := initGlobalMaxAllowed() // CappingPostProcessor, should always come in the last position for post-processing postProcessors = append(postProcessors, routines.NewCappingRecommendationProcessor(globalMaxAllowed))
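
For reference, a minimal standalone sketch of the arithmetic the new `enforceMemoryPerCPU` helper applies: the `divCeil` rounding and the "raise memory or raise CPU" choice. This is not part of the patches above; the `main` wrapper and the sample numbers are illustrative only, chosen to mirror the "too much RAM, should increase CPU" test case (1 core / 8Gi with `memoryPerCPU: "4Gi"`).

```go
package main

import "fmt"

// divCeil mirrors the helper in the patch: integer division rounded up.
func divCeil(a, b int64) int64 { return (a + b - 1) / b }

func main() {
	const gi = int64(1 << 30)
	bytesPerCore := 4 * gi // memoryPerCPU: "4Gi" expressed as bytes per core

	// Example recommendation: 1 core (1000m) and 8Gi of memory.
	cpuMilli, memBytes := int64(1000), 8*gi

	// Memory the ratio requires for the current CPU recommendation.
	desiredMem := divCeil(cpuMilli*bytesPerCore, 1000)
	if memBytes < desiredMem {
		// Too little memory for the CPU -> raise memory to the ratio.
		memBytes = desiredMem
	} else if memBytes > desiredMem {
		// Too much memory for the CPU -> raise CPU to the ratio.
		cpuMilli = divCeil(memBytes*1000, bytesPerCore)
	}

	fmt.Printf("adjusted: cpu=%dm mem=%dGi\n", cpuMilli, memBytes/gi)
}
```

Running this prints `adjusted: cpu=2000m mem=8Gi`: the post-processor restores the 4Gi-per-core ratio by scaling CPU up rather than trimming the memory recommendation, which is the same behavior the unit tests assert.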