Merge 691cb27872 into a9292351c3
commit ba6de2c44c
@@ -354,6 +354,18 @@ spec:
                      Specifies the maximum amount of resources that will be recommended
                      for the container. The default is no maximum.
                    type: object
                  memoryPerCPU:
                    anyOf:
                    - type: integer
                    - type: string
                    description: |-
                      Enforce a fixed memory-per-CPU ratio for this container’s recommendations.
                      If set, the recommender will adjust memory or CPU so that:
                      memory_bytes = cpu_cores * memoryPerCPU (bytes per 1 core).
                      Applied to Target, LowerBound, UpperBound, and UncappedTarget.
                      Example: "4Gi" means 1 CPU -> 4 GiB.
                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                    x-kubernetes-int-or-string: true
                  minAllowed:
                    additionalProperties:
                      anyOf:
@@ -48,6 +48,7 @@ _Appears in:_
| `maxAllowed` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcelist-v1-core)_ | Specifies the maximum amount of resources that will be recommended<br />for the container. The default is no maximum. | | |
| `controlledResources` _[ResourceName](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcename-v1-core)_ | Specifies the type of recommendations that will be computed<br />(and possibly applied) by VPA.<br />If not specified, the default of [ResourceCPU, ResourceMemory] will be used. | | |
| `controlledValues` _[ContainerControlledValues](#containercontrolledvalues)_ | Specifies which resource values should be controlled.<br />The default is "RequestsAndLimits". | | Enum: [RequestsAndLimits RequestsOnly] <br /> |
| `memoryPerCPU` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#quantity-resource-api)_ | Enforce a fixed memory-per-CPU ratio for this container’s recommendations.<br />If set, the recommender will adjust memory or CPU so that:<br /> memory_bytes = cpu_cores * memoryPerCPU (bytes per 1 core).<br />Applied to Target, LowerBound, UpperBound, and UncappedTarget.<br />Example: "4Gi" means 1 CPU -> 4 GiB. | | |


#### ContainerScalingMode
@@ -7,6 +7,7 @@
- [CPU Recommendation Rounding](#cpu-recommendation-rounding)
- [Memory Recommendation Rounding](#memory-recommendation-rounding)
- [In-Place Updates](#in-place-updates-inplaceorrecreate)
- [MemoryPerCPU](#memorypercpu-memorypercpuratio)

## Limits control

@@ -153,3 +154,52 @@ VPA provides metrics to track in-place update operations:
* `vpa_vpas_with_in_place_updatable_pods_total`: Number of VPAs with pods eligible for in-place updates
* `vpa_vpas_with_in_place_updated_pods_total`: Number of VPAs with successfully in-place updated pods
* `vpa_updater_failed_in_place_update_attempts_total`: Number of failed attempts to update pods in-place.

## MemoryPerCPU (`MemoryPerCPURatio`)

> [!WARNING]
> FEATURE STATE: VPA v1.5.0 [alpha]

VPA can enforce a fixed memory-per-CPU ratio in its recommendations.
When enabled, the recommender adjusts CPU or memory so that:
```
memory_bytes = cpu_cores * memoryPerCPU
```

This applies to Target, LowerBound, UpperBound, and UncappedTarget recommendations.
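
For example, with `memoryPerCPU: "4Gi"`, a 500m CPU recommendation corresponds to 2Gi of memory. The standalone Go sketch below (illustrative only, not part of the recommender; the 500m value and the 4Gi ratio are arbitrary) shows the same arithmetic with `resource.Quantity`:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	ratio := resource.MustParse("4Gi") // memoryPerCPU: bytes per 1 CPU core
	cpu := resource.MustParse("500m")  // recommended CPU

	// memory_bytes = cpu_cores * memoryPerCPU
	memoryBytes := cpu.MilliValue() * ratio.Value() / 1000
	fmt.Println(resource.NewQuantity(memoryBytes, resource.BinarySI)) // prints "2Gi"
}
```

In other words, half a core maps to half of the 4Gi-per-core ratio, i.e. 2Gi of memory.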

### Usage

Enable the feature gate on the recommender with:

```bash
--feature-gates=MemoryPerCPURatio=true
```

Then configure the ratio in your VPA object using the `memoryPerCPU` field, for example:

```yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: my-app
spec:
  # targetRef omitted for brevity
  resourcePolicy:
    containerPolicies:
      - containerName: app
        minAllowed:
          cpu: 1
          memory: 4Gi
        maxAllowed:
          cpu: 4
          memory: 16Gi
        controlledResources: ["cpu", "memory"]
        controlledValues: RequestsAndLimits
        memoryPerCPU: "4Gi"
```

Note that the `minAllowed` and `maxAllowed` bounds in this example (1 CPU / 4Gi and 4 CPU / 16Gi) are themselves consistent with the 4Gi-per-core ratio, so they never conflict with it.

### Behavior

* The ratio is enforced when both CPU and memory are controlled for the container; if either resource is missing from a recommendation, that recommendation is left unchanged.
* When memory is below the ratio for the recommended CPU, memory is raised; when memory is above the ratio, CPU is raised instead. For example, with `memoryPerCPU: "4Gi"`, a raw recommendation of 2 CPU and 10Gi becomes 2.5 CPU and 10Gi.

### Limitations

* If `minAllowed` or `maxAllowed` constraints conflict with the ratio, the constraints take precedence and the ratio may not be respected. For example, with a 4Gi-per-core ratio and `maxAllowed` memory of 16Gi, a 5 CPU recommendation calls for 20Gi, but memory is capped at 16Gi, so the final recommendation no longer matches the ratio.
@@ -14,7 +14,7 @@ This document is auto-generated from the flag definitions in the VPA admission-c
| `address` | string | ":8944" | The address to expose Prometheus metrics. |
| `alsologtostderr` | | | log to standard error as well as files (no effect when -logtostderr=true) |
| `client-ca-file` | string | "/etc/tls-certs/caCert.pem" | Path to CA PEM file. |
| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:<br>AllAlpha=true\|false (ALPHA - default=false)<br>AllBeta=true\|false (BETA - default=false)<br>InPlaceOrRecreate=true\|false (BETA - default=true) |
| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:<br>AllAlpha=true\|false (ALPHA - default=false)<br>AllBeta=true\|false (BETA - default=false)<br>InPlaceOrRecreate=true\|false (BETA - default=true)<br>MemoryPerCPURatio=true\|false (ALPHA - default=false) |
| `ignored-vpa-object-namespaces` | string | | A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector. |
| `kube-api-burst` | float | 100 | QPS burst limit when making requests to Kubernetes apiserver |
| `kube-api-qps` | float | 50 | QPS limit when making requests to Kubernetes apiserver |
@@ -68,7 +68,7 @@ This document is auto-generated from the flag definitions in the VPA recommender
| `cpu-integer-post-processor-enabled` | | | Enable the cpu-integer recommendation post processor. The post processor will round up CPU recommendations to a whole CPU for pods which were opted in by setting an appropriate label on VPA object (experimental) |
| `external-metrics-cpu-metric` | string | | ALPHA. Metric to use with external metrics provider for CPU usage. |
| `external-metrics-memory-metric` | string | | ALPHA. Metric to use with external metrics provider for memory usage. |
| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:<br>AllAlpha=true\|false (ALPHA - default=false)<br>AllBeta=true\|false (BETA - default=false)<br>InPlaceOrRecreate=true\|false (BETA - default=true) |
| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:<br>AllAlpha=true\|false (ALPHA - default=false)<br>AllBeta=true\|false (BETA - default=false)<br>InPlaceOrRecreate=true\|false (BETA - default=true)<br>MemoryPerCPURatio=true\|false (ALPHA - default=false) |
| `history-length` | string | "8d" | How much time back prometheus have to be queried to get historical metrics |
| `history-resolution` | string | "1h" | Resolution at which Prometheus is queried for historical metrics |
| `humanize-memory` | | | DEPRECATED: Convert memory values in recommendations to the highest appropriate SI unit with up to 2 decimal places for better readability. This flag is deprecated and will be removed in a future version. Use --round-memory-bytes instead. |
@@ -144,7 +144,7 @@ This document is auto-generated from the flag definitions in the VPA updater cod
| `eviction-rate-burst` | int | 1 | Burst of pods that can be evicted. |
| `eviction-rate-limit` | float | | Number of pods that can be evicted per seconds. A rate limit set to 0 or -1 will disable<br>the rate limiter. (default -1) |
| `eviction-tolerance` | float | 0.5 | Fraction of replica count that can be evicted for update, if more than one pod can be evicted. |
| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:<br>AllAlpha=true\|false (ALPHA - default=false)<br>AllBeta=true\|false (BETA - default=false)<br>InPlaceOrRecreate=true\|false (BETA - default=true) |
| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:<br>AllAlpha=true\|false (ALPHA - default=false)<br>AllBeta=true\|false (BETA - default=false)<br>InPlaceOrRecreate=true\|false (BETA - default=true)<br>MemoryPerCPURatio=true\|false (ALPHA - default=false) |
| `ignored-vpa-object-namespaces` | string | | A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector. |
| `in-recommendation-bounds-eviction-lifetime-threshold` | | 12h0m0s | duration Pods that live for at least that long can be evicted even if their request is within the [MinRecommended...MaxRecommended] range |
| `kube-api-burst` | float | 100 | QPS burst limit when making requests to Kubernetes apiserver |
@@ -19,6 +19,7 @@ package v1
import (
	autoscaling "k8s.io/api/autoscaling/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -224,6 +225,14 @@ type ContainerResourcePolicy struct {
	// The default is "RequestsAndLimits".
	// +optional
	ControlledValues *ContainerControlledValues `json:"controlledValues,omitempty" protobuf:"bytes,6,rep,name=controlledValues"`

	// Enforce a fixed memory-per-CPU ratio for this container’s recommendations.
	// If set, the recommender will adjust memory or CPU so that:
	// memory_bytes = cpu_cores * memoryPerCPU (bytes per 1 core).
	// Applied to Target, LowerBound, UpperBound, and UncappedTarget.
	// Example: "4Gi" means 1 CPU -> 4 GiB.
	// +optional
	MemoryPerCPU *resource.Quantity `json:"memoryPerCPU,omitempty"`
}

const (
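For illustration only (not part of this change), the new field can be set on the typed API like so; the container name `app` and the 4Gi ratio are arbitrary:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"

	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
)

func main() {
	ratio := resource.MustParse("4Gi") // 1 CPU core -> 4 GiB of memory

	policy := vpa_types.PodResourcePolicy{
		ContainerPolicies: []vpa_types.ContainerResourcePolicy{
			{
				ContainerName: "app",
				MemoryPerCPU:  &ratio, // arbitrary example ratio
			},
		},
	}
	fmt.Printf("%+v\n", policy)
}
```

This mirrors the `memoryPerCPU: "4Gi"` setting shown in the documentation example above.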
@@ -40,6 +40,12 @@ const (
	// In each feature gate description, you must specify "components".
	// The feature must be enabled by the --feature-gates argument on each listed component.

	// alpha: v1.5.0
	// components: recommender

	// MemoryPerCPURatio enables enforcing a fixed memory-per-CPU ratio in recommendations.
	MemoryPerCPURatio featuregate.Feature = "MemoryPerCPURatio"

	// alpha: v1.4.0
	// beta: v1.5.0

@@ -27,6 +27,9 @@ import (

// Entries are alphabetized.
var defaultVersionedFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
	MemoryPerCPURatio: {
		{Version: version.MustParse("1.5"), Default: false, PreRelease: featuregate.Alpha},
	},
	InPlaceOrRecreate: {
		{Version: version.MustParse("1.4"), Default: false, PreRelease: featuregate.Alpha},
		{Version: version.MustParse("1.5"), Default: true, PreRelease: featuregate.Beta},
@@ -268,6 +268,10 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm
		postProcessors = append(postProcessors, &routines.IntegerCPUPostProcessor{})
	}

	if features.Enabled(features.MemoryPerCPURatio) {
		postProcessors = append(postProcessors, &routines.MemoryPerCPUPostProcessor{})
	}

	globalMaxAllowed := initGlobalMaxAllowed()
	// CappingPostProcessor, should always come in the last position for post-processing
	postProcessors = append(postProcessors, routines.NewCappingRecommendationProcessor(globalMaxAllowed))
@@ -0,0 +1,103 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package routines

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
	vpa_utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)

// MemoryPerCPUPostProcessor enforces a fixed memory-per-CPU ratio for each container's recommendation.
// The ratio is defined in the container's policy as MemoryPerCPU (bytes per 1 CPU core).
// Applied to Target, LowerBound, UpperBound, and UncappedTarget.
type MemoryPerCPUPostProcessor struct{}

var _ RecommendationPostProcessor = &MemoryPerCPUPostProcessor{}

// Process applies the memory-per-CPU enforcement to the recommendation if specified in the container policy.
func (p *MemoryPerCPUPostProcessor) Process(
	vpa *vpa_types.VerticalPodAutoscaler,
	recommendation *vpa_types.RecommendedPodResources,
) *vpa_types.RecommendedPodResources {
	if vpa == nil || vpa.Spec.ResourcePolicy == nil || recommendation == nil {
		return recommendation
	}

	amendedRecommendation := recommendation.DeepCopy()

	for _, r := range amendedRecommendation.ContainerRecommendations {
		pol := vpa_utils.GetContainerResourcePolicy(r.ContainerName, vpa.Spec.ResourcePolicy)
		if pol != nil && pol.MemoryPerCPU != nil {
			memPerCPUBytes := pol.MemoryPerCPU.Value()
			r.Target = enforceMemoryPerCPU(r.Target, memPerCPUBytes)
			r.LowerBound = enforceMemoryPerCPU(r.LowerBound, memPerCPUBytes)
			r.UpperBound = enforceMemoryPerCPU(r.UpperBound, memPerCPUBytes)
			r.UncappedTarget = enforceMemoryPerCPU(r.UncappedTarget, memPerCPUBytes)
		}
	}

	return amendedRecommendation
}

// enforceMemoryPerCPU adjusts CPU or Memory to satisfy:
//
//	memory_bytes = cpu_cores * memPerCPUBytes
//
// If memory is too low for the given CPU, increase memory.
// If memory is too high for the given CPU, increase CPU.
func enforceMemoryPerCPU(resources apiv1.ResourceList, bytesPerCore int64) apiv1.ResourceList {
	if bytesPerCore <= 0 {
		return resources
	}

	cpuQty, hasCPU := resources[apiv1.ResourceCPU]
	memQty, hasMem := resources[apiv1.ResourceMemory]
	if !hasCPU || !hasMem || cpuQty.IsZero() || memQty.IsZero() {
		return resources
	}

	// cpuCores = milliCPU / 1000
	cpuMilli := cpuQty.MilliValue()
	memBytes := memQty.Value()

	// Desired memory in bytes = CPU cores * bytes per core
	desiredMem := divCeil(cpuMilli*bytesPerCore, 1000)

	if memBytes < desiredMem {
		// Not enough RAM → increase memory
		resources[apiv1.ResourceMemory] = *resource.NewQuantity(desiredMem, resource.BinarySI)
	} else if memBytes > desiredMem {
		// Too much RAM → increase CPU
		desiredMilli := divCeil(memBytes*1000, bytesPerCore)
		resources[apiv1.ResourceCPU] = *resource.NewMilliQuantity(desiredMilli, resource.DecimalSI)
	}

	return resources
}

func divCeil(a, b int64) int64 {
	return (a + b - 1) / b
}
@@ -0,0 +1,147 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package routines

import (
	"testing"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
)

func TestMemoryPerCPUPostProcessor_Process(t *testing.T) {
	const Gi = int64(1024 * 1024 * 1024)

	tests := []struct {
		name           string
		vpa            *vpa_types.VerticalPodAutoscaler
		recommendation *vpa_types.RecommendedPodResources
		want           *vpa_types.RecommendedPodResources
	}{
		{
			name: "No policy defined - no change",
			vpa:  &vpa_types.VerticalPodAutoscaler{},
			recommendation: &vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
					test.Recommendation().WithContainer("c1").WithTarget("1", "4Gi").GetContainerResources(),
				},
			},
			want: &vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
					test.Recommendation().WithContainer("c1").WithTarget("1", "4Gi").GetContainerResources(),
				},
			},
		},
		{
			name: "Policy matches - too much RAM -> increase CPU",
			vpa: &vpa_types.VerticalPodAutoscaler{
				Spec: vpa_types.VerticalPodAutoscalerSpec{
					ResourcePolicy: &vpa_types.PodResourcePolicy{
						ContainerPolicies: []vpa_types.ContainerResourcePolicy{
							{
								ContainerName: "c1",
								MemoryPerCPU:  resource.NewQuantity(4*Gi, resource.BinarySI), // 1 core -> 4Gi
							},
						},
					},
				},
			},
			recommendation: &vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
					test.Recommendation().WithContainer("c1").WithTarget("1", "8Gi").GetContainerResources(),
				},
			},
			want: &vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
					test.Recommendation().WithContainer("c1").WithTarget("2", "8Gi").GetContainerResources(),
				},
			},
		},
		{
			name: "Policy matches - not enough RAM -> increase Memory",
			vpa: &vpa_types.VerticalPodAutoscaler{
				Spec: vpa_types.VerticalPodAutoscalerSpec{
					ResourcePolicy: &vpa_types.PodResourcePolicy{
						ContainerPolicies: []vpa_types.ContainerResourcePolicy{
							{
								ContainerName: "c1",
								MemoryPerCPU:  resource.NewQuantity(4*Gi, resource.BinarySI),
							},
						},
					},
				},
			},
			recommendation: &vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
					test.Recommendation().WithContainer("c1").WithTarget("4", "8Gi").GetContainerResources(),
				},
			},
			want: &vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
					test.Recommendation().WithContainer("c1").WithTarget("4", "16Gi").GetContainerResources(),
				},
			},
		},
		{
			name: "Missing CPU or Memory - no change",
			vpa: &vpa_types.VerticalPodAutoscaler{
				Spec: vpa_types.VerticalPodAutoscalerSpec{
					ResourcePolicy: &vpa_types.PodResourcePolicy{
						ContainerPolicies: []vpa_types.ContainerResourcePolicy{
							{
								ContainerName: "c1",
								MemoryPerCPU:  resource.NewQuantity(4*Gi, resource.BinarySI),
							},
						},
					},
				},
			},
			recommendation: &vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
					{
						ContainerName: "c1",
						Target: v1.ResourceList{
							v1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI),
						},
					},
				},
			},
			want: &vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
					{
						ContainerName: "c1",
						Target: v1.ResourceList{
							v1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := MemoryPerCPUPostProcessor{}
			got := c.Process(tt.vpa, tt.recommendation)
			assert.True(t, equalRecommendedPodResources(tt.want, got), "Process(%v, %v)", tt.vpa, tt.recommendation)
		})
	}
}