Pass the whole VPA into cappingRecommendationProcessor.Apply()
The current log message for when no container is found is very misleading and can cause confusion. This change passes the entire VPA object into that function so that it can emit a log message that includes the relevant VPA name. Passing the whole object just to improve one log message feels heavy-handed, so any alternative approaches would be appreciated.
This commit is contained in:
parent
3080b95129
commit
66eabb3e30
|
|
@ -116,7 +116,7 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa
|
|||
|
||||
if vpa.Status.Recommendation != nil {
|
||||
var err error
|
||||
recommendedPodResources, annotations, err = p.recommendationProcessor.Apply(vpa.Status.Recommendation, vpa.Spec.ResourcePolicy, vpa.Status.Conditions, pod)
|
||||
recommendedPodResources, annotations, err = p.recommendationProcessor.Apply(vpa, pod)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Cannot process recommendation for pod", "pod", klog.KObj(pod))
|
||||
return nil, annotations, err
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ func NewUpdatePriorityCalculator(vpa *vpa_types.VerticalPodAutoscaler,
|
|||
|
||||
// AddPod adds pod to the UpdatePriorityCalculator.
|
||||
func (calc *UpdatePriorityCalculator) AddPod(pod *apiv1.Pod, now time.Time) {
|
||||
processedRecommendation, _, err := calc.recommendationProcessor.Apply(calc.vpa.Status.Recommendation, calc.vpa.Spec.ResourcePolicy, calc.vpa.Status.Conditions, pod)
|
||||
processedRecommendation, _, err := calc.recommendationProcessor.Apply(calc.vpa, pod)
|
||||
if err != nil {
|
||||
klog.V(2).ErrorS(err, "Cannot process recommendation for pod", "pod", klog.KObj(pod))
|
||||
return
|
||||
|
|
|
|||
|
|
@ -219,9 +219,7 @@ type RecommendationProcessorMock struct {
|
|||
}
|
||||
|
||||
// Apply is a mock implementation of RecommendationProcessor.Apply
|
||||
func (m *RecommendationProcessorMock) Apply(podRecommendation *vpa_types.RecommendedPodResources,
|
||||
policy *vpa_types.PodResourcePolicy,
|
||||
conditions []vpa_types.VerticalPodAutoscalerCondition,
|
||||
func (m *RecommendationProcessorMock) Apply(vpa *vpa_types.VerticalPodAutoscaler,
|
||||
pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, map[string][]string, error) {
|
||||
args := m.Called()
|
||||
var returnArg *vpa_types.RecommendedPodResources
|
||||
|
|
@ -239,11 +237,9 @@ func (m *RecommendationProcessorMock) Apply(podRecommendation *vpa_types.Recomme
|
|||
type FakeRecommendationProcessor struct{}
|
||||
|
||||
// Apply is a dummy implementation of RecommendationProcessor.Apply which returns provided podRecommendation
|
||||
func (f *FakeRecommendationProcessor) Apply(podRecommendation *vpa_types.RecommendedPodResources,
|
||||
policy *vpa_types.PodResourcePolicy,
|
||||
conditions []vpa_types.VerticalPodAutoscalerCondition,
|
||||
func (f *FakeRecommendationProcessor) Apply(vpa *vpa_types.VerticalPodAutoscaler,
|
||||
pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, map[string][]string, error) {
|
||||
return podRecommendation, nil, nil
|
||||
return vpa.Status.Recommendation, nil, nil
|
||||
}
|
||||
|
||||
// fakeEventRecorder is a dummy implementation of record.EventRecorder.
|
||||
|
|
|
|||
|
|
@ -52,12 +52,13 @@ type cappingRecommendationProcessor struct {
|
|||
|
||||
// Apply returns a recommendation for the given pod, adjusted to obey policy and limits.
|
||||
func (c *cappingRecommendationProcessor) Apply(
|
||||
podRecommendation *vpa_types.RecommendedPodResources,
|
||||
policy *vpa_types.PodResourcePolicy,
|
||||
conditions []vpa_types.VerticalPodAutoscalerCondition,
|
||||
vpa *vpa_types.VerticalPodAutoscaler,
|
||||
pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
|
||||
// TODO: Annotate if request enforced by maintaining proportion with limit and allowed limit range is in conflict with policy.
|
||||
|
||||
policy := vpa.Spec.ResourcePolicy.DeepCopy()
|
||||
podRecommendation := vpa.Status.Recommendation.DeepCopy()
|
||||
|
||||
if podRecommendation == nil && policy == nil {
|
||||
// If there is no recommendation and no policies have been defined then no recommendation can be computed.
|
||||
return nil, nil, nil
|
||||
|
|
@ -76,7 +77,7 @@ func (c *cappingRecommendationProcessor) Apply(
|
|||
container := getContainer(containerRecommendation.ContainerName, pod)
|
||||
|
||||
if container == nil {
|
||||
klog.V(2).InfoS("No matching Container found for recommendation", "containerName", containerRecommendation.ContainerName)
|
||||
klog.V(2).InfoS("No matching Container found for recommendation", "containerName", containerRecommendation.ContainerName, "vpa", klog.KObj(vpa))
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -28,27 +28,26 @@ import (
|
|||
|
||||
func TestRecommendationNotAvailable(t *testing.T) {
|
||||
pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName("ctr-name").Get()).Get()
|
||||
podRecommendation := vpa_types.RecommendedPodResources{
|
||||
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
|
||||
{
|
||||
ContainerName: "ctr-name-other",
|
||||
Target: apiv1.ResourceList{
|
||||
apiv1.ResourceCPU: *resource.NewScaledQuantity(100, 1),
|
||||
apiv1.ResourceMemory: *resource.NewScaledQuantity(50000, 1),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
policy := vpa_types.PodResourcePolicy{}
|
||||
|
||||
res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod)
|
||||
containerName := "ctr-name-other"
|
||||
vpa := test.VerticalPodAutoscaler().
|
||||
WithContainer(containerName).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
WithContainer(containerName).
|
||||
WithTarget("100m", "50000Mi").
|
||||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(vpa, pod)
|
||||
assert.Nil(t, err)
|
||||
assert.Empty(t, annotations)
|
||||
assert.Empty(t, res.ContainerRecommendations)
|
||||
}
|
||||
|
||||
func TestRecommendationToLimitCapping(t *testing.T) {
|
||||
pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName("ctr-name").Get()).Get()
|
||||
containerName := "ctr-name"
|
||||
pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName(containerName).Get()).Get()
|
||||
pod.Spec.Containers[0].Resources.Limits =
|
||||
apiv1.ResourceList{
|
||||
apiv1.ResourceCPU: *resource.NewScaledQuantity(3, 1),
|
||||
|
|
@ -69,6 +68,10 @@ func TestRecommendationToLimitCapping(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
|
||||
vpa := test.VerticalPodAutoscaler().WithContainer(containerName).Get()
|
||||
vpa.Status.Recommendation = &podRecommendation
|
||||
|
||||
requestsAndLimits := vpa_types.ContainerControlledValuesRequestsAndLimits
|
||||
requestsOnly := vpa_types.ContainerControlledValuesRequestsOnly
|
||||
for _, tc := range []struct {
|
||||
|
|
@ -125,7 +128,8 @@ func TestRecommendationToLimitCapping(t *testing.T) {
|
|||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &tc.policy, nil, pod)
|
||||
vpa.Spec.ResourcePolicy = &tc.policy
|
||||
res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(vpa, pod)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, tc.expectedTarget, res.ContainerRecommendations[0].Target)
|
||||
|
||||
|
|
@ -179,7 +183,14 @@ func TestRecommendationCappedToMinMaxPolicy(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod)
|
||||
containerName := "ctr-name"
|
||||
vpa := test.VerticalPodAutoscaler().
|
||||
WithContainer(containerName).
|
||||
Get()
|
||||
vpa.Spec.ResourcePolicy = &policy
|
||||
vpa.Status.Recommendation = &podRecommendation
|
||||
|
||||
res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(vpa, pod)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, apiv1.ResourceList{
|
||||
apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1),
|
||||
|
|
@ -238,11 +249,16 @@ var applyTestCases = []struct {
|
|||
}
|
||||
|
||||
func TestApply(t *testing.T) {
|
||||
pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName("ctr-name").Get()).Get()
|
||||
containerName := "ctr-name"
|
||||
pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName(containerName).Get()).Get()
|
||||
|
||||
for _, testCase := range applyTestCases {
|
||||
|
||||
vpa := test.VerticalPodAutoscaler().WithContainer(containerName).Get()
|
||||
vpa.Spec.ResourcePolicy = testCase.Policy
|
||||
vpa.Status.Recommendation = testCase.PodRecommendation
|
||||
res, _, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(
|
||||
testCase.PodRecommendation, testCase.Policy, nil, pod)
|
||||
vpa, pod)
|
||||
assert.Equal(t, testCase.ExpectedPodRecommendation, res)
|
||||
assert.Equal(t, testCase.ExpectedError, err)
|
||||
}
|
||||
|
|
@ -334,6 +350,12 @@ func TestApplyCapsToLimitRange(t *testing.T) {
|
|||
apiv1.ResourceMemory: resource.MustParse("500M"),
|
||||
},
|
||||
}
|
||||
|
||||
containerName := "container"
|
||||
vpa := test.VerticalPodAutoscaler().
|
||||
WithContainer(containerName).
|
||||
Get()
|
||||
|
||||
recommendation := vpa_types.RecommendedPodResources{
|
||||
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
|
||||
{
|
||||
|
|
@ -345,6 +367,8 @@ func TestApplyCapsToLimitRange(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
vpa.Status.Recommendation = &recommendation
|
||||
|
||||
pod := apiv1.Pod{
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{
|
||||
|
|
@ -378,7 +402,7 @@ func TestApplyCapsToLimitRange(t *testing.T) {
|
|||
|
||||
calculator := fakeLimitRangeCalculator{containerLimitRange: limitRange}
|
||||
processor := NewCappingRecommendationProcessor(&calculator)
|
||||
processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod)
|
||||
processedRecommendation, annotations, err := processor.Apply(vpa, &pod)
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, annotations, "container")
|
||||
assert.ElementsMatch(t, []string{"cpu capped to fit Max in container LimitRange", "memory capped to fit Min in container LimitRange"}, annotations["container"])
|
||||
|
|
@ -1144,7 +1168,13 @@ func TestApplyLimitRangeMinToRequest(t *testing.T) {
|
|||
podLimitRange: tc.podLimitRange,
|
||||
}
|
||||
processor := NewCappingRecommendationProcessor(&calculator)
|
||||
processedRecommendation, annotations, err := processor.Apply(&tc.resources, tc.policy, nil, &tc.pod)
|
||||
|
||||
containerName := "ctr-name"
|
||||
vpa := test.VerticalPodAutoscaler().WithContainer(containerName).Get()
|
||||
vpa.Spec.ResourcePolicy = tc.policy
|
||||
vpa.Status.Recommendation = &tc.resources
|
||||
|
||||
processedRecommendation, annotations, err := processor.Apply(vpa, &tc.pod)
|
||||
assert.NoError(t, err)
|
||||
for containerName, expectedAnnotations := range tc.expectAnnotations {
|
||||
if assert.Contains(t, annotations, containerName) {
|
||||
|
|
@ -1270,7 +1300,12 @@ func TestCapPodMemoryWithUnderByteSplit(t *testing.T) {
|
|||
t.Run(tc.name, func(t *testing.T) {
|
||||
calculator := fakeLimitRangeCalculator{podLimitRange: tc.limitRange}
|
||||
processor := NewCappingRecommendationProcessor(&calculator)
|
||||
processedRecommendation, _, err := processor.Apply(&recommendation, nil, nil, &pod)
|
||||
|
||||
containerName := "ctr-name"
|
||||
vpa := test.VerticalPodAutoscaler().WithContainer(containerName).Get()
|
||||
vpa.Status.Recommendation = &recommendation
|
||||
|
||||
processedRecommendation, _, err := processor.Apply(vpa, &pod)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedRecommendation, *processedRecommendation)
|
||||
})
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package api
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
|
||||
)
|
||||
|
||||
|
|
@ -29,8 +29,6 @@ type RecommendationProcessor interface {
|
|||
// Apply processes and updates recommendation for given pod, based on container limits,
|
||||
// VPA policy and possibly other internal RecommendationProcessor context.
|
||||
// Must return a non-nil pointer to RecommendedPodResources or error.
|
||||
Apply(podRecommendation *vpa_types.RecommendedPodResources,
|
||||
policy *vpa_types.PodResourcePolicy,
|
||||
conditions []vpa_types.VerticalPodAutoscalerCondition,
|
||||
Apply(Vpa *vpa_types.VerticalPodAutoscaler,
|
||||
pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package api
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
|
||||
)
|
||||
|
||||
|
|
@ -31,11 +31,11 @@ type sequentialRecommendationProcessor struct {
|
|||
}
|
||||
|
||||
// Apply chains calls to underlying RecommendationProcessors in order provided on object construction
|
||||
func (p *sequentialRecommendationProcessor) Apply(podRecommendation *vpa_types.RecommendedPodResources,
|
||||
policy *vpa_types.PodResourcePolicy,
|
||||
conditions []vpa_types.VerticalPodAutoscalerCondition,
|
||||
func (p *sequentialRecommendationProcessor) Apply(vpa *vpa_types.VerticalPodAutoscaler,
|
||||
pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
|
||||
recommendation := podRecommendation
|
||||
|
||||
recommendation := vpa.Status.Recommendation.DeepCopy()
|
||||
|
||||
accumulatedContainerToAnnotationsMap := ContainerToAnnotationsMap{}
|
||||
|
||||
for _, processor := range p.processors {
|
||||
|
|
@ -43,7 +43,7 @@ func (p *sequentialRecommendationProcessor) Apply(podRecommendation *vpa_types.R
|
|||
err error
|
||||
containerToAnnotationsMap ContainerToAnnotationsMap
|
||||
)
|
||||
recommendation, containerToAnnotationsMap, err = processor.Apply(recommendation, policy, conditions, pod)
|
||||
recommendation, containerToAnnotationsMap, err = processor.Apply(vpa, pod)
|
||||
|
||||
for container, newAnnotations := range containerToAnnotationsMap {
|
||||
annotations, found := accumulatedContainerToAnnotationsMap[container]
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ package api
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -29,11 +29,9 @@ type fakeProcessor struct {
|
|||
message string
|
||||
}
|
||||
|
||||
func (p *fakeProcessor) Apply(podRecommendation *vpa_types.RecommendedPodResources,
|
||||
policy *vpa_types.PodResourcePolicy,
|
||||
conditions []vpa_types.VerticalPodAutoscalerCondition,
|
||||
func (p *fakeProcessor) Apply(vpa *vpa_types.VerticalPodAutoscaler,
|
||||
pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
|
||||
result := podRecommendation.DeepCopy()
|
||||
result := vpa.Status.Recommendation.DeepCopy()
|
||||
result.ContainerRecommendations[0].ContainerName += p.message
|
||||
containerToAnnotationsMap := ContainerToAnnotationsMap{"trace": []string{p.message}}
|
||||
return result, containerToAnnotationsMap, nil
|
||||
|
|
@ -43,13 +41,18 @@ func TestSequentialProcessor(t *testing.T) {
|
|||
name1 := "processor1"
|
||||
name2 := "processor2"
|
||||
tested := NewSequentialProcessor([]RecommendationProcessor{&fakeProcessor{name1}, &fakeProcessor{name2}})
|
||||
rec1 := &vpa_types.RecommendedPodResources{
|
||||
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
|
||||
{
|
||||
ContainerName: "",
|
||||
vpa1 := &vpa_types.VerticalPodAutoscaler{
|
||||
Status: vpa_types.VerticalPodAutoscalerStatus{
|
||||
Recommendation: &vpa_types.RecommendedPodResources{
|
||||
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
|
||||
{
|
||||
ContainerName: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
result, annotations, _ := tested.Apply(rec1, nil, nil, nil)
|
||||
},
|
||||
}
|
||||
result, annotations, _ := tested.Apply(vpa1, nil)
|
||||
assert.Equal(t, name1+name2, result.ContainerRecommendations[0].ContainerName)
|
||||
assert.Contains(t, annotations, "trace")
|
||||
assert.Contains(t, annotations["trace"], name1)
|
||||
|
|
|
|||
Loading…
Reference in New Issue