From ac1c7b5463e050b66b579caa1567c3d1c24e9b9d Mon Sep 17 00:00:00 2001 From: Piotr Betkier Date: Fri, 18 Apr 2025 13:59:01 +0200 Subject: [PATCH] use k8s.io/component-helpers/resource for pod request calculations --- .../cloudprovider/equinixmetal/price_model.go | 8 +++----- .../cloudprovider/gce/gce_price_model.go | 8 +++----- .../estimator/decreasing_pod_orderer.go | 20 +++++++------------ cluster-autoscaler/expander/waste/waste.go | 12 ++++------- .../simulator/utilization/info.go | 4 ++-- cluster-autoscaler/utils/gpu/gpu.go | 13 ++++-------- cluster-autoscaler/utils/labels/labels.go | 10 ++++------ cluster-autoscaler/utils/pod/pod.go | 14 +++++++++++++ 8 files changed, 41 insertions(+), 48 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/equinixmetal/price_model.go b/cluster-autoscaler/cloudprovider/equinixmetal/price_model.go index cbe843a873..aa6f65124c 100644 --- a/cluster-autoscaler/cloudprovider/equinixmetal/price_model.go +++ b/cluster-autoscaler/cloudprovider/equinixmetal/price_model.go @@ -21,6 +21,7 @@ import ( "time" apiv1 "k8s.io/api/core/v1" + podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod" "k8s.io/autoscaler/cluster-autoscaler/utils/units" ) @@ -73,11 +74,8 @@ func getHours(startTime time.Time, endTime time.Time) float64 { // PodPrice returns a theoretical minimum price of running a pod for a given // period of time on a perfectly matching machine. 
func (model *Price) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { - price := 0.0 - for _, container := range pod.Spec.Containers { - price += getBasePrice(container.Resources.Requests, startTime, endTime) - } - return price, nil + podRequests := podutils.PodRequests(pod) + return getBasePrice(podRequests, startTime, endTime), nil } func getBasePrice(resources apiv1.ResourceList, startTime time.Time, endTime time.Time) float64 { diff --git a/cluster-autoscaler/cloudprovider/gce/gce_price_model.go b/cluster-autoscaler/cloudprovider/gce/gce_price_model.go index 23a5bc1dc6..613cfedcec 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_price_model.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_price_model.go @@ -24,6 +24,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce/localssdsize" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod" "k8s.io/autoscaler/cluster-autoscaler/utils/units" klog "k8s.io/klog/v2" @@ -155,11 +156,8 @@ func (model *GcePriceModel) getPreemptibleDiscount(node *apiv1.Node) float64 { // PodPrice returns a theoretical minimum price of running a pod for a given // period of time on a perfectly matching machine. 
func (model *GcePriceModel) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { - price := 0.0 - for _, container := range pod.Spec.Containers { - price += model.getBasePrice(container.Resources.Requests, "", startTime, endTime) - price += model.getAdditionalPrice(container.Resources.Requests, startTime, endTime) - } + podRequests := podutils.PodRequests(pod) + price := model.getBasePrice(podRequests, "", startTime, endTime) + model.getAdditionalPrice(podRequests, startTime, endTime) return price, nil } diff --git a/cluster-autoscaler/estimator/decreasing_pod_orderer.go b/cluster-autoscaler/estimator/decreasing_pod_orderer.go index d4ac467e90..d92d9c7d7d 100644 --- a/cluster-autoscaler/estimator/decreasing_pod_orderer.go +++ b/cluster-autoscaler/estimator/decreasing_pod_orderer.go @@ -20,9 +20,10 @@ import ( "sort" apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" + + podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod" ) // podScoreInfo contains Pod and score that corresponds to how important it is to handle the pod first. 
@@ -68,23 +69,16 @@ func (d *DecreasingPodOrderer) calculatePodScore(podsEquivalentGroup PodEquivale } } - cpuSum := resource.Quantity{} - memorySum := resource.Quantity{} + podRequests := podutils.PodRequests(samplePod) + podCPU := podRequests[apiv1.ResourceCPU] + podMemory := podRequests[apiv1.ResourceMemory] - for _, container := range samplePod.Spec.Containers { - if request, ok := container.Resources.Requests[apiv1.ResourceCPU]; ok { - cpuSum.Add(request) - } - if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok { - memorySum.Add(request) - } - } score := float64(0) if cpuAllocatable, ok := nodeTemplate.Node().Status.Allocatable[apiv1.ResourceCPU]; ok && cpuAllocatable.MilliValue() > 0 { - score += float64(cpuSum.MilliValue()) / float64(cpuAllocatable.MilliValue()) + score += float64(podCPU.MilliValue()) / float64(cpuAllocatable.MilliValue()) } if memAllocatable, ok := nodeTemplate.Node().Status.Allocatable[apiv1.ResourceMemory]; ok && memAllocatable.Value() > 0 { - score += float64(memorySum.Value()) / float64(memAllocatable.Value()) + score += float64(podMemory.Value()) / float64(memAllocatable.Value()) } return &podScoreInfo{ diff --git a/cluster-autoscaler/expander/waste/waste.go b/cluster-autoscaler/expander/waste/waste.go index d05de2516c..4412feaa82 100644 --- a/cluster-autoscaler/expander/waste/waste.go +++ b/cluster-autoscaler/expander/waste/waste.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" + podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod" klog "k8s.io/klog/v2" ) @@ -73,14 +74,9 @@ func (l *leastwaste) BestOptions(expansionOptions []expander.Option, nodeInfo ma func resourcesForPods(pods []*apiv1.Pod) (cpu resource.Quantity, memory resource.Quantity) { for _, pod := range pods { - for _, container := range pod.Spec.Containers { - if request, ok := 
container.Resources.Requests[apiv1.ResourceCPU]; ok { - cpu.Add(request) - } - if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok { - memory.Add(request) - } - } + podRequests := podutils.PodRequests(pod) + cpu.Add(podRequests[apiv1.ResourceCPU]) + memory.Add(podRequests[apiv1.ResourceMemory]) } return cpu, memory diff --git a/cluster-autoscaler/simulator/utilization/info.go b/cluster-autoscaler/simulator/utilization/info.go index 03ceac56f3..a9bbf9233e 100644 --- a/cluster-autoscaler/simulator/utilization/info.go +++ b/cluster-autoscaler/simulator/utilization/info.go @@ -29,7 +29,6 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog/v2" - resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource" ) // Info contains utilization information for a node. @@ -108,7 +107,8 @@ func CalculateUtilizationOfResource(nodeInfo *framework.NodeInfo, resourceName a podsRequest := resource.MustParse("0") daemonSetAndMirrorPodsUtilization := resource.MustParse("0") for _, podInfo := range nodeInfo.Pods() { - resourceValue := resourcehelper.GetResourceRequestQuantity(podInfo.Pod, resourceName) + podRequests := podutils.PodRequests(podInfo.Pod) + resourceValue := podRequests[resourceName] // factor daemonset pods out of the utilization calculations if skipDaemonSetPods && podutils.IsDaemonSetPod(podInfo.Pod) { diff --git a/cluster-autoscaler/utils/gpu/gpu.go b/cluster-autoscaler/utils/gpu/gpu.go index 6ca265aeaa..27c1407076 100644 --- a/cluster-autoscaler/utils/gpu/gpu.go +++ b/cluster-autoscaler/utils/gpu/gpu.go @@ -19,6 +19,7 @@ package gpu import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod" "k8s.io/klog/v2" ) @@ -105,15 +106,9 @@ func NodeHasGpu(GPULabel string, node *apiv1.Node) bool { // PodRequestsGpu returns true if a given pod has GPU request. 
func PodRequestsGpu(pod *apiv1.Pod) bool { - for _, container := range pod.Spec.Containers { - if container.Resources.Requests != nil { - _, gpuFound := container.Resources.Requests[ResourceNvidiaGPU] - if gpuFound { - return true - } - } - } - return false + podRequests := podutils.PodRequests(pod) + _, gpuFound := podRequests[ResourceNvidiaGPU] + return gpuFound } // GetNodeGPUFromCloudProvider returns the GPU the node has. Returned GPU has the GPU label of the diff --git a/cluster-autoscaler/utils/labels/labels.go b/cluster-autoscaler/utils/labels/labels.go index 081e78b14b..cd07360529 100644 --- a/cluster-autoscaler/utils/labels/labels.go +++ b/cluster-autoscaler/utils/labels/labels.go @@ -23,6 +23,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod" ) const ( @@ -88,12 +89,9 @@ func calculateNodeSelectorStats(pods []*apiv1.Pod) []nodeSelectorStats { stats := make([]nodeSelectorStats, 0) for _, pod := range pods { var podCpu resource.Quantity - for _, container := range pod.Spec.Containers { - if container.Resources.Requests != nil { - containerCpu := container.Resources.Requests[apiv1.ResourceCPU] - podCpu.Add(containerCpu) - } - } + podRequests := podutils.PodRequests(pod) + podCpu.Add(podRequests[apiv1.ResourceCPU]) + if podCpu.MilliValue() == 0 { podCpu = defaultMinCPU } diff --git a/cluster-autoscaler/utils/pod/pod.go b/cluster-autoscaler/utils/pod/pod.go index b85b14ac32..00b2b98426 100644 --- a/cluster-autoscaler/utils/pod/pod.go +++ b/cluster-autoscaler/utils/pod/pod.go @@ -17,10 +17,13 @@ limitations under the License. 
package pod import ( + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/types" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + resourcehelper "k8s.io/component-helpers/resource" ) const ( @@ -79,3 +82,14 @@ func ClearPodNodeNames(pods []*apiv1.Pod) []*apiv1.Pod { } return newPods } + +// PodRequests calculates the requests of the given pod, using the common resource helper shared with the scheduler. +func PodRequests(pod *apiv1.Pod) apiv1.ResourceList { + inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) + podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) + + return resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{ + UseStatusResources: inPlacePodVerticalScalingEnabled, + SkipPodLevelResources: !podLevelResourcesEnabled, + }) +}