use k8s.io/component-helpers/resource for pod request calculations

Piotr Betkier 2025-04-18 13:59:01 +02:00
parent 66feee1483
commit ac1c7b5463
8 changed files with 41 additions and 48 deletions
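
Editor's note: the pattern removed throughout this commit sums requests over pod.Spec.Containers only, which undercounts pods that declare init containers or RuntimeClass overhead. The sketch below is illustrative and not part of the commit; it contrasts that naive summation with the scheduler-aligned accounting that k8s.io/component-helpers/resource provides, where effective requests are roughly max(sum of regular containers, largest init container) plus overhead (ignoring restartable sidecar init containers and feature gates for brevity).

    package main

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // naiveRequests mirrors the pattern this commit removes: summing requests
    // of regular containers only, ignoring init containers and pod overhead.
    func naiveRequests(pod *apiv1.Pod) apiv1.ResourceList {
        total := apiv1.ResourceList{}
        for _, c := range pod.Spec.Containers {
            for name, quantity := range c.Resources.Requests {
                sum := total[name]
                sum.Add(quantity)
                total[name] = sum
            }
        }
        return total
    }

    func main() {
        pod := &apiv1.Pod{
            Spec: apiv1.PodSpec{
                // An init container that needs more CPU than the main container.
                InitContainers: []apiv1.Container{{
                    Resources: apiv1.ResourceRequirements{Requests: apiv1.ResourceList{
                        apiv1.ResourceCPU: resource.MustParse("2"),
                    }},
                }},
                Containers: []apiv1.Container{{
                    Resources: apiv1.ResourceRequirements{Requests: apiv1.ResourceList{
                        apiv1.ResourceCPU: resource.MustParse("500m"),
                    }},
                }},
            },
        }
        cpu := naiveRequests(pod)[apiv1.ResourceCPU]
        // Prints "500m"; scheduler-aligned accounting reports 2 CPUs here,
        // because the init container must fit before the main container runs.
        fmt.Println(cpu.String())
    }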

View File

@@ -21,6 +21,7 @@ import (
     "time"

     apiv1 "k8s.io/api/core/v1"
+    podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
     "k8s.io/autoscaler/cluster-autoscaler/utils/units"
 )
@@ -73,11 +74,8 @@ func getHours(startTime time.Time, endTime time.Time) float64 {
 // PodPrice returns a theoretical minimum price of running a pod for a given
 // period of time on a perfectly matching machine.
 func (model *Price) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) {
-    price := 0.0
-    for _, container := range pod.Spec.Containers {
-        price += getBasePrice(container.Resources.Requests, startTime, endTime)
-    }
-    return price, nil
+    podRequests := podutils.PodRequests(pod)
+    return getBasePrice(podRequests, startTime, endTime), nil
 }

 func getBasePrice(resources apiv1.ResourceList, startTime time.Time, endTime time.Time) float64 {

View File

@@ -24,6 +24,7 @@ import (
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce/localssdsize"
     "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
+    podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
     "k8s.io/autoscaler/cluster-autoscaler/utils/units"
     klog "k8s.io/klog/v2"
@@ -155,11 +156,8 @@ func (model *GcePriceModel) getPreemptibleDiscount(node *apiv1.Node) float64 {
 // PodPrice returns a theoretical minimum price of running a pod for a given
 // period of time on a perfectly matching machine.
 func (model *GcePriceModel) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) {
-    price := 0.0
-    for _, container := range pod.Spec.Containers {
-        price += model.getBasePrice(container.Resources.Requests, "", startTime, endTime)
-        price += model.getAdditionalPrice(container.Resources.Requests, startTime, endTime)
-    }
+    podRequests := podutils.PodRequests(pod)
+    price := model.getBasePrice(podRequests, "", startTime, endTime) + model.getAdditionalPrice(podRequests, startTime, endTime)
     return price, nil
 }

View File

@@ -20,9 +20,10 @@ import (
     "sort"

     apiv1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
+    podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
 )

 // podScoreInfo contains Pod and score that corresponds to how important it is to handle the pod first.
@@ -68,23 +69,16 @@ func (d *DecreasingPodOrderer) calculatePodScore(podsEquivalentGroup PodEquivale
         }
     }
-    cpuSum := resource.Quantity{}
-    memorySum := resource.Quantity{}
-    for _, container := range samplePod.Spec.Containers {
-        if request, ok := container.Resources.Requests[apiv1.ResourceCPU]; ok {
-            cpuSum.Add(request)
-        }
-        if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok {
-            memorySum.Add(request)
-        }
-    }
+    podRequests := podutils.PodRequests(samplePod)
+    podCPU := podRequests[apiv1.ResourceCPU]
+    podMemory := podRequests[apiv1.ResourceMemory]
     score := float64(0)
     if cpuAllocatable, ok := nodeTemplate.Node().Status.Allocatable[apiv1.ResourceCPU]; ok && cpuAllocatable.MilliValue() > 0 {
-        score += float64(cpuSum.MilliValue()) / float64(cpuAllocatable.MilliValue())
+        score += float64(podCPU.MilliValue()) / float64(cpuAllocatable.MilliValue())
     }
     if memAllocatable, ok := nodeTemplate.Node().Status.Allocatable[apiv1.ResourceMemory]; ok && memAllocatable.Value() > 0 {
-        score += float64(memorySum.Value()) / float64(memAllocatable.Value())
+        score += float64(podMemory.Value()) / float64(memAllocatable.Value())
     }
     return &podScoreInfo{
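
Editor's note on why the `, ok` guards disappear in hunks like the one above: apiv1.ResourceList is a map[ResourceName]resource.Quantity, and indexing a Go map with an absent key returns the zero value, so a missing request simply contributes zero. A minimal sketch:

    package main

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
    )

    func main() {
        requests := apiv1.ResourceList{} // no CPU entry at all
        cpu := requests[apiv1.ResourceCPU]
        // A missing key yields a zero resource.Quantity, so lookups need no
        // `, ok` guard: a pod without a CPU request simply scores zero.
        fmt.Println(cpu.MilliValue()) // prints 0
    }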

View File

@@ -21,6 +21,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/autoscaler/cluster-autoscaler/expander"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
+    podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
     klog "k8s.io/klog/v2"
 )
@@ -73,14 +74,9 @@ func (l *leastwaste) BestOptions(expansionOptions []expander.Option, nodeInfo ma
 func resourcesForPods(pods []*apiv1.Pod) (cpu resource.Quantity, memory resource.Quantity) {
     for _, pod := range pods {
-        for _, container := range pod.Spec.Containers {
-            if request, ok := container.Resources.Requests[apiv1.ResourceCPU]; ok {
-                cpu.Add(request)
-            }
-            if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok {
-                memory.Add(request)
-            }
-        }
+        podRequests := podutils.PodRequests(pod)
+        cpu.Add(podRequests[apiv1.ResourceCPU])
+        memory.Add(podRequests[apiv1.ResourceMemory])
     }

     return cpu, memory

View File

@@ -29,7 +29,6 @@ import (
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/klog/v2"
-    resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
 )

 // Info contains utilization information for a node.
@@ -108,7 +107,8 @@ func CalculateUtilizationOfResource(nodeInfo *framework.NodeInfo, resourceName a
     podsRequest := resource.MustParse("0")
     daemonSetAndMirrorPodsUtilization := resource.MustParse("0")
     for _, podInfo := range nodeInfo.Pods() {
-        resourceValue := resourcehelper.GetResourceRequestQuantity(podInfo.Pod, resourceName)
+        podRequests := podutils.PodRequests(podInfo.Pod)
+        resourceValue := podRequests[resourceName]
         // factor daemonset pods out of the utilization calculations
         if skipDaemonSetPods && podutils.IsDaemonSetPod(podInfo.Pod) {

View File

@@ -19,6 +19,7 @@ package gpu

 import (
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+    podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
     "k8s.io/klog/v2"
 )
@@ -105,15 +106,9 @@ func NodeHasGpu(GPULabel string, node *apiv1.Node) bool {
 // PodRequestsGpu returns true if a given pod has GPU request.
 func PodRequestsGpu(pod *apiv1.Pod) bool {
-    for _, container := range pod.Spec.Containers {
-        if container.Resources.Requests != nil {
-            _, gpuFound := container.Resources.Requests[ResourceNvidiaGPU]
-            if gpuFound {
-                return true
-            }
-        }
-    }
-    return false
+    podRequests := podutils.PodRequests(pod)
+    _, gpuFound := podRequests[ResourceNvidiaGPU]
+    return gpuFound
 }

 // GetNodeGPUFromCloudProvider returns the GPU the node has. Returned GPU has the GPU label of the
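
Editor's note on a behavioral nuance of the GPU change above: the removed loop inspected only pod.Spec.Containers, while the aggregated requests from the shared helper should also reflect init containers (assuming the helper's default options, which include them). A hypothetical pod requesting a GPU solely in an init container illustrates the difference:

    package main

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
        "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
    )

    func main() {
        // Hypothetical pod: the GPU is requested only by an init container.
        pod := &apiv1.Pod{
            Spec: apiv1.PodSpec{
                InitContainers: []apiv1.Container{{
                    Resources: apiv1.ResourceRequirements{Requests: apiv1.ResourceList{
                        gpu.ResourceNvidiaGPU: resource.MustParse("1"),
                    }},
                }},
            },
        }
        // The removed loop over pod.Spec.Containers returned false here;
        // with aggregated pod requests this should now report true.
        fmt.Println(gpu.PodRequestsGpu(pod))
    }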

View File

@@ -23,6 +23,7 @@ import (
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
+    podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
 )

 const (
@@ -88,12 +89,9 @@ func calculateNodeSelectorStats(pods []*apiv1.Pod) []nodeSelectorStats {
     stats := make([]nodeSelectorStats, 0)
     for _, pod := range pods {
         var podCpu resource.Quantity
-        for _, container := range pod.Spec.Containers {
-            if container.Resources.Requests != nil {
-                containerCpu := container.Resources.Requests[apiv1.ResourceCPU]
-                podCpu.Add(containerCpu)
-            }
-        }
+        podRequests := podutils.PodRequests(pod)
+        podCpu.Add(podRequests[apiv1.ResourceCPU])
         if podCpu.MilliValue() == 0 {
             podCpu = defaultMinCPU
         }

View File

@@ -17,10 +17,13 @@ limitations under the License.
 package pod

 import (
+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/kubelet/types"

     apiv1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    resourcehelper "k8s.io/component-helpers/resource"
 )

 const (
@@ -79,3 +82,14 @@ func ClearPodNodeNames(pods []*apiv1.Pod) []*apiv1.Pod {
     }
     return newPods
 }
+
+// PodRequests calculates Pod requests using a common resource helper shared with the scheduler
+func PodRequests(pod *apiv1.Pod) apiv1.ResourceList {
+    inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling)
+    podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources)
+    return resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
+        UseStatusResources:    inPlacePodVerticalScalingEnabled,
+        SkipPodLevelResources: !podLevelResourcesEnabled,
+    })
+}