Merge pull request #104909 from pacoxu/kubectl-qos

kubectl: include init containers when determining pod QoS

Kubernetes-commit: 6ebd6f38b9804aa9eb58c5f742a56fa1f9e6ab85
This commit is contained in:
Kubernetes Publisher 2021-11-01 20:00:58 -07:00
commit a1514b65fc
3 changed files with 22 additions and 19 deletions

8
go.mod
View File

@@ -31,10 +31,10 @@ require (
github.com/stretchr/testify v1.7.0
golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.0.0-20211029083603-41019181ea88
k8s.io/api v0.0.0-20211029201511-cca52a076791
k8s.io/apimachinery v0.0.0-20211028185107-b255da54548a
k8s.io/cli-runtime v0.0.0-20211027005851-fd0a6d95140a
k8s.io/client-go v0.0.0-20211029083953-5be956ba48bd
k8s.io/client-go v0.0.0-20211030045615-2f5d8b0c528d
k8s.io/component-base v0.0.0-20211027004438-bd08cb7812c3
k8s.io/component-helpers v0.0.0-20211027004542-73bcdef827e4
k8s.io/klog/v2 v2.30.0
@@ -47,10 +47,10 @@
)
replace (
k8s.io/api => k8s.io/api v0.0.0-20211029083603-41019181ea88
k8s.io/api => k8s.io/api v0.0.0-20211029201511-cca52a076791
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20211028185107-b255da54548a
k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20211027005851-fd0a6d95140a
k8s.io/client-go => k8s.io/client-go v0.0.0-20211029083953-5be956ba48bd
k8s.io/client-go => k8s.io/client-go v0.0.0-20211030045615-2f5d8b0c528d
k8s.io/code-generator => k8s.io/code-generator v0.0.0-20211026222709-e92ab9f4d5a1
k8s.io/component-base => k8s.io/component-base v0.0.0-20211027004438-bd08cb7812c3
k8s.io/component-helpers => k8s.io/component-helpers v0.0.0-20211027004542-73bcdef827e4

8
go.sum
View File

@@ -902,14 +902,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.0.0-20211029083603-41019181ea88 h1:fNrtAsJFqgYBBCed0914pfMwaOUnYg8Ygl43m5Ow+RM=
k8s.io/api v0.0.0-20211029083603-41019181ea88/go.mod h1:dx3B5TOvDwqR2Oqn8muvHae5vt+xIgmxap/fvP+iItQ=
k8s.io/api v0.0.0-20211029201511-cca52a076791 h1:d7AqOEjufZ9cuB97JXVjyvpwonRKyphG0aLfK7+dBNI=
k8s.io/api v0.0.0-20211029201511-cca52a076791/go.mod h1:dx3B5TOvDwqR2Oqn8muvHae5vt+xIgmxap/fvP+iItQ=
k8s.io/apimachinery v0.0.0-20211028185107-b255da54548a h1:3NA1KMmF0ie/tLSNnA3v1ihGjVX7Z5OmZIyAOm0YmVo=
k8s.io/apimachinery v0.0.0-20211028185107-b255da54548a/go.mod h1:oyH3LcOKLLooQH1NlpHlilzkWxqsiHWETyHgssntcXg=
k8s.io/cli-runtime v0.0.0-20211027005851-fd0a6d95140a h1:6N6d0a80cFO2PI8RssKTsofnWWdzr4LBWKKe0t+1kAk=
k8s.io/cli-runtime v0.0.0-20211027005851-fd0a6d95140a/go.mod h1:C/9bEqryTALzT3y/AJFeg2TqzudoaRZrdr4Zdoeetn0=
k8s.io/client-go v0.0.0-20211029083953-5be956ba48bd h1:k4VNvA+D6bQXpz5bal2Bwrn2aZ3CBg9I+3P2xPztjhs=
k8s.io/client-go v0.0.0-20211029083953-5be956ba48bd/go.mod h1:/Cx6iv3iME4npCXzwBmPx9YFW9YvuLpEUzzreR/3EsM=
k8s.io/client-go v0.0.0-20211030045615-2f5d8b0c528d h1:x2WpKnettnxR4Y+chwKpBMRSz5ibcRsGkXnn3V1zuVg=
k8s.io/client-go v0.0.0-20211030045615-2f5d8b0c528d/go.mod h1:AeiEb+/G5s6RA7edXOQ1MJ1JyCAxXwoAYZ7CJhpF0Ow=
k8s.io/code-generator v0.0.0-20211026222709-e92ab9f4d5a1/go.mod h1:alK4pz5+y/zKXOPBnND3TvXOC/iF2oYTBDynHO1+qlI=
k8s.io/component-base v0.0.0-20211027004438-bd08cb7812c3 h1:Hx2Olj7sA4W9r8veeamsvNiiYf5SMTN/N/1uAMQk13c=
k8s.io/component-base v0.0.0-20211027004438-bd08cb7812c3/go.mod h1:7xqZFY7A2hOLqh+cOR9jinp+xJVXp6F5I1rOO+kBKJc=

View File

@@ -17,14 +17,14 @@ limitations under the License.
package qos
import (
corev1 "k8s.io/api/core/v1"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory))
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
func isSupportedQoSComputeResource(name corev1.ResourceName) bool {
func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
@@ -32,12 +32,15 @@ func isSupportedQoSComputeResource(name corev1.ResourceName) bool {
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
requests := corev1.ResourceList{}
limits := corev1.ResourceList{}
func GetPodQOS(pod *core.Pod) core.PodQOSClass {
requests := core.ResourceList{}
limits := core.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
for _, container := range pod.Spec.Containers {
allContainers := []core.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
@@ -71,12 +74,12 @@ func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
}
}
if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) {
if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
if len(requests) == 0 && len(limits) == 0 {
return corev1.PodQOSBestEffort
return core.PodQOSBestEffort
}
// Check is requests match limits for all resources.
if isGuaranteed {
@@ -89,7 +92,7 @@ func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
}
if isGuaranteed &&
len(requests) == len(limits) {
return corev1.PodQOSGuaranteed
return core.PodQOSGuaranteed
}
return corev1.PodQOSBurstable
return core.PodQOSBurstable
}