kubectl: fix describe node output when sidecars are present

Update the resource calculation so that it correctly accumulates the
resources of scheduled pods when those pods contain restartable
init containers.

Kubernetes-commit: 479b2d03a4369b8ae4565ba73850c703331cdcc5
Author:     Todd Neal
Date:       2023-07-21 09:02:11 -05:00
Committer:  Kubernetes Publisher
Parent:     f8c6b2ac72
Commit:     f5a7ea080f

2 changed files with 262 additions and 11 deletions
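For context, the accounting rule the change implements can be sketched as follows. This is a minimal, self-contained illustration, not code from this commit: it models only CPU requests as plain int64 millicores instead of the real corev1.ResourceList, and the container type and podCPURequestMilli helper are hypothetical names. Restartable init containers (sidecars) keep running next to the app containers, so their requests are added to the pod total; ordinary init containers only raise the peak request observed while the pod initializes, and that peak is max'ed against the running total.

package main

import "fmt"

// container is a hypothetical stand-in for corev1.Container, reduced to the
// fields that matter for the accounting rule.
type container struct {
	name      string
	cpuMilli  int64 // CPU request in millicores
	isInit    bool
	isSidecar bool // init container with restartPolicy: Always
}

// podCPURequestMilli applies the sidecar-aware accumulation: sidecars add to
// the pod's running total, ordinary init containers only raise the peak seen
// during initialization, and the final request is the larger of the two.
func podCPURequestMilli(containers []container) int64 {
	var appTotal, sidecarTotal, initPeak int64
	for _, c := range containers {
		if !c.isInit {
			appTotal += c.cpuMilli
		}
	}
	for _, c := range containers {
		switch {
		case !c.isInit:
			continue
		case c.isSidecar:
			sidecarTotal += c.cpuMilli
			if sidecarTotal > initPeak {
				initPeak = sidecarTotal
			}
		default:
			// an ordinary init container runs alongside any sidecars started before it
			if c.cpuMilli+sidecarTotal > initPeak {
				initPeak = c.cpuMilli + sidecarTotal
			}
		}
	}
	total := appTotal + sidecarTotal
	if initPeak > total {
		total = initPeak
	}
	return total
}

func main() {
	// The pod used in TestDescribeNodeWithSidecar below: a 1-CPU sidecar, a
	// 1-CPU ordinary init container, and app containers requesting 1 CPU total.
	pod := []container{
		{name: "init-container-1", cpuMilli: 1000, isInit: true, isSidecar: true},
		{name: "init-container-2", cpuMilli: 1000, isInit: true},
		{name: "cpu-mem", cpuMilli: 1000},
	}
	fmt.Println(podCPURequestMilli(pod)) // 2000, i.e. the "cpu 2 (50%)" row in the test
}

Before this change, kubectl folded sidecar requests into the same max() used for ordinary init containers, so the 1-CPU sidecar above was not reflected in the node's allocated resources (1 CPU instead of 2).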


@@ -5456,6 +5456,156 @@ func TestDescribeNode(t *testing.T) {
	}
}

func TestDescribeNodeWithSidecar(t *testing.T) {
	holderIdentity := "holder"
	nodeCapacity := mergeResourceLists(
		getHugePageResourceList("2Mi", "4Gi"),
		getResourceList("8", "24Gi"),
		getHugePageResourceList("1Gi", "0"),
	)
	nodeAllocatable := mergeResourceLists(
		getHugePageResourceList("2Mi", "2Gi"),
		getResourceList("4", "12Gi"),
		getHugePageResourceList("1Gi", "0"),
	)
	restartPolicy := corev1.ContainerRestartPolicyAlways
	fake := fake.NewSimpleClientset(
		&corev1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: "bar",
				UID:  "uid",
			},
			Spec: corev1.NodeSpec{
				Unschedulable: true,
			},
			Status: corev1.NodeStatus{
				Capacity:    nodeCapacity,
				Allocatable: nodeAllocatable,
			},
		},
		&coordinationv1.Lease{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "bar",
				Namespace: corev1.NamespaceNodeLease,
			},
			Spec: coordinationv1.LeaseSpec{
				HolderIdentity: &holderIdentity,
				AcquireTime:    &metav1.MicroTime{Time: time.Now().Add(-time.Hour)},
				RenewTime:      &metav1.MicroTime{Time: time.Now()},
			},
		},
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "pod-with-resources",
				Namespace: "foo",
			},
			TypeMeta: metav1.TypeMeta{
				Kind: "Pod",
			},
			Spec: corev1.PodSpec{
				InitContainers: []corev1.Container{
					// sidecar, should sum into the total resources
					{
						Name:          "init-container-1",
						RestartPolicy: &restartPolicy,
						Resources: corev1.ResourceRequirements{
							Requests: getResourceList("1", "1Gi"),
						},
					},
					// non-sidecar
					{
						Name: "init-container-2",
						Resources: corev1.ResourceRequirements{
							Requests: getResourceList("1", "1Gi"),
						},
					},
				},
				Containers: []corev1.Container{
					{
						Name:  "cpu-mem",
						Image: "image:latest",
						Resources: corev1.ResourceRequirements{
							Requests: getResourceList("1", "1Gi"),
							Limits:   getResourceList("2", "2Gi"),
						},
					},
					{
						Name:  "hugepages",
						Image: "image:latest",
						Resources: corev1.ResourceRequirements{
							Requests: getHugePageResourceList("2Mi", "512Mi"),
							Limits:   getHugePageResourceList("2Mi", "512Mi"),
						},
					},
				},
			},
			Status: corev1.PodStatus{
				Phase: corev1.PodRunning,
			},
		},
		&corev1.EventList{
			Items: []corev1.Event{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "event-1",
						Namespace: "default",
					},
					InvolvedObject: corev1.ObjectReference{
						Kind: "Node",
						Name: "bar",
						UID:  "bar",
					},
					Message:        "Node bar status is now: NodeHasNoDiskPressure",
					FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
					LastTimestamp:  metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
					Count:          1,
					Type:           corev1.EventTypeNormal,
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "event-2",
						Namespace: "default",
					},
					InvolvedObject: corev1.ObjectReference{
						Kind: "Node",
						Name: "bar",
						UID:  "0ceac5fb-a393-49d7-b04f-9ea5f18de5e9",
					},
					Message:        "Node bar status is now: NodeReady",
					FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
					LastTimestamp:  metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
					Count:          2,
					Type:           corev1.EventTypeNormal,
				},
			},
		},
	)
	c := &describeClient{T: t, Namespace: "foo", Interface: fake}
	d := NodeDescriber{c}
	out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
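	// With the sidecar's requests counted toward the running pod, the node should
	// report cpu 2 (1 from the sidecar + 1 from cpu-mem; 50% of the 4 allocatable),
	// memory 2Gi (16% of 12Gi) and hugepages-2Mi 512Mi (25% of 2Gi).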
	expectedOut := []string{"Unschedulable", "true", "holder",
		`Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 2 (50%) 2 (50%)
memory 2Gi (16%) 2Gi (16%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 512Mi (25%) 512Mi (25%)`,
		`Node bar status is now: NodeHasNoDiskPressure`,
		`Node bar status is now: NodeReady`}
	for _, expected := range expectedOut {
		if !strings.Contains(out, expected) {
			t.Errorf("expected to find %s in output: %s", expected, out)
		}
	}
}

func TestDescribeStatefulSet(t *testing.T) {
	var partition int32 = 2672
	var replicas int32 = 1


@@ -32,21 +32,100 @@ import (
// total container resource requests and to the total container limits which have a
// non-zero quantity.
func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) {
	reqs, limits = corev1.ResourceList{}, corev1.ResourceList{}
	for _, container := range pod.Spec.Containers {
		addResourceList(reqs, container.Resources.Requests)
		addResourceList(limits, container.Resources.Limits)
	}
	// init containers define the minimum of any resource
	for _, container := range pod.Spec.InitContainers {
		maxResourceList(reqs, container.Resources.Requests)
		maxResourceList(limits, container.Resources.Limits)
	return podRequests(pod), podLimits(pod)
}

// podRequests is a simplified form of PodRequests from k8s.io/kubernetes/pkg/api/v1/resource that doesn't check
// feature gate enablement and avoids adding a dependency on k8s.io/kubernetes/pkg/apis/core/v1 for kubectl.
func podRequests(pod *corev1.Pod) corev1.ResourceList {
	// always allocate a fresh list; unlike the k8s.io/kubernetes helper this is
	// based on, there is no caller-supplied map to reuse
	reqs := corev1.ResourceList{}
	containerStatuses := map[string]*corev1.ContainerStatus{}
	for i := range pod.Status.ContainerStatuses {
		containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
	}
	// Sum the requests of the app containers, taking any in-place resize
	// (AllocatedResources) into account:
	for _, container := range pod.Spec.Containers {
		containerReqs := container.Resources.Requests
		cs, found := containerStatuses[container.Name]
		if found {
			if pod.Status.Resize == corev1.PodResizeStatusInfeasible {
				containerReqs = cs.AllocatedResources.DeepCopy()
			} else {
				containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
			}
		}
		addResourceList(reqs, containerReqs)
	}
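	// Requests from init containers follow the sidecar rules: a restartable init
	// container (restartPolicy: Always) keeps running next to the app containers,
	// so its requests are added to the pod total; an ordinary init container runs
	// to completion, so it only contributes to the peak request observed during
	// initialization (together with any sidecars declared before it), and that
	// peak is max'ed into the total below.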
	restartableInitContainerReqs := corev1.ResourceList{}
	initContainerReqs := corev1.ResourceList{}
	for _, container := range pod.Spec.InitContainers {
		containerReqs := container.Resources.Requests
		if container.RestartPolicy != nil && *container.RestartPolicy == corev1.ContainerRestartPolicyAlways {
			// restartable init containers run for the life of the pod, so count
			// their requests in the resulting cumulative container requests
			addResourceList(reqs, containerReqs)
			// track our cumulative restartable init container resources
			addResourceList(restartableInitContainerReqs, containerReqs)
			containerReqs = restartableInitContainerReqs
		} else {
			tmp := corev1.ResourceList{}
			addResourceList(tmp, containerReqs)
			addResourceList(tmp, restartableInitContainerReqs)
			containerReqs = tmp
		}
		maxResourceList(initContainerReqs, containerReqs)
	}
	maxResourceList(reqs, initContainerReqs)
	// Add overhead for running a pod to the sum of requests if requested:
	if pod.Spec.Overhead != nil {
		addResourceList(reqs, pod.Spec.Overhead)
	}
	return reqs
}

// podLimits is a simplified form of PodLimits from k8s.io/kubernetes/pkg/api/v1/resource that doesn't check
// feature gate enablement and avoids adding a dependency on k8s.io/kubernetes/pkg/apis/core/v1 for kubectl.
func podLimits(pod *corev1.Pod) corev1.ResourceList {
	limits := corev1.ResourceList{}
	for _, container := range pod.Spec.Containers {
		addResourceList(limits, container.Resources.Limits)
	}
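	// Limits from init containers mirror the request accounting above: sidecar
	// limits are added to the pod total, ordinary init container limits only
	// raise the initialization peak.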
	restartableInitContainerLimits := corev1.ResourceList{}
	initContainerLimits := corev1.ResourceList{}
	for _, container := range pod.Spec.InitContainers {
		containerLimits := container.Resources.Limits
		// Is the init container marked as a restartable init container?
		if container.RestartPolicy != nil && *container.RestartPolicy == corev1.ContainerRestartPolicyAlways {
			addResourceList(limits, containerLimits)
			// track our cumulative restartable init container resources
			addResourceList(restartableInitContainerLimits, containerLimits)
			containerLimits = restartableInitContainerLimits
		} else {
			tmp := corev1.ResourceList{}
			addResourceList(tmp, containerLimits)
			addResourceList(tmp, restartableInitContainerLimits)
			containerLimits = tmp
		}
		maxResourceList(initContainerLimits, containerLimits)
	}
	maxResourceList(limits, initContainerLimits)
	// Add overhead to non-zero limits if requested:
	if pod.Spec.Overhead != nil {
		for name, quantity := range pod.Spec.Overhead {
			if value, ok := limits[name]; ok && !value.IsZero() {
				value.Add(quantity)
@@ -54,7 +133,29 @@ func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) {
			}
		}
	}
	return
	return limits
}

// max returns the result of max(a, b) for each named resource and is only used if we can't
// accumulate into an existing resource list
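// For example (hypothetical values), max of {cpu: 2} and {cpu: 1, memory: 1Gi}
// is {cpu: 2, memory: 1Gi}: the per-resource maximum, with resources present in
// only one of the lists carried over unchanged.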
func max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList {
	result := corev1.ResourceList{}
	for key, value := range a {
		if other, found := b[key]; found {
			if value.Cmp(other) <= 0 {
				result[key] = other.DeepCopy()
				continue
			}
		}
		result[key] = value.DeepCopy()
	}
	for key, value := range b {
		if _, found := result[key]; !found {
			result[key] = value.DeepCopy()
		}
	}
	return result
}

// addResourceList adds the resources in newList to list