Merge pull request #124952 from AxeZhan/maxContainerRestarts

[Sidecar Containers] Pods comparison by maxContainerRestarts should account for sidecar containers

Kubernetes-commit: 9a9331afd60a4db3ffd8dc9bd38b788948308175
Commit: 2cccb2df6a
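
Editorial note (not part of the commit): a "sidecar" here is a restartable init container, i.e. an init container whose restartPolicy is Always; its restart count is reported in InitContainerStatuses rather than ContainerStatuses. The standalone sketch below mirrors the comparison logic added in this diff — the maxRestarts and mkPod helpers are made up for illustration — and shows two pods that tie on regular container restarts, with the sidecar restart counts breaking the tie.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// maxRestarts mirrors the reworked maxContainerRestarts below: it tracks the
// highest restart count of regular containers and of restartable init
// containers ("sidecars") separately.
func maxRestarts(pod *corev1.Pod) (regular, sidecar int) {
	for _, c := range pod.Status.ContainerStatuses {
		regular = max(regular, int(c.RestartCount))
	}
	sidecarNames := map[string]bool{}
	for _, c := range pod.Spec.InitContainers {
		if c.RestartPolicy != nil && *c.RestartPolicy == corev1.ContainerRestartPolicyAlways {
			sidecarNames[c.Name] = true
		}
	}
	for _, c := range pod.Status.InitContainerStatuses {
		if sidecarNames[c.Name] {
			sidecar = max(sidecar, int(c.RestartCount))
		}
	}
	return regular, sidecar
}

func main() {
	always := corev1.ContainerRestartPolicyAlways
	// mkPod is a hypothetical helper for this sketch only.
	mkPod := func(regularRestarts, sidecarRestarts int32) *corev1.Pod {
		return &corev1.Pod{
			Spec: corev1.PodSpec{
				InitContainers: []corev1.Container{{Name: "sidecar", RestartPolicy: &always}},
			},
			Status: corev1.PodStatus{
				ContainerStatuses:     []corev1.ContainerStatus{{Name: "app", RestartCount: regularRestarts}},
				InitContainerStatuses: []corev1.ContainerStatus{{Name: "sidecar", RestartCount: sidecarRestarts}},
			},
		}
	}

	a, b := mkPod(3, 3), mkPod(3, 2)
	regularA, sidecarA := maxRestarts(a)
	regularB, sidecarB := maxRestarts(b)
	// Previously only the regular counts were compared, so a and b tied here;
	// with this change the sidecar counts decide that a sorts before b.
	fmt.Println(regularA == regularB, sidecarA > sidecarB) // true true
}
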
go.mod (2 changes)

@@ -29,7 +29,7 @@ require (
 	github.com/stretchr/testify v1.9.0
 	golang.org/x/sys v0.26.0
 	gopkg.in/evanphx/json-patch.v4 v4.12.0
-	k8s.io/api v0.0.0-20241106230208-459b3bf01982
+	k8s.io/api v0.0.0-20241108114310-4772861d607e
 	k8s.io/apimachinery v0.0.0-20241106231735-d941d9fb4c83
 	k8s.io/cli-runtime v0.0.0-20241105234034-30b82e78f32f
 	k8s.io/client-go v0.0.0-20241107030607-c57dbd8decb0
go.sum (4 changes)

@@ -202,8 +202,8 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.0.0-20241106230208-459b3bf01982 h1:dawXS/j3CvWhfIXAJA29WpGivkWXdaOE6HmDY24ujcU=
-k8s.io/api v0.0.0-20241106230208-459b3bf01982/go.mod h1:a1KpjW7qk2zdbW2qzX85ex/fBhKNkow9dOvgGEKU+u8=
+k8s.io/api v0.0.0-20241108114310-4772861d607e h1:SJxpg9FSTdq3qp/R+LRlZv+xjY+miO+30ZpC7QkSkoM=
+k8s.io/api v0.0.0-20241108114310-4772861d607e/go.mod h1:h7yaPC7+0KxMELdLjLoo6n6m3EWq6AeHEY25PjH4cPI=
 k8s.io/apimachinery v0.0.0-20241106231735-d941d9fb4c83 h1:4KfMPmiiRIpvYJQ8cBYFEFht59EKysW1anuJWzHLHNg=
 k8s.io/apimachinery v0.0.0-20241106231735-d941d9fb4c83/go.mod h1:HqhdaJUgQqky29T1V0o2yFkt/pZqLFIDyn9Zi/8rxoY=
 k8s.io/cli-runtime v0.0.0-20241105234034-30b82e78f32f h1:MV1YnjgZhMzrdCt6HNUhCycAg6uphIgNgbyAyj4DBOI=
(pod sorting utilities source file — path not shown in this view)
@@ -21,6 +21,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 )
 
 // IsPodAvailable returns true if a pod is available; false otherwise.
@@ -113,8 +114,8 @@ func (s ByLogging) Less(i, j int) bool {
 		return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
 	}
 	// 5. Pods with containers with higher restart counts < lower restart counts
-	if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
-		return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
+	if res := compareMaxContainerRestarts(s[i], s[j]); res != nil {
+		return *res
 	}
 	// 6. older pods < newer pods < empty timestamp pods
 	if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
@@ -161,8 +162,8 @@ func (s ActivePods) Less(i, j int) bool {
 		return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
 	}
 	// 7. Pods with containers with higher restart counts < lower restart counts
-	if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
-		return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
+	if res := compareMaxContainerRestarts(s[i], s[j]); res != nil {
+		return *res
 	}
 	// 8. Empty creation time pods < newer pods < older pods
 	if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
@@ -190,12 +191,41 @@ func podReadyTime(pod *corev1.Pod) *metav1.Time {
 	return &metav1.Time{}
 }
 
-func maxContainerRestarts(pod *corev1.Pod) int {
-	maxRestarts := 0
+func maxContainerRestarts(pod *corev1.Pod) (regularRestarts, sidecarRestarts int) {
 	for _, c := range pod.Status.ContainerStatuses {
-		maxRestarts = max(maxRestarts, int(c.RestartCount))
+		regularRestarts = max(regularRestarts, int(c.RestartCount))
 	}
-	return maxRestarts
+	names := sets.New[string]()
+	for _, c := range pod.Spec.InitContainers {
+		if c.RestartPolicy != nil && *c.RestartPolicy == corev1.ContainerRestartPolicyAlways {
+			names.Insert(c.Name)
+		}
+	}
+	for _, c := range pod.Status.InitContainerStatuses {
+		if names.Has(c.Name) {
+			sidecarRestarts = max(sidecarRestarts, int(c.RestartCount))
+		}
+	}
+	return
+}
+
+// We use *bool here to determine equality:
+// true: pi has a higher container restart count.
+// false: pj has a higher container restart count.
+// nil: Both have the same container restart count.
+func compareMaxContainerRestarts(pi *corev1.Pod, pj *corev1.Pod) *bool {
+	regularRestartsI, sidecarRestartsI := maxContainerRestarts(pi)
+	regularRestartsJ, sidecarRestartsJ := maxContainerRestarts(pj)
+	if regularRestartsI != regularRestartsJ {
+		res := regularRestartsI > regularRestartsJ
+		return &res
+	}
+	// If pods have the same restart count, an attempt is made to compare the restart counts of sidecar containers.
+	if sidecarRestartsI != sidecarRestartsJ {
+		res := sidecarRestartsI > sidecarRestartsJ
+		return &res
+	}
+	return nil
 }
 
 // ContainerType and VisitContainers are taken from
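
An aside (not from the commit) on the *bool return: ByLogging.Less and ActivePods.Less above only need to act when the restart counts actually differ, so compareMaxContainerRestarts returns nil for "equal" and lets the callers fall through to their next ordering criterion. A self-contained sketch of that tri-state pattern, with a made-up compareDesc helper standing in for the real function:

package main

import "fmt"

// compareDesc mirrors the shape of compareMaxContainerRestarts: true/false
// when a and b differ (higher value sorts first), nil when they are equal.
func compareDesc(a, b int) *bool {
	if a != b {
		res := a > b
		return &res
	}
	return nil
}

func main() {
	// Chain criteria the way the Less methods do: take the first decisive
	// comparison, otherwise fall through to the next one.
	primary := compareDesc(3, 3)   // regular container restarts: tie
	secondary := compareDesc(3, 2) // sidecar restarts: decisive
	less := false
	switch {
	case primary != nil:
		less = *primary
	case secondary != nil:
		less = *secondary
	}
	fmt.Println(less) // true: the sidecar criterion broke the tie
}
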
(tests for the pod sorting utilities — file path not shown in this view)
@@ -29,6 +29,7 @@ func TestActivePods(t *testing.T) {
 	time1 := metav1.Now()
 	time2 := metav1.NewTime(time1.Add(1 * time.Second))
 	time3 := metav1.NewTime(time1.Add(2 * time.Second))
+	restartAlways := corev1.ContainerRestartPolicyAlways
 
 	tests := []struct {
 		name string
@@ -364,6 +365,81 @@ func TestActivePods(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "higher sidecar restart count should sort before lower restart count",
+			pod1: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "podWithMoreRestarts",
+					Namespace: "default",
+				},
+				Spec: corev1.PodSpec{
+					NodeName: "node1",
+					InitContainers: []corev1.Container{
+						{
+							Name:          "sidecar",
+							RestartPolicy: &restartAlways,
+						},
+					},
+				},
+				Status: corev1.PodStatus{
+					Phase: corev1.PodRunning,
+					Conditions: []corev1.PodCondition{
+						{
+							Type:               corev1.PodReady,
+							Status:             corev1.ConditionTrue,
+							LastTransitionTime: time1,
+						},
+					},
+					ContainerStatuses: []corev1.ContainerStatus{
+						{
+							RestartCount: 3,
+						},
+					},
+					InitContainerStatuses: []corev1.ContainerStatus{
+						{
+							Name:         "sidecar",
+							RestartCount: 3,
+						},
+					},
+				},
+			},
+			pod2: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "podWithLessRestarts",
+					Namespace: "default",
+				},
+				Spec: corev1.PodSpec{
+					NodeName: "node1",
+					InitContainers: []corev1.Container{
+						{
+							Name:          "sidecar",
+							RestartPolicy: &restartAlways,
+						},
+					},
+				},
+				Status: corev1.PodStatus{
+					Phase: corev1.PodRunning,
+					Conditions: []corev1.PodCondition{
+						{
+							Type:               corev1.PodReady,
+							Status:             corev1.ConditionTrue,
+							LastTransitionTime: time1,
+						},
+					},
+					ContainerStatuses: []corev1.ContainerStatus{
+						{
+							RestartCount: 3,
+						},
+					},
+					InitContainerStatuses: []corev1.ContainerStatus{
+						{
+							Name:         "sidecar",
+							RestartCount: 2,
+						},
+					},
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {