Fix the flaky kubectl tests at scale
Kubernetes-commit: 98058a1b68da3726e1110dc8bba3838459d03b29
parent eb899df9a1
commit 5b27ac0ca2
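This change removes the informer-cache precondition from kubectl's watch-based waits. Each of the three call sites below (the ephemeral-container wait, rollout status, and waitForPod) handed watchtools.UntilWithSync a preconditionFunc that returned NotFound whenever the object was absent from the just-synced cache. That fail-fast check is presumably what made the tests flaky at scale: on a busy cluster the initial LIST can land before the object exists, so the wait errored out immediately instead of letting the watch deliver the object. Passing nil instead leaves the outcome to the watch events and the existing timeout.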
@@ -355,21 +355,6 @@ func waitForEphemeralContainer(ctx context.Context, podClient corev1client.PodsG
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, 0*time.Second)
 	defer cancel()
 
-	preconditionFunc := func(store cache.Store) (bool, error) {
-		_, exists, err := store.Get(&metav1.ObjectMeta{Namespace: ns, Name: podName})
-		if err != nil {
-			return true, err
-		}
-		if !exists {
-			// We need to make sure we see the object in the cache before we start waiting for events
-			// or we would be waiting for the timeout if such object didn't exist.
-			// (e.g. it was deleted before we started informers so they wouldn't even see the delete event)
-			return true, errors.NewNotFound(corev1.Resource("pods"), podName)
-		}
-
-		return false, nil
-	}
-
 	fieldSelector := fields.OneTermEqualSelector("metadata.name", podName).String()
 	lw := &cache.ListWatch{
 		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
@@ -385,7 +370,7 @@ func waitForEphemeralContainer(ctx context.Context, podClient corev1client.PodsG
 	intr := interrupt.New(nil, cancel)
 	var result *corev1.Pod
 	err := intr.Run(func() error {
-		ev, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, preconditionFunc, func(ev watch.Event) (bool, error) {
+		ev, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, func(ev watch.Event) (bool, error) {
			switch ev.Type {
			case watch.Deleted:
				return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
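All three call sites converge on the same shape. Here is a minimal, self-contained sketch of that pattern; the function name waitForPodCondition and its parameters are illustrative, not the kubectl source, and it uses current client-go signatures where List and Watch take a context:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPodCondition blocks until condition returns true for an event on the
// named pod, or until ctx is cancelled or times out.
func waitForPodCondition(ctx context.Context, podClient corev1client.PodsGetter, ns, podName string,
	condition watchtools.ConditionFunc) (*corev1.Pod, error) {
	// Scope both LIST and WATCH to the one pod we care about, so the informer
	// cache behind UntilWithSync never holds anything else.
	fieldSelector := fields.OneTermEqualSelector("metadata.name", podName).String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return podClient.Pods(ns).List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return podClient.Pods(ns).Watch(ctx, options)
		},
	}

	// nil precondition: do not fail fast if the pod is missing from the freshly
	// synced cache; keep watching until the condition fires or ctx expires.
	ev, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, condition)
	if ev != nil {
		return ev.Object.(*corev1.Pod), err
	}
	return nil, err
}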
@@ -23,7 +23,6 @@ import (
 
 	"github.com/spf13/cobra"
 
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
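The apierrors import can go because its only remaining use in this file, the NewNotFound call inside the precondition removed in the next hunk, disappears with it.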
@@ -200,25 +199,11 @@ func (o *RolloutStatusOptions) Run() error {
 		},
 	}
 
-	preconditionFunc := func(store cache.Store) (bool, error) {
-		_, exists, err := store.Get(&metav1.ObjectMeta{Namespace: info.Namespace, Name: info.Name})
-		if err != nil {
-			return true, err
-		}
-		if !exists {
-			// We need to make sure we see the object in the cache before we start waiting for events
-			// or we would be waiting for the timeout if such object didn't exist.
-			return true, apierrors.NewNotFound(mapping.Resource.GroupResource(), info.Name)
-		}
-
-		return false, nil
-	}
-
 	// if the rollout isn't done yet, keep watching deployment status
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout)
 	intr := interrupt.New(nil, cancel)
 	return intr.Run(func() error {
-		_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, preconditionFunc, func(e watch.Event) (bool, error) {
+		_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
			switch t := e.Type; t {
			case watch.Added, watch.Modified:
				status, done, err := statusViewer.Status(e.Object.(runtime.Unstructured), o.Revision)
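For context on why passing nil changes behavior, here is a sketch of the PreconditionFunc contract from k8s.io/client-go/tools/watch (an illustration written for this note, not the library source or the removed kubectl code): UntilWithSync evaluates the precondition once, right after the informer's initial LIST syncs the store, and a true result ends the wait before any event is considered.

// Illustrative only: a fail-fast precondition of the kind this commit removes.
// Imports are the same ones the sketch above uses; namespace and name are
// placeholders.
var failFast watchtools.PreconditionFunc = func(store cache.Store) (bool, error) {
	// Look the object up in the freshly synced informer cache.
	_, exists, err := store.Get(&metav1.ObjectMeta{Namespace: "default", Name: "example"})
	if err != nil {
		return true, err // stop the wait: the cache lookup itself failed
	}
	// Returning true here ends UntilWithSync immediately. If the initial LIST
	// raced with the object's creation, that becomes a spurious failure even
	// though the next watch event would have delivered the object.
	return !exists, nil
}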
@@ -457,21 +457,6 @@ func waitForPod(podClient corev1client.PodsGetter, ns, name string, timeout time
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
 	defer cancel()
 
-	preconditionFunc := func(store cache.Store) (bool, error) {
-		_, exists, err := store.Get(&metav1.ObjectMeta{Namespace: ns, Name: name})
-		if err != nil {
-			return true, err
-		}
-		if !exists {
-			// We need to make sure we see the object in the cache before we start waiting for events
-			// or we would be waiting for the timeout if such object didn't exist.
-			// (e.g. it was deleted before we started informers so they wouldn't even see the delete event)
-			return true, errors.NewNotFound(corev1.Resource("pods"), name)
-		}
-
-		return false, nil
-	}
-
 	fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String()
 	lw := &cache.ListWatch{
 		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
@@ -487,9 +472,7 @@ func waitForPod(podClient corev1client.PodsGetter, ns, name string, timeout time
 	intr := interrupt.New(nil, cancel)
 	var result *corev1.Pod
 	err := intr.Run(func() error {
-		ev, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, preconditionFunc, func(ev watch.Event) (bool, error) {
-			return exitCondition(ev)
-		})
+		ev, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, exitCondition)
		if ev != nil {
			result = ev.Object.(*corev1.Pod)
		}
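A small side simplification in waitForPod: exitCondition already has the func(ev watch.Event) (bool, error) shape that UntilWithSync's variadic condition parameter expects, so once the precondition argument is gone it can be passed directly and the wrapping closure is dropped.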