mirror of https://github.com/knative/pkg.git
Replace all usages of deprecated wait.PollImmediate with wait.PollUntilContextTimeout (#3004)
Signed-off-by: Prashant Rewar <108176843+prashantrewar@users.noreply.github.com>
commit fef5da99bd
parent 03bf3de6e2
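For reviewers unfamiliar with the deprecation: wait.PollImmediate(interval, timeout, fn) from k8s.io/apimachinery is superseded by wait.PollUntilContextTimeout(ctx, interval, timeout, immediate, fn), which threads a context.Context through to the condition function and turns the immediate first tick into an explicit flag. Every hunk below passes true to preserve PollImmediate's check-before-first-sleep behavior. A minimal, self-contained sketch of the pattern (illustrative only, not code from this repo):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ready := make(chan struct{})
	go func() {
		time.Sleep(30 * time.Millisecond)
		close(ready)
	}()

	// Before (deprecated):
	//   err := wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond,
	//       func() (bool, error) { ... })
	//
	// After: context first, then interval/timeout, then immediate=true to
	// keep the "poll once before sleeping" behavior of PollImmediate.
	err := wait.PollUntilContextTimeout(context.Background(),
		10*time.Millisecond, 250*time.Millisecond, true,
		func(ctx context.Context) (bool, error) {
			select {
			case <-ready:
				return true, nil
			default:
				return false, nil // not done yet; keep polling
			}
		})
	fmt.Println("err:", err) // nil: the condition became true within the timeout
}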
@@ -17,6 +17,7 @@ limitations under the License.
 package controller
 
 import (
+	"context"
 	"strconv"
 	"testing"
 	"time"
@@ -73,7 +74,7 @@ func TestRateLimit(t *testing.T) {
 	}
 
 	// Verify the items were properly added for consumption.
-	if wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond, func() (bool, error) {
+	if wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, 250*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		return q.Len() == 2, nil
 	}) != nil {
 		t.Error("Queue length was never 2")
@@ -89,7 +90,7 @@ func TestSlowQueue(t *testing.T) {
 	q := newTwoLaneWorkQueue("live-in-the-fast-lane", workqueue.DefaultControllerRateLimiter())
 	q.SlowLane().Add("1")
 	// Queue has async moving parts so if we check at the wrong moment, this might still be 0.
-	if wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond, func() (bool, error) {
+	if wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, 250*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		return q.Len() == 1, nil
 	}) != nil {
 		t.Error("Queue length was never 1")
@@ -74,7 +74,7 @@ func GetLoggingConfig(ctx context.Context) (*logging.Config, error) {
 	// These timeout and retry interval are set by heuristics.
 	// e.g. istio sidecar needs a few seconds to configure the pod network.
 	var lastErr error
-	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
+	if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 5*time.Second, true, func(ctx context.Context) (bool, error) {
 		loggingConfigMap, lastErr = kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(ctx, logging.ConfigMapName(), metav1.GetOptions{})
 		return lastErr == nil || apierrors.IsNotFound(lastErr), nil
 	}); err != nil {
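Two details in the hunk above generalize to the rest of the commit. First, call sites that already receive a ctx (as GetLoggingConfig does) forward it, so caller cancellation can cut the poll short; test code with no context of its own falls back to context.Background(). Second, the condition's error return controls retries: returning (false, nil) retries on the next tick, while a non-nil error aborts the poll immediately, which is why the code stores lastErr and returns nil instead of propagating a transient Get failure. A hedged sketch of both points (fetchConfig and getConfig are hypothetical stand-ins, not helpers from this repo):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fetchConfig keeps polling through transient errors, remembers the last one
// for the final message, and stops early if the caller cancels ctx.
// getConfig stands in for the ConfigMap Get call above.
func fetchConfig(ctx context.Context, getConfig func(context.Context) error) error {
	var lastErr error
	if err := wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, time.Second, true,
		func(ctx context.Context) (bool, error) {
			lastErr = getConfig(ctx)
			return lastErr == nil, nil // nil error => retry until the timeout
		}); err != nil {
		return fmt.Errorf("polling gave up: %w (last attempt: %v)", err, lastErr)
	}
	return nil
}

func main() {
	calls := 0
	err := fetchConfig(context.Background(), func(context.Context) error {
		if calls++; calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(err) // <nil>: the third attempt succeeded
}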
@@ -153,7 +153,7 @@ func TestMetricsExport(t *testing.T) {
 		return err
 	}
 	// Wait for the webserver to actually start serving metrics
-	return wait.PollImmediate(10*time.Millisecond, 10*time.Second, func() (bool, error) {
+	return wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, 10*time.Second, true, func(ctx context.Context) (bool, error) {
 		resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", prometheusPort))
 		return err == nil && resp.StatusCode == http.StatusOK, nil
 	})
@@ -17,6 +17,7 @@ limitations under the License.
 package metrics
 
 import (
+	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -173,7 +174,7 @@ func TestAllMetersExpiration(t *testing.T) {
 
 	// Expire the second entry
 	fakeClock.Step(9 * time.Minute) // t+12m
-	_ = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) {
+	_ = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
 		// Non-expiring defaultMeter should be available along with the non-expired entry
 		allMeters.lock.Lock()
 		defer allMeters.lock.Unlock()
@@ -244,7 +245,7 @@ func TestIfAllMeterResourcesAreRemoved(t *testing.T) {
 	// Expire all meters and views
 	// We need to unlock before we move the clock ahead in time
 	fakeClock.Step(12 * time.Minute) // t+12m
-	_ = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) {
+	_ = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
 		// Non-expiring defaultMeter should be available
 		allMeters.lock.Lock()
 		defer allMeters.lock.Unlock()
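Both metrics tests above discard the poll error on purpose: with a fake clock the wait is only a grace period for the expiry goroutine to observe the stepped clock, and the assertions inside and after the closure do the actual failing. A sketch of that shape (invariant and the cleanup goroutine are hypothetical stand-ins for the meter checks):

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	var cleaned atomic.Bool
	go func() {
		time.Sleep(50 * time.Millisecond) // background cleanup catching up
		cleaned.Store(true)
	}()

	invariant := func() bool { return cleaned.Load() }

	// The error is intentionally ignored: the poll only gives the background
	// work a chance to finish before the real assertion below.
	_ = wait.PollUntilContextTimeout(context.Background(),
		10*time.Millisecond, time.Second, true,
		func(ctx context.Context) (bool, error) { return invariant(), nil })

	fmt.Println("invariant holds:", invariant()) // the actual check
}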
@@ -120,7 +120,7 @@ func RunAndSyncInformers(ctx context.Context, informers ...controller.Informer)
 		return wf, err
 	}
 
-	err = wait.PollImmediate(time.Microsecond, wait.ForeverTestTimeout, func() (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, time.Microsecond, wait.ForeverTestTimeout, true, func(ctx context.Context) (bool, error) {
 		if watchesPending.Load() == 0 {
 			return true, nil
 		}
@@ -18,6 +18,7 @@ package test
 
 import (
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"os"
@@ -66,7 +67,7 @@ func TestCleanupOnInterrupt(t *testing.T) {
 
 	// poll until the ready file is gone - indicating the subtest has been set up
 	// with the cleanup functions
-	err = wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) {
+	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 2*time.Second, true, func(ctx context.Context) (bool, error) {
 		_, err := os.Stat(readyFile.Name())
 		if os.IsNotExist(err) {
 			return true, nil
@@ -78,7 +78,7 @@ func WaitForNewLeaders(ctx context.Context, t *testing.T, client kubernetes.Interface
 	defer span.End()
 
 	var leaders sets.Set[string]
-	err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
 		currLeaders, err := GetLeaders(ctx, t, client, deploymentName, namespace)
 		if err != nil {
 			return false, err
@@ -105,7 +105,7 @@ func WaitForNewLeader(ctx context.Context, client kubernetes.Interface, lease, n
 	span := logging.GetEmitableSpan(ctx, "WaitForNewLeader/"+lease)
 	defer span.End()
 	var leader string
-	err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
 		lease, err := client.CoordinationV1().Leases(namespace).Get(ctx, lease, metav1.GetOptions{})
 		if err != nil {
 			return false, fmt.Errorf("error getting lease %s: %w", lease, err)
@@ -53,7 +53,7 @@ func WaitForDeploymentState(ctx context.Context, client kubernetes.Interface, na
 	span := logging.GetEmitableSpan(ctx, fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc))
 	defer span.End()
 	var lastState *appsv1.Deployment
-	waitErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		var err error
 		lastState, err = d.Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
@@ -78,7 +78,7 @@ func WaitForPodListState(ctx context.Context, client kubernetes.Interface, inSta
 	defer span.End()
 
 	var lastState *corev1.PodList
-	waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, interval, podTimeout, true, func(ctx context.Context) (bool, error) {
 		var err error
 		lastState, err = p.List(ctx, metav1.ListOptions{})
 		if err != nil {
@@ -103,7 +103,7 @@ func WaitForPodState(ctx context.Context, client kubernetes.Interface, inState f
 	defer span.End()
 
 	var lastState *corev1.Pod
-	waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, interval, podTimeout, true, func(ctx context.Context) (bool, error) {
 		var err error
 		lastState, err = p.Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
@@ -139,7 +139,7 @@ func WaitForServiceEndpoints(ctx context.Context, client kubernetes.Interface, s
 	defer span.End()
 
 	var endpoints *corev1.Endpoints
-	waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, interval, podTimeout, true, func(ctx context.Context) (bool, error) {
 		var err error
 		endpoints, err = endpointsService.Get(ctx, svcName, metav1.GetOptions{})
 		if apierrs.IsNotFound(err) {
@@ -186,7 +186,7 @@ func GetEndpointAddresses(ctx context.Context, client kubernetes.Interface, svcN
 // WaitForChangedEndpoints waits until the endpoints for the given service differ from origEndpoints.
 func WaitForChangedEndpoints(ctx context.Context, client kubernetes.Interface, svcName, svcNamespace string, origEndpoints []string) error {
 	var newEndpoints []string
-	waitErr := wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, 1*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
 		var err error
 		newEndpoints, err = GetEndpointAddresses(ctx, client, svcName, svcNamespace)
 		return !cmp.Equal(origEndpoints, newEndpoints), err
@@ -213,7 +213,7 @@ func DeploymentScaledToZeroFunc() func(d *appsv1.Deployment) (bool, error) {
 // If the content is not present within timeout it returns error.
 func WaitForLogContent(ctx context.Context, client kubernetes.Interface, podName, containerName, namespace, content string) error {
 	var logs []byte
-	waitErr := wait.PollImmediate(interval, logTimeout, func() (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, interval, logTimeout, true, func(ctx context.Context) (bool, error) {
 		var err error
 		logs, err = PodLogs(ctx, client, podName, containerName, namespace)
 		if err != nil {
@@ -236,7 +236,7 @@ func WaitForAllPodsRunning(ctx context.Context, client kubernetes.Interface, nam
 func WaitForPodRunning(ctx context.Context, client kubernetes.Interface, name string, namespace string) error {
 	var p *corev1.Pod
 	pods := client.CoreV1().Pods(namespace)
-	waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, interval, podTimeout, true, func(ctx context.Context) (bool, error) {
 		var err error
 		p, err = pods.Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
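The wait helpers above all follow one pattern: keep the last observed object in a variable captured outside the closure, so that when the poll times out the returned error can describe what was last seen. A sketch of that pattern (waitForState, get, and inState are hypothetical stand-ins for the client-go Get/List calls and state checks above):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForState polls get until inState is satisfied, remembering the last
// observed value so a timeout error can report it.
func waitForState(ctx context.Context, get func(context.Context) (string, error),
	inState func(string) bool) error {
	var lastState string
	waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 200*time.Millisecond, true,
		func(ctx context.Context) (bool, error) {
			s, err := get(ctx)
			if err != nil {
				return false, nil // transient: retry on the next tick
			}
			lastState = s
			return inState(s), nil
		})
	if waitErr != nil {
		return fmt.Errorf("never reached desired state, last state %q: %w", lastState, waitErr)
	}
	return nil
}

func main() {
	err := waitForState(context.Background(),
		func(context.Context) (string, error) { return "Pending", nil },
		func(s string) bool { return s == "Running" })
	fmt.Println(err) // the error names the last observed state, "Pending"
}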
@@ -304,7 +304,7 @@ func TestNamespaceStream(t *testing.T) {
 
 	// We can't assume that the cancel signal doesn't race the pod creation signal, so
 	// we retry a few times to give some leeway.
-	if err := wait.PollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
+	if err := wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, time.Second, true, func(ctx context.Context) (bool, error) {
 		if _, err := podClient.Create(context.Background(), knativePod, metav1.CreateOptions{}); err != nil {
 			return false, err
 		}
@@ -167,7 +167,7 @@ func (sc *SpoofingClient) Poll(req *http.Request, inState ResponseChecker, check
 	}
 
 	var resp *Response
-	err := wait.PollImmediate(sc.RequestInterval, sc.RequestTimeout, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(context.Background(), sc.RequestInterval, sc.RequestTimeout, true, func(ctx context.Context) (bool, error) {
 		// Starting span to capture zipkin trace.
 		traceContext, span := trace.StartSpan(req.Context(), "SpoofingClient-Trace")
 		defer span.End()
@@ -243,7 +243,7 @@ func TestNew(t *testing.T) {
 	}
 
 	// Queue has async moving parts so if we check at the wrong moment, this might still be 0.
-	if wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond, func() (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 250*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		return c.WorkQueue().Len() == 1, nil
 	}) != nil {
 		t.Error("Queue length was never 1")
@@ -353,7 +353,7 @@ func TestNew(t *testing.T) {
 	}
 
 	// Queue has async moving parts so if we check at the wrong moment, this might still be 0.
-	if wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond, func() (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 250*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		return c.WorkQueue().Len() == 1, nil
 	}) != nil {
 		t.Error("Queue length was never 1")
@@ -131,7 +131,7 @@ func waitForNonTLSServerAvailable(t *testing.T, serverURL string, timeout time.D
 	t.Helper()
 	var interval = 100 * time.Millisecond
 
-	conditionFunc := func() (done bool, err error) {
+	conditionFunc := func(ctx context.Context) (done bool, err error) {
 		var conn net.Conn
 		conn, _ = net.DialTimeout("tcp", serverURL, timeout)
 		if conn != nil {
@@ -141,7 +141,7 @@ func waitForNonTLSServerAvailable(t *testing.T, serverURL string, timeout time.D
 		return false, nil
 	}
 
-	return wait.PollImmediate(interval, timeout, conditionFunc)
+	return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, conditionFunc)
 }
 
 func waitForServerAvailable(t *testing.T, serverURL string, timeout time.Duration) error {
@@ -158,7 +158,7 @@ func waitForServerAvailable(t *testing.T, serverURL string, timeout time.Duratio
 		}
 	)
 
-	conditionFunc := func() (done bool, err error) {
+	conditionFunc := func(ctx context.Context) (done bool, err error) {
 		conn, _ := tls.DialWithDialer(dialer, "tcp", serverURL, tlsConf)
 		if conn != nil {
 			conn.Close()
@@ -167,7 +167,7 @@ func waitForServerAvailable(t *testing.T, serverURL string, timeout time.Duratio
 		return false, nil
 	}
 
-	return wait.PollImmediate(interval, timeout, conditionFunc)
+	return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, conditionFunc)
 }
 
 func createNamespace(t *testing.T, kubeClient kubernetes.Interface, name string) error {
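The hunks above show the other shape of the migration: when the condition is a named variable rather than an inline literal, its type changes from wait.ConditionFunc (func() (bool, error)) to wait.ConditionWithContextFunc (func(context.Context) (bool, error)). Recent apimachinery releases also ship a WithContext() adapter on ConditionFunc, so a context-free condition can be reused without rewriting it; a sketch assuming that adapter is available:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// A legacy, context-free condition, as these tests had before the change.
	var legacy wait.ConditionFunc = func() (done bool, err error) {
		return true, nil
	}

	// Option A (what this commit does): change the signature in place.
	rewritten := func(ctx context.Context) (done bool, err error) {
		return legacy()
	}

	// Option B: adapt the existing function with WithContext().
	fmt.Println(wait.PollUntilContextTimeout(context.Background(),
		10*time.Millisecond, time.Second, true, rewritten))
	fmt.Println(wait.PollUntilContextTimeout(context.Background(),
		10*time.Millisecond, time.Second, true, legacy.WithContext()))
}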
@@ -1067,7 +1067,7 @@ func TestNew(t *testing.T) {
 	}
 
 	// Queue has async moving parts so if we check at the wrong moment, this might still be 0.
-	if wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond, func() (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 250*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		return c.WorkQueue().Len() == 1, nil
 	}) != nil {
 		t.Error("Queue length was never 1")
@@ -487,7 +487,7 @@ func TestNew(t *testing.T) {
 	}
 
 	// Queue has async moving parts so if we check at the wrong moment, this might still be 0.
-	if wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond, func() (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 250*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		return c.WorkQueue().Len() == 1, nil
 	}) != nil {
 		t.Error("Queue length was never 1")
@@ -601,7 +601,7 @@ func TestNew(t *testing.T) {
 	}
 
 	// Queue has async moving parts so if we check at the wrong moment, this might still be 0.
-	if wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond, func() (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 250*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		return c.WorkQueue().Len() == 1, nil
 	}) != nil {
 		t.Error("Queue length was never 1")
@@ -715,7 +715,7 @@ func newTestResourceAdmissionController(t *testing.T) webhook.AdmissionControlle
 	}
 
 	// Queue has async moving parts so if we check at the wrong moment, this might still be 0.
-	if wait.PollImmediate(10*time.Millisecond, 250*time.Millisecond, func() (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 250*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		return c.WorkQueue().Len() == 1, nil
 	}) != nil {
 		t.Error("Queue length was never 1")
@@ -17,6 +17,7 @@ limitations under the License.
 package websocket
 
 import (
+	"context"
 	"errors"
 	"io"
 	"net/http"
@@ -361,7 +362,7 @@ func TestDurableConnectionWhenConnectionBreaksDown(t *testing.T) {
 	defer conn.Shutdown()
 
 	for i := 0; i < 10; i++ {
-		err := wait.PollImmediate(50*time.Millisecond, 5*time.Second, func() (bool, error) {
+		err := wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
			if err := conn.Send(testPayload); err != nil {
				return false, nil
			}
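A last behavioral note for reviewers: on timeout the new function reports a context error rather than the legacy wait.ErrWaitTimeout value, so callers that matched the old sentinel should switch to an error check such as wait.Interrupted (provided by recent apimachinery releases; treat the exact helper as an assumption if you are on an older version). None of the call sites in this commit compare the error value, so they are unaffected. A sketch:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// A condition that never succeeds, to force a timeout.
	err := wait.PollUntilContextTimeout(context.Background(),
		10*time.Millisecond, 50*time.Millisecond, true,
		func(ctx context.Context) (bool, error) { return false, nil })

	fmt.Println(err)                   // a context deadline error, not ErrWaitTimeout
	fmt.Println(wait.Interrupted(err)) // true: the poll was cut off by its deadline
}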