mirror of https://github.com/grpc/grpc-go.git
test: clean up deadlines set in tests (#6506)
parent 9c46304ff1
commit 4832debdaa
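The whole change applies one pattern: ad-hoc per-call deadlines (10*time.Second, time.Second, time.Nanosecond, or unbounded context.WithCancel) are replaced with two shared test-package constants, defaultTestTimeout for RPCs expected to complete and defaultTestShortTimeout for RPCs expected to miss their deadline. A minimal sketch of the pattern, assuming constants along these lines exist in the test package (their values are not defined anywhere in this diff):

	// Assumed values for illustration only; the diff does not show them.
	const (
		defaultTestTimeout      = 10 * time.Second      // RPCs that should succeed
		defaultTestShortTimeout = 10 * time.Millisecond // RPCs that should time out
	)

	func (s) TestExample(t *testing.T) {
		// Every RPC in the suite now runs under an explicit, uniform deadline.
		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
		defer cancel() // releases the timer even when t.Fatalf exits the test early
		if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
			t.Fatalf("EmptyCall(_, _) = _, %v; want _, <nil>", err)
		}
	}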
@@ -29,7 +29,6 @@ import (
 	"strings"
 	"sync"
 	"testing"
-	"time"

 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -84,7 +83,7 @@ func runUnixTest(t *testing.T, address, target, expectedAuthority string, dialer
 		t.Fatalf("Error starting endpoint server: %v", err)
 	}
 	defer ss.Stop()
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
 	if err != nil {
@@ -202,7 +201,7 @@ func (s) TestColonPortAuthority(t *testing.T) {
 		t.Fatalf("grpc.Dial(%q) = %v", ss.Target, err)
 	}
 	defer cc.Close()
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	_, err = testgrpc.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{})
 	if err != nil {
@@ -170,7 +170,9 @@ func (s) TestCredsBundleFromBalancer(t *testing.T) {

 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
-	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("Test failed. Reason: %v", err)
 	}
 }
@@ -244,7 +246,7 @@ func testDoneInfo(t *testing.T, e env) {
 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)

-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	wantErr := detailedError
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) {
@@ -321,7 +323,7 @@ func testDoneLoads(t *testing.T) {

 	tc := testgrpc.NewTestServiceClient(ss.CC)

-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, nil)
@@ -438,7 +440,7 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) {
 	t.Log("Created a ClientConn...")

 	// The first RPC should fail because there's no address.
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
@@ -450,7 +452,7 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) {
 	t.Logf("Pushing resolver state update: %v through the manual resolver", state)

 	// The second RPC should succeed.
-	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
@@ -519,7 +521,7 @@ func (s) TestMetadataInAddressAttributes(t *testing.T) {
 	defer ss.Stop()

 	// The RPC should succeed with the expected md.
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
@@ -536,7 +538,7 @@ func (s) TestMetadataInAddressAttributes(t *testing.T) {
 // TestServersSwap creates two servers and verifies the client switches between
 // them when the name resolver reports the first and then the second.
 func (s) TestServersSwap(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	// Initialize servers
@@ -592,7 +594,7 @@ func (s) TestServersSwap(t *testing.T) {
 }

 func (s) TestWaitForReady(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	// Initialize server
@@ -1035,7 +1037,7 @@ func (s) TestBalancerProducerHonorsContext(t *testing.T) {
 	// rpcErrChan is given to the LB policy to report the status of the
 	// producer's one RPC.
 	ctxChan := make(chan context.Context, 1)
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	ctxChan <- ctx

 	rpcErrChan := make(chan error)
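The short-timeout swaps above are deliberate: when the assertion is that an RPC must fail with DeadlineExceeded (for example, before the resolver has produced any address), a small constant keeps the test fast while still exercising the deadline path. A sketch of that assertion shape, reusing names from the hunks above:

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer cancel()
	// The call is expected to hit the deadline, not to succeed.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
	}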
@@ -523,7 +523,9 @@ func (s) TestCZChannelMetrics(t *testing.T) {
 	cc := te.clientConn(grpc.WithResolvers(r))
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(cc)
-	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
 	}

@@ -540,11 +542,11 @@ func (s) TestCZChannelMetrics(t *testing.T) {
 		Payload:      largePayload,
 	}

-	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
+	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
 		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
 	}

-	stream, err := tc.FullDuplexCall(context.Background())
+	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
 	}
@@ -603,7 +605,9 @@ func (s) TestCZServerMetrics(t *testing.T) {
 	defer te.tearDown()
 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
-	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
 	}

@@ -619,11 +623,11 @@ func (s) TestCZServerMetrics(t *testing.T) {
 		ResponseSize: int32(smallSize),
 		Payload:      largePayload,
 	}
-	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
+	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
 		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
 	}

-	stream, err := tc.FullDuplexCall(context.Background())
+	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
 	}
@@ -746,7 +750,7 @@ func doServerSideFailedUnaryCall(tc testgrpc.TestServiceClient, t *testing.T) {
 }

 func doClientSideInitiatedFailedStream(tc testgrpc.TestServiceClient, t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
@@ -779,7 +783,9 @@ func doClientSideInitiatedFailedStream(tc testgrpc.TestServiceClient, t *testing

 // This func is to be used to test client side counting of failed streams.
 func doServerSideInitiatedFailedStreamWithRSTStream(tc testgrpc.TestServiceClient, t *testing.T, l *listenerWrapper) {
-	stream, err := tc.FullDuplexCall(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
 	}
@@ -816,10 +822,10 @@ func doServerSideInitiatedFailedStreamWithRSTStream(tc testgrpc.TestServiceClien
 }

 // this func is to be used to test client side counting of failed streams.
-func doServerSideInitiatedFailedStreamWithGoAway(tc testgrpc.TestServiceClient, t *testing.T, l *listenerWrapper) {
+func doServerSideInitiatedFailedStreamWithGoAway(ctx context.Context, tc testgrpc.TestServiceClient, t *testing.T, l *listenerWrapper) {
 	// This call is just to keep the transport from shutting down (socket will be deleted
 	// in this case, and we will not be able to get metrics).
-	s, err := tc.FullDuplexCall(context.Background())
+	s, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
 	}
@@ -834,7 +840,7 @@ func doServerSideInitiatedFailedStreamWithGoAway(tc testgrpc.TestServiceClient,
 		t.Fatalf("s.Recv() failed with error: %v", err)
 	}

-	s, err = tc.FullDuplexCall(context.Background())
+	s, err = tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
 	}
@@ -859,7 +865,7 @@ func doServerSideInitiatedFailedStreamWithGoAway(tc testgrpc.TestServiceClient,
 }

 func doIdleCallToInvokeKeepAlive(tc testgrpc.TestServiceClient, t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	_, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
@@ -870,6 +876,9 @@ func doIdleCallToInvokeKeepAlive(tc testgrpc.TestServiceClient, t *testing.T) {
 }

 func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+
 	czCleanup := channelz.NewChannelzStorageForTesting()
 	defer czCleanupWrapper(czCleanup, t)
 	e := tcpClearRREnv
@@ -951,7 +960,7 @@ func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) {
 		t.Fatal(err)
 	}

-	doServerSideInitiatedFailedStreamWithGoAway(tc, t, rcw)
+	doServerSideInitiatedFailedStreamWithGoAway(ctx, tc, t, rcw)
 	if err := verifyResultWithDelay(func() (bool, error) {
 		skt := channelz.GetSocket(skID)
 		sktData := skt.SocketData
@@ -988,7 +997,7 @@ func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *t
 	cc, dw := te.clientConnWithConnControl()
 	tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)}

-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
@@ -1534,7 +1543,7 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) {
 		t.Fatal(err)
 	}

-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	awaitState(ctx, t, te.cc, connectivity.Ready)
 	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}})
@@ -1694,7 +1703,7 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) {
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(cc)
 	// make sure the connection is up
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
@@ -1707,9 +1716,7 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) {
 	defer close(done)
 	go func() {
 		for {
-			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 			tc.EmptyCall(ctx, &testpb.Empty{})
-			cancel()
 			select {
 			case <-time.After(10 * time.Millisecond):
 			case <-done:
@@ -1763,7 +1770,7 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) {
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(cc)
 	// make sure the connection is up
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
@@ -1862,7 +1869,7 @@ func (s) TestCZChannelConnectivityState(t *testing.T) {
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(cc)
 	// make sure the connection is up
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
@@ -2010,7 +2017,7 @@ func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) {
 		t.Fatal(err)
 	}

-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	awaitState(ctx, t, te.cc, connectivity.Ready)
 	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}})
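Besides swapping constants, this file also threads the test's context through helpers: doServerSideInitiatedFailedStreamWithGoAway now takes ctx as its first parameter instead of minting context.Background() internally, so every stream it opens inherits the caller's deadline. A hypothetical helper in the same shape (the name and body below are illustrative, not from the diff):

	// doStreamUnderDeadline opens a stream on the caller's ctx, so the test's
	// single deadline bounds all work the helper does.
	func doStreamUnderDeadline(ctx context.Context, tc testgrpc.TestServiceClient, t *testing.T) {
		t.Helper()
		if _, err := tc.FullDuplexCall(ctx); err != nil {
			t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
		}
	}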
@@ -27,7 +27,6 @@ import (
 	"strings"
 	"sync/atomic"
 	"testing"
-	"time"

 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -109,13 +108,13 @@ func testCompressOK(t *testing.T, e env) {
 		ResponseSize: respSize,
 		Payload:      payload,
 	}
-	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("something", "something"))
 	if _, err := tc.UnaryCall(ctx, req); err != nil {
 		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
 	}
 	// Streaming RPC
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -168,13 +167,13 @@ func testIdentityEncoding(t *testing.T, e env) {
 		ResponseSize: 10,
 		Payload:      payload,
 	}
-	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("something", "something"))
 	if _, err := tc.UnaryCall(ctx, req); err != nil {
 		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
 	}
 	// Streaming RPC
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
 	stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity"))
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -543,6 +542,9 @@ func (s) TestStreamSetSendCompressorAfterHeaderSendFailure(t *testing.T) {
 }

 func (s) TestClientSupportedCompressors(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+
 	for _, tt := range []struct {
 		desc string
 		ctx  context.Context
@@ -550,12 +552,12 @@ func (s) TestClientSupportedCompressors(t *testing.T) {
 	}{
 		{
 			desc: "No additional grpc-accept-encoding header",
-			ctx:  context.Background(),
+			ctx:  ctx,
 			want: []string{"gzip"},
 		},
 		{
 			desc: "With additional grpc-accept-encoding header",
-			ctx: metadata.AppendToOutgoingContext(context.Background(),
+			ctx: metadata.AppendToOutgoingContext(ctx,
 				"grpc-accept-encoding", "test-compressor-1",
 				"grpc-accept-encoding", "test-compressor-2",
 			),
@@ -563,7 +565,7 @@ func (s) TestClientSupportedCompressors(t *testing.T) {
 		},
 		{
 			desc: "With additional empty grpc-accept-encoding header",
-			ctx: metadata.AppendToOutgoingContext(context.Background(),
+			ctx: metadata.AppendToOutgoingContext(ctx,
 				"grpc-accept-encoding", "",
 			),
 			want: []string{"gzip"},
@@ -589,10 +591,7 @@ func (s) TestClientSupportedCompressors(t *testing.T) {
 			}
 			defer ss.Stop()

-			ctx, cancel := context.WithTimeout(tt.ctx, defaultTestTimeout)
-			defer cancel()
-
-			_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
+			_, err := ss.Client.EmptyCall(tt.ctx, &testpb.Empty{})
 			if err != nil {
 				t.Fatalf("Unexpected unary call error, got: %v, want: nil", err)
 			}
@@ -628,13 +627,13 @@ func testCompressorRegister(t *testing.T, e env) {
 		ResponseSize: respSize,
 		Payload:      payload,
 	}
-	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("something", "something"))
 	if _, err := tc.UnaryCall(ctx, req); err != nil {
 		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
 	}
 	// Streaming RPC
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -693,7 +692,7 @@ func (s) TestGzipBadChecksum(t *testing.T) {
 	}
 	defer ss.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024))
@@ -62,12 +62,14 @@ func (s) TestConfigSelector(t *testing.T) {
 	}
 	defer ss.Stop()

-	ctxDeadline := time.Now().Add(10 * time.Second)
-	ctx, cancel := context.WithDeadline(context.Background(), ctxDeadline)
+	const normalTimeout = 10 * time.Second
+	ctxDeadline := time.Now().Add(normalTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), normalTimeout)
 	defer cancel()

-	longCtxDeadline := time.Now().Add(30 * time.Second)
-	longdeadlineCtx, cancel := context.WithDeadline(context.Background(), longCtxDeadline)
+	const longTimeout = 30 * time.Second
+	longCtxDeadline := time.Now().Add(longTimeout)
+	longdeadlineCtx, cancel := context.WithTimeout(context.Background(), longTimeout)
 	defer cancel()
 	shorterTimeout := 3 * time.Second

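The TestConfigSelector hunk above relies on an equivalence documented in the context package: WithTimeout(parent, d) is defined as WithDeadline(parent, time.Now().Add(d)), so switching constructors preserves behavior while letting the duration be named once and reused. A sketch, assuming the deadline value is still needed for later assertions:

	const normalTimeout = 10 * time.Second
	ctxDeadline := time.Now().Add(normalTimeout) // kept for comparing against the observed deadline
	ctx, cancel := context.WithTimeout(context.Background(), normalTimeout)
	defer cancel()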
@@ -141,7 +141,7 @@ func (s) TestCancelWhileRecvingWithCompression(t *testing.T) {
 	defer ss.Stop()

 	for i := 0; i < 10; i++ {
-		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 		s, err := ss.Client.FullDuplexCall(ctx, grpc.UseCompressor(gzip.Name))
 		if err != nil {
 			t.Fatalf("failed to start bidi streaming RPC: %v", err)
@@ -567,7 +567,7 @@ func newTest(t *testing.T, e env) *test {
 		e:         e,
 		maxStream: math.MaxUint32,
 	}
-	te.ctx, te.cancel = context.WithCancel(context.Background())
+	te.ctx, te.cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
 	return te
 }

@@ -689,7 +689,9 @@ type wrapHS struct {
 }

 func (w wrapHS) GracefulStop() {
-	w.s.Shutdown(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	w.s.Shutdown(ctx)
 }

 func (w wrapHS) Stop() {
@@ -950,7 +952,7 @@ func (s) TestContextDeadlineNotIgnored(t *testing.T) {
 	}
 	cancel()
 	atomic.StoreInt32(&(lc.beLazy), 1)
-	ctx, cancel = context.WithTimeout(context.Background(), 50*time.Millisecond)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	defer cancel()
 	t1 := time.Now()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
@@ -985,10 +987,9 @@ func testTimeoutOnDeadServer(t *testing.T, e env) {
 	awaitState(ctx, t, cc, connectivity.Ready)
 	te.srv.Stop()
 	awaitNotState(ctx, t, cc, connectivity.Ready)
-	ctx, cancel = context.WithTimeout(ctx, 5*time.Millisecond)
-	_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
-	cancel()
-	if status.Code(err) != codes.DeadlineExceeded {
+	ctx, cancel = context.WithTimeout(ctx, defaultTestShortTimeout)
+	defer cancel()
+	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded)
 	}
 	awaitNewConnLogOutput()
@@ -1338,13 +1339,13 @@ func (s) TestServiceConfigTimeout(t *testing.T) {

 	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
 	var err error
-	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
 	}
 	cancel()

-	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
 	}
@@ -1381,17 +1382,15 @@ func (s) TestServiceConfigTimeout(t *testing.T) {
 		time.Sleep(time.Millisecond)
 	}

-	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
 	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
 	}
-	cancel()

-	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
 	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
 	}
-	cancel()
 }

 func (s) TestServiceConfigMaxMsgSize(t *testing.T) {
@@ -1686,7 +1685,7 @@ func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) {
 		time.Sleep(time.Millisecond)
 	}

-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
 	if err != nil {
@@ -1814,7 +1813,7 @@ func (s) TestPreloaderSenderSend(t *testing.T) {
 	}
 	defer ss.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	stream, err := ss.Client.FullDuplexCall(ctx)
@@ -2195,7 +2194,9 @@ func testFailedEmptyUnary(t *testing.T, e env) {
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(te.clientConn())

-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	wantErr := detailedError
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
@@ -2380,7 +2381,7 @@ func testPeerNegative(t *testing.T, e env) {
 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
 	peer := new(peer.Peer)
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	cancel()
 	tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer))
 }
@@ -2468,7 +2469,9 @@ func testMetadataUnaryRPC(t *testing.T, e env) {
 		Payload:      payload,
 	}
 	var header, trailer metadata.MD
-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
 		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
 	}
@@ -2500,7 +2503,9 @@ func testMetadataOrderUnaryRPC(t *testing.T, e env) {
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(te.clientConn())

-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2")
 	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value3")

@@ -2553,7 +2558,9 @@ func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) {
 		Payload:      payload,
 	}
 	var trailer metadata.MD
-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.WaitForReady(true)); err != nil {
 		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
 	}
@@ -2575,7 +2582,9 @@ func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) {
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(te.clientConn())

-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -2625,7 +2634,9 @@ func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) {
 		Payload:      payload,
 	}
 	var header metadata.MD
-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
 		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
 	}
@@ -2671,7 +2682,9 @@ func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) {
 	}

 	var header metadata.MD
-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
 		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
 	}
@@ -2715,7 +2728,9 @@ func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) {
 		Payload:      payload,
 	}
 	var header metadata.MD
-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err == nil {
 		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <non-nil>", ctx, err)
 	}
@@ -2744,7 +2759,9 @@ func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) {
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(te.clientConn())

-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -2789,7 +2806,9 @@ func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) {
 		argSize  = 1
 		respSize = 1
 	)
-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -2854,7 +2873,7 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
 		argSize  = 1
 		respSize = -1
 	)
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	stream, err := tc.FullDuplexCall(ctx)
@@ -2926,7 +2945,9 @@ func testMalformedHTTP2Metadata(t *testing.T, e env) {
 		ResponseSize: 314,
 		Payload:      payload,
 	}
-	ctx := metadata.NewOutgoingContext(context.Background(), malformedHTTP2Metadata)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, malformedHTTP2Metadata)
 	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Internal {
 		t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal)
 	}
@@ -2985,7 +3006,7 @@ func (s) TestTransparentRetry(t *testing.T) {
 	}
 	defer cc.Close()

-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	client := testgrpc.NewTestServiceClient(cc)
@@ -3036,7 +3057,7 @@ func testCancel(t *testing.T, e env) {
 		ResponseSize: respSize,
 		Payload:      payload,
 	}
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	time.AfterFunc(1*time.Millisecond, cancel)
 	if r, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Canceled {
 		t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, err, codes.Canceled)
@@ -3063,7 +3084,7 @@ func testCancelNoIO(t *testing.T, e env) {
 	// Start one blocked RPC for which we'll never send streaming
 	// input. This will consume the 1 maximum concurrent streams,
 	// causing future RPCs to hang.
-	ctx, cancelFirst := context.WithCancel(context.Background())
+	ctx, cancelFirst := context.WithTimeout(context.Background(), defaultTestTimeout)
 	_, err := tc.StreamingInputCall(ctx)
 	if err != nil {
 		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
@@ -3076,7 +3097,7 @@ func testCancelNoIO(t *testing.T, e env) {
 	// succeeding.
 	// TODO(bradfitz): add internal test hook for this (Issue 534)
 	for {
-		ctx, cancelSecond := context.WithTimeout(context.Background(), 50*time.Millisecond)
+		ctx, cancelSecond := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 		_, err := tc.StreamingInputCall(ctx)
 		cancelSecond()
 		if err == nil {
@@ -3098,7 +3119,7 @@ func testCancelNoIO(t *testing.T, e env) {
 	}()

 	// This should be blocked until the 1st is canceled, then succeed.
-	ctx, cancelThird := context.WithTimeout(context.Background(), 500*time.Millisecond)
+	ctx, cancelThird := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	if _, err := tc.StreamingInputCall(ctx); err != nil {
 		t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
 	}
@@ -3485,7 +3506,7 @@ func testClientStreaming(t *testing.T, e env, sizes []int) {
 	defer te.tearDown()
 	tc := testgrpc.NewTestServiceClient(te.clientConn())

-	ctx, cancel := context.WithTimeout(te.ctx, time.Second*30)
+	ctx, cancel := context.WithTimeout(te.ctx, defaultTestTimeout)
 	defer cancel()
 	stream, err := tc.StreamingInputCall(ctx)
 	if err != nil {
@@ -3584,7 +3605,7 @@ func testExceedMaxStreamsLimit(t *testing.T, e env) {
 	}
 	// Loop until receiving the new max stream setting from the server.
 	for {
-		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 		defer cancel()
 		_, err := tc.StreamingInputCall(ctx)
 		if err == nil {
@@ -3617,14 +3638,14 @@ func testStreamsQuotaRecovery(t *testing.T, e env) {

 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.StreamingInputCall(ctx); err != nil {
 		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, <nil>", err)
 	}
 	// Loop until the new max stream setting is effective.
 	for {
-		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 		_, err := tc.StreamingInputCall(ctx)
 		cancel()
 		if err == nil {
@@ -3653,7 +3674,7 @@ func testStreamsQuotaRecovery(t *testing.T, e env) {
 		Payload:      payload,
 	}
 	// No rpc should go through due to the max streams limit.
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	defer cancel()
 	if _, err := tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Errorf("tc.UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
@@ -3664,7 +3685,7 @@ func testStreamsQuotaRecovery(t *testing.T, e env) {

 	cancel()
 	// A new stream should be allowed after canceling the first one.
-	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.StreamingInputCall(ctx); err != nil {
 		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %v", err, nil)
@@ -4173,7 +4194,7 @@ func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) {

 	tc := testgrpc.NewTestServiceClient(cc)
 	// This unary call should fail, but not timeout.
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <Unavailable>", err)
@@ -4189,8 +4210,6 @@ func (s) TestFlowControlLogicalRace(t *testing.T) {
 		itemSize    = 1 << 10
 		recvCount   = 2
 		maxFailures = 3
-
-		requestTimeout = time.Second * 5
 	)

 	requestCount := 10000
@@ -4222,7 +4241,7 @@ func (s) TestFlowControlLogicalRace(t *testing.T) {

 	failures := 0
 	for i := 0; i < requestCount; i++ {
-		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 		output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
 		if err != nil {
 			t.Fatalf("StreamingOutputCall; err = %q", err)
@@ -4544,7 +4563,7 @@ func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) {
 	}
 	defer proxy.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	md := metadata.Pairs(mdkey, "val")
 	ctx = metadata.NewOutgoingContext(ctx, md)
@@ -4608,7 +4627,7 @@ func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) {
 	}
 	defer proxy.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	md := metadata.Pairs(mdkey, "val")
 	ctx = metadata.NewOutgoingContext(ctx, md)
@@ -4654,7 +4673,7 @@ func (s) TestStatsTagsAndTrace(t *testing.T) {
 	}
 	defer endpoint.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	testCases := []struct {
@@ -4705,7 +4724,7 @@ func (s) TestTapTimeout(t *testing.T) {
 	// This was known to be flaky; test several times.
 	for i := 0; i < 10; i++ {
 		// Set our own deadline in case the server hangs.
-		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 		res, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
 		cancel()
 		if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled {
@@ -4726,7 +4745,7 @@ func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) {
 		t.Fatalf("Error starting endpoint server: %v", err)
 	}
 	defer ss.Stop()
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	stream, err := ss.Client.FullDuplexCall(ctx)
 	if err != nil {
@@ -5156,7 +5175,7 @@ func (s) TestServeExitsWhenListenerClosed(t *testing.T) {
 	}
 	defer cc.Close()
 	c := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 		t.Fatalf("Failed to send test RPC to server: %v", err)
@@ -5192,7 +5211,7 @@ func (s) TestStatusInvalidUTF8Message(t *testing.T) {
 	}
 	defer ss.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMsg {
@@ -5226,7 +5245,7 @@ func (s) TestStatusInvalidUTF8Details(t *testing.T) {
 	}
 	defer ss.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
@@ -5324,7 +5343,7 @@ func (s) TestDisabledIOBuffers(t *testing.T) {
 		s.Serve(lis)
 	}()
 	defer s.Stop()
-	dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second)
+	dctx, dcancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer dcancel()
 	cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
 	if err != nil {
@@ -5332,7 +5351,7 @@ func (s) TestDisabledIOBuffers(t *testing.T) {
 	}
 	defer cc.Close()
 	c := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true))
 	if err != nil {
@@ -5374,7 +5393,7 @@ func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) {

 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216)))
 	var err error
@@ -5406,7 +5425,7 @@ func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) {

 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	var err error
 	if err = verifyResultWithDelay(func() (bool, error) {
@@ -5437,7 +5456,7 @@ func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env)

 	cc, dw := te.clientConnWithConnControl()
 	tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)}
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
@@ -5478,7 +5497,7 @@ func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env)
 	defer te.tearDown()
 	cc, _ := te.clientConnWithConnControl()
 	tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)}
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
@@ -5526,7 +5545,7 @@ func (s) TestNetPipeConn(t *testing.T) {
 	}}
 	testgrpc.RegisterTestServiceServer(s, ts)
 	go s.Serve(pl)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	cc, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDialer(pl.Dialer()))
 	if err != nil {
@@ -5609,14 +5628,14 @@ func (s) TestRPCWaitsForResolver(t *testing.T) {
 	cc := te.clientConn(grpc.WithResolvers(r))
 	tc := testgrpc.NewTestServiceClient(cc)

-	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	defer cancel()
 	// With no resolved addresses yet, this will timeout.
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
 	}

-	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	go func() {
 		time.Sleep(time.Second)
@@ -5800,7 +5819,7 @@ func (s) TestClientCancellationPropagatesUnary(t *testing.T) {
 	}
 	defer ss.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)

 	wg.Add(1)
 	go func() {
@@ -5843,7 +5862,7 @@ func (s) TestCanceledRPCCallOptionRace(t *testing.T) {
 	defer ss.Stop()

 	const count = 1000
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	var wg sync.WaitGroup
@@ -6265,7 +6284,7 @@ func (s) TestRecvWhileReturningStatus(t *testing.T) {
 		t.Fatalf("Error starting endpoint server: %v", err)
 	}
 	defer ss.Stop()
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	for i := 0; i < 100; i++ {
 		stream, err := ss.Client.FullDuplexCall(ctx)
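Many hunks in this file share a two-step recipe: create the bounded context first, then layer outgoing metadata onto it, so the deadline and the metadata ride on the same context value. In sketch form, using names that appear in the hunks above:

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	ctx = metadata.NewOutgoingContext(ctx, testMetadata) // deadline and MD on one ctx
	if _, err := tc.UnaryCall(ctx, req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}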
@@ -79,13 +79,14 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) {
 	defer cc.Close()
 	c := testgrpc.NewTestServiceClient(cc)

+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+
 	endTime := time.Now().Add(testTime)
 	for time.Now().Before(endTime) {
-		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 		if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
 			t.Fatalf("EmptyCall(_, _) = _, %v; want _, <nil>", err)
 		}
-		cancel()
 	}
 }

@@ -184,7 +185,7 @@ func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) {
 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)

-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.FullDuplexCall(ctx); err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
@@ -222,7 +223,7 @@ func testServerGoAway(t *testing.T, e env) {
 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
 	// Finish an RPC to make sure the connection is good.
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
@@ -234,7 +235,7 @@ func testServerGoAway(t *testing.T, e env) {
 	}()
 	// Loop until the server side GoAway signal is propagated to the client.
 	for {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded {
 			cancel()
 			break
@@ -242,7 +243,7 @@ func testServerGoAway(t *testing.T, e env) {
 		cancel()
 	}
 	// A new RPC should fail.
-	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal)
@@ -273,7 +274,7 @@ func testServerGoAwayPendingRPC(t *testing.T, e env) {

 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -291,7 +292,7 @@ func testServerGoAwayPendingRPC(t *testing.T, e env) {
 	start := time.Now()
 	errored := false
 	for time.Since(start) < time.Second {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 		_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
 		cancel()
 		if err != nil {
@@ -347,7 +348,7 @@ func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) {

 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -369,7 +370,7 @@ func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) {
 	// Loop until the server side GoAway signal is propagated to the client.

 	for {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
 			cancel()
 			break
@@ -494,7 +495,7 @@ func testConcurrentServerStopAndGoAway(t *testing.T, e env) {
 	}()
 	// Loop until the server side GoAway signal is propagated to the client.
 	for {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
 			cancel()
 			break
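TestGracefulClientOnGoAway shows the loop variant of the cleanup: instead of building and canceling a fresh one-second context on every iteration, a single context with defaultTestTimeout is hoisted above the loop and shared by all iterations. A sketch of the resulting shape:

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel() // one timer for the whole loop instead of one per iteration
	for time.Now().Before(endTime) {
		if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
			t.Fatalf("EmptyCall(_, _) = _, %v; want _, <nil>", err)
		}
	}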
@@ -24,7 +24,6 @@ import (
 	"net"
 	"sync"
 	"testing"
-	"time"

 	"golang.org/x/net/http2"
 	"google.golang.org/grpc"
@@ -148,7 +147,7 @@ func (s) TestGracefulStop(t *testing.T) {

 	// Now dial. The listener's Accept method will return a valid connection,
 	// even though GracefulStop has closed the listener.
-	ctx, dialCancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, dialCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer dialCancel()
 	cc, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(d))
 	if err != nil {
@@ -160,7 +159,7 @@ func (s) TestGracefulStop(t *testing.T) {
 	// 4. Send an RPC on the new connection.
 	// The server would send a GOAWAY first, but we are delaying the server's
 	// writes for now until the client writes more than the preface.
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	if _, err = client.FullDuplexCall(ctx); err == nil || status.Code(err) != codes.Unavailable {
 		t.Fatalf("FullDuplexCall= _, %v; want _, <status code Unavailable>", err)
 	}
@@ -890,7 +890,7 @@ func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn
 // RPC, and returns the stream.
 func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) {
 	t.Helper()
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	hc := healthgrpc.NewHealthClient(cc)
 	stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service})
 	if err != nil {
@@ -122,7 +122,7 @@ func (s) TestInvokeCancel(t *testing.T) {
 	defer ss.Stop()

 	for i := 0; i < 100; i++ {
-		ctx, cancel := context.WithCancel(context.Background())
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 		cancel()
 		ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{})
 	}
@@ -144,7 +144,7 @@ func (s) TestInvokeCancelClosedNonFailFast(t *testing.T) {
 	defer ss.Stop()

 	ss.CC.Close()
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	cancel()
 	if err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}, grpc.WaitForReady(true)); err == nil {
 		t.Fatal("ClientConn.Invoke() on closed connection succeeded when expected to fail")
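Note that the invoke tests keep their immediate cancel() call even after the switch: the point of those tests is an already-canceled context, and the timeout now only serves as a backstop. Sketch:

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	cancel() // cancel immediately; the RPC should fail because ctx is already canceled
	ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{})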
@@ -103,7 +103,7 @@ func testLocalCredsE2ESucceed(network, address string) error {
 	defer cc.Close()

 	c := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	if _, err = c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
@@ -198,7 +198,7 @@ func testLocalCredsE2EFail(dopts []grpc.DialOption) error {
 	defer cc.Close()

 	c := testgrpc.NewTestServiceClient(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	_, err = c.EmptyCall(ctx, &testpb.Empty{})
@@ -25,7 +25,6 @@ import (
 	"reflect"
 	"strings"
 	"testing"
-	"time"

 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/internal/grpctest"
@@ -125,7 +124,7 @@ func (s) TestInvalidMetadata(t *testing.T) {

 	for _, test := range tests {
 		t.Run("unary "+test.name, func(t *testing.T) {
-			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 			defer cancel()
 			ctx = metadata.NewOutgoingContext(ctx, test.md)
 			ctx = metadata.AppendToOutgoingContext(ctx, test.appendMD...)
@@ -138,11 +137,11 @@ func (s) TestInvalidMetadata(t *testing.T) {
 	// call the stream server's api to drive the server-side unit testing
 	for _, test := range tests {
 		t.Run("streaming "+test.name, func(t *testing.T) {
-			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 			defer cancel()
 			stream, err := ss.Client.FullDuplexCall(ctx)
 			if err != nil {
-				t.Errorf("call ss.Client.FullDuplexCall(context.Background()) will success but got err :%v", err)
+				t.Errorf("call ss.Client.FullDuplexCall got err :%v", err)
 				return
 			}
 			if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil {
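The metadata tests exercise two distinct attachment paths, visible in the unary hunk above. A short sketch of the difference, with placeholder keys:

ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()

// NewOutgoingContext replaces all outgoing metadata on the context.
ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("preset-key", "v1"))

// AppendToOutgoingContext adds pairs without clobbering what is already set.
ctx = metadata.AppendToOutgoingContext(ctx, "appended-key", "v2")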
@@ -23,7 +23,6 @@ import (
 	"context"
 	"io"
 	"testing"
-	"time"

 	"google.golang.org/grpc"
 	"google.golang.org/grpc/internal/stubserver"
@@ -57,7 +56,7 @@ func (s) TestRecvBufferPool(t *testing.T) {
 	}
 	defer ss.Stop()

-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	stream, err := ss.Client.FullDuplexCall(ctx)
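For context, TestRecvBufferPool exercises the shared receive buffer pool that was experimental at the time of this commit. A hedged sketch of wiring it up on both ends, assuming the grpc.RecvBufferPool server option and grpc.WithRecvBufferPool dial option of that era:

ss := &stubserver.StubServer{ /* handlers elided */ }
if err := ss.Start(
	[]grpc.ServerOption{grpc.RecvBufferPool(grpc.NewSharedBufferPool())},
	grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
); err != nil {
	t.Fatalf("Error starting endpoint server: %v", err)
}
defer ss.Stop()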
@@ -90,7 +90,7 @@ func (s) TestRetryUnary(t *testing.T) {
 	}
 	for num, tc := range testCases {
 		t.Log("Case", num)
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 		_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
 		cancel()
 		if status.Code(err) != tc.code {
@@ -154,7 +154,7 @@ func (s) TestRetryThrottling(t *testing.T) {
 		{codes.Unavailable, 17}, // tokens = 4.5
 	}
 	for _, tc := range testCases {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 		_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
 		cancel()
 		if status.Code(err) != tc.code {
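The "tokens = 4.5" comment tracks gRPC's client-side retry throttling (gRFC A6): every failure subtracts one token, every success adds token_ratio, and retries are only attempted while tokens stay above half of max_tokens. A sketch of that bookkeeping; the example constants mirror common defaults and are assumptions, not values read from this test's service config:

import "math"

// retryThrottler mirrors the token bucket described in gRFC A6.
type retryThrottler struct {
	tokens, maxTokens, tokenRatio float64 // e.g. maxTokens = 10, tokenRatio = 0.5
}

func (rt *retryThrottler) onFailure() { rt.tokens = math.Max(rt.tokens-1, 0) }

func (rt *retryThrottler) onSuccess() {
	rt.tokens = math.Min(rt.tokens+rt.tokenRatio, rt.maxTokens)
}

// retryAllowed reports whether a retry attempt may be made at all.
func (rt *retryThrottler) retryAllowed() bool { return rt.tokens > rt.maxTokens/2 }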
@@ -429,7 +429,8 @@ func (s) TestRetryStreaming(t *testing.T) {
 		t.Fatalf("Error starting endpoint server: %v", err)
 	}
 	defer ss.Stop()
-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
 	for {
 		if ctx.Err() != nil {
 			t.Fatalf("Timed out waiting for service config update")
@@ -439,15 +440,12 @@ func (s) TestRetryStreaming(t *testing.T) {
 		}
 		time.Sleep(time.Millisecond)
 	}
-	cancel()

 	for _, tc := range testCases {
 		func() {
 			serverOpIter = 0
 			serverOps = tc.serverOps

-			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-			defer cancel()
 			stream, err := ss.Client.FullDuplexCall(ctx)
 			if err != nil {
 				t.Fatalf("%v: Error while creating stream: %v", tc.desc, err)
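The wait loop in the first TestRetryStreaming hunk is a recurring pattern in this commit: poll until a condition holds, bounded by the test-wide deadline instead of an unbounded spin. Condensed, with a placeholder predicate:

ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
for !serviceConfigApplied() { // serviceConfigApplied is a placeholder
	if ctx.Err() != nil {
		t.Fatalf("Timed out waiting for service config update")
	}
	time.Sleep(time.Millisecond)
}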
@@ -225,7 +225,8 @@ func (s) TestRoundRobin_AllServersDown(t *testing.T) {

 	// Failfast RPCs should fail with Unavailable.
 	client := testgrpc.NewTestServiceClient(cc)
-	if _, err := client.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.Unavailable {
+	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
 		t.Fatalf("EmptyCall got err: %v; want Unavailable", err)
 	}
 }
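The round-robin hunk moves a fail-fast RPC onto the test's deadline-bound context. By default RPCs are fail-fast: with no ready backend they return Unavailable immediately rather than blocking. A sketch contrasting the two modes when every server is down, following grpc-go's wait-for-ready semantics:

// Fail-fast (default): fails with Unavailable as soon as no backend is ready.
if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
	t.Fatalf("EmptyCall got err: %v; want Unavailable", err)
}

// Wait-for-ready: blocks for a ready backend, so with all servers down the
// RPC instead ends in DeadlineExceeded when ctx expires.
if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
	t.Fatalf("EmptyCall got err: %v; want DeadlineExceeded", err)
}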
@@ -192,12 +192,12 @@ func testServiceConfigTimeoutTD(t *testing.T, e env) {
 	cc := te.clientConn()
 	tc := testgrpc.NewTestServiceClient(cc)
 	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
-	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
 	}
 	cancel()
-	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
 	}
@@ -227,17 +227,15 @@ func testServiceConfigTimeoutTD(t *testing.T, e env) {
 		t.Fatalf("Timeout when waiting for service config to take effect")
 	}

-	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
+	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
 	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
 	}
-	cancel()
-
-	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
 	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
 	}
-	cancel()
 }

 func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) {
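These hunks work because the effective per-RPC deadline is the tighter of the client's context deadline and the timeout from the service config; with a tiny method timeout configured, even a generous defaultTestTimeout context still produces DeadlineExceeded. The rule, sketched (illustrative only; grpc-go applies this internally when a method config sets a timeout):

// effectiveDeadline picks the sooner of the caller's deadline and the
// deadline implied by the method's service-config timeout.
func effectiveDeadline(clientDeadline time.Time, scTimeout time.Duration) time.Time {
	scDeadline := time.Now().Add(scTimeout)
	if scDeadline.Before(clientDeadline) {
		return scDeadline
	}
	return clientDeadline
}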
@@ -91,7 +91,7 @@ func (s) TestStreamCleanupAfterSendStatus(t *testing.T) {

 	// 1. Make a long living stream RPC. So server's activeStream list is not
 	// empty.
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
 	stream, err := ss.Client.FullDuplexCall(ctx)
 	if err != nil {
@@ -23,7 +23,6 @@ import (
 	"errors"
 	"fmt"
 	"testing"
-	"time"

 	"google.golang.org/grpc"
 	"google.golang.org/grpc/balancer"
@@ -46,7 +45,7 @@ func (p *tsccPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 // TestSubConnEmpty tests that removing all addresses from a SubConn and then
 // re-adding them does not cause a panic and properly reconnects.
 func (s) TestSubConnEmpty(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

 	// sc is the one SubConn used throughout the test. Created on demand and