/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package roundrobin_test

import (
	"context"
	"fmt"
	"net"
	"strings"
	"sync"
	"testing"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer/roundrobin"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal/grpctest"
	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
	"google.golang.org/grpc/status"
	testpb "google.golang.org/grpc/test/grpc_testing"
)

// testMDKey is the metadata key whose values the test server reports back to
// the test, so that per-address metadata attributes can be verified.
const (
	testMDKey = "test-md"
)

type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

// testServer implements TestService. If constructed with a metadata channel,
// its EmptyCall handler forwards the incoming values for testMDKey on that
// channel so the test can inspect them.
type testServer struct {
	testpb.UnimplementedTestServiceServer

	testMDChan chan []string
}

func newTestServer(mdchan bool) *testServer {
	t := &testServer{}
	if mdchan {
		t.testMDChan = make(chan []string, 1)
	}
	return t
}

func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
	if s.testMDChan == nil {
		return &testpb.Empty{}, nil
	}
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return nil, status.Errorf(codes.Internal, "no metadata in context")
	}
	select {
	case s.testMDChan <- md[testMDKey]:
	case <-ctx.Done():
		return nil, ctx.Err()
	}
	return &testpb.Empty{}, nil
}
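
// Note: in this file the metadata observed above is not set explicitly by the
// RPC call sites; TestUpdateAddressAttributes attaches it to the resolver
// address instead. For comparison, a client would normally attach per-call
// metadata like this (a sketch; "client" and the value are hypothetical):
//
//	ctx := metadata.AppendToOutgoingContext(context.Background(), testMDKey, "some-value")
//	res, err := client.EmptyCall(ctx, &testpb.Empty{})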

func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	return nil
}

// test holds the servers started for a single test case so they can all be
// stopped during cleanup.
type test struct {
	servers     []*grpc.Server
	serverImpls []*testServer
	addresses   []string
}

func (t *test) cleanup() {
	for _, s := range t.servers {
		s.Stop()
	}
}

// startTestServers starts count gRPC servers on ephemeral localhost ports and
// records their addresses. If mdchan is true, each server reports incoming
// test metadata on its testMDChan.
func startTestServers(count int, mdchan bool) (_ *test, err error) {
	t := &test{}

	defer func() {
		if err != nil {
			t.cleanup()
		}
	}()
	for i := 0; i < count; i++ {
		lis, err := net.Listen("tcp", "localhost:0")
		if err != nil {
			return nil, fmt.Errorf("failed to listen: %v", err)
		}

		s := grpc.NewServer()
		sImpl := newTestServer(mdchan)
		testpb.RegisterTestServiceServer(s, sImpl)
		t.servers = append(t.servers, s)
		t.serverImpls = append(t.serverImpls, sImpl)
		t.addresses = append(t.addresses, lis.Addr().String())

		go func(s *grpc.Server, l net.Listener) {
			s.Serve(l)
		}(s, lis)
	}

	return t, nil
}
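
// Every test below follows the same pattern: a manual resolver gives the test
// full control over the addresses the ClientConn sees, and address updates are
// pushed with UpdateState. A minimal sketch of the pattern (the scheme and
// address are placeholders):
//
//	r := manual.NewBuilderWithScheme("whatever")
//	cc, err := grpc.Dial(r.Scheme()+":///test.server",
//		grpc.WithTransportCredentials(insecure.NewCredentials()),
//		grpc.WithResolvers(r))
//	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "localhost:1234"}}})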

func (s) TestOneBackend(t *testing.T) {
	r := manual.NewBuilderWithScheme("whatever")

	test, err := startTestServers(1, false)
	if err != nil {
		t.Fatalf("failed to start servers: %v", err)
	}
	defer test.cleanup()

	cc, err := grpc.Dial(r.Scheme()+":///test.server",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()
	testc := testpb.NewTestServiceClient(cc)
	// The first RPC should fail because there's no address.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
	}

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}})
	// The second RPC should succeed.
	if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
	}
}
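
// The WithDefaultServiceConfig option used above is what selects the policy
// under test: with roundrobin.Name ("round_robin") substituted, the JSON
// handed to the channel is
//
//	{"loadBalancingConfig": [{"round_robin":{}}]}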

func (s) TestBackendsRoundRobin(t *testing.T) {
	r := manual.NewBuilderWithScheme("whatever")

	backendCount := 5
	test, err := startTestServers(backendCount, false)
	if err != nil {
		t.Fatalf("failed to start servers: %v", err)
	}
	defer test.cleanup()

	cc, err := grpc.Dial(r.Scheme()+":///test.server",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()
	testc := testpb.NewTestServiceClient(cc)
	// The first RPC should fail because there's no address.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
	}

	var resolvedAddrs []resolver.Address
	for i := 0; i < backendCount; i++ {
		resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]})
	}

	r.UpdateState(resolver.State{Addresses: resolvedAddrs})
	var p peer.Peer
	// Make sure connections to all servers are up.
	for si := 0; si < backendCount; si++ {
		var connected bool
		for i := 0; i < 1000; i++ {
			if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
				t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
			}
			if p.Addr.String() == test.addresses[si] {
				connected = true
				break
			}
			time.Sleep(time.Millisecond)
		}
		if !connected {
			t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si])
		}
	}

	for i := 0; i < 3*backendCount; i++ {
		if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
			t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
		}
		if p.Addr.String() != test.addresses[i%backendCount] {
			t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String())
		}
	}
}
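
// The loop above depends on the round_robin picker cycling through READY
// SubConns in a fixed order once every backend is connected. An equivalent,
// order-insensitive check would count RPCs per peer; a rough sketch, not part
// of the original test:
//
//	counts := make(map[string]int)
//	for i := 0; i < 3*backendCount; i++ {
//		if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
//			t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
//		}
//		counts[p.Addr.String()]++
//	}
//	// Every backend should have been picked exactly 3 times.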

func (s) TestAddressesRemoved(t *testing.T) {
	r := manual.NewBuilderWithScheme("whatever")

	test, err := startTestServers(1, false)
	if err != nil {
		t.Fatalf("failed to start servers: %v", err)
	}
	defer test.cleanup()

	cc, err := grpc.Dial(r.Scheme()+":///test.server",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()
	testc := testpb.NewTestServiceClient(cc)
	// The first RPC should fail because there's no address.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
	}

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}})
	// The second RPC should succeed.
	if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
	}

	r.UpdateState(resolver.State{Addresses: []resolver.Address{}})

	ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel2()
	// Wait for the state to change to transient failure.
	for src := cc.GetState(); src != connectivity.TransientFailure; src = cc.GetState() {
		if !cc.WaitForStateChange(ctx2, src) {
			t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.TransientFailure)
		}
	}

	const msgWant = "produced zero addresses"
	if _, err := testc.EmptyCall(ctx2, &testpb.Empty{}); !strings.Contains(status.Convert(err).Message(), msgWant) {
		t.Fatalf("EmptyCall() = _, %v, want _, Contains(Message(), %q)", err, msgWant)
	}
}
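
// The "produced zero addresses" message above is the channel-level error
// reported when a resolver update carries an empty address list. The test
// matches on the message rather than the status code; a stricter variant (an
// assumption, not what the original asserts) could also require Unavailable:
//
//	if st := status.Convert(err); st.Code() != codes.Unavailable || !strings.Contains(st.Message(), msgWant) {
//		t.Fatalf("EmptyCall() = _, %v, want Unavailable containing %q", err, msgWant)
//	}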

func (s) TestCloseWithPendingRPC(t *testing.T) {
	r := manual.NewBuilderWithScheme("whatever")

	test, err := startTestServers(1, false)
	if err != nil {
		t.Fatalf("failed to start servers: %v", err)
	}
	defer test.cleanup()

	cc, err := grpc.Dial(r.Scheme()+":///test.server",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	testc := testpb.NewTestServiceClient(cc)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// This RPC blocks until cc is closed.
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) == codes.DeadlineExceeded {
				t.Errorf("RPC failed because of deadline after cc was closed; want an error indicating the client connection is closing")
			}
			cancel()
		}()
	}
	cc.Close()
	wg.Wait()
}
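
// Pending RPCs that race with cc.Close() are expected to fail promptly with a
// "the client connection is closing" error rather than hang until their
// deadline, which is why the goroutines above only reject DeadlineExceeded. A
// stricter check might look like this (assuming the error surfaces as
// codes.Canceled, which can vary by release):
//
//	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Canceled {
//		t.Errorf("EmptyCall() after Close() = %v, want codes.Canceled", err)
//	}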

func (s) TestNewAddressWhileBlocking(t *testing.T) {
	r := manual.NewBuilderWithScheme("whatever")

	test, err := startTestServers(1, false)
	if err != nil {
		t.Fatalf("failed to start servers: %v", err)
	}
	defer test.cleanup()

	cc, err := grpc.Dial(r.Scheme()+":///test.server",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()
	testc := testpb.NewTestServiceClient(cc)
	// The first RPC should fail because there's no address.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
	}

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}})
	// The second RPC should succeed.
	ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
	}

	r.UpdateState(resolver.State{Addresses: []resolver.Address{}})

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// This RPC blocks until the resolver pushes a new address.
			testc.EmptyCall(context.Background(), &testpb.Empty{})
		}()
	}
	time.Sleep(50 * time.Millisecond)
	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}})
	wg.Wait()
}
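
// The RPCs issued above are expected to block until the resolver pushes an
// address again, since there is no backend to pick. A call can opt into this
// queueing behavior explicitly (and avoid fail-fast errors during transient
// failure) with the wait-for-ready call option:
//
//	res, err := testc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))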

func (s) TestOneServerDown(t *testing.T) {
	r := manual.NewBuilderWithScheme("whatever")

	backendCount := 3
	test, err := startTestServers(backendCount, false)
	if err != nil {
		t.Fatalf("failed to start servers: %v", err)
	}
	defer test.cleanup()

	cc, err := grpc.Dial(r.Scheme()+":///test.server",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()
	testc := testpb.NewTestServiceClient(cc)
	// The first RPC should fail because there's no address.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
	}

	var resolvedAddrs []resolver.Address
	for i := 0; i < backendCount; i++ {
		resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]})
	}

	r.UpdateState(resolver.State{Addresses: resolvedAddrs})
	var p peer.Peer
	// Make sure connections to all servers are up.
	for si := 0; si < backendCount; si++ {
		var connected bool
		for i := 0; i < 1000; i++ {
			if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
				t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
			}
			if p.Addr.String() == test.addresses[si] {
				connected = true
				break
			}
			time.Sleep(time.Millisecond)
		}
		if !connected {
			t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si])
		}
	}

	for i := 0; i < 3*backendCount; i++ {
		if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
			t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
		}
		if p.Addr.String() != test.addresses[i%backendCount] {
			t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String())
		}
	}

	// Stop one server; RPCs should round-robin among the remaining servers.
	backendCount--
	test.servers[backendCount].Stop()
	// Loop until we see server[backendCount-1] twice without seeing
	// server[backendCount].
	var targetSeen int
	for i := 0; i < 1000; i++ {
		if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
			targetSeen = 0
			t.Logf("EmptyCall() = _, %v, want _, <nil>", err)
			// Due to a race, this RPC could possibly get the connection that
			// was closing, and this RPC may fail. Keep trying when this
			// happens.
			continue
		}
		switch p.Addr.String() {
		case test.addresses[backendCount-1]:
			targetSeen++
		case test.addresses[backendCount]:
			// Reset targetSeen if the peer is the stopped server.
			targetSeen = 0
		}
		// Break to make sure the last picked address is
		// server[backendCount-1], so the following for loop won't be flaky.
		if targetSeen >= 2 {
			break
		}
	}
	if targetSeen != 2 {
		t.Fatal("Failed to see server[backendCount-1] twice without seeing server[backendCount]")
	}
	for i := 0; i < 3*backendCount; i++ {
		if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
			t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
		}
		if p.Addr.String() != test.addresses[i%backendCount] {
			t.Errorf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String())
		}
	}
}

func (s) TestAllServersDown(t *testing.T) {
	r := manual.NewBuilderWithScheme("whatever")

	backendCount := 3
	test, err := startTestServers(backendCount, false)
	if err != nil {
		t.Fatalf("failed to start servers: %v", err)
	}
	defer test.cleanup()

	cc, err := grpc.Dial(r.Scheme()+":///test.server",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()
	testc := testpb.NewTestServiceClient(cc)
	// The first RPC should fail because there's no address.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
	}

	var resolvedAddrs []resolver.Address
	for i := 0; i < backendCount; i++ {
		resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]})
	}

	r.UpdateState(resolver.State{Addresses: resolvedAddrs})
	var p peer.Peer
	// Make sure connections to all servers are up.
	for si := 0; si < backendCount; si++ {
		var connected bool
		for i := 0; i < 1000; i++ {
			if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
				t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
			}
			if p.Addr.String() == test.addresses[si] {
				connected = true
				break
			}
			time.Sleep(time.Millisecond)
		}
		if !connected {
			t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si])
		}
	}

	for i := 0; i < 3*backendCount; i++ {
		if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil {
			t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
		}
		if p.Addr.String() != test.addresses[i%backendCount] {
			t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String())
		}
	}

	// All servers are stopped; fail-fast RPCs should fail with Unavailable.
	for i := 0; i < backendCount; i++ {
		test.servers[i].Stop()
	}
	time.Sleep(100 * time.Millisecond)
	for i := 0; i < 1000; i++ {
		if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) == codes.Unavailable {
			return
		}
		time.Sleep(time.Millisecond)
	}
	t.Fatalf("Fail-fast RPCs didn't fail with Unavailable after all servers were stopped")
}
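
// Unlike the blocking test above, these RPCs are fail-fast (the default), so
// once every backend is stopped they should surface codes.Unavailable instead
// of queueing; the polling loop tolerates the short window in which the
// channel is still tearing down connections. A compact way to express the
// same wait (a sketch, not the original code) is to poll under a bounded
// context:
//
//	var err error
//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//	defer cancel()
//	for status.Code(err) != codes.Unavailable && ctx.Err() == nil {
//		_, err = testc.EmptyCall(context.Background(), &testpb.Empty{})
//	}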

func (s) TestUpdateAddressAttributes(t *testing.T) {
	r := manual.NewBuilderWithScheme("whatever")

	test, err := startTestServers(1, true)
	if err != nil {
		t.Fatalf("failed to start servers: %v", err)
	}
	defer test.cleanup()

	cc, err := grpc.Dial(r.Scheme()+":///test.server",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()
	testc := testpb.NewTestServiceClient(cc)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// The first RPC should fail because there's no address.
	ctxShort, cancel2 := context.WithTimeout(ctx, time.Millisecond)
	defer cancel2()
	if _, err := testc.EmptyCall(ctxShort, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
	}

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}})
	// The second RPC should succeed.
	if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
	}
	// The address carried no metadata attributes, so the server should have
	// seen no values for testMDKey.
	md1 := <-test.serverImpls[0].testMDChan
	if md1 != nil {
		t.Fatalf("got md: %v, want empty metadata", md1)
	}

	const testMDValue = "test-md-value"
	// Update the metadata attached to the address.
	r.UpdateState(resolver.State{Addresses: []resolver.Address{
		imetadata.Set(resolver.Address{Addr: test.addresses[0]}, metadata.Pairs(testMDKey, testMDValue)),
	}})

	// A future RPC should send the metadata with it. The update doesn't
	// necessarily happen synchronously, so we wait some time before failing if
	// some RPCs do not contain it.
	for {
		if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
			if status.Code(err) == codes.DeadlineExceeded {
				t.Fatalf("timed out waiting for metadata in response")
			}
			t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err)
		}
		md2 := <-test.serverImpls[0].testMDChan
		if len(md2) == 1 && md2[0] == testMDValue {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
}
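
// imetadata.Set is an internal helper that attaches a metadata.MD to a
// resolver.Address so that connections to that address send it with each RPC.
// Because it lives under internal/, it cannot be imported from outside the
// grpc module. The update exercised above is roughly (the address below is a
// placeholder):
//
//	addr := imetadata.Set(resolver.Address{Addr: "localhost:1234"}, metadata.Pairs(testMDKey, testMDValue))
//	r.UpdateState(resolver.State{Addresses: []resolver.Address{addr}})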