mirror of https://github.com/grpc/grpc-go.git
balancer/leastrequest: Cache atomic load and also add concurrent rpc test (#6602)

parent 8eb4ac4c15
commit 1e0d82e9f0
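In short: the commit adds an end-to-end test that drives the least request balancer with many concurrent RPCs, and reworks the picker's Pick loop so each sampled subchannel's numRPCs counter is loaded atomically once and cached for the comparison, instead of re-loading the current minimum's counter on every iteration.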
balancer/leastrequest/balancer_test.go
@@ -22,6 +22,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"strings"
+	"sync"
 	"testing"
 	"time"
 
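The only import change in the test file is "sync", which provides the sync.WaitGroup used to coordinate the goroutines in the new test added below.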
@@ -455,3 +456,57 @@ func (s) TestLeastRequestPersistsCounts(t *testing.T) {
 		t.Fatalf("addr count (-got:, +want): %v", diff)
 	}
 }
+
+// TestConcurrentRPCs tests concurrent RPCs on the least request balancer. It
+// configures a channel with a least request balancer as the top level balancer,
+// and makes 100 RPCs asynchronously. This makes sure no race conditions happen
+// in this scenario.
+func (s) TestConcurrentRPCs(t *testing.T) {
+	addresses := setupBackends(t)
+
+	mr := manual.NewBuilderWithScheme("lr-e2e")
+	defer mr.Close()
+
+	// Configure least request as top level balancer of channel.
+	lrscJSON := `
+{
+  "loadBalancingConfig": [
+    {
+      "least_request_experimental": {
+        "choiceCount": 2
+      }
+    }
+  ]
+}`
+	sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(lrscJSON)
+	firstTwoAddresses := []resolver.Address{
+		{Addr: addresses[0]},
+		{Addr: addresses[1]},
+	}
+	mr.InitialState(resolver.State{
+		Addresses:     firstTwoAddresses,
+		ServiceConfig: sc,
+	})
+
+	cc, err := grpc.Dial(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		t.Fatalf("grpc.Dial() failed: %v", err)
+	}
+	defer cc.Close()
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	testServiceClient := testgrpc.NewTestServiceClient(cc)
+
+	var wg sync.WaitGroup
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for j := 0; j < 5; j++ {
+				testServiceClient.EmptyCall(ctx, &testpb.Empty{})
+			}
+		}()
+	}
+	wg.Wait()
+
+}
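A note on running it: since the test exists to surface data races in the picker, it is most informative under Go's race detector, e.g. go test -race ./balancer/leastrequest/... from the repository root (the exact -run selector depends on how the grpctest harness registers (s) methods, so the package-wide form is the safe invocation).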
balancer/leastrequest/leastrequest.go
@@ -155,15 +155,14 @@ type picker struct {
 
 func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
 	var pickedSC *scWithRPCCount
+	var pickedSCNumRPCs int32
 	for i := 0; i < int(p.choiceCount); i++ {
 		index := grpcranduint32() % uint32(len(p.subConns))
 		sc := p.subConns[index]
-		if pickedSC == nil {
-			pickedSC = &sc
-			continue
-		}
-		if sc.numRPCs.Load() < pickedSC.numRPCs.Load() {
+		n := sc.numRPCs.Load()
+		if pickedSC == nil || n < pickedSCNumRPCs {
 			pickedSC = &sc
+			pickedSCNumRPCs = n
 		}
 	}
 	// "The counter for a subchannel should be atomically incremented by one
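To make the before/after concrete: the old loop loaded sc.numRPCs and pickedSC.numRPCs atomically on every comparison, so the value it had "remembered" as the minimum could have changed by the time it was compared again; the new loop performs one atomic load per sampled subchannel and caches the result in n and pickedSCNumRPCs. Below is a minimal, self-contained sketch of the cached-load selection, with a hypothetical subConn type and pickLeastLoaded helper standing in for the balancer's actual scWithRPCCount and Pick:

package main

import (
	"fmt"
	"math/rand"
	"sync/atomic"
)

// subConn is a hypothetical stand-in for the balancer's scWithRPCCount: a
// name plus the per-subchannel outstanding-RPC counter.
type subConn struct {
	name    string
	numRPCs atomic.Int32
}

// pickLeastLoaded mirrors the post-commit loop: sample choiceCount random
// subchannels and keep the one whose counter was lowest. Each counter is
// loaded exactly once; the loaded value is cached in n and pickedNumRPCs, so
// the comparison never mixes two different snapshots of the same counter.
func pickLeastLoaded(subConns []*subConn, choiceCount int) *subConn {
	var picked *subConn
	var pickedNumRPCs int32
	for i := 0; i < choiceCount; i++ {
		sc := subConns[rand.Intn(len(subConns))]
		n := sc.numRPCs.Load()
		if picked == nil || n < pickedNumRPCs {
			picked = sc
			pickedNumRPCs = n
		}
	}
	return picked
}

func main() {
	scs := []*subConn{{name: "a"}, {name: "b"}, {name: "c"}}
	scs[0].numRPCs.Store(3)
	scs[1].numRPCs.Store(1)
	scs[2].numRPCs.Store(2)
	// With two random choices, the pick is the less loaded of the pair.
	fmt.Println(pickLeastLoaded(scs, 2).name)
}

Besides making the comparison self-consistent, caching drops the redundant atomic load of the running minimum on every iteration, which is the "Cache atomic load" half of the commit title.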