mirror of https://github.com/grpc/grpc-go.git
circuit breaking: keep max_count per picker, instead of globally, and add support in cluster_impl balancer (#4203)
Also changed the circuit breaking counter implementation to move max_count into the picker, because this is how cluster_impl is designed. The EDS implementation is also modified to keep max_count in the picker.
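The gist of the change, as a minimal sketch: the shared counter only tracks in-flight requests, and the picker that owns the configured limit passes it to StartRequest on every pick. The simplified requestsCounter type and names below are illustrative only, not the actual grpc-go API (the real type is ServiceRequestsCounter in xds/internal/client, shown in the diff below).

package main

import (
	"fmt"
	"sync/atomic"
)

// requestsCounter is a sketch of a counter whose max lives with the caller
// (the picker), not with the counter itself.
type requestsCounter struct {
	numRequests uint32
}

// startRequest admits a request unless the in-flight count already reached
// max, which is supplied by the picker holding the configured limit.
func (c *requestsCounter) startRequest(max uint32) error {
	if atomic.LoadUint32(&c.numRequests) >= max {
		return fmt.Errorf("max requests %v exceeded", max)
	}
	atomic.AddUint32(&c.numRequests, 1)
	return nil
}

// endRequest releases one in-flight request.
func (c *requestsCounter) endRequest() {
	atomic.AddUint32(&c.numRequests, ^uint32(0)) // atomic decrement
}

func main() {
	c := &requestsCounter{}
	var max uint32 = 1                // the picker carries this value from the LB config
	fmt.Println(c.startRequest(max))  // <nil>: first request admitted
	fmt.Println(c.startRequest(max))  // error: over the picker's max
	c.endRequest()
}

Keeping max out of the shared counter is what lets each picker (cluster_impl, EDS) apply its own configured limit while still sharing one per-service in-flight count.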
This commit is contained in:
parent 425d405f39
commit 1b75f7144d
@@ -34,7 +34,6 @@ import (
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/serviceconfig"
 	"google.golang.org/grpc/xds/internal/balancer/edsbalancer"
-	"google.golang.org/grpc/xds/internal/client"
 	xdsclient "google.golang.org/grpc/xds/internal/client"
 	"google.golang.org/grpc/xds/internal/client/bootstrap"
 )
@@ -328,8 +327,6 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) {
 		return
 	}
 
-	client.SetMaxRequests(update.cds.ServiceName, update.cds.MaxRequests)
-
 	// The first good update from the watch API leads to the instantiation of an
 	// edsBalancer. Further updates/errors are propagated to the existing
 	// edsBalancer.
@@ -342,7 +339,10 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) {
 		b.edsLB = edsLB
 		b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName)
 	}
-	lbCfg := &edsbalancer.EDSConfig{EDSServiceName: update.cds.ServiceName}
+	lbCfg := &edsbalancer.EDSConfig{
+		EDSServiceName:        update.cds.ServiceName,
+		MaxConcurrentRequests: update.cds.MaxRequests,
+	}
 	if update.cds.EnableLRS {
 		// An empty string here indicates that the edsBalancer should use the
 		// same xDS server for load reporting as it does for EDS
@@ -233,7 +233,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) {
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
 	cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName}
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil {
@@ -289,7 +289,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) {
 	// newEDSBalancer function as part of test setup. No security config is
 	// passed to the CDS balancer as part of this update.
 	cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName}
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil {
@@ -445,7 +445,7 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) {
 	// create a new EDS balancer. The fake EDS balancer created above will be
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil {
 		t.Fatal(err)
 	}
@@ -479,7 +479,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) {
 	// create a new EDS balancer. The fake EDS balancer created above will be
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil {
@@ -510,7 +510,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) {
 	// create a new EDS balancer. The fake EDS balancer created above will be
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil {
@@ -560,7 +560,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) {
 	// create a new EDS balancer. The fake EDS balancer created above will be
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil {
@@ -637,7 +637,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) {
 			RootInstanceName: "default1",
 		},
 	}
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil {
@@ -193,8 +193,11 @@ func cdsCCS(cluster string) balancer.ClientConnState {
 
 // edsCCS is a helper function to construct a good update passed from the
 // cdsBalancer to the edsBalancer.
-func edsCCS(service string, enableLRS bool) balancer.ClientConnState {
-	lbCfg := &edsbalancer.EDSConfig{EDSServiceName: service}
+func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState {
+	lbCfg := &edsbalancer.EDSConfig{
+		EDSServiceName:        service,
+		MaxConcurrentRequests: countMax,
+	}
 	if enableLRS {
 		lbCfg.LrsLoadReportingServerName = new(string)
 	}
@@ -350,12 +353,12 @@ func (s) TestHandleClusterUpdate(t *testing.T) {
 		{
 			name:      "happy-case-with-lrs",
 			cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName, EnableLRS: true},
-			wantCCS:   edsCCS(serviceName, true),
+			wantCCS:   edsCCS(serviceName, nil, true),
 		},
 		{
 			name:      "happy-case-without-lrs",
 			cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName},
-			wantCCS:   edsCCS(serviceName, false),
+			wantCCS:   edsCCS(serviceName, nil, false),
 		},
 	}
 
@@ -423,7 +426,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) {
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
 	cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName}
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil {
 		t.Fatal(err)
 	}
@@ -508,7 +511,7 @@ func (s) TestResolverError(t *testing.T) {
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
 	cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName}
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil {
 		t.Fatal(err)
 	}
@@ -557,7 +560,7 @@ func (s) TestUpdateSubConnState(t *testing.T) {
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
 	cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName}
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil {
@@ -592,7 +595,7 @@ func (s) TestCircuitBreaking(t *testing.T) {
 	// the service's counter with the new max requests.
 	var maxRequests uint32 = 1
 	cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName, MaxRequests: &maxRequests}
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, &maxRequests, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil {
@@ -602,10 +605,10 @@ func (s) TestCircuitBreaking(t *testing.T) {
 	// Since the counter's max requests was set to 1, the first request should
 	// succeed and the second should fail.
 	counter := client.GetServiceRequestsCounter(serviceName)
-	if err := counter.StartRequest(); err != nil {
+	if err := counter.StartRequest(maxRequests); err != nil {
 		t.Fatal(err)
 	}
-	if err := counter.StartRequest(); err == nil {
+	if err := counter.StartRequest(maxRequests); err == nil {
 		t.Fatal("unexpected success on start request over max")
 	}
 	counter.EndRequest()
@@ -625,7 +628,7 @@ func (s) TestClose(t *testing.T) {
 	// returned to the CDS balancer, because we have overridden the
 	// newEDSBalancer function as part of test setup.
 	cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName}
-	wantCCS := edsCCS(serviceName, false)
+	wantCCS := edsCCS(serviceName, nil, false)
 	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer ctxCancel()
 	if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil {
@@ -58,9 +58,9 @@ func init() {
 	newRandomWRR = testutils.NewTestWRR
 }
 
-// TestDrop verifies that the balancer correctly drops the picks, and that
-// the drops are reported.
-func TestDrop(t *testing.T) {
+// TestDropByCategory verifies that the balancer correctly drops the picks, and
+// that the drops are reported.
+func TestDropByCategory(t *testing.T) {
 	xdsC := fakeclient.NewClient()
 	oldNewXDSClient := newXDSClient
 	newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil }
@@ -214,3 +214,113 @@ func TestDrop(t *testing.T) {
 		t.Fatalf("got unexpected reports, diff (-got, +want): %v", diff)
 	}
 }
+
+// TestDropCircuitBreaking verifies that the balancer correctly drops the picks
+// due to circuit breaking, and that the drops are reported.
+func TestDropCircuitBreaking(t *testing.T) {
+	xdsC := fakeclient.NewClient()
+	oldNewXDSClient := newXDSClient
+	newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil }
+	defer func() { newXDSClient = oldNewXDSClient }()
+
+	builder := balancer.Get(clusterImplName)
+	cc := testutils.NewTestClientConn(t)
+	b := builder.Build(cc, balancer.BuildOptions{})
+	defer b.Close()
+
+	var maxRequest uint32 = 50
+	if err := b.UpdateClientConnState(balancer.ClientConnState{
+		ResolverState: resolver.State{
+			Addresses: testBackendAddrs,
+		},
+		BalancerConfig: &lbConfig{
+			Cluster:                    testClusterName,
+			EDSServiceName:             testServiceName,
+			LRSLoadReportingServerName: newString(testLRSServerName),
+			MaxConcurrentRequests:      &maxRequest,
+			ChildPolicy: &internalserviceconfig.BalancerConfig{
+				Name: roundrobin.Name,
+			},
+		},
+	}); err != nil {
+		t.Fatalf("unexpected error from UpdateClientConnState: %v", err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+
+	got, err := xdsC.WaitForReportLoad(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.ReportLoad failed with error: %v", err)
+	}
+	if got.Server != testLRSServerName {
+		t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName)
+	}
+
+	sc1 := <-cc.NewSubConnCh
+	b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
+	// This should get the connecting picker.
+	p0 := <-cc.NewPickerCh
+	for i := 0; i < 10; i++ {
+		_, err := p0.Pick(balancer.PickInfo{})
+		if err != balancer.ErrNoSubConnAvailable {
+			t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable)
+		}
+	}
+
+	b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready})
+	// Test pick with one backend.
+	dones := []func(){}
+	p1 := <-cc.NewPickerCh
+	const rpcCount = 100
+	for i := 0; i < rpcCount; i++ {
+		gotSCSt, err := p1.Pick(balancer.PickInfo{})
+		if i < 50 && err != nil {
+			t.Errorf("The first 50%% picks should be non-drops, got error %v", err)
+		} else if i > 50 && err == nil {
+			t.Errorf("The second 50%% picks should be drops, got error <nil>")
+		}
+		dones = append(dones, func() {
+			if gotSCSt.Done != nil {
+				gotSCSt.Done(balancer.DoneInfo{})
+			}
+		})
+	}
+	for _, done := range dones {
+		done()
+	}
+
+	dones = []func(){}
+	// Pick without drops.
+	for i := 0; i < 50; i++ {
+		gotSCSt, err := p1.Pick(balancer.PickInfo{})
+		if err != nil {
+			t.Errorf("The third 50%% picks should be non-drops, got error %v", err)
+		}
+		dones = append(dones, func() {
+			if gotSCSt.Done != nil {
+				gotSCSt.Done(balancer.DoneInfo{})
+			}
+		})
+	}
+	for _, done := range dones {
+		done()
+	}
+
+	// Dump load data from the store and compare with expected counts.
+	loadStore := xdsC.LoadStore()
+	if loadStore == nil {
+		t.Fatal("loadStore is nil in xdsClient")
+	}
+
+	wantStatsData0 := []*load.Data{{
+		Cluster:    testClusterName,
+		Service:    testServiceName,
+		TotalDrops: uint64(maxRequest),
+	}}
+
+	gotStatsData0 := loadStore.Stats([]string{testClusterName})
+	if diff := cmp.Diff(gotStatsData0, wantStatsData0, cmpOpts); diff != "" {
+		t.Fatalf("got unexpected drop reports, diff (-got, +want): %v", diff)
+	}
+}
@@ -38,8 +38,8 @@ import (
 )
 
 const (
 	clusterImplName = "xds_cluster_impl_experimental"
-	// TODO: define defaultRequestCountMax = 1024
+	defaultRequestCountMax = 1024
 )
 
 func init() {
@@ -52,11 +52,12 @@ type clusterImplBB struct{}
 
 func (clusterImplBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer {
 	b := &clusterImplBalancer{
 		ClientConn:      cc,
 		bOpts:           bOpts,
 		closed:          grpcsync.NewEvent(),
 		loadWrapper:     loadstore.NewWrapper(),
 		pickerUpdateCh:  buffer.NewUnbounded(),
+		requestCountMax: defaultRequestCountMax,
 	}
 	b.logger = prefixLogger(b)
 
@@ -105,10 +106,11 @@ type clusterImplBalancer struct {
 	// childState/drops/requestCounter can only be accessed in run(). And run()
 	// is the only goroutine that sends picker to the parent ClientConn. All
 	// requests to update picker need to be sent to pickerUpdateCh.
 	childState balancer.State
 	drops      []*dropper
-	// TODO: add serviceRequestCount and maxRequestCount for circuit breaking.
-	pickerUpdateCh *buffer.Unbounded
+	requestCounter  *xdsclient.ServiceRequestsCounter
+	requestCountMax uint32
+	pickerUpdateCh  *buffer.Unbounded
 }
 
 // updateLoadStore checks the config for load store, and decides whether it
@@ -198,19 +200,28 @@ func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState
 		updatePicker = true
 	}
 
-	// TODO: compare cluster name. And update picker if it's changed, because
-	// circuit breaking's stream counter will be different.
-	//
-	// Set `updatePicker` to manually update the picker.
-	// TODO: compare upper bound of stream count. And update picker if it's
-	// changed. This is also for circuit breaking.
-	//
-	// Set `updatePicker` to manually update the picker.
+	// Compare cluster name. And update picker if it's changed, because circuit
+	// breaking's stream counter will be different.
+	if cib.config == nil || cib.config.Cluster != newConfig.Cluster {
+		cib.requestCounter = xdsclient.GetServiceRequestsCounter(newConfig.Cluster)
+		updatePicker = true
+	}
+	// Compare upper bound of stream count. And update picker if it's changed.
+	// This is also for circuit breaking.
+	var newRequestCountMax uint32 = 1024
+	if newConfig.MaxConcurrentRequests != nil {
+		newRequestCountMax = *newConfig.MaxConcurrentRequests
+	}
+	if cib.requestCountMax != newRequestCountMax {
+		cib.requestCountMax = newRequestCountMax
+		updatePicker = true
+	}
 
 	if updatePicker {
 		cib.pickerUpdateCh.Put(&dropConfigs{
 			drops: cib.drops,
+			requestCounter:  cib.requestCounter,
+			requestCountMax: cib.requestCountMax,
 		})
 	}
 
@@ -280,7 +291,9 @@ func (cib *clusterImplBalancer) UpdateState(state balancer.State) {
 }
 
 type dropConfigs struct {
 	drops []*dropper
+	requestCounter  *xdsclient.ServiceRequestsCounter
+	requestCountMax uint32
 }
 
 func (cib *clusterImplBalancer) run() {
@@ -293,15 +306,19 @@
 			cib.childState = u
 			cib.ClientConn.UpdateState(balancer.State{
 				ConnectivityState: cib.childState.ConnectivityState,
-				Picker:            newDropPicker(cib.childState, cib.drops, cib.loadWrapper),
+				Picker: newDropPicker(cib.childState, &dropConfigs{
+					drops:           cib.drops,
+					requestCounter:  cib.requestCounter,
+					requestCountMax: cib.requestCountMax,
+				}, cib.loadWrapper),
 			})
 		case *dropConfigs:
 			cib.drops = u.drops
-			// cib.requestCounter = u.requestCounter
+			cib.requestCounter = u.requestCounter
 			if cib.childState.Picker != nil {
 				cib.ClientConn.UpdateState(balancer.State{
 					ConnectivityState: cib.childState.ConnectivityState,
-					Picker:            newDropPicker(cib.childState, cib.drops, cib.loadWrapper),
+					Picker:            newDropPicker(cib.childState, u, cib.loadWrapper),
 				})
 			}
 		}
@@ -24,6 +24,7 @@ import (
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/internal/wrr"
 	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/xds/internal/client"
 	"google.golang.org/grpc/xds/internal/client/load"
 )
 
@@ -72,14 +73,17 @@ type dropPicker struct {
 	drops     []*dropper
 	s         balancer.State
 	loadStore loadReporter
-	// TODO: add serviceRequestCount and maxRequestCount for circuit breaking.
+	counter   *client.ServiceRequestsCounter
+	countMax  uint32
 }
 
-func newDropPicker(s balancer.State, drops []*dropper, loadStore load.PerClusterReporter) *dropPicker {
+func newDropPicker(s balancer.State, config *dropConfigs, loadStore load.PerClusterReporter) *dropPicker {
 	return &dropPicker{
-		drops:     drops,
+		drops:     config.drops,
 		s:         s,
 		loadStore: loadStore,
+		counter:   config.requestCounter,
+		countMax:  config.requestCountMax,
 	}
 }
 
@@ -98,7 +102,30 @@ func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 			return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped")
 		}
 	}
-	// TODO: support circuit breaking, check if d.maxRequestCount >=
-	// d.counter.StartRequestWithMax().
+	if d.counter != nil {
+		if err := d.counter.StartRequest(d.countMax); err != nil {
+			// Drops by circuit breaking are reported with empty category. They
+			// will be reported only in total drops, but not in per category.
+			if d.loadStore != nil {
+				d.loadStore.CallDropped("")
+			}
+			return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error())
+		}
+		pr, err := d.s.Picker.Pick(info)
+		if err != nil {
+			d.counter.EndRequest()
+			return pr, err
+		}
+		oldDone := pr.Done
+		pr.Done = func(doneInfo balancer.DoneInfo) {
+			d.counter.EndRequest()
+			if oldDone != nil {
+				oldDone(doneInfo)
+			}
+		}
+		return pr, err
+	}
+
 	return d.s.Picker.Pick(info)
 }
@@ -38,6 +38,15 @@ type EDSConfig struct {
 	// Name to use in EDS query. If not present, defaults to the server
 	// name from the target URI.
 	EDSServiceName string
+	// MaxConcurrentRequests is the max number of concurrent request allowed for
+	// this service. If unset, default value 1024 is used.
+	//
+	// Note that this is not defined in the service config proto. And the reason
+	// is, we are dropping EDS and moving the features into cluster_impl. But in
+	// the mean time, to keep things working, we need to add this field. And it
+	// should be fine to add this extra field here, because EDS is only used in
+	// CDS today, so we have full control.
+	MaxConcurrentRequests *uint32
 	// LRS server to send load reports to. If not present, load reporting
 	// will be disabled. If set to the empty string, load reporting will
 	// be sent to the same server that we obtained CDS data from.
@@ -51,6 +60,7 @@ type edsConfigJSON struct {
 	ChildPolicy                []*loadBalancingConfig
 	FallbackPolicy             []*loadBalancingConfig
 	EDSServiceName             string
+	MaxConcurrentRequests      *uint32
 	LRSLoadReportingServerName *string
 }
 
@@ -64,6 +74,7 @@ func (l *EDSConfig) UnmarshalJSON(data []byte) error {
 	}
 
 	l.EDSServiceName = configJSON.EDSServiceName
+	l.MaxConcurrentRequests = configJSON.MaxConcurrentRequests
 	l.LrsLoadReportingServerName = configJSON.LRSLoadReportingServerName
 
 	for _, lbcfg := range configJSON.ChildPolicy {
@@ -113,9 +113,9 @@ type edsBalancerImplInterface interface {
 	handleSubConnStateChange(sc balancer.SubConn, state connectivity.State)
 	// updateState handle a balancer state update from the priority.
 	updateState(priority priorityType, s balancer.State)
-	// updateServiceRequestsCounter updates the service requests counter to the
+	// updateServiceRequestsConfig updates the service requests counter to the
 	// one for the given service name.
-	updateServiceRequestsCounter(serviceName string)
+	updateServiceRequestsConfig(serviceName string, max *uint32)
 	// close closes the eds balancer.
 	close()
 }
@@ -215,7 +215,7 @@ func (x *edsBalancer) handleGRPCUpdate(update interface{}) {
 			x.logger.Warningf("failed to update xDS client: %v", err)
 		}
 
-		x.edsImpl.updateServiceRequestsCounter(cfg.EDSServiceName)
+		x.edsImpl.updateServiceRequestsConfig(cfg.EDSServiceName, cfg.MaxConcurrentRequests)
 
 		// We will update the edsImpl with the new child policy, if we got a
 		// different one.
@@ -45,6 +45,8 @@ import (
 // TODO: make this a environment variable?
 var defaultPriorityInitTimeout = 10 * time.Second
 
+const defaultServiceRequestCountMax = 1024
+
 type localityConfig struct {
 	weight uint32
 	addrs  []resolver.Address
@@ -101,6 +103,7 @@ type edsBalancerImpl struct {
 	drops                  []*dropper
 	innerState             balancer.State // The state of the picker without drop support.
 	serviceRequestsCounter *client.ServiceRequestsCounter
+	serviceRequestCountMax uint32
 }
 
 // newEDSBalancerImpl create a new edsBalancerImpl.
@@ -114,9 +117,10 @@ func newEDSBalancerImpl(cc balancer.ClientConn, bOpts balancer.BuildOptions, enq
 
 		enqueueChildBalancerStateUpdate: enqueueState,
 
 		priorityToLocalities:   make(map[priorityType]*balancerGroupWithConfig),
 		priorityToState:        make(map[priorityType]*balancer.State),
 		subConnToPriority:      make(map[balancer.SubConn]priorityType),
+		serviceRequestCountMax: defaultServiceRequestCountMax,
 	}
 	// Don't start balancer group here. Start it when handling the first EDS
 	// response. Otherwise the balancer group will be started with round-robin,
@@ -181,7 +185,7 @@ func (edsImpl *edsBalancerImpl) updateDrops(dropConfig []xdsclient.OverloadDropC
 		// Update picker with old inner picker, new drops.
 		edsImpl.cc.UpdateState(balancer.State{
 			ConnectivityState: edsImpl.innerState.ConnectivityState,
-			Picker:            newDropPicker(edsImpl.innerState.Picker, newDrops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter)},
+			Picker:            newDropPicker(edsImpl.innerState.Picker, newDrops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)},
 		)
 	}
 	edsImpl.pickerMu.Unlock()
@@ -410,14 +414,17 @@ func (edsImpl *edsBalancerImpl) handleSubConnStateChange(sc balancer.SubConn, s
 	}
 }
 
-// updateConfig handles changes to the circuit breaking configuration.
-func (edsImpl *edsBalancerImpl) updateServiceRequestsCounter(serviceName string) {
+// updateServiceRequestsConfig handles changes to the circuit breaking configuration.
+func (edsImpl *edsBalancerImpl) updateServiceRequestsConfig(serviceName string, max *uint32) {
 	if !env.CircuitBreakingSupport {
 		return
 	}
 	if edsImpl.serviceRequestsCounter == nil || edsImpl.serviceRequestsCounter.ServiceName != serviceName {
 		edsImpl.serviceRequestsCounter = client.GetServiceRequestsCounter(serviceName)
 	}
+	if max != nil {
+		edsImpl.serviceRequestCountMax = *max
+	}
 }
 
 // updateState first handles priority, and then wraps picker in a drop picker
@@ -434,7 +441,7 @@ func (edsImpl *edsBalancerImpl) updateState(priority priorityType, s balancer.St
 		defer edsImpl.pickerMu.Unlock()
 		edsImpl.innerState = s
 		// Don't reset drops when it's a state change.
-		edsImpl.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: newDropPicker(s.Picker, edsImpl.drops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter)})
+		edsImpl.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: newDropPicker(s.Picker, edsImpl.drops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)})
 	}
 }
 
@@ -487,14 +494,16 @@ type dropPicker struct {
 	p         balancer.Picker
 	loadStore load.PerClusterReporter
 	counter   *client.ServiceRequestsCounter
+	countMax  uint32
 }
 
-func newDropPicker(p balancer.Picker, drops []*dropper, loadStore load.PerClusterReporter, counter *client.ServiceRequestsCounter) *dropPicker {
+func newDropPicker(p balancer.Picker, drops []*dropper, loadStore load.PerClusterReporter, counter *client.ServiceRequestsCounter, countMax uint32) *dropPicker {
 	return &dropPicker{
 		drops:     drops,
 		p:         p,
 		loadStore: loadStore,
 		counter:   counter,
+		countMax:  countMax,
 	}
 }
 
@@ -517,7 +526,7 @@ func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 			return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped")
 		}
 	}
 	if d.counter != nil {
-		if err := d.counter.StartRequest(); err != nil {
+		if err := d.counter.StartRequest(d.countMax); err != nil {
 			// Drops by circuit breaking are reported with empty category. They
 			// will be reported only in total drops, but not in per category.
 			if d.loadStore != nil {
@@ -579,9 +579,8 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) {
 	cc := testutils.NewTestClientConn(t)
 	edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil)
 	edsb.enqueueChildBalancerStateUpdate = edsb.updateState
-	edsb.updateServiceRequestsCounter("test")
 	var maxRequests uint32 = 50
-	client.SetMaxRequests("test", &maxRequests)
+	edsb.updateServiceRequestsConfig("test", &maxRequests)
 
 	// One locality with one backend.
 	clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
@@ -738,7 +737,7 @@ func (s) TestDropPicker(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 
-			p := newDropPicker(constPicker, tt.drops, nil, nil)
+			p := newDropPicker(constPicker, tt.drops, nil, nil, defaultServiceRequestCountMax)
 
 			// scCount is the number of sc's returned by pick. The opposite of
 			// drop-count.
@@ -786,9 +785,8 @@ func (s) TestEDS_LoadReport(t *testing.T) {
 		cbMaxRequests = 20
 	)
 	var maxRequestsTemp uint32 = cbMaxRequests
-	client.SetMaxRequests(testServiceName, &maxRequestsTemp)
+	edsb.updateServiceRequestsConfig(testServiceName, &maxRequestsTemp)
 	defer client.ClearCounterForTesting(testServiceName)
-	edsb.updateServiceRequestsCounter(testServiceName)
 
 	backendToBalancerID := make(map[balancer.SubConn]internal.LocalityID)
@@ -116,6 +116,7 @@ type fakeEDSBalancer struct {
 	subconnStateChange *testutils.Channel
 	edsUpdate          *testutils.Channel
 	serviceName        *testutils.Channel
+	serviceRequestMax  *testutils.Channel
 }
 
 func (f *fakeEDSBalancer) handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
@@ -132,8 +133,9 @@ func (f *fakeEDSBalancer) handleEDSResponse(edsResp xdsclient.EndpointsUpdate) {
 
 func (f *fakeEDSBalancer) updateState(priority priorityType, s balancer.State) {}
 
-func (f *fakeEDSBalancer) updateServiceRequestsCounter(serviceName string) {
+func (f *fakeEDSBalancer) updateServiceRequestsConfig(serviceName string, max *uint32) {
 	f.serviceName.Send(serviceName)
+	f.serviceRequestMax.Send(max)
 }
 
 func (f *fakeEDSBalancer) close() {}
@@ -186,6 +188,25 @@ func (f *fakeEDSBalancer) waitForCounterUpdate(ctx context.Context, wantServiceN
 	return nil
 }
 
+func (f *fakeEDSBalancer) waitForCountMaxUpdate(ctx context.Context, want *uint32) error {
+	val, err := f.serviceRequestMax.Receive(ctx)
+	if err != nil {
+		return err
+	}
+	got := val.(*uint32)
+
+	if got == nil && want == nil {
+		return nil
+	}
+	if got != nil && want != nil {
+		if *got != *want {
+			return fmt.Errorf("got countMax %v, want %v", *got, *want)
+		}
+		return nil
+	}
+	return fmt.Errorf("got countMax %+v, want %+v", got, want)
+}
+
 func newFakeEDSBalancer(cc balancer.ClientConn) edsBalancerImplInterface {
 	return &fakeEDSBalancer{
 		cc:                 cc,
@@ -193,6 +214,7 @@ func newFakeEDSBalancer(cc balancer.ClientConn) edsBalancerImplInterface {
 		subconnStateChange: testutils.NewChannelWithSize(10),
 		edsUpdate:          testutils.NewChannelWithSize(10),
 		serviceName:        testutils.NewChannelWithSize(10),
+		serviceRequestMax:  testutils.NewChannelWithSize(10),
 	}
 }
 
@@ -328,15 +350,20 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) {
 	if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil {
 		t.Fatal(err)
 	}
+	if err := edsLB.waitForCountMaxUpdate(ctx, nil); err != nil {
+		t.Fatal(err)
+	}
 
+	var testCountMax uint32 = 100
 	lbCfgB := &loadBalancingConfig{
 		Name:   fakeBalancerB,
 		Config: json.RawMessage("{}"),
 	}
 	if err := edsB.UpdateClientConnState(balancer.ClientConnState{
 		BalancerConfig: &EDSConfig{
 			ChildPolicy:           lbCfgB,
 			EDSServiceName:        testServiceName,
+			MaxConcurrentRequests: &testCountMax,
 		},
 	}); err != nil {
 		t.Fatalf("edsB.UpdateClientConnState() failed: %v", err)
@@ -349,6 +376,9 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) {
 		// eds_impl will compare the service names, and skip if it didn't change.
 		t.Fatal(err)
 	}
+	if err := edsLB.waitForCountMaxUpdate(ctx, &testCountMax); err != nil {
+		t.Fatal(err)
+	}
 }
 
 // TestSubConnStateChange verifies if the top-level edsBalancer passes on
@@ -606,15 +636,23 @@ func (s) TestCounterUpdate(t *testing.T) {
 	}
 	defer edsB.Close()
 
+	var testCountMax uint32 = 100
 	// Update should trigger counter update with provided service name.
 	if err := edsB.UpdateClientConnState(balancer.ClientConnState{
-		BalancerConfig: &EDSConfig{EDSServiceName: "foobar-1"},
+		BalancerConfig: &EDSConfig{
+			EDSServiceName:        "foobar-1",
+			MaxConcurrentRequests: &testCountMax,
+		},
 	}); err != nil {
 		t.Fatal(err)
 	}
 	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
-	if err := edsB.(*edsBalancer).edsImpl.(*fakeEDSBalancer).waitForCounterUpdate(ctx, "foobar-1"); err != nil {
+	edsI := edsB.(*edsBalancer).edsImpl.(*fakeEDSBalancer)
+	if err := edsI.waitForCounterUpdate(ctx, "foobar-1"); err != nil {
+		t.Fatal(err)
+	}
+	if err := edsI.waitForCountMaxUpdate(ctx, &testCountMax); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -642,6 +680,7 @@ func (s) TestBalancerConfigParsing(t *testing.T) {
 		t.Fatalf("%v", err)
 	}
 
+	var testMaxConcurrentRequests uint32 = 123
 	tests := []struct {
 		name string
 		js   json.RawMessage
@@ -690,6 +729,7 @@ func (s) TestBalancerConfigParsing(t *testing.T) {
     {"fake_balancer_A": {}}
   ],
   "edsServiceName": "eds.service",
+  "maxConcurrentRequests": 123,
   "lrsLoadReportingServerName": "lrs.server"
 }`),
 			want: &EDSConfig{
@@ -702,6 +742,7 @@ func (s) TestBalancerConfigParsing(t *testing.T) {
 					Config: json.RawMessage("{}"),
 				},
 				EDSServiceName:             testEDSName,
+				MaxConcurrentRequests:      &testMaxConcurrentRequests,
 				LrsLoadReportingServerName: &testLRSName,
 			},
 		},
@@ -24,8 +24,6 @@ import (
 	"sync/atomic"
 )
 
-const defaultMaxRequests uint32 = 1024
-
 type servicesRequestsCounter struct {
 	mu       sync.Mutex
 	services map[string]*ServiceRequestsCounter
@@ -39,7 +37,6 @@ var src = &servicesRequestsCounter{
 // service with the provided name.
 type ServiceRequestsCounter struct {
 	ServiceName string
-	maxRequests uint32
 	numRequests uint32
 }
 
@@ -50,33 +47,17 @@ func GetServiceRequestsCounter(serviceName string) *ServiceRequestsCounter {
 	defer src.mu.Unlock()
 	c, ok := src.services[serviceName]
 	if !ok {
-		c = &ServiceRequestsCounter{ServiceName: serviceName, maxRequests: defaultMaxRequests}
+		c = &ServiceRequestsCounter{ServiceName: serviceName}
 		src.services[serviceName] = c
 	}
 	return c
 }
 
-// SetMaxRequests updates the max requests for a service's counter.
-func SetMaxRequests(serviceName string, maxRequests *uint32) {
-	src.mu.Lock()
-	defer src.mu.Unlock()
-	c, ok := src.services[serviceName]
-	if !ok {
-		c = &ServiceRequestsCounter{ServiceName: serviceName}
-		src.services[serviceName] = c
-	}
-	if maxRequests != nil {
-		c.maxRequests = *maxRequests
-	} else {
-		c.maxRequests = defaultMaxRequests
-	}
-}
-
 // StartRequest starts a request for a service, incrementing its number of
 // requests by 1. Returns an error if the max number of requests is exceeded.
-func (c *ServiceRequestsCounter) StartRequest() error {
-	if atomic.LoadUint32(&c.numRequests) >= atomic.LoadUint32(&c.maxRequests) {
-		return fmt.Errorf("max requests %v exceeded on service %v", c.maxRequests, c.ServiceName)
+func (c *ServiceRequestsCounter) StartRequest(max uint32) error {
+	if atomic.LoadUint32(&c.numRequests) >= max {
+		return fmt.Errorf("max requests %v exceeded on service %v", max, c.ServiceName)
 	}
 	atomic.AddUint32(&c.numRequests, 1)
 	return nil
@@ -97,6 +78,5 @@ func ClearCounterForTesting(serviceName string) {
 	if !ok {
 		return
 	}
-	c.maxRequests = defaultMaxRequests
 	c.numRequests = 0
 }
@@ -56,7 +56,6 @@ func resetServiceRequestsCounter() {
 }
 
 func testCounter(t *testing.T, test counterTest) {
-	SetMaxRequests(test.name, &test.maxRequests)
 	requestsStarted := make(chan struct{})
 	requestsSent := sync.WaitGroup{}
 	requestsSent.Add(int(test.numRequests))
@@ -68,7 +67,7 @@ func testCounter(t *testing.T, test counterTest) {
 		go func() {
 			counter := GetServiceRequestsCounter(test.name)
 			defer requestsDone.Done()
-			err := counter.StartRequest()
+			err := counter.StartRequest(test.maxRequests)
 			if err == nil {
 				atomic.AddUint32(&successes, 1)
 			} else {
@@ -98,7 +97,7 @@ func testCounter(t *testing.T, test counterTest) {
 }
 
 func (s) TestRequestsCounter(t *testing.T) {
-	resetServiceRequestsCounter()
+	defer resetServiceRequestsCounter()
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			testCounter(t, test)
@@ -107,7 +106,7 @@ func (s) TestRequestsCounter(t *testing.T) {
 }
 
 func (s) TestGetServiceRequestsCounter(t *testing.T) {
-	resetServiceRequestsCounter()
+	defer resetServiceRequestsCounter()
 	for _, test := range tests {
 		counterA := GetServiceRequestsCounter(test.name)
 		counterB := GetServiceRequestsCounter(test.name)
@@ -118,39 +117,40 @@ func (s) TestGetServiceRequestsCounter(t *testing.T) {
 }
 
 func startRequests(t *testing.T, n uint32, max uint32, counter *ServiceRequestsCounter) {
-	SetMaxRequests(counter.ServiceName, &max)
 	for i := uint32(0); i < n; i++ {
-		if err := counter.StartRequest(); err != nil {
+		if err := counter.StartRequest(max); err != nil {
 			t.Fatalf("error starting initial request: %v", err)
 		}
 	}
 }
 
 func (s) TestSetMaxRequestsIncreased(t *testing.T) {
-	resetServiceRequestsCounter()
+	defer resetServiceRequestsCounter()
 	const serviceName string = "set-max-requests-increased"
 	var initialMax uint32 = 16
 
 	counter := GetServiceRequestsCounter(serviceName)
 	startRequests(t, initialMax, initialMax, counter)
-	if err := counter.StartRequest(); err == nil {
+	if err := counter.StartRequest(initialMax); err == nil {
 		t.Fatal("unexpected success on start request after max met")
 	}
 
 	newMax := initialMax + 1
-	SetMaxRequests(counter.ServiceName, &newMax)
-	if err := counter.StartRequest(); err != nil {
+	if err := counter.StartRequest(newMax); err != nil {
 		t.Fatalf("unexpected error on start request after max increased: %v", err)
 	}
 }
 
 func (s) TestSetMaxRequestsDecreased(t *testing.T) {
-	resetServiceRequestsCounter()
+	defer resetServiceRequestsCounter()
 	const serviceName string = "set-max-requests-decreased"
 	var initialMax uint32 = 16
 
 	counter := GetServiceRequestsCounter(serviceName)
 	startRequests(t, initialMax-1, initialMax, counter)
 
 	newMax := initialMax - 1
-	SetMaxRequests(counter.ServiceName, &newMax)
-	if err := counter.StartRequest(); err == nil {
+	if err := counter.StartRequest(newMax); err == nil {
 		t.Fatalf("unexpected success on start request after max decreased: %v", err)
 	}
 }