mirror of https://github.com/grpc/grpc-go.git
*: fix typos (#7178)
commit 273fe145d0
parent c7c8aa8f53
@@ -17,7 +17,7 @@
 */

 // Package endpointsharding implements a load balancing policy that manages
-// homogenous child policies each owning a single endpoint.
+// homogeneous child policies each owning a single endpoint.
 //
 // # Experimental
 //
@@ -48,7 +48,7 @@ type ChildState struct {
 	State balancer.State
 }

-// NewBalancer returns a load balancing policy that manages homogenous child
+// NewBalancer returns a load balancing policy that manages homogeneous child
 // policies each owning a single endpoint.
 func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
 	es := &endpointSharding{
@@ -43,7 +43,7 @@ import (
 // TestNoNonEmptyTargetsReturnsError tests the case where the RLS Server returns
 // a response with no non empty targets. This should be treated as an Control
 // Plane RPC failure, and thus fail Data Plane RPC's with an error with the
-// appropriate information specfying data plane sent a response with no non
+// appropriate information specifying data plane sent a response with no non
 // empty targets.
 func (s) TestNoNonEmptyTargetsReturnsError(t *testing.T) {
 	// Setup RLS Server to return a response with an empty target string.
@@ -693,7 +693,7 @@ type srvWeight struct {
 const rrIterations = 100

 // checkWeights does rrIterations RPCs and expects the different backends to be
-// routed in a ratio as deterimined by the srvWeights passed in. Allows for
+// routed in a ratio as determined by the srvWeights passed in. Allows for
 // some variance (+/- 2 RPCs per backend).
 func checkWeights(ctx context.Context, t *testing.T, sws ...srvWeight) {
 	t.Helper()
@@ -1751,7 +1751,7 @@ func encodeAuthority(authority string) string {
 		return false
 	case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
 		return false
-	case ':', '[', ']', '@': // Authority related delimeters
+	case ':', '[', ']', '@': // Authority related delimiters
 		return false
 	}
 	// Everything else must be escaped.
@@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
 }

 // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
-// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
+// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
 // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
 //
 // This API is experimental.
@@ -18,7 +18,7 @@

 // Package local implements local transport credentials.
 // Local credentials reports the security level based on the type
-// of connetion. If the connection is local TCP, NoSecurity will be
+// of connection. If the connection is local TCP, NoSecurity will be
 // reported, and if the connection is UDS, PrivacyAndIntegrity will be
 // reported. If local credentials is not used in local connections
 // (local TCP or UDS), it will fail.
@@ -1161,7 +1161,7 @@ func (s) TestMetadataTruncationAccountsKey(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()

-	// the set config MaxMetdataBytes is in between len(mdValue) and len("key")
+	// the set config MaxMetadataBytes is in between len(mdValue) and len("key")
 	// + len(mdValue), and thus shouldn't log this metadata entry.
 	md := metadata.MD{
 		"key": []string{mdValue},
@@ -84,7 +84,7 @@ func newStackdriverExporter(config *config) (tracingMetricsExporter, error) {
 	mr := monitoredresource.Autodetect()
 	logger.Infof("Detected MonitoredResource:: %+v", mr)
 	var err error
-	// Custom labels completly overwrite any labels generated in the OpenCensus
+	// Custom labels completely overwrite any labels generated in the OpenCensus
 	// library, including their label that uniquely identifies the process.
 	// Thus, generate a unique process identifier here to uniquely identify
 	// process for metrics exporting to function correctly.
@@ -526,7 +526,7 @@ func (s) TestBalancerClose(t *testing.T) {
 		BalancerConfig: mockBalancerConfig{},
 	}
 	if err := gsb.UpdateClientConnState(ccs); err != errBalancerClosed {
-		t.Fatalf("gsb.UpdateCLientConnState(%v) returned error %v, want %v", ccs, err, errBalancerClosed)
+		t.Fatalf("gsb.UpdateClientConnState(%v) returned error %v, want %v", ccs, err, errBalancerClosed)
 	}

 	// After the graceful switch load balancer has been closed, any resolver error
@@ -314,7 +314,7 @@ func (s) TestBalancerGroup_locality_caching(t *testing.T) {
 }

 // Sub-balancers are put in cache when they are shut down. If balancer group is
-// closed within close timeout, all subconns should still be rmeoved
+// closed within close timeout, all subconns should still be removed
 // immediately.
 func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) {
 	_, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout)
@@ -388,7 +388,7 @@ func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *test
 	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout)

 	// Re-add sub-balancer-1, but with a different balancer builder. The
-	// sub-balancer was still in cache, but cann't be reused. This should cause
+	// sub-balancer was still in cache, but can't be reused. This should cause
 	// old sub-balancer's subconns to be shut down immediately, and new
 	// subconns to be created.
 	gator.Add(testBalancerIDs[1], 1)
@@ -65,7 +65,7 @@ type TruncatingMethodLogger struct {
 	callID uint64
 	idWithinCallGen *callIDGenerator

-	sink Sink // TODO(blog): make this plugable.
+	sink Sink // TODO(blog): make this pluggable.
 }

 // NewTruncatingMethodLogger returns a new truncating method logger.
@@ -80,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
 		callID: idGen.next(),
 		idWithinCallGen: &callIDGenerator{},

-		sink: DefaultSink, // TODO(blog): make it plugable.
+		sink: DefaultSink, // TODO(blog): make it pluggable.
 	}
 }

@@ -397,7 +397,7 @@ func metadataKeyOmit(key string) bool {
 	switch key {
 	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
 		return true
-	case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
+	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
 		return false
 	}
 	return strings.HasPrefix(key, "grpc-")
@@ -58,16 +58,16 @@ func (s) TestGetSocketOpt(t *testing.T) {
 	}

 	l := &unix.Linger{Onoff: 1, Linger: 5}
-	recvTimout := &unix.Timeval{Sec: 100}
+	recvTimeout := &unix.Timeval{Sec: 100}
 	sendTimeout := &unix.Timeval{Sec: 8888}
 	raw.Control(func(fd uintptr) {
 		err := unix.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, l)
 		if err != nil {
 			t.Fatalf("failed to SetsockoptLinger(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, l, err)
 		}
-		err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimout)
+		err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimeout)
 		if err != nil {
-			t.Fatalf("failed to SetsockoptTimeval(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimout, err)
+			t.Fatalf("failed to SetsockoptTimeval(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimeout, err)
 		}
 		err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, sendTimeout)
 		if err != nil {
@@ -78,8 +78,8 @@ func (s) TestGetSocketOpt(t *testing.T) {
 	if !reflect.DeepEqual(sktopt.Linger, l) {
 		t.Fatalf("get socket option linger, want: %v, got %v", l, sktopt.Linger)
 	}
-	if !reflect.DeepEqual(sktopt.RecvTimeout, recvTimout) {
-		t.Logf("get socket option recv timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", recvTimout, sktopt.RecvTimeout)
+	if !reflect.DeepEqual(sktopt.RecvTimeout, recvTimeout) {
+		t.Logf("get socket option recv timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", recvTimeout, sktopt.RecvTimeout)
 	}
 	if !reflect.DeepEqual(sktopt.SendTimeout, sendTimeout) {
 		t.Logf("get socket option send timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", sendTimeout, sktopt.SendTimeout)
@@ -197,7 +197,7 @@ func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) {
 		t.Fatal("Scheduled a callback after closing the serializer")
 	}

-	// Ensure that the lates callback is executed at this point.
+	// Ensure that the latest callback is executed at this point.
 	select {
 	case <-time.After(defaultTestShortTimeout):
 	case <-done:
@@ -28,7 +28,7 @@ import (

 // NetResolver groups the methods on net.Resolver that are used by the DNS
 // resolver implementation. This allows the default net.Resolver instance to be
-// overidden from tests.
+// overridden from tests.
 type NetResolver interface {
 	LookupHost(ctx context.Context, host string) (addrs []string, err error)
 	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
@@ -32,7 +32,7 @@ type Logger interface {
 	Errorf(format string, args ...any)
 }

-// ResolverClientConn is a fake implemetation of the resolver.ClientConn
+// ResolverClientConn is a fake implementation of the resolver.ClientConn
 // interface to be used in tests.
 type ResolverClientConn struct {
 	resolver.ClientConn // Embedding the interface to avoid implementing deprecated methods.
@@ -187,7 +187,7 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) {
 			}

 			if !reflect.DeepEqual(ht.headerMD, want) {
-				return fmt.Errorf("metdata = %#v; want %#v", ht.headerMD, want)
+				return fmt.Errorf("metadata = %#v; want %#v", ht.headerMD, want)
 			}
 			return nil
 		},
@@ -304,7 +304,7 @@ func (s *Stream) isHeaderSent() bool {
 }

 // updateHeaderSent updates headerSent and returns true
-// if it was alreay set. It is valid only on server-side.
+// if it was already set. It is valid only on server-side.
 func (s *Stream) updateHeaderSent() bool {
 	return atomic.SwapUint32(&s.headerSent, 1) == 1
 }
@@ -2160,7 +2160,7 @@ func (s) TestWriteHeaderConnectionError(t *testing.T) {
 		t.Fatalf("Server has %d connections from the client, want 1", len(server.conns))
 	}

-	// Get the server transport for the connecton to the client.
+	// Get the server transport for the connection to the client.
 	var serverTransport *http2Server
 	for k := range server.conns {
 		serverTransport = k.(*http2Server)
@@ -172,7 +172,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 	var rsc jsonSC
 	err := json.Unmarshal([]byte(js), &rsc)
 	if err != nil {
-		logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+		logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
 		return &serviceconfig.ParseResult{Err: err}
 	}
 	sc := ServiceConfig{
@@ -219,7 +219,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 			Timeout: (*time.Duration)(m.Timeout),
 		}
 		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
-			logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+			logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
 			return &serviceconfig.ParseResult{Err: err}
 		}
 		if m.MaxRequestMessageBytes != nil {
@@ -239,13 +239,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 		for i, n := range *m.Name {
 			path, err := n.generatePath()
 			if err != nil {
-				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
 				return &serviceconfig.ParseResult{Err: err}
 			}

 			if _, ok := paths[path]; ok {
 				err = errDuplicatedName
-				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
 				return &serviceconfig.ParseResult{Err: err}
 			}
 			paths[path] = struct{}{}
@@ -191,7 +191,7 @@ func (vi *viewInformation) Equal(vi2 *viewInformation) bool {
 }

 // distributionDataLatencyCount checks if the view information contains the
-// desired distrubtion latency total count that falls in buckets of 5 seconds or
+// desired distribution latency total count that falls in buckets of 5 seconds or
 // less. This must be called with non nil view information that is aggregated
 // with distribution data. Returns a nil error if correct count information
 // found, non nil error if correct information not found.
@@ -351,7 +351,7 @@ func (s) TestAllMetricsOneFunction(t *testing.T) {
 	wantMetrics := []struct {
 		metric *view.View
 		wantVI *viewInformation
-		wantTags [][]tag.Tag // for non determinstic (i.e. latency) metrics. First dimension represents rows.
+		wantTags [][]tag.Tag // for non deterministic (i.e. latency) metrics. First dimension represents rows.
 	}{
 		{
 			metric: ClientStartedRPCsView,
@@ -1113,7 +1113,7 @@ func (s) TestOpenCensusTags(t *testing.T) {
 				readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", keyServerMethod.Name()))
 			}
 			if val != unaryCallMethodName {
-				readerErrCh.Send(fmt.Errorf("serverMethod receieved: %v, want server method: %v", val, unaryCallMethodName))
+				readerErrCh.Send(fmt.Errorf("serverMethod received: %v, want server method: %v", val, unaryCallMethodName))
 			}
 		} else {
 			readerErrCh.Send(fmt.Errorf("error while waiting for a tag map: %v", err))
@@ -551,7 +551,7 @@ func checkInPayload(t *testing.T, d *gotData, e *expectedData) {
 			t.Fatalf("st.Data = %v, want %v", st.Data, b)
 		}
 		if st.Length != len(b) {
-			t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b))
+			t.Fatalf("st.Length = %v, want %v", st.Length, len(b))
 		}
 	} else {
 		b, err := proto.Marshal(e.requests[e.reqIdx])
@@ -566,7 +566,7 @@ func checkInPayload(t *testing.T, d *gotData, e *expectedData) {
 			t.Fatalf("st.Data = %v, want %v", st.Data, b)
 		}
 		if st.Length != len(b) {
-			t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b))
+			t.Fatalf("st.Length = %v, want %v", st.Length, len(b))
 		}
 	}
 	// Below are sanity checks that WireLength and RecvTime are populated.
@@ -670,7 +670,7 @@ func checkOutPayload(t *testing.T, d *gotData, e *expectedData) {
 			t.Fatalf("st.Data = %v, want %v", st.Data, b)
 		}
 		if st.Length != len(b) {
-			t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b))
+			t.Fatalf("st.Length = %v, want %v", st.Length, len(b))
 		}
 	} else {
 		b, err := proto.Marshal(e.responses[e.respIdx])
@@ -685,7 +685,7 @@ func checkOutPayload(t *testing.T, d *gotData, e *expectedData) {
 			t.Fatalf("st.Data = %v, want %v", st.Data, b)
 		}
 		if st.Length != len(b) {
-			t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b))
+			t.Fatalf("st.Length = %v, want %v", st.Length, len(b))
 		}
 	}
 	// Below are sanity checks that WireLength and SentTime are populated.
@@ -194,7 +194,7 @@ func (s) TestClientSideFederationWithOnlyXDSTPStyleLDS(t *testing.T) {
 			return []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}
 		}(),
 		Routes: func() []*v3routepb.RouteConfiguration {
-			// RouteConfiguration will has one entry in []VirutalHosts that contains the
+			// RouteConfiguration will has one entry in []VirtualHosts that contains the
 			// "fully" escaped service name in []Domains. This is to assert that gRPC
 			// uses the escaped service name to lookup VirtualHosts. RDS is also with
 			// old style name.
@@ -430,7 +430,7 @@ func (s) TestServingModeChanges(t *testing.T) {
 // doesn't get matched, and the Default Filter Chain pointing to RDS B does get
 // matched. RDS B is of the wrong route type for server side, so RPC's are
 // expected to eventually fail with that information. However, any RPC's on the
-// old configration should be allowed to complete due to the transition being
+// old configuration should be allowed to complete due to the transition being
 // graceful stop.After, it receives an LDS specifying RDS A (which incoming
 // RPC's will match to). This configuration should eventually be represented in
 // the Server's state, and RPCs should proceed successfully.
@@ -584,7 +584,7 @@ func (s) TestAggregatedClusterSuccess_DiamondDependency(t *testing.T) {
 // C]; B->[C, D]). Verifies that the load balancing configuration pushed to the
 // cluster_resolver LB policy does not contain duplicates, and that the
 // discovery mechanism corresponding to cluster C is of higher priority than the
-// discovery mechanism for cluser D. Also verifies that the configuration is
+// discovery mechanism for cluster D. Also verifies that the configuration is
 // pushed only after all child clusters are resolved.
 func (s) TestAggregatedClusterSuccess_IgnoreDups(t *testing.T) {
 	lbCfgCh, _, _, _ := registerWrappedClusterResolverPolicy(t)
@@ -180,7 +180,7 @@ type cdsBalancer struct {

 // handleSecurityConfig processes the security configuration received from the
 // management server, creates appropriate certificate provider plugins, and
-// updates the HandhakeInfo which is added as an address attribute in
+// updates the HandshakeInfo which is added as an address attribute in
 // NewSubConn() calls.
 //
 // Only executed in the context of a serializer callback.
@@ -350,7 +350,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) {
 		t.Fatalf("NewSubConn got handshake info %+v, want %+v", gotHI, wantHI)
 	}
 	if !gotHI.UseFallbackCreds() {
-		t.Fatal("NewSubConn got hanshake info that does not specify the use of fallback creds")
+		t.Fatal("NewSubConn got handshake info that does not specify the use of fallback creds")
 	}
 }

@@ -136,7 +136,7 @@ func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, err
 		// double validation is present because Unmarshalling and Validating are
 		// coupled into one json.Unmarshal operation). We will switch this in
 		// the future to two separate operations.
-		return nil, fmt.Errorf("error unmarshaling xDS LB Policy: %v", err)
+		return nil, fmt.Errorf("error unmarshalling xDS LB Policy: %v", err)
 	}
 	return cfg, nil
 }
@@ -248,7 +248,7 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) {
 		t.Fatalf("EmptyCall() failed: %v", err)
 	}

-	// Delete the cluster resource from the mangement server.
+	// Delete the cluster resource from the management server.
 	resources.Clusters = nil
 	if err := managementServer.Update(ctx, resources); err != nil {
 		t.Fatal(err)
@@ -629,7 +629,7 @@ func (s) TestEDS_ResourceRemoved(t *testing.T) {
 		t.Fatalf("EmptyCall() failed: %v", err)
 	}

-	// Delete the endpoints resource from the mangement server.
+	// Delete the endpoints resource from the management server.
 	resources.Endpoints = nil
 	if err := managementServer.Update(ctx, resources); err != nil {
 		t.Fatal(err)
@@ -65,7 +65,7 @@ type dnsDiscoveryMechanism struct {
 // propagated to the child policy which eventually move the channel to
 // transient failure.
 //
-// The `dnsR` field is unset if we run into erros in this function. Therefore, a
+// The `dnsR` field is unset if we run into errors in this function. Therefore, a
 // nil check is required wherever we access that field.
 func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *dnsDiscoveryMechanism {
 	ret := &dnsDiscoveryMechanism{
@@ -256,7 +256,7 @@ func (s) TestPriority_SwitchPriority(t *testing.T) {
 		t.Fatal(err.Error())
 	}

-	t.Log("Add p2, it shouldn't cause any udpates.")
+	t.Log("Add p2, it shouldn't cause any updates.")
 	if err := pb.UpdateClientConnState(balancer.ClientConnState{
 		ResolverState: resolver.State{
 			Addresses: []resolver.Address{
@@ -341,7 +341,7 @@ func (s) TestPriority_SwitchPriority(t *testing.T) {

 	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
 	// Does not change the aggregate state, because round robin does not leave
-	// TRANIENT_FAILURE if a subconn goes CONNECTING.
+	// TRANSIENT_FAILURE if a subconn goes CONNECTING.
 	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})

 	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
@@ -1975,7 +1975,7 @@ func (s) TestPriority_HighPriorityUpdatesWhenLowInUse(t *testing.T) {

 	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
 	// Does not change the aggregate state, because round robin does not leave
-	// TRANIENT_FAILURE if a subconn goes CONNECTING.
+	// TRANSIENT_FAILURE if a subconn goes CONNECTING.
 	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})

 	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
@@ -187,7 +187,7 @@ func (s) TestPickerPickTriggerTFReturnReady(t *testing.T) {

 // TestPickerPickTriggerTFWithIdle covers that if the picked SubConn is
 // TransientFailure, SubConn 2 is TransientFailure, 3 is Idle (init Idle). Pick
-// will be queue, SubConn 3 will Connect(), SubConn 4 and 5 (in TransientFailre)
+// will be queue, SubConn 3 will Connect(), SubConn 4 and 5 (in TransientFailure)
 // will not queue a Connect.
 func (s) TestPickerPickTriggerTFWithIdle(t *testing.T) {
 	ring := newTestRing([]connectivity.State{
@@ -70,7 +70,7 @@ import (
 // Tests the case where xDS client creation is expected to fail because the
 // bootstrap configuration is not specified. The test verifies that xDS resolver
 // build fails as well.
-func (s) TestResolverBuilder_ClientCreationFails_NoBootstap(t *testing.T) {
+func (s) TestResolverBuilder_ClientCreationFails_NoBootstrap(t *testing.T) {
 	// Build an xDS resolver without specifying bootstrap env vars.
 	builder := resolver.Get(xdsresolver.Scheme)
 	if builder == nil {
@@ -184,7 +184,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty
 			// server might respond with the requested resource before we send
 			// out request for the same. If we don't check for `started` here,
 			// and move the state to `received`, we will end up starting the
-			// timer when the request gets sent out. And since the mangement
+			// timer when the request gets sent out. And since the management
 			// server already sent us the resource, there is a good chance
 			// that it will not send it again. This would eventually lead to
 			// the timer firing, even though we have the resource in the
@@ -104,7 +104,7 @@ func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) {
 		}
 	}
 	if ctx.Err() != nil {
-		t.Fatalf("Test timed out before state transiton to %q was verified.", watchStateRequested)
+		t.Fatalf("Test timed out before state transition to %q was verified.", watchStateRequested)
 	}

 	// Updating mgmt server with the same lds resource. Blocking on watcher's update
@@ -126,7 +126,7 @@ func (cc ChannelCreds) String() string {
 	}

 	// We do not expect the Marshal call to fail since we wrote to cc.Config
-	// after a successful unmarshaling from JSON configuration. Therefore,
+	// after a successful unmarshalling from JSON configuration. Therefore,
 	// it is safe to ignore the error here.
 	b, _ := json.Marshal(cc.Config)
 	return cc.Type + "-" + string(b)
@@ -153,7 +153,7 @@ type ServerConfig struct {
 	// It is also used to dedup servers with the same server URI and creds.
 	ServerFeatures []string

-	// As part of unmarshaling the JSON config into this struct, we ensure that
+	// As part of unmarshalling the JSON config into this struct, we ensure that
 	// the credentials config is valid by building an instance of the specified
 	// credentials and store it here as a grpc.DialOption for easy access when
 	// dialing this xDS server.
@@ -162,7 +162,7 @@ func TestLocalityStats(t *testing.T) {

 func TestResetAfterStats(t *testing.T) {
 	// Push a bunch of drops, call stats and load stats, and leave inProgress to be non-zero.
-	// Dump the stats. Verify expexted
+	// Dump the stats. Verify expected
 	// Push the same set of loads as before
 	// Now dump and verify the newly expected ones.
 	var (
@@ -25,7 +25,7 @@ import (
 )

 type clusterNameAndServiceName struct {
-	clusterName, edsServcieName string
+	clusterName, edsServiceName string
 }

 type clusterRequestsCounter struct {
@@ -52,7 +52,7 @@ func GetClusterRequestsCounter(clusterName, edsServiceName string) *ClusterReque
 	defer src.mu.Unlock()
 	k := clusterNameAndServiceName{
 		clusterName: clusterName,
-		edsServcieName: edsServiceName,
+		edsServiceName: edsServiceName,
 	}
 	c, ok := src.clusters[k]
 	if !ok {
@@ -89,7 +89,7 @@ func ClearCounterForTesting(clusterName, edsServiceName string) {
 	defer src.mu.Unlock()
 	k := clusterNameAndServiceName{
 		clusterName: clusterName,
-		edsServcieName: edsServiceName,
+		edsServiceName: edsServiceName,
 	}
 	c, ok := src.clusters[k]
 	if !ok {
@@ -107,7 +107,7 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU
 	}
 	cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicy")}
 	if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
-		return fmt.Errorf("received unepected diff in the cluster resource update: (-want, got):\n%s", diff)
+		return fmt.Errorf("received unexpected diff in the cluster resource update: (-want, got):\n%s", diff)
 	}
 	return nil
 }
@@ -119,7 +119,7 @@ func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wan
 	}
 	cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.EndpointsUpdate{}, "Raw")}
 	if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
-		return fmt.Errorf("received unepected diff in the endpoints resource update: (-want, got):\n%s", diff)
+		return fmt.Errorf("received unexpected diff in the endpoints resource update: (-want, got):\n%s", diff)
 	}
 	return nil
 }
@@ -167,7 +167,7 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want
 		cmpopts.IgnoreFields(xdsresource.ListenerUpdate{}, "Raw"),
 	}
 	if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
-		return fmt.Errorf("received unepected diff in the listener resource update: (-want, got):\n%s", diff)
+		return fmt.Errorf("received unexpected diff in the listener resource update: (-want, got):\n%s", diff)
 	}
 	return nil
 }
@@ -120,7 +120,7 @@ func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, w
 	}
 	cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.RouteConfigUpdate{}, "Raw")}
 	if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
-		return fmt.Errorf("received unepected diff in the route configuration resource update: (-want, got):\n%s", diff)
+		return fmt.Errorf("received unexpected diff in the route configuration resource update: (-want, got):\n%s", diff)
 	}
 	return nil
 }
@@ -63,7 +63,7 @@ type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesC
 // protocol version.
 type Transport struct {
 	// These fields are initialized at creation time and are read-only afterwards.
-	cc *grpc.ClientConn // ClientConn to the mangement server.
+	cc *grpc.ClientConn // ClientConn to the management server.
 	serverURI string // URI of the management server.
 	onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer.
 	onErrorHandler func(error) // To report underlying stream errors.
@@ -74,7 +74,7 @@ type resourcesWithTypeURL struct {

 // TestHandleResponseFromManagementServer covers different scenarios of the
 // transport receiving a response from the management server. In all scenarios,
-// the trasport is expected to pass the received responses as-is to the data
+// the transport is expected to pass the received responses as-is to the data
 // model layer for validation and not perform any validation on its own.
 func (s) TestHandleResponseFromManagementServer(t *testing.T) {
 	const (
@@ -633,7 +633,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error)
 			}
 			hcm := &v3httppb.HttpConnectionManager{}
 			if err := tc.UnmarshalTo(hcm); err != nil {
-				return nil, fmt.Errorf("network filters {%+v} failed unmarshaling of network filter {%+v}: %v", filters, filter, err)
+				return nil, fmt.Errorf("network filters {%+v} failed unmarshalling of network filter {%+v}: %v", filters, filter, err)
 			}
 			// "Any filters after HttpConnectionManager should be ignored during
 			// connection processing but still be considered for validity.
@@ -72,7 +72,7 @@ func processClientSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err
 	}
 	apiLis := &v3httppb.HttpConnectionManager{}
 	if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil {
-		return nil, fmt.Errorf("failed to unmarshal api_listner: %v", err)
+		return nil, fmt.Errorf("failed to unmarshal api_listener: %v", err)
 	}
 	// "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and
 	// HttpConnectionManager.original_ip_detection_extensions must be empty. If
@@ -1223,7 +1223,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
 				},
 			}),
 			wantName: v3LDSTarget,
-			wantErr: "failed unmarshaling of network filter",
+			wantErr: "failed unmarshalling of network filter",
 		},
 		{
 			name: "unexpected transport socket name",
@@ -495,7 +495,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) {
 	defer mgmtServer.Stop()

 	// Generate bootstrap configuration pointing to the above management server
-	// with certificate provider configuration pointing to fake certifcate
+	// with certificate provider configuration pointing to fake certificate
 	// providers.
 	nodeID := uuid.NewString()
 	bootstrapContents, err := bootstrap.Contents(bootstrap.Options{
@@ -585,7 +585,7 @@ func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) {
 	defer cancel()

 	// Generate bootstrap configuration pointing to the above management server
-	// with certificate provider configuration pointing to fake certifcate
+	// with certificate provider configuration pointing to fake certificate
 	// providers.
 	bootstrapContents, err := bootstrap.Contents(bootstrap.Options{
 		NodeID: nodeID,