*: fix typos (#7178)

hakusai22 2024-05-03 07:54:22 +08:00 committed by GitHub
parent c7c8aa8f53
commit 273fe145d0
48 changed files with 70 additions and 70 deletions

@@ -17,7 +17,7 @@
*/
// Package endpointsharding implements a load balancing policy that manages
// homogenous child policies each owning a single endpoint.
// homogeneous child policies each owning a single endpoint.
//
// # Experimental
//
@@ -48,7 +48,7 @@ type ChildState struct {
State balancer.State
}
// NewBalancer returns a load balancing policy that manages homogenous child
// NewBalancer returns a load balancing policy that manages homogeneous child
// policies each owning a single endpoint.
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
es := &endpointSharding{

@@ -43,7 +43,7 @@ import (
// TestNoNonEmptyTargetsReturnsError tests the case where the RLS Server returns
// a response with no non empty targets. This should be treated as an Control
// Plane RPC failure, and thus fail Data Plane RPC's with an error with the
// appropriate information specfying data plane sent a response with no non
// appropriate information specifying data plane sent a response with no non
// empty targets.
func (s) TestNoNonEmptyTargetsReturnsError(t *testing.T) {
// Setup RLS Server to return a response with an empty target string.

@@ -693,7 +693,7 @@ type srvWeight struct {
const rrIterations = 100
// checkWeights does rrIterations RPCs and expects the different backends to be
// routed in a ratio as deterimined by the srvWeights passed in. Allows for
// routed in a ratio as determined by the srvWeights passed in. Allows for
// some variance (+/- 2 RPCs per backend).
func checkWeights(ctx context.Context, t *testing.T, sws ...srvWeight) {
t.Helper()
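
To make the tolerance concrete: with srvWeights of {a: 2, b: 1, c: 1} and rrIterations = 100, the expected split is 50/25/25 RPCs, and each observed count must fall within the +/-2 window. A minimal sketch of that check, with an assumed helper name:

    // withinVariance is a hypothetical helper; checkWeights does the
    // equivalent comparison inline.
    func withinVariance(got, want, slack int) bool {
        return got >= want-slack && got <= want+slack
    }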

@@ -1751,7 +1751,7 @@ func encodeAuthority(authority string) string {
return false
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
return false
case ':', '[', ']', '@': // Authority related delimeters
case ':', '[', ']', '@': // Authority related delimiters
return false
}
// Everything else must be escaped.
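
For context, a predicate like the switch above drives a percent-escaping loop. A simplified standalone sketch of that idiom (not the actual gRPC implementation; shouldEscape is assumed):

    import (
        "fmt"
        "strings"
    )

    func escapeAuthority(authority string, shouldEscape func(byte) bool) string {
        var b strings.Builder
        for i := 0; i < len(authority); i++ {
            if c := authority[i]; shouldEscape(c) {
                fmt.Fprintf(&b, "%%%02X", c) // e.g. ' ' becomes %20
            } else {
                b.WriteByte(c)
            }
        }
        return b.String()
    }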

@@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
}
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
//
// This API is experimental.
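
As a hedged illustration of the contract described above, a server handler could gate an RPC on the connection's security level; the server type and pb message types are hypothetical:

    import (
        "context"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/peer"
        "google.golang.org/grpc/status"
    )

    // SensitiveCall is a hypothetical handler; pb.Request/pb.Response stand in
    // for generated message types.
    func (s *server) SensitiveCall(ctx context.Context, req *pb.Request) (*pb.Response, error) {
        p, ok := peer.FromContext(ctx)
        if !ok {
            return nil, status.Error(codes.Internal, "no peer information in context")
        }
        // Succeeds when the connection provides PrivacyAndIntegrity, or when the
        // AuthInfo reports no usable security level (cases 2 and 3 above).
        if err := credentials.CheckSecurityLevel(p.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
            return nil, status.Error(codes.PermissionDenied, err.Error())
        }
        return &pb.Response{}, nil
    }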

@@ -18,7 +18,7 @@
// Package local implements local transport credentials.
// Local credentials reports the security level based on the type
// of connetion. If the connection is local TCP, NoSecurity will be
// of connection. If the connection is local TCP, NoSecurity will be
// reported, and if the connection is UDS, PrivacyAndIntegrity will be
// reported. If local credentials is not used in local connections
// (local TCP or UDS), it will fail.
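
A minimal sketch of dialing over a Unix domain socket with local credentials, which per the comment above should report PrivacyAndIntegrity; the socket path is illustrative:

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/local"
    )

    func dialUDS() *grpc.ClientConn {
        conn, err := grpc.Dial("unix:///tmp/example.sock",
            grpc.WithTransportCredentials(local.NewCredentials()))
        if err != nil {
            log.Fatalf("grpc.Dial failed: %v", err)
        }
        return conn
    }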

@@ -1161,7 +1161,7 @@ func (s) TestMetadataTruncationAccountsKey(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
// the set config MaxMetdataBytes is in between len(mdValue) and len("key")
// the set config MaxMetadataBytes is in between len(mdValue) and len("key")
// + len(mdValue), and thus shouldn't log this metadata entry.
md := metadata.MD{
"key": []string{mdValue},

@@ -84,7 +84,7 @@ func newStackdriverExporter(config *config) (tracingMetricsExporter, error) {
mr := monitoredresource.Autodetect()
logger.Infof("Detected MonitoredResource:: %+v", mr)
var err error
// Custom labels completly overwrite any labels generated in the OpenCensus
// Custom labels completely overwrite any labels generated in the OpenCensus
// library, including their label that uniquely identifies the process.
// Thus, generate a unique process identifier here to uniquely identify
// process for metrics exporting to function correctly.
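
A hedged sketch of what such a unique per-process identifier might look like; the label key and format here are illustrative, not the exporter's actual ones (assumes github.com/google/uuid):

    hostname, _ := os.Hostname()
    labels := map[string]string{
        // Illustrative key/format; the point is per-process uniqueness.
        "identifier": fmt.Sprintf("go-%d@%s-%s", os.Getpid(), hostname, uuid.New().String()),
    }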

@@ -526,7 +526,7 @@ func (s) TestBalancerClose(t *testing.T) {
BalancerConfig: mockBalancerConfig{},
}
if err := gsb.UpdateClientConnState(ccs); err != errBalancerClosed {
t.Fatalf("gsb.UpdateCLientConnState(%v) returned error %v, want %v", ccs, err, errBalancerClosed)
t.Fatalf("gsb.UpdateClientConnState(%v) returned error %v, want %v", ccs, err, errBalancerClosed)
}
// After the graceful switch load balancer has been closed, any resolver error

@@ -314,7 +314,7 @@ func (s) TestBalancerGroup_locality_caching(t *testing.T) {
}
// Sub-balancers are put in cache when they are shut down. If balancer group is
// closed within close timeout, all subconns should still be rmeoved
// closed within close timeout, all subconns should still be removed
// immediately.
func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) {
_, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout)
@@ -388,7 +388,7 @@ func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *test
gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout)
// Re-add sub-balancer-1, but with a different balancer builder. The
// sub-balancer was still in cache, but cann't be reused. This should cause
// sub-balancer was still in cache, but can't be reused. This should cause
// old sub-balancer's subconns to be shut down immediately, and new
// subconns to be created.
gator.Add(testBalancerIDs[1], 1)

@@ -65,7 +65,7 @@ type TruncatingMethodLogger struct {
callID uint64
idWithinCallGen *callIDGenerator
sink Sink // TODO(blog): make this plugable.
sink Sink // TODO(blog): make this pluggable.
}
// NewTruncatingMethodLogger returns a new truncating method logger.
@@ -80,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
sink: DefaultSink, // TODO(blog): make it plugable.
sink: DefaultSink, // TODO(blog): make it pluggable.
}
}
@@ -397,7 +397,7 @@ func metadataKeyOmit(key string) bool {
switch key {
case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
return true
case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
return false
}
return strings.HasPrefix(key, "grpc-")

@@ -58,16 +58,16 @@ func (s) TestGetSocketOpt(t *testing.T) {
}
l := &unix.Linger{Onoff: 1, Linger: 5}
recvTimout := &unix.Timeval{Sec: 100}
recvTimeout := &unix.Timeval{Sec: 100}
sendTimeout := &unix.Timeval{Sec: 8888}
raw.Control(func(fd uintptr) {
err := unix.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, l)
if err != nil {
t.Fatalf("failed to SetsockoptLinger(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, l, err)
}
err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimout)
err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimeout)
if err != nil {
t.Fatalf("failed to SetsockoptTimeval(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimout, err)
t.Fatalf("failed to SetsockoptTimeval(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimeout, err)
}
err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, sendTimeout)
if err != nil {
@@ -78,8 +78,8 @@ func (s) TestGetSocketOpt(t *testing.T) {
if !reflect.DeepEqual(sktopt.Linger, l) {
t.Fatalf("get socket option linger, want: %v, got %v", l, sktopt.Linger)
}
if !reflect.DeepEqual(sktopt.RecvTimeout, recvTimout) {
t.Logf("get socket option recv timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", recvTimout, sktopt.RecvTimeout)
if !reflect.DeepEqual(sktopt.RecvTimeout, recvTimeout) {
t.Logf("get socket option recv timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", recvTimeout, sktopt.RecvTimeout)
}
if !reflect.DeepEqual(sktopt.SendTimeout, sendTimeout) {
t.Logf("get socket option send timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", sendTimeout, sktopt.SendTimeout)

@@ -197,7 +197,7 @@ func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) {
t.Fatal("Scheduled a callback after closing the serializer")
}
// Ensure that the lates callback is executed at this point.
// Ensure that the latest callback is executed at this point.
select {
case <-time.After(defaultTestShortTimeout):
case <-done:

@@ -28,7 +28,7 @@ import (
// NetResolver groups the methods on net.Resolver that are used by the DNS
// resolver implementation. This allows the default net.Resolver instance to be
// overidden from tests.
// overridden from tests.
type NetResolver interface {
LookupHost(ctx context.Context, host string) (addrs []string, err error)
LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
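
A hedged sketch of how a test might override this interface: embed *net.Resolver to satisfy any methods not being faked, and intercept only LookupHost; the field names are illustrative:

    import (
        "context"
        "fmt"
        "net"
    )

    type fakeResolver struct {
        *net.Resolver             // satisfies LookupSRV and the rest
        hosts map[string][]string // canned LookupHost answers
    }

    func (f *fakeResolver) LookupHost(ctx context.Context, host string) ([]string, error) {
        if addrs, ok := f.hosts[host]; ok {
            return addrs, nil
        }
        return nil, fmt.Errorf("no such host: %q", host)
    }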

@@ -32,7 +32,7 @@ type Logger interface {
Errorf(format string, args ...any)
}
// ResolverClientConn is a fake implemetation of the resolver.ClientConn
// ResolverClientConn is a fake implementation of the resolver.ClientConn
// interface to be used in tests.
type ResolverClientConn struct {
resolver.ClientConn // Embedding the interface to avoid implementing deprecated methods.

@@ -187,7 +187,7 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) {
}
if !reflect.DeepEqual(ht.headerMD, want) {
return fmt.Errorf("metdata = %#v; want %#v", ht.headerMD, want)
return fmt.Errorf("metadata = %#v; want %#v", ht.headerMD, want)
}
return nil
},

@@ -304,7 +304,7 @@ func (s *Stream) isHeaderSent() bool {
}
// updateHeaderSent updates headerSent and returns true
// if it was alreay set. It is valid only on server-side.
// if it was already set. It is valid only on server-side.
func (s *Stream) updateHeaderSent() bool {
return atomic.SwapUint32(&s.headerSent, 1) == 1
}
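
The once-only behavior relies on atomic.SwapUint32 returning the previous value; a minimal standalone sketch of the same idiom:

    import "sync/atomic"

    var headerSent uint32

    // markHeaderSent reports whether the header had already been marked sent;
    // only the first caller observes false.
    func markHeaderSent() (alreadySent bool) {
        return atomic.SwapUint32(&headerSent, 1) == 1
    }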

@@ -2160,7 +2160,7 @@ func (s) TestWriteHeaderConnectionError(t *testing.T) {
t.Fatalf("Server has %d connections from the client, want 1", len(server.conns))
}
// Get the server transport for the connecton to the client.
// Get the server transport for the connection to the client.
var serverTransport *http2Server
for k := range server.conns {
serverTransport = k.(*http2Server)

@@ -172,7 +172,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
sc := ServiceConfig{
@@ -219,7 +219,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
Timeout: (*time.Duration)(m.Timeout),
}
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
if m.MaxRequestMessageBytes != nil {
@@ -239,13 +239,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
for i, n := range *m.Name {
path, err := n.generatePath()
if err != nil {
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
if _, ok := paths[path]; ok {
err = errDuplicatedName
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
paths[path] = struct{}{}
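
To make the parsed shape concrete, a hedged example of the JSON this function consumes, supplied via grpc.WithDefaultServiceConfig; the service and method names are illustrative:

    conn, err := grpc.Dial(target,
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithDefaultServiceConfig(`{
          "methodConfig": [{
            "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
            "timeout": "2s",
            "retryPolicy": {
              "maxAttempts": 3,
              "initialBackoff": "0.1s",
              "maxBackoff": "1s",
              "backoffMultiplier": 2,
              "retryableStatusCodes": ["UNAVAILABLE"]
            }
          }]
        }`))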

@@ -191,7 +191,7 @@ func (vi *viewInformation) Equal(vi2 *viewInformation) bool {
}
// distributionDataLatencyCount checks if the view information contains the
// desired distrubtion latency total count that falls in buckets of 5 seconds or
// desired distribution latency total count that falls in buckets of 5 seconds or
// less. This must be called with non nil view information that is aggregated
// with distribution data. Returns a nil error if correct count information
// found, non nil error if correct information not found.
@@ -351,7 +351,7 @@ func (s) TestAllMetricsOneFunction(t *testing.T) {
wantMetrics := []struct {
metric *view.View
wantVI *viewInformation
wantTags [][]tag.Tag // for non determinstic (i.e. latency) metrics. First dimension represents rows.
wantTags [][]tag.Tag // for non deterministic (i.e. latency) metrics. First dimension represents rows.
}{
{
metric: ClientStartedRPCsView,
@@ -1113,7 +1113,7 @@ func (s) TestOpenCensusTags(t *testing.T) {
readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", keyServerMethod.Name()))
}
if val != unaryCallMethodName {
readerErrCh.Send(fmt.Errorf("serverMethod receieved: %v, want server method: %v", val, unaryCallMethodName))
readerErrCh.Send(fmt.Errorf("serverMethod received: %v, want server method: %v", val, unaryCallMethodName))
}
} else {
readerErrCh.Send(fmt.Errorf("error while waiting for a tag map: %v", err))

@@ -551,7 +551,7 @@ func checkInPayload(t *testing.T, d *gotData, e *expectedData) {
t.Fatalf("st.Data = %v, want %v", st.Data, b)
}
if st.Length != len(b) {
t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b))
t.Fatalf("st.Length = %v, want %v", st.Length, len(b))
}
} else {
b, err := proto.Marshal(e.requests[e.reqIdx])
@@ -566,7 +566,7 @@ func checkInPayload(t *testing.T, d *gotData, e *expectedData) {
t.Fatalf("st.Data = %v, want %v", st.Data, b)
}
if st.Length != len(b) {
t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b))
t.Fatalf("st.Length = %v, want %v", st.Length, len(b))
}
}
// Below are sanity checks that WireLength and RecvTime are populated.
@@ -670,7 +670,7 @@ func checkOutPayload(t *testing.T, d *gotData, e *expectedData) {
t.Fatalf("st.Data = %v, want %v", st.Data, b)
}
if st.Length != len(b) {
t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b))
t.Fatalf("st.Length = %v, want %v", st.Length, len(b))
}
} else {
b, err := proto.Marshal(e.responses[e.respIdx])
@@ -685,7 +685,7 @@ func checkOutPayload(t *testing.T, d *gotData, e *expectedData) {
t.Fatalf("st.Data = %v, want %v", st.Data, b)
}
if st.Length != len(b) {
t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b))
t.Fatalf("st.Length = %v, want %v", st.Length, len(b))
}
}
// Below are sanity checks that WireLength and SentTime are populated.

@@ -194,7 +194,7 @@ func (s) TestClientSideFederationWithOnlyXDSTPStyleLDS(t *testing.T) {
return []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}
}(),
Routes: func() []*v3routepb.RouteConfiguration {
// RouteConfiguration will has one entry in []VirutalHosts that contains the
// RouteConfiguration will has one entry in []VirtualHosts that contains the
// "fully" escaped service name in []Domains. This is to assert that gRPC
// uses the escaped service name to lookup VirtualHosts. RDS is also with
// old style name.

@@ -430,7 +430,7 @@ func (s) TestServingModeChanges(t *testing.T) {
// doesn't get matched, and the Default Filter Chain pointing to RDS B does get
// matched. RDS B is of the wrong route type for server side, so RPC's are
// expected to eventually fail with that information. However, any RPC's on the
// old configration should be allowed to complete due to the transition being
// old configuration should be allowed to complete due to the transition being
// graceful stop.After, it receives an LDS specifying RDS A (which incoming
// RPC's will match to). This configuration should eventually be represented in
// the Server's state, and RPCs should proceed successfully.

@@ -584,7 +584,7 @@ func (s) TestAggregatedClusterSuccess_DiamondDependency(t *testing.T) {
// C]; B->[C, D]). Verifies that the load balancing configuration pushed to the
// cluster_resolver LB policy does not contain duplicates, and that the
// discovery mechanism corresponding to cluster C is of higher priority than the
// discovery mechanism for cluser D. Also verifies that the configuration is
// discovery mechanism for cluster D. Also verifies that the configuration is
// pushed only after all child clusters are resolved.
func (s) TestAggregatedClusterSuccess_IgnoreDups(t *testing.T) {
lbCfgCh, _, _, _ := registerWrappedClusterResolverPolicy(t)

@@ -180,7 +180,7 @@ type cdsBalancer struct {
// handleSecurityConfig processes the security configuration received from the
// management server, creates appropriate certificate provider plugins, and
// updates the HandhakeInfo which is added as an address attribute in
// updates the HandshakeInfo which is added as an address attribute in
// NewSubConn() calls.
//
// Only executed in the context of a serializer callback.

@@ -350,7 +350,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) {
t.Fatalf("NewSubConn got handshake info %+v, want %+v", gotHI, wantHI)
}
if !gotHI.UseFallbackCreds() {
t.Fatal("NewSubConn got hanshake info that does not specify the use of fallback creds")
t.Fatal("NewSubConn got handshake info that does not specify the use of fallback creds")
}
}

@@ -136,7 +136,7 @@ func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, err
// double validation is present because Unmarshalling and Validating are
// coupled into one json.Unmarshal operation). We will switch this in
// the future to two separate operations.
return nil, fmt.Errorf("error unmarshaling xDS LB Policy: %v", err)
return nil, fmt.Errorf("error unmarshalling xDS LB Policy: %v", err)
}
return cfg, nil
}

@@ -248,7 +248,7 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) {
t.Fatalf("EmptyCall() failed: %v", err)
}
// Delete the cluster resource from the mangement server.
// Delete the cluster resource from the management server.
resources.Clusters = nil
if err := managementServer.Update(ctx, resources); err != nil {
t.Fatal(err)

@@ -629,7 +629,7 @@ func (s) TestEDS_ResourceRemoved(t *testing.T) {
t.Fatalf("EmptyCall() failed: %v", err)
}
// Delete the endpoints resource from the mangement server.
// Delete the endpoints resource from the management server.
resources.Endpoints = nil
if err := managementServer.Update(ctx, resources); err != nil {
t.Fatal(err)

@@ -65,7 +65,7 @@ type dnsDiscoveryMechanism struct {
// propagated to the child policy which eventually move the channel to
// transient failure.
//
// The `dnsR` field is unset if we run into erros in this function. Therefore, a
// The `dnsR` field is unset if we run into errors in this function. Therefore, a
// nil check is required wherever we access that field.
func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *dnsDiscoveryMechanism {
ret := &dnsDiscoveryMechanism{

@@ -256,7 +256,7 @@ func (s) TestPriority_SwitchPriority(t *testing.T) {
t.Fatal(err.Error())
}
t.Log("Add p2, it shouldn't cause any udpates.")
t.Log("Add p2, it shouldn't cause any updates.")
if err := pb.UpdateClientConnState(balancer.ClientConnState{
ResolverState: resolver.State{
Addresses: []resolver.Address{
@@ -341,7 +341,7 @@ func (s) TestPriority_SwitchPriority(t *testing.T) {
sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
// Does not change the aggregate state, because round robin does not leave
// TRANIENT_FAILURE if a subconn goes CONNECTING.
// TRANSIENT_FAILURE if a subconn goes CONNECTING.
sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
@@ -1975,7 +1975,7 @@ func (s) TestPriority_HighPriorityUpdatesWhenLowInUse(t *testing.T) {
sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
// Does not change the aggregate state, because round robin does not leave
// TRANIENT_FAILURE if a subconn goes CONNECTING.
// TRANSIENT_FAILURE if a subconn goes CONNECTING.
sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {

@@ -187,7 +187,7 @@ func (s) TestPickerPickTriggerTFReturnReady(t *testing.T) {
// TestPickerPickTriggerTFWithIdle covers that if the picked SubConn is
// TransientFailure, SubConn 2 is TransientFailure, 3 is Idle (init Idle). Pick
// will be queue, SubConn 3 will Connect(), SubConn 4 and 5 (in TransientFailre)
// will be queue, SubConn 3 will Connect(), SubConn 4 and 5 (in TransientFailure)
// will not queue a Connect.
func (s) TestPickerPickTriggerTFWithIdle(t *testing.T) {
ring := newTestRing([]connectivity.State{

@@ -70,7 +70,7 @@ import (
// Tests the case where xDS client creation is expected to fail because the
// bootstrap configuration is not specified. The test verifies that xDS resolver
// build fails as well.
func (s) TestResolverBuilder_ClientCreationFails_NoBootstap(t *testing.T) {
func (s) TestResolverBuilder_ClientCreationFails_NoBootstrap(t *testing.T) {
// Build an xDS resolver without specifying bootstrap env vars.
builder := resolver.Get(xdsresolver.Scheme)
if builder == nil {

@@ -184,7 +184,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty
// server might respond with the requested resource before we send
// out request for the same. If we don't check for `started` here,
// and move the state to `received`, we will end up starting the
// timer when the request gets sent out. And since the mangement
// timer when the request gets sent out. And since the management
// server already sent us the resource, there is a good chance
// that it will not send it again. This would eventually lead to
// the timer firing, even though we have the resource in the

@@ -104,7 +104,7 @@ func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) {
}
}
if ctx.Err() != nil {
t.Fatalf("Test timed out before state transiton to %q was verified.", watchStateRequested)
t.Fatalf("Test timed out before state transition to %q was verified.", watchStateRequested)
}
// Updating mgmt server with the same lds resource. Blocking on watcher's update

@@ -126,7 +126,7 @@ func (cc ChannelCreds) String() string {
}
// We do not expect the Marshal call to fail since we wrote to cc.Config
// after a successful unmarshaling from JSON configuration. Therefore,
// after a successful unmarshalling from JSON configuration. Therefore,
// it is safe to ignore the error here.
b, _ := json.Marshal(cc.Config)
return cc.Type + "-" + string(b)
@@ -153,7 +153,7 @@ type ServerConfig struct {
// It is also used to dedup servers with the same server URI and creds.
ServerFeatures []string
// As part of unmarshaling the JSON config into this struct, we ensure that
// As part of unmarshalling the JSON config into this struct, we ensure that
// the credentials config is valid by building an instance of the specified
// credentials and store it here as a grpc.DialOption for easy access when
// dialing this xDS server.

@@ -162,7 +162,7 @@ func TestLocalityStats(t *testing.T) {
func TestResetAfterStats(t *testing.T) {
// Push a bunch of drops, call stats and load stats, and leave inProgress to be non-zero.
// Dump the stats. Verify expexted
// Dump the stats. Verify expected
// Push the same set of loads as before
// Now dump and verify the newly expected ones.
var (

@@ -25,7 +25,7 @@ import (
)
type clusterNameAndServiceName struct {
clusterName, edsServcieName string
clusterName, edsServiceName string
}
type clusterRequestsCounter struct {
@@ -52,7 +52,7 @@ func GetClusterRequestsCounter(clusterName, edsServiceName string) *ClusterReque
defer src.mu.Unlock()
k := clusterNameAndServiceName{
clusterName: clusterName,
edsServcieName: edsServiceName,
edsServiceName: edsServiceName,
}
c, ok := src.clusters[k]
if !ok {
@@ -89,7 +89,7 @@ func ClearCounterForTesting(clusterName, edsServiceName string) {
defer src.mu.Unlock()
k := clusterNameAndServiceName{
clusterName: clusterName,
edsServcieName: edsServiceName,
edsServiceName: edsServiceName,
}
c, ok := src.clusters[k]
if !ok {
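
The counters above are keyed by a comparable struct rather than a concatenated string; a self-contained sketch of that pattern:

    package main

    import "fmt"

    type clusterKey struct{ clusterName, edsServiceName string }

    func main() {
        counts := map[clusterKey]int{}
        counts[clusterKey{"cluster-a", "svc-1"}]++ // composite key, no string joining
        counts[clusterKey{"cluster-a", "svc-1"}]++
        fmt.Println(counts[clusterKey{"cluster-a", "svc-1"}]) // prints 2
    }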

@@ -107,7 +107,7 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU
}
cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicy")}
if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
return fmt.Errorf("received unepected diff in the cluster resource update: (-want, got):\n%s", diff)
return fmt.Errorf("received unexpected diff in the cluster resource update: (-want, got):\n%s", diff)
}
return nil
}

@@ -119,7 +119,7 @@ func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wan
}
cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.EndpointsUpdate{}, "Raw")}
if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
return fmt.Errorf("received unepected diff in the endpoints resource update: (-want, got):\n%s", diff)
return fmt.Errorf("received unexpected diff in the endpoints resource update: (-want, got):\n%s", diff)
}
return nil
}

@@ -167,7 +167,7 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want
cmpopts.IgnoreFields(xdsresource.ListenerUpdate{}, "Raw"),
}
if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
return fmt.Errorf("received unepected diff in the listener resource update: (-want, got):\n%s", diff)
return fmt.Errorf("received unexpected diff in the listener resource update: (-want, got):\n%s", diff)
}
return nil
}

@@ -120,7 +120,7 @@ func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, w
}
cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.RouteConfigUpdate{}, "Raw")}
if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
return fmt.Errorf("received unepected diff in the route configuration resource update: (-want, got):\n%s", diff)
return fmt.Errorf("received unexpected diff in the route configuration resource update: (-want, got):\n%s", diff)
}
return nil
}

@@ -63,7 +63,7 @@ type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesC
// protocol version.
type Transport struct {
// These fields are initialized at creation time and are read-only afterwards.
cc *grpc.ClientConn // ClientConn to the mangement server.
cc *grpc.ClientConn // ClientConn to the management server.
serverURI string // URI of the management server.
onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer.
onErrorHandler func(error) // To report underlying stream errors.

@@ -74,7 +74,7 @@ type resourcesWithTypeURL struct {
// TestHandleResponseFromManagementServer covers different scenarios of the
// transport receiving a response from the management server. In all scenarios,
// the trasport is expected to pass the received responses as-is to the data
// the transport is expected to pass the received responses as-is to the data
// model layer for validation and not perform any validation on its own.
func (s) TestHandleResponseFromManagementServer(t *testing.T) {
const (

@@ -633,7 +633,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error)
}
hcm := &v3httppb.HttpConnectionManager{}
if err := tc.UnmarshalTo(hcm); err != nil {
return nil, fmt.Errorf("network filters {%+v} failed unmarshaling of network filter {%+v}: %v", filters, filter, err)
return nil, fmt.Errorf("network filters {%+v} failed unmarshalling of network filter {%+v}: %v", filters, filter, err)
}
// "Any filters after HttpConnectionManager should be ignored during
// connection processing but still be considered for validity.
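
The typed-config unpacking above uses the anypb API; a hedged sketch of the pack/unpack round trip, assuming standard protobuf-go:

    import (
        v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
        "google.golang.org/protobuf/types/known/anypb"
    )

    func roundTrip(hcm *v3httppb.HttpConnectionManager) (*v3httppb.HttpConnectionManager, error) {
        packed, err := anypb.New(hcm) // wraps the message and records its type URL
        if err != nil {
            return nil, err
        }
        out := &v3httppb.HttpConnectionManager{}
        if err := packed.UnmarshalTo(out); err != nil { // fails on a type URL mismatch
            return nil, err
        }
        return out, nil
    }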

@@ -72,7 +72,7 @@ func processClientSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err
}
apiLis := &v3httppb.HttpConnectionManager{}
if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil {
return nil, fmt.Errorf("failed to unmarshal api_listner: %v", err)
return nil, fmt.Errorf("failed to unmarshal api_listener: %v", err)
}
// "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and
// HttpConnectionManager.original_ip_detection_extensions must be empty. If

@@ -1223,7 +1223,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
},
}),
wantName: v3LDSTarget,
wantErr: "failed unmarshaling of network filter",
wantErr: "failed unmarshalling of network filter",
},
{
name: "unexpected transport socket name",

@@ -495,7 +495,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) {
defer mgmtServer.Stop()
// Generate bootstrap configuration pointing to the above management server
// with certificate provider configuration pointing to fake certifcate
// with certificate provider configuration pointing to fake certificate
// providers.
nodeID := uuid.NewString()
bootstrapContents, err := bootstrap.Contents(bootstrap.Options{
@@ -585,7 +585,7 @@ func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) {
defer cancel()
// Generate bootstrap configuration pointing to the above management server
// with certificate provider configuration pointing to fake certifcate
// with certificate provider configuration pointing to fake certificate
// providers.
bootstrapContents, err := bootstrap.Contents(bootstrap.Options{
NodeID: nodeID,