*: fix more typos (#7619)

Nathan Baulch 2024-09-17 03:58:27 +10:00 committed by GitHub
parent 04e78b0faf
commit 3b626a7b52
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
54 changed files with 103 additions and 103 deletions

View File

@@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR
organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
@@ -39,7 +39,7 @@ How to get your contributions merged smoothly and quickly.
proposal](https://github.com/grpc/proposal).
- Provide a good **PR description** as a record of **what** change is being made
and **why** it was made. Link to a github issue if it exists.
and **why** it was made. Link to a GitHub issue if it exists.
- If you want to fix formatting or style, consider whether your changes are an
obvious improvement or might be considered a personal preference. If a style

View File

@@ -83,7 +83,7 @@ performing compression and decompression.
A `Compressor` contains code to compress and decompress by wrapping `io.Writer`s
and `io.Reader`s, respectively. (The form of `Compress` and `Decompress` were
chosen to most closely match Go's standard package
[implementations](https://golang.org/pkg/compress/) of compressors. Like
[implementations](https://golang.org/pkg/compress/) of compressors). Like
`Codec`s, `Compressor`s are registered by name into a global registry maintained
in the `encoding` package.
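
For context (not part of the diff): a minimal sketch of such a `Compressor`, wrapping Go's `compress/gzip` and registered by name into the `encoding` package's global registry. The `my-gzip` name is illustrative; grpc-go already ships a registered gzip compressor in `encoding/gzip`.

```go
package main

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

type gzipCompressor struct{}

// Compress wraps the given io.Writer, mirroring compress/gzip.
func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

// Decompress wraps the given io.Reader.
func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

// Name is the key under which the compressor is registered.
func (gzipCompressor) Name() string { return "my-gzip" }

func init() {
	// Registers into the global registry maintained in the encoding package.
	encoding.RegisterCompressor(gzipCompressor{})
}

func main() {}
```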

View File

@@ -1,6 +1,6 @@
# Authentication
As outlined in the [gRPC authentication guide](https://grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it.
As outlined in the [gRPC authentication guide](https://grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between a client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it.
# Enabling TLS on a gRPC client
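
The body of this section is not shown in the diff; as a hedged reminder of what the heading covers, here is a minimal client-side TLS sketch. The CA file name and target address are placeholders, and `grpc.NewClient` is used on the assumption of a recent grpc-go release.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Load the CA certificate used to verify the server's identity.
	creds, err := credentials.NewClientTLSFromFile("ca_cert.pem", "")
	if err != nil {
		log.Fatalf("failed to load TLS credentials: %v", err)
	}
	conn, err := grpc.NewClient("server.example.com:443",
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	defer conn.Close()
}
```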

View File

@@ -130,7 +130,7 @@ type SubConn interface {
// UpdateAddresses updates the addresses used in this SubConn.
// gRPC checks if currently-connected address is still in the new list.
// If it's in the list, the connection will be kept.
// If it's not in the list, the connection will gracefully closed, and
// If it's not in the list, the connection will gracefully close, and
// a new connection will be created.
//
// This will trigger a state transition for the SubConn.

View File

@@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
}
}
// If resolver state contains no addresses, return an error so ClientConn
// will trigger re-resolve. Also records this as an resolver error, so when
// will trigger re-resolve. Also records this as a resolver error, so when
// the overall state turns transient failure, the error message will have
// the zero address information.
if len(s.ResolverState.Addresses) == 0 {
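
A hypothetical continuation of this branch (hand-written for context, not part of the diff; the real body may differ):

```go
	// Record the error so it appears in the channel state, and return
	// ErrBadResolverState so the ClientConn triggers re-resolution.
	b.ResolverError(errors.New("produced zero addresses"))
	return balancer.ErrBadResolverState
}
```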

View File

@@ -133,7 +133,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState
// Return first error found, and always commit full processing of
// updating children. If desired to process more specific errors
// across all endpoints, caller should make these specific
// validations, this is a current limitation for simplicities sake.
// validations, this is a current limitation for simplicity's sake.
ret = err
}
}

View File

@@ -197,7 +197,7 @@ type lbBalancer struct {
// manualResolver is used in the remote LB ClientConn inside grpclb. When
// resolved address updates are received by grpclb, filtered updates will be
// send to remote LB ClientConn through this resolver.
// sent to remote LB ClientConn through this resolver.
manualResolver *manual.Resolver
// The ClientConn to talk to the remote balancer.
ccRemoteLB *remoteBalancerCCWrapper
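
For context (not part of the diff): `manual.Resolver` is the public `google.golang.org/grpc/resolver/manual` package. A minimal sketch of the push-updates flow described above, with scheme, target, and address all illustrative:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	// Seed the resolver before the channel first resolves.
	r.InitialState(resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.5:9000"}},
	})
	conn, err := grpc.NewClient(r.Scheme()+":///remote-balancer",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
	// Subsequent (filtered) updates would be pushed with r.UpdateState(...),
	// mirroring the "filtered updates will be sent" flow described above.
}
```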

View File

@@ -189,7 +189,7 @@ func (b builder) Equal(a builder) bool {
// Protobuf serialization maintains the order of repeated fields. Matchers
// are specified as a repeated field inside the KeyBuilder proto. If the
// order changes, it means that the order in the protobuf changed. We report
// this case as not being equal even though the builders could possible be
// this case as not being equal even though the builders could possibly be
// functionally equal.
for i, bMatcher := range b.headerKeys {
aMatcher := a.headerKeys[i]

View File

@@ -212,7 +212,7 @@ func (s) TestBalancer_OneAddress(t *testing.T) {
// balancer startup case which triggers the first picker and scheduler update
// before any load reports are received.
//
// Note that this test and others, metrics emission asssertions are a snapshot
// Note that in this test and others, metrics emission assertions are a snapshot
// of the most recently emitted metrics. This is due to the nondeterminism of
// scheduler updates with respect to test bodies, so the assertions made are
// from the most recently synced state of the system (picker/scheduler) from the

View File

@@ -276,7 +276,7 @@ func StartServer(info ServerInfo, opts ...grpc.ServerOption) func() {
}
}
// DoUnaryCall performs an unary RPC with given stub and request and response sizes.
// DoUnaryCall performs a unary RPC with given stub and request and response sizes.
func DoUnaryCall(tc testgrpc.BenchmarkServiceClient, reqSize, respSize int) error {
pl := NewPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
req := &testpb.SimpleRequest{

View File

@@ -21,7 +21,7 @@ To format the benchmark result:
go run benchmark/benchresult/main.go resultfile
To see the performance change based on a old result:
To see the performance change based on an old result:
go run benchmark/benchresult/main.go resultfile_old resultfile

View File

@@ -1292,7 +1292,7 @@ func (ac *addrConn) resetTransportAndUnlock() {
ac.mu.Unlock()
}
// tryAllAddrs tries to creates a connection to the addresses, and stop when at
// tryAllAddrs tries to create a connection to the addresses, and stop when at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {

View File

@@ -117,7 +117,7 @@ func (ts *testServer) start() error {
return nil
}
// handleconn accepts a new raw connection, and invokes the test provided
// handleConn accepts a new raw connection, and invokes the test provided
// handshake function to perform TLS handshake, and returns the result on the
// `hsResult` channel.
func (ts *testServer) handleConn() {

View File

@@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption {
// option to true from the Control field. For a concrete example of how to do
// this, see internal.NetDialerWithTCPKeepalive().
//
// For more information, please see [issue 23459] in the Go github repo.
// For more information, please see [issue 23459] in the Go GitHub repo.
//
// [issue 23459]: https://github.com/golang/go/issues/23459
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
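
A hedged sketch of the pattern this comment points at: a `net.Dialer` whose `Control` hook sets a socket option on the raw connection, plugged in via `WithContextDialer`. The option shown (`SO_KEEPALIVE` with Linux-style constants from `golang.org/x/sys/unix`) is illustrative; see `internal.NetDialerWithTCPKeepalive()` for the canonical version.

```go
package main

import (
	"context"
	"log"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	d := &net.Dialer{
		Control: func(network, address string, c syscall.RawConn) error {
			var sockErr error
			err := c.Control(func(fd uintptr) {
				// Set the desired option on the socket before it connects.
				sockErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
			})
			if err != nil {
				return err
			}
			return sockErr
		},
	}
	conn, err := grpc.NewClient("dns:///example.com:50051",
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return d.DialContext(ctx, "tcp", addr)
		}),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}
```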

View File

@@ -114,7 +114,7 @@ func (c *errProtoCodec) Name() string {
func (s) TestEncodeDoesntPanicOnServer(t *testing.T) {
grpctest.TLogger.ExpectError("grpc: server failed to encode response")
// Create an codec that errors when encoding messages.
// Create a codec that errors when encoding messages.
encodingErr := errors.New("encoding failed")
ec := &errProtoCodec{name: t.Name(), encodingErr: encodingErr}
@@ -150,7 +150,7 @@ func (s) TestEncodeDoesntPanicOnServer(t *testing.T) {
// Tests the case where decoding fails on the server. Verifies that there is
// no panic and that the decoding error is propagated to the client.
func (s) TestDecodeDoesntPanicOnServer(t *testing.T) {
// Create an codec that errors when decoding messages.
// Create a codec that errors when decoding messages.
decodingErr := errors.New("decoding failed")
ec := &errProtoCodec{name: t.Name(), decodingErr: decodingErr}
@@ -192,7 +192,7 @@ func (s) TestEncodeDoesntPanicOnClient(t *testing.T) {
backend := stubserver.StartTestService(t, nil)
defer backend.Stop()
// Create an codec that errors when encoding messages.
// Create a codec that errors when encoding messages.
encodingErr := errors.New("encoding failed")
ec := &errProtoCodec{name: t.Name(), encodingErr: encodingErr}
@@ -228,7 +228,7 @@ func (s) TestDecodeDoesntPanicOnClient(t *testing.T) {
backend := stubserver.StartTestService(t, nil)
defer backend.Stop()
// Create an codec that errors when decoding messages.
// Create a codec that errors when decoding messages.
decodingErr := errors.New("decoding failed")
ec := &errProtoCodec{name: t.Name(), decodingErr: decodingErr}
@@ -283,7 +283,7 @@ func (p *countingProtoCodec) Name() string {
// Tests the case where ForceServerCodec option is used on the server. Verifies
// that encoding and decoding happen once per RPC.
func (s) TestForceServerCodec(t *testing.T) {
// Create an server with the counting proto codec.
// Create a server with the counting proto codec.
codec := &countingProtoCodec{name: t.Name()}
backend := stubserver.StartTestService(t, nil, grpc.ForceServerCodecV2(codec))
defer backend.Stop()

View File

@@ -151,7 +151,7 @@ func makeCRLProvider(crlDirectory string) *advancedtls.FileWatcherCRLProvider {
}
// --- Custom Verification ---
func customVerificaitonSucceed(info *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) {
func customVerificationSucceed(info *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) {
// Looks at info for what you care about as the custom verification implementer
if info.ServerName != "localhost:50051" {
return nil, fmt.Errorf("expected servername of localhost:50051, got %v", info.ServerName)
@@ -159,7 +159,7 @@ func customVerificaitonSucceed(info *advancedtls.HandshakeVerificationInfo) (*ad
return &advancedtls.PostHandshakeVerificationResults{}, nil
}
func customVerificaitonFail(info *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) {
func customVerificationFail(info *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) {
// Looks at info for what you care about as the custom verification implementer
if info.ServerName != "ExampleDesignedToFail" {
return nil, fmt.Errorf("expected servername of ExampleDesignedToFail, got %v", info.ServerName)
@@ -191,7 +191,7 @@ func runClientWithCustomVerification(credsDirectory string, port string) {
},
// Tell the client to verify the server cert
VerificationType: advancedtls.CertVerification,
AdditionalPeerVerification: customVerificaitonSucceed,
AdditionalPeerVerification: customVerificationSucceed,
}
clientTLSCreds, err := advancedtls.NewClientCreds(options)
@@ -215,7 +215,7 @@ func runClientWithCustomVerification(credsDirectory string, port string) {
},
// Tell the client to verify the server cert
VerificationType: advancedtls.CertVerification,
AdditionalPeerVerification: customVerificaitonFail,
AdditionalPeerVerification: customVerificationFail,
}
clientTLSCreds, err := advancedtls.NewClientCreds(options)

View File

@@ -1,5 +1,5 @@
base_dir = .
certificate = $base_dir/ca_cert.pem # The CA certifcate
certificate = $base_dir/ca_cert.pem # The CA certificate
private_key = $base_dir/ca_key.pem # The CA private key
new_certs_dir = $base_dir # Location for new certs after signing
database = $base_dir/index.txt # Database index file

View File

@@ -1,5 +1,5 @@
base_dir = .
certificate = $base_dir/ca_cert.pem # The CA certifcate
certificate = $base_dir/ca_cert.pem # The CA certificate
private_key = $base_dir/ca_key.pem # The CA private key
new_certs_dir = $base_dir # Location for new certs after signing
database = $base_dir/index.txt # Database index file

View File

@@ -63,7 +63,7 @@ func main() {
defer cc.Close()
c := echo.NewEchoClient(cc)
// Make a RPC every second. This should trigger telemetry to be emitted from
// Make an RPC every second. This should trigger telemetry to be emitted from
// the client and the server.
for {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)

View File

@@ -56,7 +56,7 @@ func main() {
}
defer conn.Close()
// Make a echo client and send an RPC.
// Make an echo client and send an RPC.
rgc := ecpb.NewEchoClient(conn)
callUnaryEcho(rgc, "hello world")
}

View File

@@ -60,7 +60,7 @@ func main() {
}
defer conn.Close()
// Make a echo client and send an RPC.
// Make an echo client and send an RPC.
rgc := ecpb.NewEchoClient(conn)
callUnaryEcho(rgc, "hello world")
}

View File

@@ -159,7 +159,7 @@ func main() {
}
defer conn.Close()
// Make a echo client and send RPCs.
// Make an echo client and send RPCs.
rgc := ecpb.NewEchoClient(conn)
callUnaryEcho(rgc, "hello world")
callBidiStreamingEcho(rgc)

View File

@@ -60,7 +60,7 @@ func main() {
defer cc.Close()
c := echo.NewEchoClient(cc)
// Make a RPC every second. This should trigger telemetry to be emitted from
// Make an RPC every second. This should trigger telemetry to be emitted from
// the client and the server.
for {
r, err := c.UnaryEcho(ctx, &echo.EchoRequest{Message: "this is examples/opentelemetry"})

View File

@@ -6,7 +6,7 @@ This tutorial provides a basic Go programmer's introduction to working with gRPC
- Generate server and client code using the protocol buffer compiler.
- Use the Go gRPC API to write a simple client and server for your service.
It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository.
It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers GitHub repository.
This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon.

View File

@@ -245,8 +245,8 @@ type cloudLogging struct {
type cloudMonitoring struct{}
type cloudTrace struct {
// SamplingRate is the global setting that controls the probability of a RPC
// being traced. For example, 0.05 means there is a 5% chance for a RPC to
// SamplingRate is the global setting that controls the probability of an RPC
// being traced. For example, 0.05 means there is a 5% chance for an RPC to
// be traced, 1.0 means trace every call, 0 means dont start new traces. By
// default, the sampling_rate is 0.
SamplingRate float64 `json:"sampling_rate,omitempty"`
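
For context (not part of the diff): a small hedged sketch of how `sampling_rate` is decoded from the JSON observability config. The enclosing `cloud_trace` key and struct shape are assumed from the json tags shown above.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the struct above; names assumed from the json tags shown.
type observabilityConfig struct {
	CloudTrace *struct {
		SamplingRate float64 `json:"sampling_rate,omitempty"`
	} `json:"cloud_trace,omitempty"`
}

func main() {
	// 0.05 traces roughly 5% of RPCs; 1.0 traces every call, 0 starts none.
	raw := `{"cloud_trace": {"sampling_rate": 0.05}}`
	var cfg observabilityConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.CloudTrace.SamplingRate) // 0.05
}
```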

View File

@@ -87,7 +87,7 @@ func (s) TestSuccessfulFirstUpdate(t *testing.T) {
// TestTwoBalancersSameType tests the scenario where there is a graceful switch
// load balancer setup with a current and pending load balancer of the same
// type. Any ClientConn update should be forwarded to the current lb if there is
// a current lb and no pending lb, and the only the pending lb if the graceful
// a current lb and no pending lb, and only the pending lb if the graceful
// switch balancer contains both a current lb and a pending lb. The pending load
// balancer should also swap into current whenever it updates with a
// connectivity state other than CONNECTING.
@@ -725,7 +725,7 @@ func (s) TestUpdateSubConnStateRace(t *testing.T) {
}
// TestInlineCallbackInBuild tests the scenario where a balancer calls back into
// the balancer.ClientConn API inline from it's build function.
// the balancer.ClientConn API inline from its build function.
func (s) TestInlineCallbackInBuild(t *testing.T) {
tcc, gsb := setup(t)
// This build call should cause all of the inline updates to forward to the

View File

@@ -168,7 +168,7 @@ func (s) TestBalancerGroup_start_close(t *testing.T) {
// The callback will try to hold the same lock again, which will cause a
// deadlock.
//
// This test starts the balancer group with a test balancer, will updates picker
// This test starts the balancer group with a test balancer that updates the picker
// whenever it gets an address update. It's expected that start() doesn't block
// because of deadlock.
func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) {

View File

@@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
return cs
}
// TrySchedule tries to schedules the provided callback function f to be
// TrySchedule tries to schedule the provided callback function f to be
// executed in the order it was added. This is a best-effort operation. If the
// context passed to NewCallbackSerializer was canceled before this method is
// called, the callback will not be scheduled.
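
An illustrative use of the semantics described (a sketch, not part of the commit; `grpcsync` lives under grpc-go's `internal/` tree, so this compiles only within the module itself):

```go
package grpcsyncdemo

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func demo() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks are executed one at a time, in scheduling order.
	cs.TrySchedule(func(context.Context) { fmt.Println("first") })
	cs.TrySchedule(func(context.Context) { fmt.Println("second") })

	// Best effort: once the serializer's context is canceled, subsequent
	// TrySchedule calls do not schedule the callback at all.
	cancel()
	cs.TrySchedule(func(context.Context) { fmt.Println("not scheduled") })
}
```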

View File

@@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) {
}
// baseContentType is the base content-type for gRPC. This is a valid
// content-type on it's own, but can also include a content-subtype such as
// content-type on its own, but can also include a content-subtype such as
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
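
Illustrative values for that rule (a sketch, not from the repo):

```go
package main

import "fmt"

func main() {
	for _, ct := range []string{
		"application/grpc",       // the base content-type on its own
		"application/grpc+proto", // content-subtype appended after "+"
		"application/grpc;proto", // content-subtype appended after ";"
	} {
		fmt.Println(ct)
	}
}
```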

View File

@@ -226,7 +226,7 @@ func (m *Manager) ExitIdleMode() error {
// came in and OnCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in OnCallBegin and get
// here. The first one to get the lock would got the channel to exit idle.
// here. The first one to get the lock would get the channel to exit idle.
// - Channel is not in idle mode, and the user calls Connect which calls
// m.ExitIdleMode.
//

View File

@@ -138,7 +138,7 @@ func (s) TestManager_Enabled_TimerFires(t *testing.T) {
defer mgr.Close()
mgr.ExitIdleMode()
// Ensure that the timer callback fires within a appropriate amount of time.
// Ensure that the timer callback fires within an appropriate amount of time.
select {
case <-callbackCh:
case <-time.After(2 * defaultTestIdleTimeout):
@@ -306,7 +306,7 @@ const (
stateActiveRPCs
)
// racyIdlnessEnforcer is a test idleness enforcer used specifically to test the
// racyEnforcer is a test idleness enforcer used specifically to test the
// race between idle timeout and incoming RPCs.
type racyEnforcer struct {
t *testing.T

View File

@@ -237,7 +237,7 @@ var (
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
// it's health back by calling setConnectivityState().
// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md

View File

@@ -273,7 +273,7 @@ func CheckGoroutines(logger Logger, timeout time.Duration) {
}
}
// LeakChecker captures an Logger and is returned by NewLeakChecker as a
// LeakChecker captures a Logger and is returned by NewLeakChecker as a
// convenient method to set up leak check tests in a unit test.
type LeakChecker struct {
logger Logger

View File

@@ -48,7 +48,7 @@ type queue struct {
written uint32
}
// Allocates and returns a new *queue. size needs to be a exponent of two.
// Allocates and returns a new *queue. size needs to be an exponent of two.
func newQueue(size uint32) *queue {
return &queue{
arr: make([]unsafe.Pointer, size),
@@ -103,7 +103,7 @@ func (qp *queuePair) switchQueues() *queue {
// by other exponents of two, we use floorCPUCount number of queuePairs within
// each CircularBuffer.
//
// Floor of the number of CPUs (and not the ceiling) was found to the be the
// Floor of the number of CPUs (and not the ceiling) was found to be the
// optimal number through experiments.
func floorCPUCount() uint32 {
floorExponent := bits.Len32(uint32(runtime.NumCPU())) - 1
@@ -129,7 +129,7 @@ var numCircularBufferPairs = floorCPUCount()
type CircularBuffer struct {
drainMutex sync.Mutex
qp []*queuePair
// qpn is an monotonically incrementing counter that's used to determine
// qpn is a monotonically incrementing counter that's used to determine
// which queuePair a Push operation should write to. This approach's
// performance was found to be better than writing to a random queue.
qpn uint32
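
For context (not part of the diff): sizes are kept to powers of two so that index wrapping reduces to a bitmask. A hedged sketch, with helper names that are illustrative rather than taken from this file:

```go
package main

import (
	"fmt"
	"math/bits"
	"runtime"
)

// isPowerOfTwo reports whether n is a power of two.
func isPowerOfTwo(n uint32) bool { return n != 0 && n&(n-1) == 0 }

// floorPowerOfTwoCPUs rounds NumCPU down to the nearest power of two,
// mirroring the floor-not-ceiling choice described above.
func floorPowerOfTwoCPUs() uint32 {
	return 1 << (bits.Len32(uint32(runtime.NumCPU())) - 1)
}

func main() {
	size := floorPowerOfTwoCPUs()
	fmt.Println(size, isPowerOfTwo(size))
	// With a power-of-two size, wrapping an index is a mask, not a modulo:
	var idx uint32 = 37
	fmt.Println(idx & (size - 1)) // equivalent to idx % size, but cheaper
}
```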

View File

@@ -189,7 +189,7 @@ func (stat *Stat) AppendTimer(timer *Timer) {
var statsInitialized int32
// Stats for the last defaultStreamStatsBufsize RPCs will be stored in memory.
// This is can be configured by the registering server at profiling service
// This can be configured by the registering server at profiling service
// initialization with google.golang.org/grpc/profiling/service.ProfilingConfig
const defaultStreamStatsSize uint32 = 16 << 10

View File

@@ -177,7 +177,7 @@ type dnsResolver struct {
// finished. Otherwise, data race will be possible. [Race Example] in
// dns_resolver_test we replace the real lookup functions with mocked ones to
// facilitate testing. If Close() doesn't wait for watcher() goroutine
// finishes, race detector sometimes will warns lookup (READ the lookup
// finishes, race detector sometimes will warn that lookup (READ the lookup
// function pointers) inside watcher() goroutine has data race with
// replaceNetFunc (WRITE the lookup function pointers).
wg sync.WaitGroup
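
A self-contained sketch of the pattern this comment describes (types and names are hypothetical): Close cancels the watcher and then waits on the WaitGroup, so a test that swaps the lookup function afterwards cannot race with reads inside watcher().

```go
package main

import (
	"context"
	"sync"
	"time"
)

type fakeResolver struct {
	wg     sync.WaitGroup
	cancel context.CancelFunc
	lookup func() // stands in for the real lookup function pointers
}

func newFakeResolver() *fakeResolver {
	ctx, cancel := context.WithCancel(context.Background())
	r := &fakeResolver{cancel: cancel, lookup: func() { time.Sleep(time.Millisecond) }}
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		r.watcher(ctx)
	}()
	return r
}

func (r *fakeResolver) watcher(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		default:
			r.lookup() // READ of the lookup function pointer
		}
	}
}

func (r *fakeResolver) Close() {
	r.cancel()
	r.wg.Wait() // watcher has fully exited past this point
}

func main() {
	r := newFakeResolver()
	r.Close()
	r.lookup = func() {} // safe WRITE: no concurrent reads remain
}
```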

View File

@@ -209,7 +209,7 @@ func (fr *FramerBridge) WritePing(ack bool, data [8]byte) error {
return fr.framer.WritePing(ack, data)
}
// WriteGoAway writes a GoAway Frame to the unerlying writer.
// WriteGoAway writes a GoAway Frame to the underlying writer.
func (fr *FramerBridge) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
return fr.framer.WriteGoAway(maxStreamID, http2.ErrCode(code), debugData)
}

View File

@@ -222,7 +222,7 @@ type HeaderContainsMatcher struct {
// NewHeaderContainsMatcher returns a new HeaderContainsMatcher. key is the HTTP
// Header key to match on, and contains is the value that the header should
// should contain for a successful match. An empty contains string does not
// contain for a successful match. An empty contains string does not
// work, use HeaderPresentMatcher in that case.
func NewHeaderContainsMatcher(key string, contains string, invert bool) *HeaderContainsMatcher {
return &HeaderContainsMatcher{key: key, contains: contains, invert: invert}

View File

@@ -404,7 +404,7 @@ func DoPerRPCCreds(ctx context.Context, tc testgrpc.TestServiceClient, serviceAc
}
}
// DoGoogleDefaultCredentials performs an unary RPC with google default credentials
// DoGoogleDefaultCredentials performs a unary RPC with google default credentials
func DoGoogleDefaultCredentials(ctx context.Context, tc testgrpc.TestServiceClient, defaultServiceAccount string) {
pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
req := &testpb.SimpleRequest{
@@ -423,7 +423,7 @@ func DoGoogleDefaultCredentials(ctx context.Context, tc testgrpc.TestServiceClie
}
}
// DoComputeEngineChannelCredentials performs an unary RPC with compute engine channel credentials
// DoComputeEngineChannelCredentials performs a unary RPC with compute engine channel credentials
func DoComputeEngineChannelCredentials(ctx context.Context, tc testgrpc.TestServiceClient, defaultServiceAccount string) {
pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
req := &testpb.SimpleRequest{

View File

@@ -199,7 +199,7 @@ func (s) TestBuffer_RefAfterFree(t *testing.T) {
buf := newBuffer([]byte("abcd"), mem.NopBufferPool{})
buf.Ref()
// This first call should not panc and bring the ref counter down to 1
// This first call should not panic and bring the ref counter down to 1
buf.Free()
// This second call actually frees the buffer
buf.Free()

View File

@@ -401,7 +401,7 @@ func crlPemToDer(crlBytes []byte) []byte {
// extractCRLIssuer extracts the raw ASN.1 encoding of the CRL issuer. Due to the design of
// pkix.CertificateList and pkix.RDNSequence, it is not possible to reliably marshal the
// parsed Issuer to it's original raw encoding.
// parsed Issuer to its original raw encoding.
func extractCRLIssuer(crlBytes []byte) ([]byte, error) {
if bytes.HasPrefix(crlBytes, crlPemPrefix) {
crlBytes = crlPemToDer(crlBytes)

View File

@@ -1060,7 +1060,7 @@ func (s) TestAllMetricsOneFunction(t *testing.T) {
// TestOpenCensusTags tests this instrumentation code's ability to propagate
// OpenCensus tags across the wire. It also tests the server stats handler's
// functionality of adding the server method tag for the application to see. The
// test makes an Unary RPC without a tag map and with a tag map, and expects to
// test makes a Unary RPC without a tag map and with a tag map, and expects to
// see a tag map at the application layer with server method tag in the first
// case, and a tag map at the application layer with the populated tag map plus
// server method tag in second case.

View File

@@ -45,7 +45,7 @@ import (
// the bootstrap env var to a bootstrap file with a nodeID provided. It sets CSM
// Env Vars as well, and mocks the resource detector's returned attribute set to
// simulate the environment. It registers a cleanup function on the provided t
// to restore the environment to it's original state.
// to restore the environment to its original state.
func setupEnv(t *testing.T, resourceDetectorEmissions map[string]string, nodeID, csmCanonicalServiceName, csmWorkloadName string) {
bootstrapContents := e2e.DefaultBootstrapContents(t, nodeID, "xds_server_uri")
testutils.CreateBootstrapFileForTesting(t, bootstrapContents)

View File

@@ -147,7 +147,7 @@ func (w *blockingListenerWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDo
writeOnDone(w.testCtxDone, w.onDoneCh, onDone)
}
// writeOnDone attempts to writes the onDone callback on the onDone channel. It
// writeOnDone attempts to write the onDone callback on the onDone channel. It
// returns when it can successfully write to the channel or when the test is
// done, which is signalled by testCtxDone being closed.
func writeOnDone(testCtxDone <-chan struct{}, onDoneCh chan xdsresource.OnDoneFunc, onDone xdsresource.OnDoneFunc) {
@@ -407,7 +407,7 @@ func (s) TestCSDS(t *testing.T) {
//
// This test does a bunch of similar things to the previous test, but has
// reduced complexity because of having to deal with a single resource type.
// This makes is possible to test the NACKing a resource (which results in
// This makes it possible to test the NACKing a resource (which results in
// continuous resending of the resource by the go-control-plane management
// server), in an easier and less flaky way.
func (s) TestCSDS_NACK(t *testing.T) {

View File

@@ -66,7 +66,7 @@ func makeLogicalDNSClusterResource(name, dnsHost string, dnsPort uint32) *v3clus
// Tests the case where the cluster resource requested by the cds LB policy is a
// leaf cluster. The management server sends two updates for the same leaf
// cluster resource. The test verifies that the load balancing configuration
// pushed to the cluster_resolver LB policy is contains the expected discovery
// pushed to the cluster_resolver LB policy contains the expected discovery
// mechanism corresponding to the leaf cluster, on both occasions.
func (s) TestAggregateClusterSuccess_LeafNode(t *testing.T) {
tests := []struct {

View File

@@ -134,7 +134,7 @@ func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, err
// This will never occur, valid configuration is emitted from the xDS
// Client. Validity is already checked in the xDS Client, however, this
// double validation is present because Unmarshalling and Validating are
// coupled into one json.Unmarshal operation). We will switch this in
// coupled into one json.Unmarshal operation. We will switch this in
// the future to two separate operations.
return nil, fmt.Errorf("error unmarshalling xDS LB Policy: %v", err)
}

View File

@@ -37,7 +37,7 @@ import (
const million = 1000000
// priorityConfig is config for one priority. For example, if there an EDS and a
// priorityConfig is config for one priority. For example, if there's an EDS and a
// DNS, the priority list will be [priorityConfig{EDS}, priorityConfig{DNS}].
//
// Each priorityConfig corresponds to one discovery mechanism from the LBConfig
@@ -171,7 +171,7 @@ func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.Endpoint
}
// Localities of length 0 is triggered by an NACK or resource-not-found
// error before update, or a empty localities list in a update. In either
// error before update, or an empty localities list in an update. In either
// case want to create a priority, and send down empty address list, causing
// TF for that priority. "If any discovery mechanism instance experiences an
// error retrieving data, and it has not previously reported any results, it

View File

@@ -109,7 +109,7 @@ func setupAndDial(t *testing.T, bootstrapContents []byte) (*grpc.ClientConn, fun
}
// TestErrorFromParentLB_ConnectionError tests the case where the parent of the
// clusterresolver LB policy sends its a connection error. The parent policy,
// clusterresolver LB policy sends it a connection error. The parent policy,
// CDS LB policy, sends a connection error when the ADS stream to the management
// server breaks. The test verifies that there is no perceivable effect because
// of this connection error, and that RPCs continue to work (because the LB
@@ -193,7 +193,7 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) {
// notify the test about the following events:
// - an EDS requested with the expected resource name is requested
// - EDS resource is unrequested, i.e, an EDS request with no resource name
// is received, which indicates that we are not longer interested in that
// is received, which indicates that we are no longer interested in that
// resource.
edsResourceRequestedCh := make(chan struct{}, 1)
edsResourceCanceledCh := make(chan struct{}, 1)

View File

@@ -596,7 +596,7 @@ func (s) TestEDS_ResourceRemoved(t *testing.T) {
// notify the test about the following events:
// - an EDS requested with the expected resource name is requested
// - EDS resource is unrequested, i.e, an EDS request with no resource name
// is received, which indicates that we are not longer interested in that
// is received, which indicates that we are no longer interested in that
// resource.
edsResourceRequestedCh := make(chan struct{}, 1)
edsResourceCanceledCh := make(chan struct{}, 1)
@@ -847,7 +847,7 @@ func (s) TestEDS_ClusterResourceUpdates(t *testing.T) {
t.Fatalf("Timeout when waiting for old EDS watch %q to be canceled and new one %q to be registered", edsServiceName, clusterName)
}
// Make a RPC, and ensure that it gets routed to second backend,
// Make an RPC, and ensure that it gets routed to the second backend,
// corresponding to the cluster_name.
for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) {
if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil {

View File

@@ -77,7 +77,7 @@ type endpointsResolver interface {
// discoveryMechanismKey is {type+resource_name}, it's used as the map key, so
// that the same resource resolver can be reused (e.g. when there are two
// mechanisms, both for the same EDS resource, but has different circuit
// breaking config.
// breaking config).
type discoveryMechanismKey struct {
typ DiscoveryMechanismType
name string

View File

@@ -657,7 +657,7 @@ func (b *outlierDetectionBalancer) handleChildStateUpdate(u balancer.State) {
func (b *outlierDetectionBalancer) handleLBConfigUpdate(u lbCfgUpdate) {
lbCfg := u.lbCfg
noopCfg := lbCfg.SuccessRateEjection == nil && lbCfg.FailurePercentageEjection == nil
// If the child has sent it's first update and this config flips the noop
// If the child has sent its first update and this config flips the noop
// bit compared to the most recent picker update sent upward, then a new
// picker with this updated bit needs to be forwarded upward. If a child
// update was received during the suppression of child updates within

View File

@@ -561,7 +561,7 @@ type emptyChildConfig struct {
}
// TestChildBasicOperations tests basic operations of the Outlier Detection
// Balancer and it's interaction with it's child. The following scenarios are
// Balancer and its interaction with its child. The following scenarios are
// tested, in a step by step fashion:
// 1. The Outlier Detection Balancer receives it's first good configuration. The
// balancer is expected to create a child and sent the child it's configuration.
@@ -606,7 +606,7 @@ func (s) TestChildBasicOperations(t *testing.T) {
od, tcc, _ := setup(t)
// This first config update should cause a child to be built and forwarded
// it's first update.
// its first update.
od.UpdateClientConnState(balancer.ClientConnState{
BalancerConfig: &LBConfig{
ChildPolicy: &iserviceconfig.BalancerConfig{
@@ -627,7 +627,7 @@ func (s) TestChildBasicOperations(t *testing.T) {
}
// This Update Client Conn State call should cause the first child balancer
// to close, and a new child to be created and also forwarded it's first
// to close, and a new child to be created and also forwarded its first
// config update.
od.UpdateClientConnState(balancer.ClientConnState{
BalancerConfig: &LBConfig{
@@ -654,7 +654,7 @@ func (s) TestChildBasicOperations(t *testing.T) {
if _, err = closeCh.Receive(ctx); err != nil {
t.Fatalf("timed out waiting for the first child balancer to be closed: %v", err)
}
// Verify the second child balancer received it's first config update.
// Verify the second child balancer received its first config update.
if _, err = ccsCh.Receive(ctx); err != nil {
t.Fatalf("timed out waiting for UpdateClientConnState on the second child balancer: %v", err)
}

View File

@@ -1596,11 +1596,11 @@ func (s) TestRingHash_TransientFailureSkipToAvailableReady(t *testing.T) {
})
defer restartableServer2.Stop()
nonExistantBackends := makeNonExistentBackends(t, 2)
nonExistentBackends := makeNonExistentBackends(t, 2)
const clusterName = "cluster"
backends := []string{restartableServer1.Address, restartableServer2.Address}
backends = append(backends, nonExistantBackends...)
backends = append(backends, nonExistentBackends...)
endpoints := endpointResource(t, clusterName, backends)
cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{
ClusterName: clusterName,
@@ -1862,11 +1862,11 @@ func (s) TestRingHash_SwitchToLowerPriorityAndThenBack(t *testing.T) {
// so for only one subchannel at a time.
func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *testing.T) {
backends := startTestServiceBackends(t, 1)
nonExistantBackends := makeNonExistentBackends(t, 3)
nonExistentBackends := makeNonExistentBackends(t, 3)
const clusterName = "cluster"
endpoints := endpointResource(t, clusterName, append(nonExistantBackends, backends...))
endpoints := endpointResource(t, clusterName, append(nonExistentBackends, backends...))
cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{
ClusterName: clusterName,
ServiceName: clusterName,
@@ -1897,15 +1897,15 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes
defer conn.Close()
client := testgrpc.NewTestServiceClient(conn)
holdNonExistant0 := dialer.Hold(nonExistantBackends[0])
holdNonExistant1 := dialer.Hold(nonExistantBackends[1])
holdNonExistant2 := dialer.Hold(nonExistantBackends[2])
holdNonExistent0 := dialer.Hold(nonExistentBackends[0])
holdNonExistent1 := dialer.Hold(nonExistentBackends[1])
holdNonExistent2 := dialer.Hold(nonExistentBackends[2])
holdGood := dialer.Hold(backends[0])
rpcCtx, rpcCancel := context.WithCancel(ctx)
errCh := make(chan error, 1)
go func() {
rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistantBackends[0]+"_0"))
rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistentBackends[0]+"_0"))
_, err := client.EmptyCall(rpcCtx, &testpb.Empty{})
if status.Code(err) == codes.Canceled {
errCh <- nil
@@ -1916,7 +1916,7 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes
// Wait for the RPC to trigger a connection attempt to the first address,
// then cancel the RPC. No other connection attempts should be started yet.
if !holdNonExistant0.Wait(ctx) {
if !holdNonExistent0.Wait(ctx) {
t.Fatalf("Timeout waiting for connection attempt to backend 0")
}
rpcCancel()
@@ -1926,10 +1926,10 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes
// Since the connection attempt to the first address is still blocked, no
// other connection attempts should be started yet.
if holdNonExistant1.IsStarted() {
if holdNonExistent1.IsStarted() {
t.Errorf("Got connection attempt to backend 1, expected no connection attempt.")
}
if holdNonExistant2.IsStarted() {
if holdNonExistent2.IsStarted() {
t.Errorf("Got connection attempt to backend 2, expected no connection attempt.")
}
if holdGood.IsStarted() {
@@ -1939,15 +1939,15 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes
// Allow the connection attempt to the first address to resume and wait for
// the attempt for the second address. No other connection attempts should
// be started yet.
holdNonExistant0Again := dialer.Hold(nonExistantBackends[0])
holdNonExistant0.Resume()
if !holdNonExistant1.Wait(ctx) {
holdNonExistent0Again := dialer.Hold(nonExistentBackends[0])
holdNonExistent0.Resume()
if !holdNonExistent1.Wait(ctx) {
t.Fatalf("Timeout waiting for connection attempt to backend 1")
}
if holdNonExistant0Again.IsStarted() {
if holdNonExistent0Again.IsStarted() {
t.Errorf("Got connection attempt to backend 0 again, expected no connection attempt.")
}
if holdNonExistant2.IsStarted() {
if holdNonExistent2.IsStarted() {
t.Errorf("Got connection attempt to backend 2, expected no connection attempt.")
}
if holdGood.IsStarted() {
@@ -1957,15 +1957,15 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes
// Allow the connection attempt to the second address to resume and wait for
// the attempt for the third address. No other connection attempts should
// be started yet.
holdNonExistant1Again := dialer.Hold(nonExistantBackends[1])
holdNonExistant1.Resume()
if !holdNonExistant2.Wait(ctx) {
holdNonExistent1Again := dialer.Hold(nonExistentBackends[1])
holdNonExistent1.Resume()
if !holdNonExistent2.Wait(ctx) {
t.Fatalf("Timeout waiting for connection attempt to backend 2")
}
if holdNonExistant0Again.IsStarted() {
if holdNonExistent0Again.IsStarted() {
t.Errorf("Got connection attempt to backend 0 again, expected no connection attempt.")
}
if holdNonExistant1Again.IsStarted() {
if holdNonExistent1Again.IsStarted() {
t.Errorf("Got connection attempt to backend 1 again, expected no connection attempt.")
}
if holdGood.IsStarted() {
@@ -1975,18 +1975,18 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes
// Allow the connection attempt to the third address to resume and wait
// for the attempt for the final address. No other connection attempts
// should be started yet.
holdNonExistant2Again := dialer.Hold(nonExistantBackends[2])
holdNonExistant2.Resume()
holdNonExistent2Again := dialer.Hold(nonExistentBackends[2])
holdNonExistent2.Resume()
if !holdGood.Wait(ctx) {
t.Fatalf("Timeout waiting for connection attempt to good backend")
}
if holdNonExistant0Again.IsStarted() {
if holdNonExistent0Again.IsStarted() {
t.Errorf("Got connection attempt to backend 0 again, expected no connection attempt.")
}
if holdNonExistant1Again.IsStarted() {
if holdNonExistent1Again.IsStarted() {
t.Errorf("Got connection attempt to backend 1 again, expected no connection attempt.")
}
if holdNonExistant2Again.IsStarted() {
if holdNonExistent2Again.IsStarted() {
t.Errorf("Got connection attempt to backend 2 again, expected no connection attempt.")
}
@@ -1997,13 +1997,13 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes
testutils.AwaitState(ctx, t, conn, connectivity.Ready)
// No other connection attempts should have been started
if holdNonExistant0Again.IsStarted() {
if holdNonExistent0Again.IsStarted() {
t.Errorf("Got connection attempt to backend 0 again, expected no connection attempt.")
}
if holdNonExistant1Again.IsStarted() {
if holdNonExistent1Again.IsStarted() {
t.Errorf("Got connection attempt to backend 1 again, expected no connection attempt.")
}
if holdNonExistant2Again.IsStarted() {
if holdNonExistent2Again.IsStarted() {
t.Errorf("Got connection attempt to backend 2 again, expected no connection attempt.")
}
}