mirror of https://github.com/grpc/grpc-go.git
parent 3cb33421c5
commit c8951abc16
@@ -1,3 +1,3 @@
# Security Policy

For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).

@@ -39,7 +39,7 @@ type Config struct {
MaxDelay time.Duration
}

// DefaultConfig is a backoff configuration with the default values specfied
// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
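For context, the DefaultConfig referenced above carries the values from that connection-backoff document: a 1s base delay, a 1.6 multiplier, 0.2 jitter, and a 120s cap. A minimal sketch of the computation those fields drive, assuming the documented algorithm (an illustration, not grpc-go's exact implementation):

```go
package backoffdemo

import (
	"math/rand"
	"time"
)

// Config mirrors the fields shown in the hunk above.
type Config struct {
	BaseDelay  time.Duration
	Multiplier float64
	Jitter     float64
	MaxDelay   time.Duration
}

// backoff returns the wait before the given retry attempt: exponential
// growth from BaseDelay, capped at MaxDelay, with symmetric random jitter.
func (c Config) backoff(retries int) time.Duration {
	if retries == 0 {
		return c.BaseDelay
	}
	cur, cap := float64(c.BaseDelay), float64(c.MaxDelay)
	for cur < cap && retries > 0 {
		cur *= c.Multiplier
		retries--
	}
	if cur > cap {
		cur = cap
	}
	// Jitter picks a value uniformly from [cur*(1-Jitter), cur*(1+Jitter)].
	cur *= 1 + c.Jitter*(rand.Float64()*2-1)
	return time.Duration(cur)
}
```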
@@ -219,7 +219,7 @@ type lbBalancer struct {
// All backends addresses, with metadata set to nil. This list contains all
// backend addresses in the same order and with the same duplicates as in
// serverlist. When generating picker, a SubConn slice with the same order
// but with only READY SCs will be gerenated.
// but with only READY SCs will be generated.
backendAddrsWithoutMetadata []resolver.Address
// Roundrobin functionalities.
state connectivity.State

@@ -252,7 +252,7 @@ func (s) TestLBCache_ShutdownTimer_New_Race(t *testing.T) {
go func() {
for i := 0; i < 1000; i++ {
// Shutdown starts a timer with 1 ns timeout, the NewSubConn will
// race with with the timer.
// race with the timer.
sc.Shutdown()
sc, _ = ccc.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{})
}

@@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
// target do not forwarrd the corresponding correct endpoints down/split
// target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
addrs = state.ResolverState.Addresses

@@ -47,7 +47,7 @@ type cacheEntry struct {
// headerData is received in the RLS response and is to be sent in the
// X-Google-RLS-Data header for matching RPCs.
headerData string
// expiryTime is the absolute time at which this cache entry entry stops
// expiryTime is the absolute time at which this cache entry stops
// being valid. When an RLS request succeeds, this is set to the current
// time plus the max_age field from the LB policy config.
expiryTime time.Time

@@ -223,7 +223,7 @@ func (dc *dataCache) resize(size int64) (backoffCancelled bool) {
backoffCancelled = true
}
}
dc.deleteAndcleanup(key, entry)
dc.deleteAndCleanup(key, entry)
}
dc.maxSize = size
return backoffCancelled

@@ -249,7 +249,7 @@ func (dc *dataCache) evictExpiredEntries() bool {
if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) {
continue
}
dc.deleteAndcleanup(key, entry)
dc.deleteAndCleanup(key, entry)
evicted = true
}
return evicted

@@ -339,7 +339,7 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) {
if !ok {
return
}
dc.deleteAndcleanup(key, entry)
dc.deleteAndCleanup(key, entry)
}

// deleteAndCleanup performs actions required at the time of deleting an entry

@@ -347,7 +347,7 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) {
// - the entry is removed from the map of entries
// - current size of the data cache is update
// - the key is removed from the LRU
func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) {
func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) {
delete(dc.entries, key)
dc.currentSize -= entry.size
dc.keys.removeEntry(key)

@@ -355,7 +355,7 @@ func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) {

func (dc *dataCache) stop() {
for key, entry := range dc.entries {
dc.deleteAndcleanup(key, entry)
dc.deleteAndCleanup(key, entry)
}
dc.shutdown.Fire()
}

@@ -62,7 +62,7 @@ func (s) TestControlChannelThrottled(t *testing.T) {

select {
case <-rlsReqCh:
t.Fatal("RouteLookup RPC invoked when control channel is throtlled")
t.Fatal("RouteLookup RPC invoked when control channel is throttled")
case <-time.After(defaultTestShortTimeout):
}
}

@@ -218,7 +218,7 @@ type matcher struct {
names []string
}

// Equal reports if m and are are equivalent headerKeys.
// Equal reports if m and a are equivalent headerKeys.
func (m matcher) Equal(a matcher) bool {
if m.key != a.key {
return false

@@ -125,7 +125,7 @@ func (b *bal) Close() {

// run is a dummy goroutine to make sure that child policies are closed at the
// end of tests. If they are not closed, these goroutines will be picked up by
// the leakcheker and tests will fail.
// the leak checker and tests will fail.
func (b *bal) run() {
<-b.done.Done()
}

@@ -190,7 +190,7 @@ func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info bala
state := (*balancer.State)(atomic.LoadPointer(&cpw.state))
// Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if
// it is the last one (which handles the case of delegating to the last
// child picker if all child polcies are in TRANSIENT_FAILURE).
// child picker if all child policies are in TRANSIENT_FAILURE).
if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 {
// Any header data received from the RLS server is stored in the
// cache entry and needs to be sent to the actual backend in the

@@ -89,7 +89,7 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr
}

// Start starts the aggregator. It can be called after Stop to restart the
// aggretator.
// aggregator.
func (wbsa *Aggregator) Start() {
wbsa.mu.Lock()
defer wbsa.mu.Unlock()

@@ -110,11 +110,11 @@ var (
useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O")
enableKeepalive = flag.Bool("enable_keepalive", false, "Enable client keepalive. \n"+
"Keepalive.Time is set to 10s, Keepalive.Timeout is set to 1s, Keepalive.PermitWithoutStream is set to true.")
clientReadBufferSize = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a a comma-separated list")
clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. If negative, use the default - may be a a comma-separated list")
serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a a comma-separated list")
serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a a comma-separated list")
sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a a comma-separated list")
clientReadBufferSize = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a comma-separated list")
clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. If negative, use the default - may be a comma-separated list")
serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a comma-separated list")
serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a comma-separated list")
sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a comma-separated list")
connections = flag.Int("connections", 1, "The number of connections. Each connection will handle maxConcurrentCalls RPC streams")
recvBufferPool = flags.StringWithAllowedValues("recvBufferPool", recvBufferPoolNil, "Configures the shared receive buffer pool. One of: nil, simple, all", allRecvBufferPools)
sharedWriteBuffer = flags.StringWithAllowedValues("sharedWriteBuffer", toggleModeOff,

@@ -65,7 +65,7 @@ type Network struct {
var (
//Local simulates local network.
Local = Network{0, 0, 0}
//LAN simulates local area network network.
//LAN simulates local area network.
LAN = Network{100 * 1024, 2 * time.Millisecond, 1500}
//WAN simulates wide area network.
WAN = Network{20 * 1024, 30 * time.Millisecond, 1500}

@@ -46,9 +46,9 @@ type bufConn struct {
func (bufConn) Close() error { panic("unimplemented") }
func (bufConn) LocalAddr() net.Addr { panic("unimplemented") }
func (bufConn) RemoteAddr() net.Addr { panic("unimplemented") }
func (bufConn) SetDeadline(t time.Time) error { panic("unimplemneted") }
func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemneted") }
func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemneted") }
func (bufConn) SetDeadline(t time.Time) error { panic("unimplemented") }
func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemented") }
func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemented") }

func restoreHooks() func() {
s := sleep

@@ -152,7 +152,7 @@ func benchmarkIncrementUint64Map(b *testing.B, f func() incrementUint64Map) {
}
}

func BenchmarkMapWithSyncMutexContetion(b *testing.B) {
func BenchmarkMapWithSyncMutexContention(b *testing.B) {
benchmarkIncrementUint64Map(b, newMapWithLock)
}

@@ -293,7 +293,7 @@ type RunData struct {
Fiftieth time.Duration
// Ninetieth is the 90th percentile latency.
Ninetieth time.Duration
// Ninetyninth is the 99th percentile latency.
// NinetyNinth is the 99th percentile latency.
NinetyNinth time.Duration
// Average is the average latency.
Average time.Duration

@@ -141,7 +141,7 @@ func (s *workerServer) RunClient(stream testgrpc.WorkerService_RunClientServer)
var bc *benchmarkClient
defer func() {
// Shut down benchmark client when stream ends.
logger.Infof("shuting down benchmark client")
logger.Infof("shutting down benchmark client")
if bc != nil {
bc.shutdown()
}

@@ -160,7 +160,7 @@ func (s *workerServer) RunClient(stream testgrpc.WorkerService_RunClientServer)
case *testpb.ClientArgs_Setup:
logger.Infof("client setup received:")
if bc != nil {
logger.Infof("client setup received when client already exists, shuting down the existing client")
logger.Infof("client setup received when client already exists, shutting down the existing client")
bc.shutdown()
}
bc, err = startBenchmarkClient(t.Setup)

@@ -78,7 +78,7 @@ func (s *testBinLogSink) Write(e *binlogpb.GrpcLogEntry) error {

func (s *testBinLogSink) Close() error { return nil }

// Returns all client entris if client is true, otherwise return all server
// Returns all client entries if client is true, otherwise return all server
// entries.
func (s *testBinLogSink) logEntries(client bool) []*binlogpb.GrpcLogEntry {
logger := binlogpb.GrpcLogEntry_LOGGER_SERVER

@@ -1576,7 +1576,7 @@ func (ac *addrConn) tearDown(err error) {
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
// closing of transports is also taken care of by cancelation of cc.ctx.
// closing of transports is also taken care of by cancellation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport

@@ -642,7 +642,7 @@ func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) {
defer conn.Close()

if got := conn.dopts.minConnectTimeout(); got != mct {
t.Errorf("unexpect minConnectTimeout on the connection: %v, want %v", got, mct)
t.Errorf("unexpected minConnectTimeout on the connection: %v, want %v", got, mct)
}
}

@@ -49,7 +49,7 @@ func (k KeySizeError) Error() string {

// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
// The key argument should be 44 bytes, the first 32 bytes are used as a key
// for HKDF-expand and the remainining 12 bytes are used as a random mask for
// for HKDF-expand and the remaining 12 bytes are used as a random mask for
// the counter.
func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
k := len(key)

@@ -51,7 +51,7 @@ type aes128gcmRekey struct {

// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
// for ALTS record. The key argument should be 44 bytes, the first 32 bytes
// are used as a key for HKDF-expand and the remainining 12 bytes are used
// are used as a key for HKDF-expand and the remaining 12 bytes are used
// as a random mask for the counter.
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
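Both of the preceding hunks describe the same 44-byte key layout. A hypothetical helper (not part of the package) that makes the split explicit:

```go
package altsdemo

import "fmt"

// splitRekeyKey shows the layout the comments describe: the first 32
// bytes feed HKDF-expand, the remaining 12 bytes mask the record counter.
func splitRekeyKey(key []byte) (kdfKey, counterMask []byte, err error) {
	const keyLen = 32 + 12
	if len(key) != keyLen {
		return nil, nil, fmt.Errorf("unsupported key size: %d bytes, want %d", len(key), keyLen)
	}
	return key[:32], key[32:], nil
}
```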
@@ -248,7 +248,7 @@ func testWriteLargeData(t *testing.T, rp string) {
// buffer size.
clientConn, serverConn := newConnPair(rp, nil, nil)
// Message size is intentionally chosen to not be multiple of
// payloadLengthLimtit.
// payloadLengthLimit.
msgSize := altsWriteBufferMaxSize + (100 * 1024)
clientMsg := make([]byte, msgSize)
for i := 0; i < msgSize; i++ {

@@ -108,7 +108,7 @@ func (c *localTC) Clone() credentials.TransportCredentials {
}

// OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
// Since this feature is specific to TLS (SNI + hostname verification check), it does not take any effet for local credentials.
// Since this feature is specific to TLS (SNI + hostname verification check), it does not take any effect for local credentials.
func (c *localTC) OverrideServerName(serverNameOverride string) error {
c.info.ServerName = serverNameOverride
return nil

@@ -367,7 +367,7 @@ type requestParameters struct {
ActorTokenType string `json:"actor_token_type,omitempty"`
}

// nesponseParameters stores all attributes sent as JSON in a successful STS
// responseParameters stores all attributes sent as JSON in a successful STS
// response. These attributes are defined in
// https://tools.ietf.org/html/rfc8693#section-2.2.1.
type responseParameters struct {

@@ -188,7 +188,7 @@ for example in ${EXAMPLES[@]}; do
$(cat $CLIENT_LOG)
"
else
pass "client successfully communitcated with server"
pass "client successfully communicated with server"
fi

# Check server log for expected output if expecting an

@@ -61,8 +61,8 @@ this is examples/load_balancing (from :50051)

The second client is configured to use `round_robin`. `round_robin` connects to
all the addresses it sees, and sends an RPC to each backend one at a time in
order. E.g. the first RPC will be sent to backend-1, the second RPC will be be
sent to backend-2, and the third RPC will be be sent to backend-1 again.
order. E.g. the first RPC will be sent to backend-1, the second RPC will be
sent to backend-2, and the third RPC will be sent to backend-1 again.

```
this is examples/load_balancing (from :50051)

@@ -72,7 +72,7 @@ func main() {

fmt.Println()
fmt.Println("--- calling routeguide.RouteGuide/GetFeature ---")
// Make a routeguild client with the same ClientConn.
// Make a routeguide client with the same ClientConn.
rgc := ecpb.NewEchoClient(conn)
callUnaryEcho(rgc, "this is examples/multiplex")
}

@@ -59,7 +59,7 @@ func main() {
ticker := time.NewTicker(time.Second)
for range ticker.C {
func() {
// Use an anonymous function to ensure context cancelation via defer.
// Use an anonymous function to ensure context cancellation via defer.
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if _, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "test echo message"}); err != nil {

@@ -248,7 +248,7 @@ type binaryMethodLogger struct {
clientSide bool
}

// buildGCPLoggingEntry converts the binary log log entry into a gcp logging
// buildGCPLoggingEntry converts the binary log entry into a gcp logging
// entry.
func (bml *binaryMethodLogger) buildGCPLoggingEntry(ctx context.Context, c iblog.LogEntryConfig) gcplogging.Entry {
binLogEntry := bml.mlb.Build(c)

@@ -548,7 +548,7 @@ func (s) TestServerRPCEventsLogAll(t *testing.T) {
// Client and Server RPC Events configured to log. Both sides should log and
// share the exporter, so the exporter should receive the collective amount of
// calls for both a client stream (corresponding to a Client RPC Event) and a
// server stream (corresponding ot a Server RPC Event). The specificity of the
// server stream (corresponding to a Server RPC Event). The specificity of the
// entries are tested in previous tests.
func (s) TestBothClientAndServerRPCEvents(t *testing.T) {
fle := &fakeLoggingExporter{

@@ -191,7 +191,7 @@ func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) {
}
}

// TestRefuseStartWithExcludeAndWildCardAll tests the sceanrio where an
// TestRefuseStartWithExcludeAndWildCardAll tests the scenario where an
// observability configuration is provided with client RPC event specifying to
// exclude, and which matches on the '*' wildcard (any). This should cause an
// error when trying to start the observability system.

@@ -103,7 +103,7 @@ func Fatalf(format string, args ...any) {
}

// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
// It calle os.Exit()) with exit code 1.
// It calls os.Exit() with exit code 1.
func Fatalln(args ...any) {
grpclog.Logger.Fatalln(args...)
// Make sure fatal logs will exit.

@@ -604,7 +604,7 @@ func (s) TestPendingReplacedByAnotherPending(t *testing.T) {
if err != nil {
t.Fatalf("error constructing newSubConn in gsb: %v", err)
}
// This picker never returns an error, which can help this this test verify
// This picker never returns an error, which can help this test verify
// whether this cached state will get cleared on a new pending balancer
// (will replace it with a picker that always errors).
pendBal.updateState(balancer.State{

@@ -672,7 +672,7 @@ func (p *neverErrPicker) Pick(info balancer.PickInfo) (balancer.PickResult, erro

// TestUpdateSubConnStateRace tests the race condition when the graceful switch
// load balancer receives a SubConnUpdate concurrently with an UpdateState()
// call, which can cause the balancer to forward the update to to be closed and
// call, which can cause the balancer to forward the update to be closed and
// cleared. The balancer API guarantees to never call any method the balancer
// after a Close() call, and the test verifies that doesn't happen within the
// graceful switch load balancer.

@@ -204,7 +204,7 @@ type BalancerGroup struct {
// after it's closed.
//
// We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer
// may call back to balancer group inline. It causes deaclock if they
// may call back to balancer group inline. It causes deadlock if they
// require the same mutex).
//
// We should never need to hold multiple locks at the same time in this

@@ -218,7 +218,7 @@ type BalancerGroup struct {
// guards the map from SubConn to balancer ID, so updateSubConnState needs
// to hold it shortly to potentially delete from the map.
//
// UpdateState is called by the balancer state aggretator, and it will
// UpdateState is called by the balancer state aggregator, and it will
// decide when and whether to call.
//
// The corresponding boolean incomingStarted is used to stop further updates

@@ -292,7 +292,7 @@ func (bg *BalancerGroup) Start() {
// AddWithClientConn adds a balancer with the given id to the group. The
// balancer is built with a balancer builder registered with balancerName. The
// given ClientConn is passed to the newly built balancer instead of the
// onepassed to balancergroup.New().
// one passed to balancergroup.New().
//
// TODO: Get rid of the existing Add() API and replace it with this.
func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error {

@@ -165,7 +165,7 @@ func (s) TestBalancerGroup_start_close(t *testing.T) {
// - hold a lock and send updates to balancer (e.g. update resolved addresses)
// - the balancer calls back (NewSubConn or update picker) in line
//
// The callback will try to hold hte same lock again, which will cause a
// The callback will try to hold the same lock again, which will cause a
// deadlock.
//
// This test starts the balancer group with a test balancer, will updates picker

@@ -345,7 +345,7 @@ func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) {

// Sub-balancers in cache will be closed if not re-added within timeout, and
// subConns will be shut down.
func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) {
func (s) TestBalancerGroup_locality_caching_not_read_within_timeout(t *testing.T) {
_, _, cc, addrToSC := initBalancerGroupForCachingTest(t, time.Second)

// The sub-balancer is not re-added within timeout. The subconns should be

@@ -385,7 +385,7 @@ func (*noopBalancerBuilderWrapper) Name() string {

// After removing a sub-balancer, re-add with same ID, but different balancer
// builder. Old subconns should be shut down, and new subconns should be created.
func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) {
func (s) TestBalancerGroup_locality_caching_read_with_different_builder(t *testing.T) {
gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout)

// Re-add sub-balancer-1, but with a different balancer builder. The

@@ -46,7 +46,7 @@ type entry interface {

// channelMap is the storage data structure for channelz.
//
// Methods of channelMap can be divided in two two categories with respect to
// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods acquire the global lock.

@@ -153,7 +153,7 @@ func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) {
<-ctx.Done()
})

// Schedule a bunch of callbacks. These should be exeuted since the are
// Schedule a bunch of callbacks. These should be executed since they are
// scheduled before the serializer is closed.
const numCallbacks = 10
callbackCh := make(chan int, numCallbacks)

@@ -124,7 +124,7 @@ func (s) TestManager_Disabled(t *testing.T) {

// The idleness manager is explicitly not closed here. But since the manager
// is disabled, it will not start the run goroutine, and hence we expect the
// leakchecker to not find any leaked goroutines.
// leak checker to not find any leaked goroutines.
}

// TestManager_Enabled_TimerFires tests the case where the idle manager

@@ -242,7 +242,7 @@ func (s) TestManager_Enabled_ActiveSinceLastCheck(t *testing.T) {
case <-time.After(defaultTestShortTimeout):
}

// Since the unrary RPC terminated and we have no other active RPCs, the
// Since the unary RPC terminated and we have no other active RPCs, the
// channel must move to idle eventually.
select {
case <-enforcer.enterIdleCh:

@@ -1024,7 +1024,7 @@ func (s) TestCustomAuthority(t *testing.T) {
wantAuthority: "[::1]:53",
},
{
name: "ipv6 authority with brackers and non-default DNS port",
name: "ipv6 authority with brackets and non-default DNS port",
authority: "[::1]:123",
wantAuthority: "[::1]:123",
},

@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
// the TCP keealive interval and time parameters.
// the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)

@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
// the TCP keealive interval and time parameters.
// the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
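The unix and windows hunks above share one pattern. A condensed, Linux-only sketch of the whole dialer, assuming the surrounding function looks as shown (a sketch, not grpc-go's internal helper):

```go
package dialerdemo

import (
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// netDialerWithTCPKeepalive illustrates the pattern: a negative KeepAlive
// stops net.Dialer from overriding kernel parameters, and the Control hook
// turns SO_KEEPALIVE on so the OS defaults for the TCP keepalive interval
// and time apply.
func netDialerWithTCPKeepalive() *net.Dialer {
	return &net.Dialer{
		KeepAlive: -1, // negative: do not override kernel keepalive settings
		Control: func(_, _ string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
			})
		},
	}
}
```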
@@ -317,7 +317,7 @@ func (tcc *BalancerClientConn) WaitForPicker(ctx context.Context, f func(balance
// iteration until where it goes wrong.
//
// Step 2. the return values of f should be repetitions of the same permutation.
// E.g. if want is {a,a,b}, the check failes if f returns:
// E.g. if want is {a,a,b}, the check fails if f returns:
// - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not
// repeating the first iteration.
//

@@ -37,7 +37,7 @@ type StateChanger interface {
}

// StayConnected makes sc stay connected by repeatedly calling sc.Connect()
// until the state becomes Shutdown or until ithe context expires.
// until the state becomes Shutdown or until the context expires.
func StayConnected(ctx context.Context, sc StateChanger) {
for {
state := sc.GetState()
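A sketch of how the rest of this loop plausibly reads; it assumes StateChanger also exposes a WaitForStateChange method, which does not appear in the hunk:

```go
package testutilsdemo

import (
	"context"

	"google.golang.org/grpc/connectivity"
)

// stateChanger mirrors the interface above, plus the assumed
// WaitForStateChange method.
type stateChanger interface {
	Connect()
	GetState() connectivity.State
	WaitForStateChange(context.Context, connectivity.State) bool
}

// stayConnected is a sketch, not the library's code: reconnect on idle,
// stop once the state is Shutdown or the context expires.
func stayConnected(ctx context.Context, sc stateChanger) {
	for {
		state := sc.GetState()
		switch state {
		case connectivity.Idle:
			sc.Connect() // kick the channel out of idle
		case connectivity.Shutdown:
			return
		}
		// Assumed to return false once ctx expires.
		if !sc.WaitForStateChange(ctx, state) {
			return
		}
	}
}
```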
@@ -63,7 +63,7 @@ const (
// is required. Only the server presents an identity certificate in this
// configuration.
SecurityLevelTLS
// SecurityLevelMTLS is used when security ocnfiguration corresponding to
// SecurityLevelMTLS is used when security configuration corresponding to
// mTLS is required. Both client and server present identity certificates in
// this configuration.
SecurityLevelMTLS

@@ -789,7 +789,7 @@ func EndpointResourceWithOptions(opts EndpointOptions) *v3endpointpb.ClusterLoad

// DefaultServerListenerWithRouteConfigName returns a basic xds Listener
// resource to be used on the server side. The returned Listener resource
// contains a RouteCongiguration resource name that needs to be resolved.
// contains a RouteConfiguration resource name that needs to be resolved.
func DefaultServerListenerWithRouteConfigName(host string, port uint32, secLevel SecurityLevel, routeName string) *v3listenerpb.Listener {
return defaultServerListenerCommon(host, port, secLevel, routeName, false)
}

@@ -485,7 +485,7 @@ const (
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
// thereby closely resemebling to a round-robin scheduling over all streams. While
// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {

@@ -44,7 +44,7 @@ const (
ErrCodeCompression ErrCode = 0x9
ErrCodeConnect ErrCode = 0xa
ErrCodeEnhanceYourCalm ErrCode = 0xb
ErrCodeIndaequateSecurity ErrCode = 0xc
ErrCodeInadequateSecurity ErrCode = 0xc
ErrCodeHTTP11Required ErrCode = 0xd
)

@@ -61,7 +61,7 @@ var errorCodeNames = map[ErrCode]string{
ErrCodeCompression: "COMPRESSION_ERROR",
ErrCodeConnect: "CONNECT_ERROR",
ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
ErrCodeIndaequateSecurity: "INADEQUATE_SECURITY",
ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
}

@@ -50,7 +50,7 @@ func (s) TestErrorCodeString(t *testing.T) {
{err: ErrCodeCompression, want: "COMPRESSION_ERROR"},
{err: ErrCodeConnect, want: "CONNECT_ERROR"},
{err: ErrCodeEnhanceYourCalm, want: "ENHANCE_YOUR_CALM"},
{err: ErrCodeIndaequateSecurity, want: "INADEQUATE_SECURITY"},
{err: ErrCodeInadequateSecurity, want: "INADEQUATE_SECURITY"},
{err: ErrCodeHTTP11Required, want: "HTTP_1_1_REQUIRED"},
// Type casting known error case
{err: ErrCode(0x1), want: "PROTOCOL_ERROR"},

@@ -229,7 +229,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}(conn)

// The following defer and goroutine monitor the connectCtx for cancelation
// The following defer and goroutine monitor the connectCtx for cancellation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being

@@ -1222,7 +1222,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
if statusCode == codes.Canceled {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
// of this cancelation. Alter the status code accordingly.
// of this cancellation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}

@@ -1307,7 +1307,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
id := f.LastStreamID
if id > 0 && id%2 == 0 {
t.mu.Unlock()
t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
return
}
// A client can receive multiple GoAways from the server (see

@@ -408,7 +408,7 @@ func (s *Stream) TrailersOnly() bool {
return s.noHeaders
}

// Trailer returns the cached trailer metedata. Note that if it is not called
// Trailer returns the cached trailer metadata. Note that if it is not called
// after the entire stream is done, it could return an empty MD. Client
// side only.
// It can be safely read only after stream has ended that is either read

@@ -509,7 +509,7 @@ func (s *Stream) Read(p []byte) (n int, err error) {
return io.ReadFull(s.trReader, p)
}

// tranportReader reads all the data available for this Stream from the transport and
// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.

@@ -798,7 +798,7 @@ var (
// connection is draining. This could be caused by goaway or balancer
// removing the address.
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
// errStreamDone is returned from write at the client side to indiacte application
// errStreamDone is returned from write at the client side to indicate application
// layer of an error.
errStreamDone = errors.New("the stream is done")
// StatusGoAway indicates that the server sent a GOAWAY that included this

@@ -718,7 +718,7 @@ func (s) TestLargeMessageWithDelayRead(t *testing.T) {
t.Fatalf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
return
}
// Wait for server's handerler to be initialized
// Wait for server's handler to be initialized
select {
case <-ready:
case <-ctx.Done():

@@ -870,7 +870,7 @@ func (s) TestLargeMessageSuspension(t *testing.T) {
if err != nil {
t.Fatalf("failed to open stream: %v", err)
}
// Launch a goroutine simillar to the stream monitoring goroutine in
// Launch a goroutine similar to the stream monitoring goroutine in
// stream.go to keep track of context timeout and call CloseStream.
go func() {
<-ctx.Done()

@@ -1315,7 +1315,7 @@ func (s) TestClientHonorsConnectContext(t *testing.T) {
}
}()

// Test context cancelation.
// Test context cancellation.
timeBefore := time.Now()
connectCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
time.AfterFunc(100*time.Millisecond, cancel)

@@ -1328,7 +1328,7 @@ func (s) TestClientHonorsConnectContext(t *testing.T) {
}
t.Logf("NewClientTransport() = _, %v", err)
if time.Since(timeBefore) > 3*time.Second {
t.Fatalf("NewClientTransport returned > 2.9s after context cancelation")
t.Fatalf("NewClientTransport returned > 2.9s after context cancellation")
}

// Test context deadline.

@@ -513,7 +513,7 @@ func (c *Config) UnmarshalJSON(data []byte) error {
}
bc, err := parser.ParseConfig(nameAndConfig.Config)
if err != nil {
return fmt.Errorf("xds: config parsing for certifcate provider plugin %q failed during bootstrap: %v", name, err)
return fmt.Errorf("xds: config parsing for certificate provider plugin %q failed during bootstrap: %v", name, err)
}
cpcCfgs[instance] = bc
}

@@ -319,7 +319,7 @@ func (s) TestNewChainEngine(t *testing.T) {
},
},
{
name: "MatcherToNotPrinicipal",
name: "MatcherToNotPrincipal",
policies: []*v3rbacpb.RBAC{
{
Action: v3rbacpb.RBAC_ALLOW,

@@ -336,7 +336,7 @@ func (s) TestNewChainEngine(t *testing.T) {
},
},
},
// PrinicpalProductViewer tests the construction of a chained engine
// PrincipalProductViewer tests the construction of a chained engine
// with a policy that allows any downstream to send a GET request on a
// certain path.
{

@@ -60,7 +60,7 @@ const (
)

var (
caFile = flag.String("ca_file", "", "The file containning the CA root cert file")
caFile = flag.String("ca_file", "", "The file containing the CA root cert file")
useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true")
useALTS = flag.Bool("use_alts", false, "Connection uses ALTS if true (this option can only be used on GCP)")
customCredentialsType = flag.String("custom_credentials_type", "", "Custom creds to use, excluding TLS or ALTS")

@@ -132,7 +132,7 @@ func waitForFallbackAndDoRPCs(client testgrpc.TestServiceClient, fallbackDeadlin
for time.Now().Before(fallbackDeadline) {
g := doRPCAndGetPath(client, 20*time.Second)
if g == testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_FALLBACK {
infoLog.Println("Made one successul RPC to a fallback. Now expect the same for the rest.")
infoLog.Println("Made one successful RPC to a fallback. Now expect the same for the rest.")
fellBack = true
break
} else if g == testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND {

@@ -136,7 +136,7 @@ func newTestBufferPool() *testBufferPool {

// Tests that a buffer created with Copy, which when later freed, returns the underlying
// byte slice to the buffer pool.
func (s) TestBufer_CopyAndFree(t *testing.T) {
func (s) TestBuffer_CopyAndFree(t *testing.T) {
data := "abcd"
testPool := newTestBufferPool()

@@ -223,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
return copyOf(v)
}
for k, v := range md {
// Case insenitive comparison: MD is a map, and there's no guarantee
// Case insensitive comparison: MD is a map, and there's no guarantee
// that the MD attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
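The fallback scan exists because only the metadata helpers normalize keys to lowercase; an MD map built directly can carry mixed-case keys. A standalone illustration (the lookup helper here is hypothetical, not the function in the hunk):

```go
package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/metadata"
)

// lookup mimics the two-step search above: fast path on the canonical
// lowercase key, then a case-insensitive scan as a fallback.
func lookup(md metadata.MD, key string) []string {
	if v, ok := md[key]; ok {
		return v
	}
	for k, v := range md {
		if strings.EqualFold(k, key) {
			return v
		}
	}
	return nil
}

func main() {
	normalized := metadata.Pairs("User-Agent", "grpc-go") // key stored as "user-agent"
	direct := metadata.MD{"User-Agent": {"grpc-go"}}      // key kept as-is
	fmt.Println(lookup(normalized, "user-agent"))         // [grpc-go] via fast path
	fmt.Println(lookup(direct, "user-agent"))             // [grpc-go] via EqualFold scan
}
```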
@@ -108,7 +108,7 @@ type ServerMetricsRecorder interface {
// SetMemoryUtilization sets the memory utilization server metric. Must be
// in the range [0, 1].
SetMemoryUtilization(float64)
// DeleteMemoryUtilization deletes the memory utiliztion server metric to
// DeleteMemoryUtilization deletes the memory utilization server metric to
// prevent it from being sent.
DeleteMemoryUtilization()

@@ -300,7 +300,7 @@ func timerBeginIsBefore(ti *ppb.Timer, tj *ppb.Timer) bool {
return ti.BeginSec < tj.BeginSec
}

// streamStatsCatapulJSON receives a *snapshot and the name of a JSON file to
// streamStatsCatapultJSON receives a *snapshot and the name of a JSON file to
// write to. The grpc-go profiling snapshot is processed and converted to a
// JSON format that can be understood by trace-viewer.
func streamStatsCatapultJSON(s *snapshot, streamStatsCatapultJSONFileName string) (err error) {

@@ -18,7 +18,7 @@

// Package internal contains code that is shared by both reflection package and
// the test package. The packages are split in this way inorder to avoid
// depenedency to deprecated package github.com/golang/protobuf.
// dependency to deprecated package github.com/golang/protobuf.
package internal

import (

@@ -605,9 +605,9 @@ func testFileContainingSymbol(t *testing.T, stream v1reflectiongrpc.ServerReflec

func testFileContainingSymbolError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) {
for _, test := range []string{
"grpc.testing.SerchService",
"grpc.testing.SearchService_",
"grpc.testing.SearchService.SearchE",
"grpc.tesing.SearchResponse",
"grpc.testing_.SearchResponse",
"gpc.testing.ToBeExtended",
} {
if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{

@@ -120,7 +120,7 @@ XXXXX PleaseIgnoreUnused'
# Error for any package comments not in generated code.
noret_grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:"

# Ignore a false positive when operands have side affectes.
# Ignore a false positive when operands have side affects.
# TODO(https://github.com/dominikh/go-tools/issues/54): Remove this once the issue is fixed in staticcheck.
noret_grep "(SA4000)" "${SC_OUT}" | not grep -v -e "crl.go:[0-9]\+:[0-9]\+: identical expressions on the left and right side of the '||' operator (SA4000)"

@@ -1,5 +1,5 @@
base_dir = .
certificate = $base_dir/cacert.pem # The CA certifcate
certificate = $base_dir/cacert.pem # The CA certificate
private_key = $base_dir/cakey.pem # The CA private key
new_certs_dir = $base_dir # Location for new certs after signing
database = $base_dir/index.txt # Database index file

@@ -334,7 +334,7 @@ func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) {
// apply the grpclb policy. But since grpclb is not registered, it should
// fallback to the default LB policy which is pick_first. The ClientConn is
// also expected to filter out the grpclb address when sending the addresses
// list fo pick_first.
// list for pick_first.
grpclbAddr := []resolver.Address{{Addr: "non-existent-grpclb-server-address"}}
grpclbConfig := parseServiceConfig(t, r, `{"loadBalancingPolicy": "grpclb"}`)
state := resolver.State{ServiceConfig: grpclbConfig, Addresses: addrs}

@@ -109,7 +109,7 @@ type pipe struct {
mu sync.Mutex

// buf contains the data in the pipe. It is a ring buffer of fixed capacity,
// with r and w pointing to the offset to read and write, respsectively.
// with r and w pointing to the offset to read and write, respectively.
//
// Data is read between [r, w) and written to [w, r), wrapping around the end
// of the slice if necessary.
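A self-contained sketch of the invariant that comment describes, with a full flag added to disambiguate r == w (the real pipe's bookkeeping may differ):

```go
package pipedemo

// ring is a fixed-capacity ring buffer: reads consume [r, w) and writes
// fill [w, r), wrapping at the end of the slice. full disambiguates the
// r == w case, which otherwise means either empty or full.
type ring struct {
	buf  []byte
	r, w int
	full bool
}

func (p *ring) write(b []byte) int {
	n := 0
	for _, c := range b {
		if p.full {
			break // no space left in [w, r)
		}
		p.buf[p.w] = c
		p.w = (p.w + 1) % len(p.buf)
		p.full = p.w == p.r
		n++
	}
	return n
}

func (p *ring) read(b []byte) int {
	n := 0
	for n < len(b) && (p.full || p.r != p.w) {
		b[n] = p.buf[p.r]
		p.r = (p.r + 1) % len(p.buf)
		p.full = false
		n++
	}
	return n
}
```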
@@ -548,7 +548,7 @@ func (s) TestCZServerListenSocketDeletion(t *testing.T) {
s.Stop()
}

func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) {
func (s) TestCZRecursiveDeletionOfEntry(t *testing.T) {
// +--+TopChan+---+
// | |
// v v

@@ -1525,7 +1525,7 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) {
trace := tcs[0].Trace()
for _, e := range trace.Events {
if e.RefID == nestedConn && e.RefType != channelz.RefChannel {
return false, fmt.Errorf("nested channel trace event shoud have RefChannel as RefType")
return false, fmt.Errorf("nested channel trace event should have RefChannel as RefType")
}
}
ncm := channelz.GetChannel(nestedConn)

@@ -1608,7 +1608,7 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) {
trace := tcs[0].Trace()
for _, e := range trace.Events {
if e.RefID == subConn && e.RefType != channelz.RefSubChannel {
return false, fmt.Errorf("subchannel trace event shoud have RefType to be RefSubChannel")
return false, fmt.Errorf("subchannel trace event should have RefType to be RefSubChannel")
}
}
scm := channelz.GetSubChannel(subConn)

@@ -1989,7 +1989,7 @@ func (s) TestCZChannelConnectivityState(t *testing.T) {

// example:
// Channel Created
// Adressses resolved (from empty address state): "localhost:40467"
// Addresses resolved (from empty address state): "localhost:40467"
// SubChannel (id: 4[]) Created
// Channel's connectivity state changed to CONNECTING
// Channel's connectivity state changed to READY

@@ -511,13 +511,13 @@ type test struct {
customDialOptions []grpc.DialOption
resolverScheme string

// These are are set once startServer is called. The common case is to have
// These are set once startServer is called. The common case is to have
// only one testServer.
srv stopper
hSrv healthgrpc.HealthServer
srvAddr string

// These are are set once startServers is called.
// These are set once startServers is called.
srvs []stopper
hSrvs []healthgrpc.HealthServer
srvAddrs []string

@@ -2919,7 +2919,7 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
}
}

// TestMalformedHTTP2Metadata verfies the returned error when the client
// TestMalformedHTTP2Metadata verifies the returned error when the client
// sends an illegal metadata.
func (s) TestMalformedHTTP2Metadata(t *testing.T) {
for _, e := range listTestEnv() {

@@ -5849,7 +5849,7 @@ func (s) TestClientSettingsFloodCloseConn(t *testing.T) {
t.Fatalf("Unexpected frame: %v", f)
}

// Flood settings frames until a timeout occurs, indiciating the server has
// Flood settings frames until a timeout occurs, indicating the server has
// stopped reading from the connection, then close the conn.
for {
conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond))

@@ -1008,7 +1008,7 @@ func testHealthWatchMultipleClients(t *testing.T, e env) {
healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING)
}

// TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server
// TestHealthWatchSameStatus makes a streaming Watch() RPC on the health server
// and makes sure that the health status of the server is as expected after
// multiple calls to SetServingStatus with the same status.
func (s) TestHealthWatchSameStatus(t *testing.T) {

@@ -114,7 +114,7 @@ func (s) TestStreamCleanupAfterSendStatus(t *testing.T) {
// It will close the connection if there's no active streams. This won't
// happen because of the pending stream. But if there's a bug in stream
// cleanup that causes stream to be removed too aggressively, the connection
// will be closd and the stream will be broken.
// will be closed and the stream will be broken.
gracefulStopDone := make(chan struct{})
go func() {
defer close(gracefulStopDone)

@@ -150,7 +150,7 @@ func (s) TestClientSideFederation(t *testing.T) {
// supported with new xdstp style names for LDS only while using the old style
// for other resources. This test in addition also checks that when service name
// contains escapable characters, we "fully" encode it for looking up
// VirtualHosts in xDS RouteConfigurtion.
// VirtualHosts in xDS RouteConfiguration.
func (s) TestClientSideFederationWithOnlyXDSTPStyleLDS(t *testing.T) {
// Start a management server as a sophisticated authority.
const authority = "traffic-manager.xds.notgoogleapis.com"

@@ -595,7 +595,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) {
// This test tests that an RBAC Config with Action.LOG configured allows
// every RPC through. This maps to the line "At this time, if the
// RBAC.action is Action.LOG then the policy will be completely ignored,
// as if RBAC was not configurated." from A41
// as if RBAC was not configured." from A41
{
name: "action-log",
rbacCfg: &rpb.RBAC{

@@ -1,3 +1,3 @@
This directory contains x509 certificates used in cloud-to-prod interop tests.
For tests within gRPC-Go repo, please use the files in testsdata/x509
For tests within gRPC-Go repo, please use the files in testdata/x509
directory.

@@ -445,7 +445,7 @@ func (s) TestAggregatedClusterSuccess_SwitchBetweenLeafAndAggregate(t *testing.T
}

// Tests the scenario where an aggregate cluster exceeds the maximum depth,
// which is 16. Verfies that the channel moves to TRANSIENT_FAILURE, and the
// which is 16. Verifies that the channel moves to TRANSIENT_FAILURE, and the
// error is propagated to RPC callers. The test then modifies the graph to no
// longer exceed maximum depth, but be at the maximum allowed depth, and
// verifies that an RPC can be made successfully.

@@ -678,7 +678,7 @@ func (s) TestAggregatedClusterSuccess_IgnoreDups(t *testing.T) {
// cluster (EDS or Logical DNS), no configuration should be pushed to the child
// policy. The channel is expected to move to TRANSIENT_FAILURE and RPCs are
// expected to fail with code UNAVAILABLE and an error message specifying that
// the aggregate cluster grpah no leaf clusters. Then the test updates A -> B,
// the aggregate cluster graph has no leaf clusters. Then the test updates A -> B,
// where B is a leaf EDS cluster. Verifies that configuration is pushed to the
// child policy and that an RPC can be successfully made.
func (s) TestAggregatedCluster_NodeChildOfItself(t *testing.T) {

@@ -351,7 +351,7 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
}

// Closes all registered cluster wathers and removes them from the internal map.
// Closes all registered cluster watchers and removes them from the internal map.
//
// Only executed in the context of a serializer callback.
func (b *cdsBalancer) closeAllWatchers() {

@@ -372,7 +372,7 @@ func (s) TestSecurityConfigNotFoundInBootstrap(t *testing.T) {
mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

// Create bootstrap configuration pointing to the above management server,
// and one that does not have ceritificate providers configuration.
// and one that does not have certificate providers configuration.
nodeID := uuid.New().String()
bootstrapContents, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
Servers: []json.RawMessage{[]byte(fmt.Sprintf(`{

@@ -405,7 +405,7 @@ func (s) TestSecurityConfigNotFoundInBootstrap(t *testing.T) {
testutils.AwaitState(ctx, t, cc, connectivity.TransientFailure)
}

// A ceritificate provider builder that returns a nil Provider from the starter
// A certificate provider builder that returns a nil Provider from the starter
// func passed to certprovider.NewBuildableConfig().
type errCertProviderBuilder struct{}

@@ -433,7 +433,7 @@ func (s) TestCertproviderStoreError(t *testing.T) {
mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

// Create bootstrap configuration pointing to the above management server
// and one that includes ceritificate providers configuration for
// and one that includes certificate providers configuration for
// errCertProviderBuilder.
nodeID := uuid.New().String()
providerCfg := json.RawMessage(fmt.Sprintf(`{

@@ -481,7 +481,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) {
mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

// Create bootstrap configuration pointing to the above management server
// and one that includes ceritificate providers configuration.
// and one that includes certificate providers configuration.
nodeID := uuid.New().String()
bc := e2e.DefaultBootstrapContents(t, nodeID, mgmtServer.Address)

@@ -70,7 +70,7 @@ func Test(t *testing.T) {
// TestConfigUpdateWithSameLoadReportingServerConfig tests the scenario where
// the clusterimpl LB policy receives a config update with no change in the load
// reporting server configuration. The test verifies that the existing load
// repoting stream is not terminated and that a new load reporting stream is not
// reporting stream is not terminated and that a new load reporting stream is not
// created.
func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) {
// Create an xDS management server that serves ADS and LRS requests.

@@ -91,7 +91,7 @@ func Test_nameGenerator_generate(t *testing.T) {
},
input2: [][]xdsresource.Locality{
{{ID: internal.LocalityID{Zone: "L0"}}},
{{ID: internal.LocalityID{Zone: "L1"}}}, // This gets a newly generated name, sice "0-0" was already picked.
{{ID: internal.LocalityID{Zone: "L1"}}}, // This gets a newly generated name, since "0-0" was already picked.
{{ID: internal.LocalityID{Zone: "L2"}}},
},
want: []string{"priority-0-0", "priority-0-2", "priority-0-1"},

@@ -36,7 +36,7 @@ func NewWrapper() *Wrapper {
// update its internal perCluster store so that new stats will be added to the
// correct perCluster.
//
// Note that this struct is a temporary walkaround before we implement graceful
// Note that this struct is a temporary workaround before we implement graceful
// switch for EDS. Any update to the clusterName and serviceName is too early,
// the perfect timing is when the picker is updated with the new connection.
// This early update could cause picks for the old SubConn being reported to the

@@ -148,7 +148,7 @@ func (s) TestRingHash_ReconnectToMoveOutOfTransientFailure(t *testing.T) {
t.Fatal("EmptyCall RPC succeeded when the channel is in TRANSIENT_FAILURE")
}

// Restart the server listener. The ring_hash LB polcy is expected to
// Restart the server listener. The ring_hash LB policy is expected to
// attempt to reconnect on its own and come out of TRANSIENT_FAILURE, even
// without an RPC attempt.
lis.Restart()

@@ -795,7 +795,7 @@ func computeIdealNumberOfRPCs(t *testing.T, p, errorTolerance float64) int {
t.Fatal("p must be in (0, 1)")
}
numRPCs := math.Ceil(p * (1 - p) * 5. * 5. / errorTolerance / errorTolerance)
return int(numRPCs + 1000.) // add 1k as a buffer to avoid flakyness.
return int(numRPCs + 1000.) // add 1k as a buffer to avoid flakiness.
}

// setRingHashLBPolicyWithHighMinRingSize sets the ring hash policy with a high
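The 5. * 5. factor in that formula reads as z-squared for a five-sigma bound, making numRPCs the standard sample size n >= z^2 * p * (1 - p) / tolerance^2 for estimating a proportion. As a worked example, p = 0.5 with errorTolerance = 0.05 gives ceil(0.25 * 25 / 0.0025) = 2500 RPCs, returned as 3500 after the 1000-RPC flakiness buffer.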
@@ -1394,10 +1394,10 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicks(t *testing.T) {
 	})
 	defer backend.Stop()

-	nonExistantServerAddr := makeNonExistentBackends(t, 1)[0]
+	nonExistentServerAddr := makeNonExistentBackends(t, 1)[0]

 	const clusterName = "cluster"
-	endpoints := endpointResource(t, clusterName, []string{backend.Address, nonExistantServerAddr})
+	endpoints := endpointResource(t, clusterName, []string{backend.Address, nonExistentServerAddr})
 	cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{
 		ClusterName: clusterName,
 		ServiceName: clusterName,
@@ -1431,7 +1431,7 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicks(t *testing.T) {

 	rpcCtx, rpcCancel := context.WithCancel(ctx)
 	go func() {
-		rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistantServerAddr+"_0"))
+		rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistentServerAddr+"_0"))
 		_, err := client.EmptyCall(rpcCtx, &testpb.Empty{})
 		if status.Code(err) != codes.Canceled {
 			t.Errorf("Expected RPC to be canceled, got error: %v", err)
@@ -55,7 +55,7 @@ func (s) TestRingNew(t *testing.T) {
 		r := newRing(testSubConnMap, min, max, nil)
 		totalCount := len(r.items)
 		if totalCount < int(min) || totalCount > int(max) {
-			t.Fatalf("unexpect size %v, want min %v, max %v", totalCount, min, max)
+			t.Fatalf("unexpected size %v, want min %v, max %v", totalCount, min, max)
 		}
 		for _, a := range testAddrs {
 			var count int
@@ -117,7 +117,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) {
 	// "If absent, no enforcing RBAC policy will be applied" - RBAC
 	// Documentation for Rules field.
 	// "At this time, if the RBAC.action is Action.LOG then the policy will be
-	// completely ignored, as if RBAC was not configurated." - A41
+	// completely ignored, as if RBAC was not configured." - A41
 	if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG {
 		return config{}, nil
 	}
@@ -128,7 +128,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) {
 	ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "")
 	if err != nil {
 		// "At this time, if the RBAC.action is Action.LOG then the policy will be
-		// completely ignored, as if RBAC was not configurated." - A41
+		// completely ignored, as if RBAC was not configured." - A41
 		if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG {
 			return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err)
 		}
@@ -198,7 +198,7 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http
 	// "If absent, no enforcing RBAC policy will be applied" - RBAC
 	// Documentation for Rules field.
 	// "At this time, if the RBAC.action is Action.LOG then the policy will be
-	// completely ignored, as if RBAC was not configurated." - A41
+	// completely ignored, as if RBAC was not configured." - A41
 	if c.chainEngine == nil {
 		return nil, nil
 	}
@@ -47,7 +47,7 @@ type connWrapper struct {
 	// The specific filter chain picked for handling this connection.
 	filterChain *xdsresource.FilterChain

-	// A reference fo the listenerWrapper on which this connection was accepted.
+	// A reference to the listenerWrapper on which this connection was accepted.
 	parent *listenerWrapper

 	// The certificate providers created for this connection.
@@ -596,7 +596,7 @@ func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, re

 // sendDiscoveryRequestLocked sends a discovery request for the specified
 // resource type and resource names. Even though this method does not directly
-// access the resource cache, it is important that `resourcesMu` be beld when
+// access the resource cache, it is important that `resourcesMu` be held when
 // calling this method to ensure that a consistent snapshot of resource names is
 // being requested.
 func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) {
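A side note on the `Locked` suffix in this hunk's method name: in grpc-go, as in much Go code, it marks a method whose caller must already hold the mutex named in its doc comment; the method itself never locks, so a caller holding the lock can call it without deadlocking. A minimal sketch of the convention, with illustrative names that are not the actual grpc-go internals:

package main

import "sync"

type authority struct {
	resourcesMu sync.Mutex
	resources   map[string]string
}

// sendLocked requires a.resourcesMu to be held; it reads a consistent
// snapshot of resources precisely because the caller holds the mutex.
func (a *authority) sendLocked() {
	for name := range a.resources {
		_ = name // send a request for each resource name
	}
}

func (a *authority) update(name, val string) {
	a.resourcesMu.Lock()
	defer a.resourcesMu.Unlock()
	a.resources[name] = val
	a.sendLocked() // safe: mutex already held
}

func main() {
	a := &authority{resources: map[string]string{}}
	a.update("listener", "v1")
}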
@@ -211,7 +211,7 @@ func (s) TestReportLoad(t *testing.T) {
 	}

 	// Cancel the first load reporting call, and ensure that the stream does not
-	// close (because we have aother call open).
+	// close (because we have another call open).
 	cancelLRS1()
 	sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
 	defer sCancel()
@@ -64,7 +64,7 @@ const perRPCVerbosityLevel = 9

 // Transport provides a resource-type agnostic implementation of the xDS
 // transport protocol. At this layer, resource contents are supposed to be
-// opaque blobs which should be be meaningful only to the xDS data model layer
+// opaque blobs which should be meaningful only to the xDS data model layer
 // which is implemented by the `xdsresource` package.
 //
 // Under the hood, it owns the gRPC connection to a single management server and
@@ -86,7 +86,7 @@ type Transport struct {
 	lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine.

 	// These channels enable synchronization amongst the different goroutines
-	// spawned by the transport, and between asynchorous events resulting from
+	// spawned by the transport, and between asynchronous events resulting from
 	// receipt of responses from the management server.
 	adsStreamCh  chan adsStream    // New ADS streams are pushed here.
 	adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here.
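The comment fixed in this hunk points at the transport's concurrency design: work is funneled through channels into dedicated goroutines rather than guarded by locks. A toy sketch of that shape, using a plain buffered channel where the real code uses its internal buffer.Unbounded type (all names here are illustrative):

package main

import "fmt"

type request struct{ name string }

// run drains the request channel on a single goroutine, so request handling
// is naturally serialized without any mutex.
func run(requests <-chan request, done chan<- struct{}) {
	for req := range requests {
		fmt.Println("sending discovery request for", req.name)
	}
	close(done)
}

func main() {
	requests := make(chan request, 10)
	done := make(chan struct{})
	go run(requests, done)
	requests <- request{name: "listener-A"}
	requests <- request{name: "route-B"}
	close(requests)
	<-done // wait for the goroutine to finish draining
}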
@@ -278,7 +278,7 @@ func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) {
 // the received Cluster resource.
 func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) {
 	if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 {
-		return nil, fmt.Errorf("unsupport transport_socket_matches field is non-empty: %+v", tsm)
+		return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm)
 	}
 	// The Cluster resource contains a `transport_socket` field, which contains
 	// a oneof `typed_config` field of type `protobuf.Any`. The any proto
@@ -477,7 +477,7 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC
 	case len(validationCtx.GetVerifyCertificateHash()) != 0:
 		return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common)
 	case validationCtx.GetRequireSignedCertificateTimestamp().GetValue():
-		return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common)
+		return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common)
 	case validationCtx.GetCrl() != nil:
 		return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common)
 	case validationCtx.GetCustomValidatorConfig() != nil:
@@ -355,7 +355,7 @@ func (s) TestSecurityConfigFromCommonTLSContextUsingNewFields_ErrorCases(t *test
 				},
 			},
 		},
-		wantErr: "unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message",
+		wantErr: "unsupported require_signed_certificate_timestamp field in CommonTlsContext message",
 	},
 	{
 		name: "unsupported-field-crl-in-validation-context",