mirror of https://github.com/grpc/grpc-go.git
testing: update Go to 1.19 (#5717)
This commit is contained in:
parent eb8aa3192b
commit 778860e606

@@ -24,7 +24,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: 1.19
- name: Checkout repo
uses: actions/checkout@v2

@@ -44,31 +44,31 @@ jobs:
matrix:
include:
- type: vet+tests
goversion: 1.18
goversion: 1.19

- type: tests
goversion: 1.18
goversion: 1.19
testflags: -race

- type: tests
goversion: 1.18
goversion: 1.19
goarch: 386

- type: tests
goversion: 1.18
goversion: 1.19
goarch: arm64

- type: tests
goversion: 1.18

- type: tests
goversion: 1.17

- type: tests
goversion: 1.16

- type: tests
goversion: 1.15

- type: extras
goversion: 1.18
goversion: 1.19

steps:
# Setup the environment.

@@ -23,7 +23,7 @@
//
// - CSDS: https://github.com/grpc/proposal/blob/master/A40-csds-support.md
//
// Experimental
// # Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.
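
Most of the hunks below are mechanical fallout from this Go version bump: Go 1.19's gofmt rewrites doc comments so that section headings are spelled "# Heading" and command or list examples are indented, which is why lines such as "// Experimental" become "// # Experimental" throughout this commit. A minimal sketch of the convention, for a made-up package that is not part of this repository, looks like:

    // Package widget is a hypothetical package used only to illustrate the
    // Go 1.19 doc comment conventions applied throughout this commit.
    //
    // # Experimental
    //
    // Notice: This package is EXPERIMENTAL and may be changed or removed in a
    // later release.
    //
    // Supported modes:
    //   - "fast": trades accuracy for speed
    //   - "exact": always returns precise results
    package widget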

@@ -19,7 +19,7 @@
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -16,7 +16,7 @@

// Package authz exposes methods to manage authorization within gRPC.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed
// in a later release.

@@ -48,7 +48,7 @@ type BackoffConfig struct {
// here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error {

// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker is
// - errPicker if the balancer is in TransientFailure,
// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
b.picker = NewErrPicker(b.mergeErrors())

@@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct {
// RecordTransition records state change happening in subConn and based on that
// it evaluates what aggregated state should be.
//
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
// - Else if at least one SubConn is Idle, the aggregated state is Idle;
// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
//
// Shutdown is not considered.
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
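
The precedence documented in the hunk above is a straight ranking over per-state SubConn counts. A hedged sketch of that rule, with invented counter names (the real evaluator maintains these counts internally):

    package sketch

    import "google.golang.org/grpc/connectivity"

    // aggregate sketches the precedence documented above; the arguments are
    // assumed counters of SubConns currently in each state.
    func aggregate(numReady, numConnecting, numIdle uint64) connectivity.State {
        switch {
        case numReady > 0:
            return connectivity.Ready
        case numConnecting > 0:
            return connectivity.Connecting
        case numIdle > 0:
            return connectivity.Idle
        default:
            return connectivity.TransientFailure
        }
    }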

@@ -52,6 +52,7 @@ type LoadBalanceRequest struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to LoadBalanceRequestType:
//
// *LoadBalanceRequest_InitialRequest
// *LoadBalanceRequest_ClientStats
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`

@@ -340,6 +341,7 @@ type LoadBalanceResponse struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to LoadBalanceResponseType:
//
// *LoadBalanceResponse_InitialResponse
// *LoadBalanceResponse_ServerList
// *LoadBalanceResponse_FallbackResponse

@@ -19,7 +19,8 @@
// Package grpclb defines a grpclb balancer.
//
// To install grpclb balancer, import this package as:
//
// import _ "google.golang.org/grpc/balancer/grpclb"
package grpclb

import (
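
As the package comment above notes, importing the package is all that is needed to register the balancer. A minimal, hypothetical program showing the blank import:

    package main

    import (
        _ "google.golang.org/grpc/balancer/grpclb" // registers the grpclb balancer on import
    )

    func main() {}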

@@ -229,8 +230,9 @@ type lbBalancer struct {

// regeneratePicker takes a snapshot of the balancer, and generates a picker from
// it. The picker
// - always returns ErrTransientFailure if the balancer is in TransientFailure,
// - does two layer roundrobin pick otherwise.
//
// Caller must hold lb.mu.
func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
if lb.state == connectivity.TransientFailure {

@@ -290,14 +292,14 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
// fallback and grpclb). lb.scState contains states for all SubConns, including
// those in cache (SubConns are cached for 10 seconds after remove).
//
// The aggregated state is:
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting;
// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE,
// they start to connect immediately. But there's a race between the overall
// state is reported, and when the new SubConn state arrives. And SubConns
// never go back to IDLE.
// - Else the aggregated state is TransientFailure.
func (lb *lbBalancer) aggregateSubConnStates() connectivity.State {
var numConnecting uint64

@@ -426,7 +426,6 @@ func (b *rlsBalancer) ExitIdle() {

// sendNewPickerLocked pushes a new picker on to the channel.
//
// Note that regardless of what connectivity state is reported, the policy will
// return its own picker, and not a picker that unconditionally queues
// (typically used for IDLE or CONNECTING) or a picker that unconditionally

@@ -485,14 +484,14 @@ func (b *rlsBalancer) sendNewPicker() {
}

// The aggregated connectivity state reported is determined as follows:
// - If there is at least one child policy in state READY, the connectivity
// state is READY.
// - Otherwise, if there is at least one child policy in state CONNECTING, the
// connectivity state is CONNECTING.
// - Otherwise, if there is at least one child policy in state IDLE, the
// connectivity state is IDLE.
// - Otherwise, all child policies are in TRANSIENT_FAILURE, and the
// connectivity state is TRANSIENT_FAILURE.
//
// If the RLS policy has no child policies and no configured default target,
// then we will report connectivity state IDLE.

@@ -542,9 +541,9 @@ func (b *rlsBalancer) UpdateState(id string, state balancer.State) {
// This method is invoked by the BalancerGroup whenever a child policy sends a
// state update. We cache the child policy's connectivity state and picker for
// two reasons:
// - to suppress connectivity state transitions from TRANSIENT_FAILURE to states
// other than READY
// - to delegate picks to child policies
func (b *rlsBalancer) handleChildPolicyStateUpdate(id string, newState balancer.State) {
b.stateMu.Lock()
defer b.stateMu.Unlock()

@@ -113,33 +113,36 @@ type lbConfigJSON struct {
// ParseConfig parses the JSON load balancer config provided into an
// internal form or returns an error if the config is invalid.
//
// When parsing a config update, the following validations are performed:
// - routeLookupConfig:
// - grpc_keybuilders field:
// - must have at least one entry
// - must not have two entries with the same `Name`
// - within each entry:
// - must have at least one `Name`
// - must not have a `Name` with the `service` field unset or empty
// - within each `headers` entry:
// - must not have `required_match` set
// - must not have `key` unset or empty
// - across all `headers`, `constant_keys` and `extra_keys` fields:
// - must not have the same `key` specified twice
// - no `key` must be the empty string
// - `lookup_service` field must be set and must parse as a target URI
// - if `max_age` > 5m, it should be set to 5 minutes
// - if `stale_age` > `max_age`, ignore it
// - if `stale_age` is set, then `max_age` must also be set
// - ignore `valid_targets` field
// - `cache_size_bytes` field must have a value greater than 0, and if its
// value is greater than 5M, we cap it at 5M
//
// - routeLookupChannelServiceConfig:
// - if specified, must parse as valid service config
//
// - childPolicy:
// - must find a valid child policy with a valid config
//
// - childPolicyConfigTargetFieldName:
// - must be set and non-empty
func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
logger.Infof("Received JSON service config: %v", pretty.ToJSON(c))
cfgJSON := &lbConfigJSON{}
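
The max_age/stale_age rules listed above reduce to a small clamping step. A hedged sketch of that clamping, with invented names (the real parser operates on the unmarshalled JSON struct):

    package sketch

    import "time"

    // maxMaxAge mirrors the five minute cap described above.
    const maxMaxAge = 5 * time.Minute

    // sanitizeAges applies the documented rules: cap max_age at five minutes
    // and ignore stale_age when it exceeds max_age. Zero means "unset" here.
    func sanitizeAges(maxAge, staleAge time.Duration) (time.Duration, time.Duration) {
        if maxAge > maxMaxAge {
            maxAge = maxMaxAge
        }
        if staleAge > maxAge {
            staleAge = 0
        }
        return maxAge, staleAge
    }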

@@ -220,12 +220,12 @@ func startManualResolverWithConfig(t *testing.T, rlsConfig *e2e.RLSConfig) *manu
//
// There are many instances where it can take a while before the attempted RPC
// reaches the expected backend. Examples include, but are not limited to:
// - control channel is changed in a config update. The RLS LB policy creates a
// new control channel, and sends a new picker to gRPC. But it takes a while
// before gRPC actually starts using the new picker.
// - test is waiting for a cache entry to expire after which we expect a
// different behavior because we have configured the fake RLS server to return
// different backends.
//
// Therefore, we do not return an error when the RPC fails. Instead, we wait for
// the context to expire before failing.

@@ -45,21 +45,21 @@ const (
// The throttler has the following knobs for which we will use defaults for
// now. If there is a need to make them configurable at a later point in time,
// support for the same will be added.
// * Duration: amount of recent history that will be taken into account for
// making client-side throttling decisions. A default of 30 seconds is used.
// * Bins: number of bins to be used for bucketing historical data. A default
// of 100 is used.
// * RatioForAccepts: ratio by which accepts are multiplied, typically a value
// slightly larger than 1.0. This is used to make the throttler behave as if
// the backend had accepted more requests than it actually has, which lets us
// err on the side of sending to the backend more requests than we think it
// will accept for the sake of speeding up the propagation of state. A
// default of 2.0 is used.
// * RequestsPadding: is used to decrease the (client-side) throttling
// probability in the low QPS regime (to speed up propagation of state), as
// well as to safeguard against hitting a client-side throttling probability
// of 100%. The weight of this value decreases as the number of requests in
// recent history grows. A default of 8 is used.
// - Duration: amount of recent history that will be taken into account for
// making client-side throttling decisions. A default of 30 seconds is used.
// - Bins: number of bins to be used for bucketing historical data. A default
// of 100 is used.
// - RatioForAccepts: ratio by which accepts are multiplied, typically a value
// slightly larger than 1.0. This is used to make the throttler behave as if
// the backend had accepted more requests than it actually has, which lets us
// err on the side of sending to the backend more requests than we think it
// will accept for the sake of speeding up the propagation of state. A
// default of 2.0 is used.
// - RequestsPadding: is used to decrease the (client-side) throttling
// probability in the low QPS regime (to speed up propagation of state), as
// well as to safeguard against hitting a client-side throttling probability
// of 100%. The weight of this value decreases as the number of requests in
// recent history grows. A default of 8 is used.
//
// The adaptive throttler attempts to estimate the probability that a request
// will be throttled using recent history. Server requests (both throttled and

@@ -45,7 +45,7 @@ func (a AddrInfo) Equal(o interface{}) bool {
// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is
// updated with addrInfo.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -57,7 +57,7 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address {
// GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of
// addr.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -21,10 +21,10 @@ Package main provides benchmark with setting flags.

An example to run some benchmarks with profiling enabled:

go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \
-compression=gzip -maxConcurrentCalls=1 -trace=off \
-reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \
-cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result

As a suggestion, when creating a branch, you can run this benchmark and save the result
file "-resultFile=basePerf", and later when you are at the middle of the work or finish the

@@ -32,10 +32,11 @@ work, you can get the benchmark result and compare it with the base anytime.

Assume there are two result files names as "basePerf" and "curPerf" created by adding
-resultFile=basePerf and -resultFile=curPerf.

To format the curPerf, run:
go run benchmark/benchresult/main.go curPerf
To observe how the performance changes based on a base result, run:
go run benchmark/benchresult/main.go basePerf curPerf
*/
package main

@@ -18,12 +18,14 @@

/*
To format the benchmark result:

go run benchmark/benchresult/main.go resultfile

To see the performance change based on a old result:

go run benchmark/benchresult/main.go resultfile_old resultfile

It will print the comparison result of intersection benchmarks between two files.
*/
package main

@@ -20,6 +20,7 @@
Package main provides a server used for benchmarking. It launches a server
which is listening on port 50051. An example to start the server can be found
at:

go run benchmark/server/main.go -test_name=grpc_test

After starting the server, the client can be run separately and used to test

@@ -261,6 +261,7 @@ type GrpcLogEntry struct {
// according to the type of the log entry.
//
// Types that are assignable to Payload:
//
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
// *GrpcLogEntry_Message

@@ -694,12 +695,12 @@ func (x *Message) GetData() []byte {
// Header keys added by gRPC are omitted. To be more specific,
// implementations will not log the following entries, and this is
// not to be treated as a truncation:
// - entries handled by grpc that are not user visible, such as those
// that begin with 'grpc-' (with exception of grpc-trace-bin)
// or keys like 'lb-token'
// - transport specific entries, including but not limited to:
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
// - entries added for call credentials
//
// Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because

@@ -23,7 +23,7 @@
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
// the `internal/channelz` package.
//
// Experimental
// # Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.

@@ -514,6 +514,7 @@ type ChannelTraceEvent struct {
// created.
//
// Types that are assignable to ChildRef:
//
// *ChannelTraceEvent_ChannelRef
// *ChannelTraceEvent_SubchannelRef
ChildRef isChannelTraceEvent_ChildRef `protobuf_oneof:"child_ref"`

@@ -1338,6 +1339,7 @@ type Address struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to Address:
//
// *Address_TcpipAddress
// *Address_UdsAddress_
// *Address_OtherAddress_

@@ -1433,6 +1435,7 @@ type Security struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to Model:
//
// *Security_Tls_
// *Security_Other
Model isSecurity_Model `protobuf_oneof:"model"`

@@ -2908,6 +2911,7 @@ type Security_Tls struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to CipherSuite:
//
// *Security_Tls_StandardName
// *Security_Tls_OtherName
CipherSuite isSecurity_Tls_CipherSuite `protobuf_oneof:"cipher_suite"`

@@ -503,7 +503,7 @@ type ClientConn struct {
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec

// GetState returns the connectivity.State of ClientConn.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.

@@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State {
// the channel is idle. Does not wait for the connection attempts to begin
// before returning.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.

@@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {

// Target returns the target string of the ClientConn.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -831,9 +831,9 @@ func equalAddresses(a, b []resolver.Address) bool {
//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
// - If true, it updates ac.addrs and returns true. The ac will keep using
// the existing connection.
// - If false, it does nothing and returns false.
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
ac.mu.Lock()
defer ac.mu.Unlock()

@@ -998,7 +998,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
// However, if a previously unavailable network becomes available, this may be
// used to trigger an immediate reconnect.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -19,14 +19,17 @@
// protoc-gen-go-grpc is a plugin for the Google protocol buffer compiler to
// generate Go code. Install it by building this program and making it
// accessible within your PATH with the name:
//
// protoc-gen-go-grpc
//
// The 'go-grpc' suffix becomes part of the argument for the protocol compiler,
// such that it can be invoked as:
//
// protoc --go-grpc_out=. path/to/file.proto
//
// This generates Go service definitions for the protocol buffer defined by
// file.proto. With that input, the output will be written to:
//
// path/to/file_grpc.pb.go
package main

@@ -216,6 +216,7 @@ type Identity struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to IdentityOneof:
//
// *Identity_ServiceAccount
// *Identity_Hostname
IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"`

@@ -664,6 +665,7 @@ type HandshakerReq struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to ReqOneof:
//
// *HandshakerReq_ClientStart
// *HandshakerReq_ServerStart
// *HandshakerReq_Next

@@ -40,6 +40,7 @@ const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com"
// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_",
// use TLS
// - otherwise, use ALTS
//
// - else, do TLS
//
// On the server, ServerHandshake always does TLS.

@@ -23,7 +23,7 @@
// reported. If local credentials is not used in local connections
// (local TCP or UDS), it will fail.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -19,7 +19,7 @@
// Package sts implements call credentials using STS (Security Token Service) as
// defined in https://tools.ietf.org/html/rfc8693.
//
// Experimental
// # Experimental
//
// Notice: All APIs in this package are experimental and may be changed or
// removed in a later release.

@@ -245,12 +245,12 @@ func (c *callCreds) cachedMetadata() map[string]string {

// constructRequest creates the STS request body in JSON based on the provided
// options.
// - Contents of the subjectToken are read from the file specified in
// options. If we encounter an error here, we bail out.
// - Contents of the actorToken are read from the file specified in options.
// If we encounter an error here, we ignore this field because this is
// optional.
// - Most of the other fields in the request come directly from options.
//
// A new HTTP request is created by calling http.NewRequestWithContext() and
// passing the provided context, thereby enforcing any timeouts specified in

@@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -31,11 +31,11 @@ import (
//
// Provider implementations which choose to use a Distributor should do the
// following:
// - create a new Distributor using the NewDistributor() function.
// - invoke the Set() method whenever they have new key material or errors to
// report.
// - delegate to the distributor when handing calls to KeyMaterial().
// - invoke the Stop() method when they are done using the distributor.
type Distributor struct {
// mu protects the underlying key material.
mu sync.Mutex

@@ -19,7 +19,7 @@
// Package pemfile provides a file watching certificate provider plugin
// implementation which works for files with PEM contents.
//
// Experimental
// # Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.

@@ -18,7 +18,7 @@

// Package certprovider defines APIs for Certificate Providers in gRPC.
//
// Experimental
// # Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.

@@ -19,7 +19,7 @@
// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -19,7 +19,7 @@
// Package gzip implements and registers the gzip compressor
// during the initialization.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -19,7 +19,7 @@
// Package observability implements the tracing, metrics, and logging data
// collection, and provides controlling knobs via a config file.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -242,7 +242,7 @@ func (g *loggerT) V(l int) bool {
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -52,7 +52,8 @@ func TestLoggerV2Severity(t *testing.T) {
}

// check if b is in the format of:
//
// 2017/04/07 14:55:42 WARNING: WARNING
func checkLogForSeverity(s int, b []byte) error {
expected := regexp.MustCompile(fmt.Sprintf(`^[0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s: %s\n$`, severityName[s], severityName[s]))
if m := expected.Match(b); !m {

@@ -185,19 +185,19 @@ func (sbc *subBalancerWrapper) stopBalancer() {
// intended to be used directly as a balancer. It's expected to be used as a
// sub-balancer manager by a high level balancer.
//
// Updates from ClientConn are forwarded to sub-balancers
// - service config update
// - address update
// - subConn state change
// - find the corresponding balancer and forward
//
// Actions from sub-balances are forwarded to parent ClientConn
// - new/remove SubConn
// - picker update and health states change
// - sub-pickers are sent to an aggregator provided by the parent, which
// will group them into a group-picker. The aggregated connectivity state is
// also handled by the aggregator.
// - resolveNow
//
// Sub-balancers are only built when the balancer group is started. If the
// balancer group is closed, the sub-balancers are also closed. And it's

@@ -167,8 +167,9 @@ func (s) TestBalancerGroup_start_close(t *testing.T) {
// into balancer group inline when it gets an update.
//
// The potential deadlock can happen if we
// - hold a lock and send updates to balancer (e.g. update resolved addresses)
// - the balancer calls back (NewSubConn or update picker) in line
//
// The callback will try to hold the same lock again, which will cause a
// deadlock.
//

@@ -30,15 +30,15 @@ import (
// to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
// - "" Nothing will be logged
// - "*" All headers and messages will be fully logged.
// - "*{h}" Only headers will be logged.
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
// - "Foo/*" Logs every method in service Foo
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
// /Foo/Bar, logs all headers and messages in every other method in service
// Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.

@@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) {

// deleteSelfIfReady tries to delete the channel itself from the channelz database.
// The delete process includes two steps:
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
// parent's child list.
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
// will return entry not found error.
func (c *channel) deleteSelfIfReady() {
if !c.deleteSelfFromTree() {
return

@@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) {

// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
// The delete process includes two steps:
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
// its parent's child list.
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
// by id will return entry not found error.
func (sc *subChannel) deleteSelfIfReady() {
if !sc.deleteSelfFromTree() {
return

@@ -110,7 +110,7 @@ type LoggerV2 interface {
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

@@ -79,10 +79,12 @@ func getTestFunc(t *testing.T, xv reflect.Value, name string) func(*testing.T) {
// functions, respectively.
//
// For example usage, see example_test.go. Run it using:
//
// $ go test -v -run TestExample .
//
// To run a specific test/subtest:
//
// $ go test -v -run 'TestExample/^Something$' .
func RunSubTests(t *testing.T, x interface{}) {
xt := reflect.TypeOf(x)
xv := reflect.ValueOf(x)

@@ -25,7 +25,6 @@ import (

// ParseMethod splits service and method from the input. It expects format
// "/service/method".
//
func ParseMethod(methodName string) (service, method string, _ error) {
if !strings.HasPrefix(methodName, "/") {
return "", "", errors.New("invalid method name: should start with /")

@@ -70,26 +70,29 @@ func Set(addr resolver.Address, path []string) resolver.Address {
//
// Input:
// [
// {addr0, path: [p0, wt0]}
// {addr1, path: [p0, wt1]}
// {addr2, path: [p1, wt2]}
// {addr3, path: [p1, wt3]}
// ]
//
// Addresses will be split into p0/p1, and the p0/p1 will be removed from the
// path.
//
// Output:
// {
// p0: [
// {addr0, path: [wt0]},
// {addr1, path: [wt1]},
// ],
// p1: [
// {addr2, path: [wt2]},
// {addr3, path: [wt3]},
// ],
// }
//
// If hierarchical path is not set, or has no path in it, the address is
// dropped.
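
The Input/Output example above is a group-by on the first element of each address's hierarchy path. A hedged sketch using plain stand-in types rather than the real resolver.Address and attributes plumbing:

    package sketch

    // addr stands in for resolver.Address; path plays the role of the
    // hierarchical path attribute described above.
    type addr struct {
        name string
        path []string
    }

    // group splits addresses by the first path element and strips that element,
    // mirroring the Input/Output example above. Addresses with an empty path
    // are dropped.
    func group(addrs []addr) map[string][]addr {
        out := make(map[string][]addr)
        for _, a := range addrs {
            if len(a.path) == 0 {
                continue
            }
            head := a.path[0]
            a.path = a.path[1:]
            out[head] = append(out[head], a)
        }
        return out
    }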

@@ -33,48 +33,48 @@ import (
//
// Several other approaches were considered before arriving at this:
//
// 1. Using a CGO module: CGO usually has access to some things that regular
// Go does not. Till go1.4, CGO used to have access to the goroutine struct
// because the Go runtime was written in C. However, 1.5+ uses a native Go
// runtime; as a result, CGO does not have access to the goroutine structure
// anymore in modern Go. Besides, CGO interop wasn't fast enough (estimated
// to be ~170ns/op). This would also make building grpc require a C
// compiler, which isn't a requirement currently, breaking a lot of stuff.
//
// 2. Using runtime.Stack stacktrace: While this would remove the need for a
// modified Go runtime, this is ridiculously slow, thanks to all the
// string processing shenanigans required to extract the goroutine ID (about
// ~2000ns/op).
//
// 3. Using Go version-specific build tags: For any given Go version, the
// goroutine struct has a fixed structure. As a result, the goroutine ID
// could be extracted if we know the offset using some assembly. This would
// be faster than #1 and #2, but is harder to maintain. This would require
// special Go code that's both architecture-specific and go version-specific
// (a quadratic number of variants to maintain).
//
// 4. This approach, which requires a simple modification [1] to the Go runtime
// to expose the current goroutine's ID. This is the chosen approach and it
// takes about ~2 ns/op, which is negligible in the face of the tens of
// microseconds that grpc takes to complete a RPC request.
//
// [1] To make the goroutine ID visible to Go programs apply the following
// change to the runtime2.go file in your Go runtime installation:
//
// diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
// --- a/src/runtime/runtime2.go
// +++ b/src/runtime/runtime2.go
// @@ -392,6 +392,10 @@ type stack struct {
// hi uintptr
// }
//
// +func Goid() int64 {
// + return getg().goid
// +}
// +
// type g struct {
// // Stack parameters.
// // stack describes the actual stack memory: [stack.lo, stack.hi).
//
// The exposed runtime.Goid() function will return an int64 goroutine ID.
func goid() int64 {

@@ -204,8 +204,10 @@ func (x *GrpcKeyBuilder) GetConstantKeys() map[string]string {
//
// For a service where the project id can be expressed either as a subdomain or
// in the path, separate HttpKeyBuilders must be used:
//
// host_pattern: 'example.com' path_pattern: '/{id}/{object}/**'
// host_pattern: '{id}.example.com' path_pattern: '/{object}/**'
//
// If the host is exactly 'example.com', the first path segment will be used as
// the id and the second segment as the object. If the host has a subdomain, the
// subdomain will be used as the id and the first segment as the object. If

@@ -223,7 +225,7 @@ type HttpKeyBuilder struct {
// - "*": Matches any single label.
// - "**": Matches zero or more labels (first or last part of host only).
// - "{<name>=...}": One or more label capture, where "..." can be any
// template that does not include a capture.
// - "{<name>}": A single label capture. Identical to {<name>=*}.
//
// Examples:

@@ -242,8 +244,9 @@ type HttpKeyBuilder struct {
// - "*": Matches any single segment.
// - "**": Matches zero or more segments (first or last part of path only).
// - "{<name>=...}": One or more segment capture, where "..." can be any
// template that does not include a capture.
// - "{<name>}": A single segment capture. Identical to {<name>=*}.
//
// A custom method may also be specified by appending ":" and the custom
// method name or "*" to indicate any custom method (including no custom
// method). For example, "/*/projects/{project_id}/**:*" extracts

@@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
// config. This method iterates through that list in order, and stops at the
// first policy that is supported.
// - If the config for the first supported policy is invalid, the whole service
// config is invalid.
// - If the list doesn't contain any supported policy, the whole service config
// is invalid.
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
var ir intermediateBalancerConfig
err := json.Unmarshal(b, &ir)

@@ -290,16 +290,16 @@ func (tcc *TestClientConn) WaitForPicker(ctx context.Context, f func(balancer.Pi
// Step 1. the return values of f should form a permutation of all elements in
// want, but not necessary in the same order. E.g. if want is {a,a,b}, the check
// fails if f returns:
// - {a,a,a}: third a is returned before b
// - {a,b,b}: second b is returned before the second a
//
// If error is found in this step, the returned error contains only the first
// iteration until where it goes wrong.
//
// Step 2. the return values of f should be repetitions of the same permutation.
// E.g. if want is {a,a,b}, the check fails if f returns:
// - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not
// repeating the first iteration.
//
// If error is found in this step, the returned error contains the first
// iteration + the second iteration until where it goes wrong.

@@ -442,10 +442,10 @@ func (ht *serverHandlerTransport) Drain() {
// mapRecvMsgError returns the non-nil err into the appropriate
// error value as expected by callers of *grpc.parser.recvMsg.
// In particular, it can only be:
// * io.EOF
// * io.ErrUnexpectedEOF
// * of type transport.ConnectionError
// * an error from the status package
// - io.EOF
// - io.ErrUnexpectedEOF
// - of type transport.ConnectionError
// - an error from the status package
func mapRecvMsgError(err error) error {
if err == io.EOF || err == io.ErrUnexpectedEOF {
return err

@@ -661,13 +661,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
// NewStream errors result in transparent retry, as they mean nothing went onto
// the wire. However, there are two notable exceptions:
//
// 1. If the stream headers violate the max header list size allowed by the
// server. It's possible this could succeed on another transport, even if
// it's unlikely, but do not transparently retry.
// 2. If the credentials errored when requesting their headers. In this case,
// it's possible a retry can fix the problem, but indefinitely transparently
// retrying is not appropriate as it is likely the credentials, if they can
// eventually succeed, would need I/O to do so.
type NewStreamError struct {
Err error

@@ -2005,7 +2005,7 @@ func (s) TestPingPong1MB(t *testing.T) {
runPingPongTest(t, 1048576)
}

//This is a stress-test of flow control logic.
// This is a stress-test of flow control logic.
func runPingPongTest(t *testing.T, msgSize int) {
server, client, cancel := setUp(t, 0, 0, pingpong)
defer cancel()

@@ -300,6 +300,7 @@ type LoadParams struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to Load:
//
// *LoadParams_ClosedLoop
// *LoadParams_Poisson
Load isLoadParams_Load `protobuf_oneof:"load"`

@@ -445,6 +446,7 @@ type ChannelArg struct {

Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Types that are assignable to Value:
//
// *ChannelArg_StrValue
// *ChannelArg_IntValue
Value isChannelArg_Value `protobuf_oneof:"value"`

@@ -834,6 +836,7 @@ type ClientArgs struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to Argtype:
//
// *ClientArgs_Setup
// *ClientArgs_Mark
Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"`

@@ -1061,6 +1064,7 @@ type ServerArgs struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to Argtype:
//
// *ServerArgs_Setup
// *ServerArgs_Mark
Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"`

@@ -148,6 +148,7 @@ type Metric struct {

Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Types that are assignable to Value:
//
// *Metric_Count
// *Metric_Histogram
Value isMetric_Value `protobuf_oneof:"value"`

@@ -43,10 +43,9 @@ const _ = proto.ProtoPackageIsVersion4
// messages in your project. A typical example is to use it as argument or the
// return value of a service API. For instance:
//
// service Foo {
// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { };
// };
type Empty struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache

@@ -193,6 +193,7 @@ type PayloadConfig struct {
unknownFields protoimpl.UnknownFields

// Types that are assignable to Payload:
//
// *PayloadConfig_BytebufParams
// *PayloadConfig_SimpleParams
// *PayloadConfig_ComplexParams

@@ -340,7 +340,8 @@ type rpcConfig struct {
}

// parseRPCMetadata turns EmptyCall:key1:value1 into
//
// {typ: emptyCall, md: {key1:value1}}.
func parseRPCMetadata(rpcMetadataStr string, rpcs []string) []*rpcConfig {
rpcMetadataSplit := strings.Split(rpcMetadataStr, ",")
rpcsToMD := make(map[string][]string)

@@ -41,10 +41,11 @@ type MD map[string][]string
// New creates an MD from a given key-value map.
//
// Only the following ASCII characters are allowed in keys:
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
//
// Uppercase letters are automatically converted to lowercase.
//
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
@ -62,10 +63,11 @@ func New(m map[string]string) MD {
|
|||
// Pairs panics if len(kv) is odd.
|
||||
//
|
||||
// Only the following ASCII characters are allowed in keys:
|
||||
// - digits: 0-9
|
||||
// - uppercase letters: A-Z (normalized to lower)
|
||||
// - lowercase letters: a-z
|
||||
// - special characters: -_.
|
||||
// - digits: 0-9
|
||||
// - uppercase letters: A-Z (normalized to lower)
|
||||
// - lowercase letters: a-z
|
||||
// - special characters: -_.
|
||||
//
|
||||
// Uppercase letters are automatically converted to lowercase.
|
||||
//
|
||||
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
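These key rules apply to New and Pairs alike. A small illustrative sketch of building metadata and attaching it to an outgoing context (the key and value are made up; the usual context and metadata imports are assumed):

    md := metadata.Pairs(
        "User-ID", "42", // uppercase is normalized to "user-id"
    )
    ctx := metadata.NewOutgoingContext(context.Background(), md)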
@ -196,7 +198,7 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
|
|||
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
|
||||
// key from the incoming metadata if it exists. Key must be lower-case.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
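A hedged sketch of calling it from a server-side handler (the "user-id" key is illustrative):

    // The key must already be lowercase; no normalization happens here.
    vals := metadata.ValueFromIncomingContext(ctx, "user-id")
    _ = vals // the values for the key, if any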
@ -20,7 +20,7 @@
|
|||
// Envoy) on the data plane. In a proxyless world with gRPC enabled
|
||||
// applications, aggregation of such reports will be done by the gRPC client.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
|
||||
// removed in a later release.
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ import (
|
|||
|
||||
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
// Package profiling exposes methods to manage profiling within gRPC.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@
|
|||
// queried by a client to remotely manage the gRPC profiling behaviour of an
|
||||
// application.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
|
|||
|
|
@ -53,6 +53,7 @@ type ServerReflectionRequest struct {
|
|||
// defined field and then handles them using corresponding methods.
|
||||
//
|
||||
// Types that are assignable to MessageRequest:
|
||||
//
|
||||
// *ServerReflectionRequest_FileByFilename
|
||||
// *ServerReflectionRequest_FileContainingSymbol
|
||||
// *ServerReflectionRequest_FileContainingExtension
|
||||
|
|
@ -263,6 +264,7 @@ type ServerReflectionResponse struct {
|
|||
// message_request in the request.
|
||||
//
|
||||
// Types that are assignable to MessageResponse:
|
||||
//
|
||||
// *ServerReflectionResponse_FileDescriptorResponse
|
||||
// *ServerReflectionResponse_AllExtensionNumbersResponse
|
||||
// *ServerReflectionResponse_ListServicesResponse
|
||||
|
|
|
|||
|
|
@ -23,9 +23,11 @@
|
|||
Package grpc_testing_not_regenerate is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
|
||||
testv3.proto
|
||||
|
||||
It has these top-level messages:
|
||||
|
||||
SearchResponseV3
|
||||
SearchRequestV3
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ The service implemented is defined in:
|
|||
https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto.
|
||||
|
||||
To register server reflection on a gRPC server:
|
||||
|
||||
import "google.golang.org/grpc/reflection"
|
||||
|
||||
s := grpc.NewServer()
|
||||
|
|
@ -32,7 +33,6 @@ To register server reflection on a gRPC server:
|
|||
reflection.Register(s)
|
||||
|
||||
s.Serve(lis)
|
||||
|
||||
*/
|
||||
package reflection // import "google.golang.org/grpc/reflection"
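Putting the package-doc snippet above together, a minimal runnable sketch (the listen address and error handling are illustrative; a real server would register its services before serving):

    package main

    import (
        "log"
        "net"

        "google.golang.org/grpc"
        "google.golang.org/grpc/reflection"
    )

    func main() {
        lis, err := net.Listen("tcp", "localhost:50051")
        if err != nil {
            log.Fatalf("failed to listen: %v", err)
        }
        s := grpc.NewServer()
        // Register service implementations on s here.
        reflection.Register(s)
        if err := s.Serve(lis); err != nil {
            log.Fatalf("failed to serve: %v", err)
        }
    }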
@ -74,7 +74,7 @@ func Register(s GRPCServer) {
|
|||
// for a custom implementation to return zero values for the
|
||||
// grpc.ServiceInfo values in the map.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -85,7 +85,7 @@ type ServiceInfoProvider interface {
|
|||
// ExtensionResolver is the interface used to query details about extensions.
|
||||
// This interface is satisfied by protoregistry.GlobalTypes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -96,7 +96,7 @@ type ExtensionResolver interface {
|
|||
|
||||
// ServerOptions represents the options used to construct a reflection server.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -120,7 +120,7 @@ type ServerOptions struct {
|
|||
// This can be used to customize behavior of the reflection service. Most usages
|
||||
// should prefer to use Register instead.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
|
|||
|
|
@ -96,7 +96,7 @@ const (
|
|||
|
||||
// Address represents a server the client connects to.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -236,12 +236,12 @@ type ClientConn interface {
|
|||
//
|
||||
// Examples:
|
||||
//
|
||||
// - "dns://some_authority/foo.bar"
|
||||
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
|
||||
// - "foo.bar"
|
||||
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
|
||||
// - "unknown_scheme://authority/endpoint"
|
||||
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
|
||||
// - "dns://some_authority/foo.bar"
|
||||
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
|
||||
// - "foo.bar"
|
||||
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
|
||||
// - "unknown_scheme://authority/endpoint"
|
||||
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
|
||||
type Target struct {
|
||||
// Deprecated: use URL.Scheme instead.
|
||||
Scheme string
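For illustration, target strings like the ones above are what get passed to Dial; a hedged sketch (the address and the credentials choice are assumptions, not part of the Target documentation):

    conn, err := grpc.Dial("dns:///foo.bar",
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatalf("dial failed: %v", err)
    }
    defer conn.Close()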
|
|
@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption {
|
|||
// HeaderCallOption is a CallOption for collecting response header metadata.
|
||||
// The metadata field will be populated *after* the RPC completes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption {
|
|||
// TrailerCallOption is a CallOption for collecting response trailer metadata.
|
||||
// The metadata field will be populated *after* the RPC completes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption {
|
|||
// PeerCallOption is a CallOption for collecting the identity of the remote
|
||||
// peer. The peer field will be populated *after* the RPC completes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
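A small sketch of collecting header, trailer and peer information on a unary call; client, ctx and req stand in for a generated stub and its arguments:

    var hdr, trl metadata.MD
    var p peer.Peer
    resp, err := client.UnaryCall(ctx, req,
        grpc.Header(&hdr),  // filled in after the RPC completes
        grpc.Trailer(&trl), // filled in after the RPC completes
        grpc.Peer(&p),
    )
    _, _ = resp, err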
@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption {
|
|||
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
|
||||
// fast or not.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption {
|
|||
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size in bytes the client can receive.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -328,7 +328,7 @@ func MaxCallSendMsgSize(bytes int) CallOption {
|
|||
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size in bytes the client can send.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -351,7 +351,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
|
|||
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
|
||||
// credentials to use for the call.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -369,7 +369,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
|
|||
// sending the request. If WithCompressor is also set, UseCompressor has
|
||||
// higher priority.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
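A hedged sketch of enabling gzip for a single call; it assumes the program blank-imports google.golang.org/grpc/encoding/gzip so the "gzip" compressor is registered, and client, ctx and req again stand in for a generated stub and its arguments:

    resp, err := client.UnaryCall(ctx, req, grpc.UseCompressor("gzip"))
    _, _ = resp, err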
@ -379,7 +379,7 @@ func UseCompressor(name string) CallOption {
|
|||
|
||||
// CompressorCallOption is a CallOption that indicates the compressor to use.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -416,7 +416,7 @@ func CallContentSubtype(contentSubtype string) CallOption {
|
|||
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
|
||||
// used for marshaling messages.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -444,7 +444,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
|
|||
// This function is provided for advanced users; prefer to use only
|
||||
// CallContentSubtype to select a registered codec instead.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -455,7 +455,7 @@ func ForceCodec(codec encoding.Codec) CallOption {
|
|||
// ForceCodecCallOption is a CallOption that indicates the codec used for
|
||||
// marshaling messages.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -480,7 +480,7 @@ func CallCustomCodec(codec Codec) CallOption {
|
|||
// CustomCodecCallOption is a CallOption that indicates the codec used for
|
||||
// marshaling messages.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -497,7 +497,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
|
|||
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
|
||||
// used for buffering this RPC's requests for retry purposes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -508,7 +508,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption {
|
|||
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
|
||||
// memory to be used for caching this RPC for retry purposes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
@ -548,10 +548,11 @@ type parser struct {
|
|||
// format. The caller owns the returned msg memory.
|
||||
//
|
||||
// If there is an error, possible values are:
|
||||
// * io.EOF, when no messages remain
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * an error from the status package
|
||||
// - io.EOF, when no messages remain
|
||||
// - io.ErrUnexpectedEOF
|
||||
// - of type transport.ConnectionError
|
||||
// - an error from the status package
|
||||
//
|
||||
// No other error values or types must be returned, which also means
|
||||
// that the underlying io.Reader must not return an incompatible
|
||||
// error.
|
||||
|
|
|
|||
|
|
@ -449,9 +449,9 @@ func (c *advancedTLSCreds) OverrideServerName(serverNameOverride string) error {
|
|||
// and possibly custom verification check.
|
||||
// We have to build our own verification function here because current
|
||||
// tls module:
|
||||
// 1. does not have good support for root cert reloading.
|
||||
// 2. will ignore basic certificate check when setting InsecureSkipVerify
|
||||
// to true.
|
||||
// 1. does not have good support for root cert reloading.
|
||||
// 2. will ignore basic certificate check when setting InsecureSkipVerify
|
||||
// to true.
|
||||
func buildVerifyFunc(c *advancedTLSCreds,
|
||||
serverName string,
|
||||
rawConn net.Conn) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
|
||||
|
|
|
|||
|
|
@ -150,12 +150,12 @@ func x509NameHash(r pkix.RDNSequence) string {
|
|||
|
||||
// CheckRevocation checks the connection for revoked certificates based on RFC5280.
|
||||
// This implementation has the following major limitations:
|
||||
// * Indirect CRL files are not supported.
|
||||
// * CRL loading is only supported from directories in the X509_LOOKUP_hash_dir format.
|
||||
// * OnlySomeReasons is not supported.
|
||||
// * Delta CRL files are not supported.
|
||||
// * Certificate CRLDistributionPoint must be URLs, but are then ignored and converted into a file path.
|
||||
// * CRL checks are done after path building, which goes against RFC4158.
|
||||
// - Indirect CRL files are not supported.
|
||||
// - CRL loading is only supported from directories in the X509_LOOKUP_hash_dir format.
|
||||
// - OnlySomeReasons is not supported.
|
||||
// - Delta CRL files are not supported.
|
||||
// - Certificate CRLDistributionPoint must be URLs, but are then ignored and converted into a file path.
|
||||
// - CRL checks are done after path building, which goes against RFC4158.
|
||||
func CheckRevocation(conn tls.ConnectionState, cfg RevocationConfig) error {
|
||||
return CheckChainRevocation(conn.VerifiedChains, cfg)
|
||||
}
|
||||
|
|
@ -359,8 +359,8 @@ type authKeyID struct {
|
|||
// indirectCRL [4] BOOLEAN DEFAULT FALSE,
|
||||
// onlyContainsAttributeCerts [5] BOOLEAN DEFAULT FALSE }
|
||||
|
||||
// -- at most one of onlyContainsUserCerts, onlyContainsCACerts,
|
||||
// -- and onlyContainsAttributeCerts may be set to TRUE.
|
||||
// -- at most one of onlyContainsUserCerts, onlyContainsCACerts,
|
||||
// -- and onlyContainsAttributeCerts may be set to TRUE.
|
||||
type issuingDistributionPoint struct {
|
||||
DistributionPoint asn1.RawValue `asn1:"optional,tag:0"`
|
||||
OnlyContainsUserCerts bool `asn1:"optional,tag:1"`
|
||||
|
|
|
|||
|
|
@ -253,13 +253,16 @@ func getDecision(engine *policyEngine, match bool) Decision {
|
|||
return DecisionDeny
|
||||
}
|
||||
|
||||
// Returns the authorization decision of a single policy engine based on activation.
|
||||
// If any policy matches, the decision matches the engine's action, and the first
|
||||
// matching policy name will be returned.
|
||||
// Else if any policy is missing attributes, the decision is unknown, and the list of
|
||||
// policy names that can't be evaluated due to missing attributes will be returned.
|
||||
// Else, the decision is the opposite of the engine's action, i.e. an ALLOW engine
|
||||
// will return DecisionDeny, and vice versa.
|
||||
// Returns the authorization decision of a single policy engine based on
|
||||
// activation. If any policy matches, the decision matches the engine's
|
||||
// action, and the first matching policy name will be returned.
|
||||
//
|
||||
// Else if any policy is missing attributes, the decision is unknown, and the
|
||||
// list of policy names that can't be evaluated due to missing attributes will
|
||||
// be returned.
|
||||
//
|
||||
// Else, the decision is the opposite of the engine's action, i.e. an ALLOW
|
||||
// engine will return DecisionDeny, and vice versa.
|
||||
func (engine *policyEngine) evaluate(activation interpreter.Activation) (Decision, []string) {
|
||||
unknownPolicyNames := []string{}
|
||||
for policyName, program := range engine.programs {
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@
|
|||
// Package serviceconfig defines types and methods for operating on gRPC
|
||||
// service configs.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
|
|||
|
|
@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status {
|
|||
|
||||
// FromError returns a Status representation of err.
|
||||
//
|
||||
// - If err was produced by this package or implements the method `GRPCStatus()
|
||||
// *Status`, the appropriate Status is returned.
|
||||
// - If err was produced by this package or implements the method `GRPCStatus()
|
||||
// *Status`, the appropriate Status is returned.
|
||||
//
|
||||
// - If err is nil, a Status is returned with codes.OK and no message.
|
||||
// - If err is nil, a Status is returned with codes.OK and no message.
|
||||
//
|
||||
// - Otherwise, err is an error not compatible with this package. In this
|
||||
// case, a Status is returned with codes.Unknown and err's Error() message,
|
||||
// and ok is false.
|
||||
// - Otherwise, err is an error not compatible with this package. In this
|
||||
// case, a Status is returned with codes.Unknown and err's Error() message,
|
||||
// and ok is false.
|
||||
func FromError(err error) (s *Status, ok bool) {
|
||||
if err == nil {
|
||||
return nil, true
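A short sketch of the typical call-site pattern for FromError (the NotFound handling is illustrative):

    s, ok := status.FromError(err)
    if !ok {
        // err did not carry a Status; s has codes.Unknown and err's message.
    }
    if s.Code() == codes.NotFound {
        // handle a not-found RPC error
    }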
@ -54,6 +54,7 @@ type GaugeResponse struct {
|
|||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Types that are assignable to Value:
|
||||
//
|
||||
// *GaugeResponse_LongValue
|
||||
// *GaugeResponse_DoubleValue
|
||||
// *GaugeResponse_StringValue
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@
|
|||
// Package tap defines the function handles which are executed on the transport
|
||||
// layer of gRPC-Go and related information.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
|
|||
|
|
@ -51,9 +51,9 @@ func (*testService) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.S
|
|||
}
|
||||
|
||||
// setupGRPCServer performs the following:
|
||||
// - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and
|
||||
// register the test service on it
|
||||
// - create a local TCP listener and start serving on it
|
||||
// - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and
|
||||
// register the test service on it
|
||||
// - create a local TCP listener and start serving on it
|
||||
//
|
||||
// Returns the following:
|
||||
// - local listener on which the xDS-enabled gRPC server is serving on
|
||||
|
|
@ -118,12 +118,12 @@ func hostPortFromListener(lis net.Listener) (string, uint32, error) {
|
|||
// fallback functionality.
|
||||
//
|
||||
// The following sequence of events happen as part of this test:
|
||||
// - An xDS-enabled gRPC server is created and xDS credentials are configured.
|
||||
// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS
|
||||
// credentials are configured.
|
||||
// - Control plane is configured to not send any security configuration to both
|
||||
// the client and the server. This results in both of them using the
|
||||
// configured fallback credentials (which is insecure creds in this case).
|
||||
// - An xDS-enabled gRPC server is created and xDS credentials are configured.
|
||||
// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS
|
||||
// credentials are configured.
|
||||
// - Control plane is configured to not send any security configuration to both
|
||||
// the client and the server. This results in both of them using the
|
||||
// configured fallback credentials (which is insecure creds in this case).
|
||||
func (s) TestServerSideXDS_Fallback(t *testing.T) {
|
||||
managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil)
|
||||
defer cleanup1()
|
||||
|
|
@ -185,12 +185,12 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) {
|
|||
// credentials with file watcher certificate provider.
|
||||
//
|
||||
// The following sequence of events happen as part of this test:
|
||||
// - An xDS-enabled gRPC server is created and xDS credentials are configured.
|
||||
// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS
|
||||
// credentials are configured.
|
||||
// - Control plane is configured to send security configuration to both the
|
||||
// client and the server, pointing to the file watcher certificate provider.
|
||||
// We verify both TLS and mTLS scenarios.
|
||||
// - An xDS-enabled gRPC server is created and xDS credentials are configured.
|
||||
// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS
|
||||
// credentials are configured.
|
||||
// - Control plane is configured to send security configuration to both the
|
||||
// client and the server, pointing to the file watcher certificate provider.
|
||||
// We verify both TLS and mTLS scenarios.
|
||||
func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@
|
|||
// Package bootstrap provides the functionality to register possible options
|
||||
// for aspects of the xDS client through the bootstrap file.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed
|
||||
// in a later release.
|
||||
|
|
|
|||
|
|
@ -66,41 +66,41 @@ type priorityConfig struct {
|
|||
// If xds lb policy is ROUND_ROBIN, the children will be weighted_target for
|
||||
// locality picking, and round_robin for endpoint picking.
|
||||
//
|
||||
// ┌────────┐
|
||||
// │priority│
|
||||
// └┬──────┬┘
|
||||
// │ │
|
||||
// ┌───────────▼┐ ┌▼───────────┐
|
||||
// │cluster_impl│ │cluster_impl│
|
||||
// └─┬──────────┘ └──────────┬─┘
|
||||
// │ │
|
||||
// ┌──────────────▼─┐ ┌─▼──────────────┐
|
||||
// │locality_picking│ │locality_picking│
|
||||
// └┬──────────────┬┘ └┬──────────────┬┘
|
||||
// │ │ │ │
|
||||
// ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ ┌─▼─┐
|
||||
// │LRS│ │LRS│ │LRS│ │LRS│
|
||||
// └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘
|
||||
// │ │ │ │
|
||||
// ┌──────────▼─────┐ ┌─────▼──────────┐ ┌──────────▼─────┐ ┌─────▼──────────┐
|
||||
// │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│
|
||||
// └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘
|
||||
// ┌────────┐
|
||||
// │priority│
|
||||
// └┬──────┬┘
|
||||
// │ │
|
||||
// ┌───────────▼┐ ┌▼───────────┐
|
||||
// │cluster_impl│ │cluster_impl│
|
||||
// └─┬──────────┘ └──────────┬─┘
|
||||
// │ │
|
||||
// ┌──────────────▼─┐ ┌─▼──────────────┐
|
||||
// │locality_picking│ │locality_picking│
|
||||
// └┬──────────────┬┘ └┬──────────────┬┘
|
||||
// │ │ │ │
|
||||
// ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ ┌─▼─┐
|
||||
// │LRS│ │LRS│ │LRS│ │LRS│
|
||||
// └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘
|
||||
// │ │ │ │
|
||||
// ┌──────────▼─────┐ ┌─────▼──────────┐ ┌──────────▼─────┐ ┌─────▼──────────┐
|
||||
// │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│
|
||||
// └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘
|
||||
//
|
||||
// If xds lb policy is RING_HASH, the children will be just a ring_hash policy.
|
||||
// The endpoints from all localities will be flattened to one addresses list,
|
||||
// and the ring_hash policy will pick endpoints from it.
|
||||
//
|
||||
// ┌────────┐
|
||||
// │priority│
|
||||
// └┬──────┬┘
|
||||
// │ │
|
||||
// ┌──────────▼─┐ ┌─▼──────────┐
|
||||
// │cluster_impl│ │cluster_impl│
|
||||
// └──────┬─────┘ └─────┬──────┘
|
||||
// │ │
|
||||
// ┌──────▼─────┐ ┌─────▼──────┐
|
||||
// │ ring_hash │ │ ring_hash │
|
||||
// └────────────┘ └────────────┘
|
||||
// ┌────────┐
|
||||
// │priority│
|
||||
// └┬──────┬┘
|
||||
// │ │
|
||||
// ┌──────────▼─┐ ┌─▼──────────┐
|
||||
// │cluster_impl│ │cluster_impl│
|
||||
// └──────┬─────┘ └─────┬──────┘
|
||||
// │ │
|
||||
// ┌──────▼─────┐ ┌─────▼──────┐
|
||||
// │ ring_hash │ │ ring_hash │
|
||||
// └────────────┘ └────────────┘
|
||||
//
|
||||
// If endpointPickingPolicy is nil, roundrobin will be used.
|
||||
//
|
||||
|
|
|
|||
|
|
@ -168,11 +168,11 @@ func clientEndpointsResource(nodeID, edsServiceName string, localities []localit
|
|||
|
||||
// TestEDS_OneLocality tests the cluster_resolver LB policy using an EDS
|
||||
// resource with one locality. The following scenarios are tested:
|
||||
// 1. Single backend. Test verifies that RPCs reach this backend.
|
||||
// 2. Add a backend. Test verifies that RPCs are roundrobined across the two
|
||||
// backends.
|
||||
// 3. Remove one backend. Test verifies that all RPCs reach the other backend.
|
||||
// 4. Replace the backend. Test verifies that all RPCs reach the new backend.
|
||||
// 1. Single backend. Test verifies that RPCs reach this backend.
|
||||
// 2. Add a backend. Test verifies that RPCs are roundrobined across the two
|
||||
// backends.
|
||||
// 3. Remove one backend. Test verifies that all RPCs reach the other backend.
|
||||
// 4. Replace the backend. Test verifies that all RPCs reach the new backend.
|
||||
func (s) TestEDS_OneLocality(t *testing.T) {
|
||||
// Spin up a management server to receive xDS resources from.
|
||||
managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil)
|
||||
|
|
@ -262,16 +262,16 @@ func (s) TestEDS_OneLocality(t *testing.T) {
|
|||
|
||||
// TestEDS_MultipleLocalities tests the cluster_resolver LB policy using an EDS
|
||||
// resource with multiple localities. The following scenarios are tested:
|
||||
// 1. Two localities, each with a single backend. Test verifies that RPCs are
|
||||
// weighted roundrobined across these two backends.
|
||||
// 2. Add another locality, with a single backend. Test verifies that RPCs are
|
||||
// weighted roundrobined across all the backends.
|
||||
// 3. Remove one locality. Test verifies that RPCs are weighted roundrobined
|
||||
// across backends from the remaining localities.
|
||||
// 4. Add a backend to one locality. Test verifies that RPCs are weighted
|
||||
// roundrobined across localities.
|
||||
// 5. Change the weight of one of the localities. Test verifies that RPCs are
|
||||
// weighted roundrobined across the localities.
|
||||
// 1. Two localities, each with a single backend. Test verifies that RPCs are
|
||||
// weighted roundrobined across these two backends.
|
||||
// 2. Add another locality, with a single backend. Test verifies that RPCs are
|
||||
// weighted roundrobined across all the backends.
|
||||
// 3. Remove one locality. Test verifies that RPCs are weighted roundrobined
|
||||
// across backends from the remaining localities.
|
||||
// 4. Add a backend to one locality. Test verifies that RPCs are weighted
|
||||
// roundrobined across localities.
|
||||
// 5. Change the weight of one of the localities. Test verifies that RPCs are
|
||||
// weighted roundrobined across the localities.
|
||||
//
|
||||
// In our LB policy tree, one of the descendents of the "cluster_resolver" LB
|
||||
// policy is the "weighted_target" LB policy which performs weighted roundrobin
|
||||
|
|
|
|||
|
|
@ -41,30 +41,44 @@ var (
|
|||
// from a priority to another).
|
||||
//
|
||||
// It's guaranteed that after this function returns:
|
||||
// - If some child is READY, it is childInUse, and all lower priorities are
|
||||
// closed.
|
||||
// - If some child is newly started(in Connecting for the first time), it is
|
||||
// childInUse, and all lower priorities are closed.
|
||||
// - Otherwise, the lowest priority is childInUse (none of the children is
|
||||
// ready, and the overall state is not ready).
|
||||
//
|
||||
// If some child is READY, it is childInUse, and all lower priorities are
|
||||
// closed.
|
||||
//
|
||||
// If some child is newly started(in Connecting for the first time), it is
|
||||
// childInUse, and all lower priorities are closed.
|
||||
//
|
||||
// Otherwise, the lowest priority is childInUse (none of the children is
|
||||
// ready, and the overall state is not ready).
|
||||
//
|
||||
// Steps:
|
||||
// - If all priorities were deleted, unset childInUse (to an empty string), and
|
||||
// set parent ClientConn to TransientFailure
|
||||
// - Otherwise, Scan all children from p0, and check balancer stats:
|
||||
// - For any of the following cases:
|
||||
// - If balancer is not started (not built), this is either a new child with
|
||||
// high priority, or a new builder for an existing child.
|
||||
// - If balancer is Connecting and has non-nil initTimer (meaning it
|
||||
// transitioned from Ready or Idle to connecting, not from TF, so we
|
||||
// should give it init-time to connect).
|
||||
// - If balancer is READY or IDLE
|
||||
// - If this is the lowest priority
|
||||
// - do the following:
|
||||
// - if this is not the old childInUse, override picker so old picker is no
|
||||
// longer used.
|
||||
// switch to it (because all higher priorities are neither new nor Ready)
|
||||
// - forward the new addresses and config
|
||||
//
|
||||
// If all priorities were deleted, unset childInUse (to an empty string), and
|
||||
// set parent ClientConn to TransientFailure
|
||||
//
|
||||
// Otherwise, Scan all children from p0, and check balancer stats:
|
||||
//
|
||||
// For any of the following cases:
|
||||
//
|
||||
// If balancer is not started (not built), this is either a new child with
|
||||
// high priority, or a new builder for an existing child.
|
||||
//
|
||||
// If balancer is Connecting and has non-nil initTimer (meaning it
|
||||
// transitioned from Ready or Idle to connecting, not from TF, so we
|
||||
// should give it init-time to connect).
|
||||
//
|
||||
// If balancer is READY or IDLE
|
||||
//
|
||||
// If this is the lowest priority
|
||||
//
|
||||
// do the following:
|
||||
//
|
||||
// if this is not the old childInUse, override picker so old picker is no
|
||||
// longer used.
|
||||
//
|
||||
// switch to it (because all higher priorities are neither new nor Ready)
|
||||
//
|
||||
// forward the new addresses and config
|
||||
//
|
||||
// Caller must hold b.mu.
|
||||
func (b *priorityBalancer) syncPriority(childUpdating string) {
|
||||
|
|
|
|||
|
|
@ -324,13 +324,14 @@ func (b *ringhashBalancer) ResolverError(err error) {
|
|||
// UpdateSubConnState updates the per-SubConn state stored in the ring, and also
|
||||
// the aggregated state.
|
||||
//
|
||||
// It triggers an update to cc when:
|
||||
// - the new state is TransientFailure, to update the error message
|
||||
// - it's possible that this is a noop, but sending an extra update is easier
|
||||
// than comparing errors
|
||||
// - the aggregated state is changed
|
||||
// - the same picker will be sent again, but this update may trigger a re-pick
|
||||
// for some RPCs.
|
||||
// It triggers an update to cc when:
|
||||
// - the new state is TransientFailure, to update the error message
|
||||
// - it's possible that this is a noop, but sending an extra update is easier
|
||||
// than comparing errors
|
||||
//
|
||||
// - the aggregated state is changed
|
||||
// - the same picker will be sent again, but this update may trigger a re-pick
|
||||
// for some RPCs.
|
||||
func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||
s := state.ConnectivityState
|
||||
if logger.V(2) {
|
||||
|
|
|
|||
|
|
@ -92,11 +92,11 @@ func (*testService) FullDuplexCall(stream testpb.TestService_FullDuplexCallServe
|
|||
// - create a local TCP listener and start serving on it
|
||||
//
|
||||
// Returns the following:
|
||||
// - the management server: tests use this to configure resources
|
||||
// - nodeID expected by the management server: this is set in the Node proto
|
||||
// sent by the xdsClient for queries.
|
||||
// - the port the server is listening on
|
||||
// - cleanup function to be invoked by the tests when done
|
||||
// - the management server: tests use this to configure resources
|
||||
// - nodeID expected by the management server: this is set in the Node proto
|
||||
// sent by the xdsClient for queries.
|
||||
// - the port the server is listening on
|
||||
// - cleanup function to be invoked by the tests when done
|
||||
func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) {
|
||||
// Spin up a xDS management server on a local port.
|
||||
nodeID := uuid.New().String()
|
||||
|
|
|
|||
|
|
@ -32,13 +32,13 @@ import (
|
|||
|
||||
// connWrapper is a thin wrapper around a net.Conn returned by Accept(). It
|
||||
// provides the following additional functionality:
|
||||
// 1. A way to retrieve the configured deadline. This is required by the
|
||||
// ServerHandshake() method of the xdsCredentials when it attempts to read
|
||||
// key material from the certificate providers.
|
||||
// 2. Implements the XDSHandshakeInfo() method used by the xdsCredentials to
|
||||
// retrieve the configured certificate providers.
|
||||
// 3. xDS filter_chain matching logic to select appropriate security
|
||||
// configuration for the incoming connection.
|
||||
// 1. A way to retrieve the configured deadline. This is required by the
|
||||
// ServerHandshake() method of the xdsCredentials when it attempts to read
|
||||
// key material from the certificate providers.
|
||||
// 2. Implements the XDSHandshakeInfo() method used by the xdsCredentials to
|
||||
// retrieve the configured certificate providers.
|
||||
// 3. xDS filter_chain matching logic to select appropriate security
|
||||
// configuration for the incoming connection.
|
||||
type connWrapper struct {
|
||||
net.Conn
|
||||
|
||||
|
|
|
|||
|
|
@ -123,6 +123,7 @@ func TestPingPong(t *testing.T) {
|
|||
// - verify that
|
||||
// - all RPCs with the same metadata value are sent to the same backend
|
||||
// - only one backend is Ready
|
||||
//
|
||||
// - send more RPCs with different metadata values until a new backend is picked, and verify that
|
||||
// - only two backends are in Ready
|
||||
func TestAffinity(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -163,9 +163,9 @@ func (s) TestAuthorityNoneDefaultAuthority(t *testing.T) {
|
|||
}
|
||||
|
||||
// TestAuthorityShare covers that
|
||||
// - watch with the same authority name doesn't create new authority
|
||||
// - watch with different authority name but same authority config doesn't
|
||||
// create new authority
|
||||
// - watch with the same authority name doesn't create new authority
|
||||
// - watch with different authority name but same authority config doesn't
|
||||
// create new authority
|
||||
func (s) TestAuthorityShare(t *testing.T) {
|
||||
overrideFedEnvVar(t)
|
||||
ctrlCh := overrideNewController(t)
|
||||
|
|
@ -210,9 +210,9 @@ func (s) TestAuthorityShare(t *testing.T) {
|
|||
}
|
||||
|
||||
// TestAuthorityIdle covers that
|
||||
// - authorities are put in a timeout cache when the last watch is canceled
|
||||
// - idle authorities are not immediately closed. They will be closed after a
|
||||
// timeout.
|
||||
// - authorities are put in a timeout cache when the last watch is canceled
|
||||
// - idle authorities are not immediately closed. They will be closed after a
|
||||
// timeout.
|
||||
func (s) TestAuthorityIdleTimeout(t *testing.T) {
|
||||
overrideFedEnvVar(t)
|
||||
ctrlCh := overrideNewController(t)
|
||||
|
|
|
|||
|
|
@ -72,7 +72,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i
|
|||
// NewWithConfigForTesting returns an xDS client for the specified bootstrap
|
||||
// config, separate from the global singleton.
|
||||
//
|
||||
// Testing Only
|
||||
// # Testing Only
|
||||
//
|
||||
// This function should ONLY be used for testing purposes.
|
||||
func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, authorityIdleTimeout time.Duration) (XDSClient, error) {
|
||||
|
|
@ -86,8 +86,7 @@ func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, autho
|
|||
// NewWithBootstrapContentsForTesting returns an xDS client for this config,
|
||||
// separate from the global singleton.
|
||||
//
|
||||
//
|
||||
// Testing Only
|
||||
// # Testing Only
|
||||
//
|
||||
// This function should ONLY be used for testing purposes.
|
||||
func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, error) {
|
||||
|
|
|
|||
|
|
@ -79,10 +79,10 @@ func (v2c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.Cli
|
|||
// rType, on the provided stream.
|
||||
//
|
||||
// version is the ack version to be sent with the request
|
||||
// - If this is the new request (not an ack/nack), version will be empty.
|
||||
// - If this is an ack, version will be the version from the response.
|
||||
// - If this is a nack, version will be the previous acked version (from
|
||||
// versionMap). If there was no ack before, it will be empty.
|
||||
// - If this is the new request (not an ack/nack), version will be empty.
|
||||
// - If this is an ack, version will be the version from the response.
|
||||
// - If this is a nack, version will be the previous acked version (from
|
||||
// versionMap). If there was no ack before, it will be empty.
|
||||
func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error {
|
||||
stream, ok := s.(adsStream)
|
||||
if !ok {
|
||||
|
|
|
|||
|
|
@ -81,10 +81,10 @@ func (v3c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.Cli
|
|||
// rType, on the provided stream.
|
||||
//
|
||||
// version is the ack version to be sent with the request
|
||||
// - If this is the new request (not an ack/nack), version will be empty.
|
||||
// - If this is an ack, version will be the version from the response.
|
||||
// - If this is a nack, version will be the previous acked version (from
|
||||
// versionMap). If there was no ack before, it will be empty.
|
||||
// - If this is the new request (not an ack/nack), version will be empty.
|
||||
// - If this is an ack, version will be the version from the response.
|
||||
// - If this is a nack, version will be the previous acked version (from
|
||||
// versionMap). If there was no ack before, it will be empty.
|
||||
func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error {
|
||||
stream, ok := s.(adsStream)
|
||||
if !ok {
|
||||
|
|
|
|||
|
|
@ -144,13 +144,13 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want
|
|||
|
||||
// TestLDSWatch covers the case where a single watcher exists for a single
|
||||
// listener resource. The test verifies the following scenarios:
|
||||
// 1. An update from the management server containing the resource being
|
||||
// watched should result in the invocation of the watch callback.
|
||||
// 2. An update from the management server containing a resource *not* being
|
||||
// watched should not result in the invocation of the watch callback.
|
||||
// 3. After the watch is cancelled, an update from the management server
|
||||
// containing the resource that was being watched should not result in the
|
||||
// invocation of the watch callback.
|
||||
// 1. An update from the management server containing the resource being
|
||||
// watched should result in the invocation of the watch callback.
|
||||
// 2. An update from the management server containing a resource *not* being
|
||||
// watched should not result in the invocation of the watch callback.
|
||||
// 3. After the watch is cancelled, an update from the management server
|
||||
// containing the resource that was being watched should not result in the
|
||||
// invocation of the watch callback.
|
||||
//
|
||||
// The test is run for old and new style names.
|
||||
func (s) TestLDSWatch(t *testing.T) {
|
||||
|
|
@ -263,14 +263,14 @@ func (s) TestLDSWatch(t *testing.T) {
|
|||
// TestLDSWatch_TwoWatchesForSameResourceName covers the case where two watchers
|
||||
// exist for a single listener resource. The test verifies the following
|
||||
// scenarios:
|
||||
// 1. An update from the management server containing the resource being
|
||||
// watched should result in the invocation of both watch callbacks.
|
||||
// 2. After one of the watches is cancelled, a redundant update from the
|
||||
// management server should not result in the invocation of either of the
|
||||
// watch callbacks.
|
||||
// 3. An update from the management server containing the resource being
|
||||
// watched should result in the invocation of the un-cancelled watch
|
||||
// callback.
|
||||
// 1. An update from the management server containing the resource being
|
||||
// watched should result in the invocation of both watch callbacks.
|
||||
// 2. After one of the watches is cancelled, a redundant update from the
|
||||
// management server should not result in the invocation of either of the
|
||||
// watch callbacks.
|
||||
// 3. An update from the management server containing the resource being
|
||||
// watched should result in the invocation of the un-cancelled watch
|
||||
// callback.
|
||||
//
|
||||
// The test is run for old and new style names.
|
||||
func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) {
|
||||
|
|
@ -680,12 +680,12 @@ func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
|
|||
// TestLDSWatch_ResourceRemoved covers the cases where a resource being watched
|
||||
// is removed from the management server. The test verifies the following
|
||||
// scenarios:
|
||||
// 1. Removing a resource should trigger the watch callback with a resource
|
||||
// removed error. It should not trigger the watch callback for an unrelated
|
||||
// resource.
|
||||
// 2. An update to another resource should result in the invocation of the watch
|
||||
// callback associated with that resource. It should not result in the
|
||||
// invocation of the watch callback associated with the deleted resource.
|
||||
// 1. Removing a resource should trigger the watch callback with a resource
|
||||
// removed error. It should not trigger the watch callback for an unrelated
|
||||
// resource.
|
||||
// 2. An update to another resource should result in the invocation of the watch
|
||||
// callback associated with that resource. It should not result in the
|
||||
// invocation of the watch callback associated with the deleted resource.
|
||||
//
|
||||
// The test is run with both old and new style names.
|
||||
func (s) TestLDSWatch_ResourceRemoved(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -105,6 +105,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) {
|
|||
// - an update is received after a watch()
|
||||
// - another update is received, with one resource removed
|
||||
// - this should trigger callback with resource removed error
|
||||
//
|
||||
// - one more update without the removed resource
|
||||
// - the callback (above) shouldn't receive any update
|
||||
func (s) TestClusterResourceRemoved(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -57,19 +57,19 @@ func testFedTwoWatchDifferentContextParameterOrder(t *testing.T, typ xdsresource
|
|||
}
|
||||
|
||||
// TestLDSFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name
|
||||
// - Two watches with the same query string, but in different order. The two
|
||||
// watches should watch the same resource.
|
||||
// - The response has the same query string, but in different order. The watch
|
||||
// should still be notified.
|
||||
// - Two watches with the same query string, but in different order. The two
|
||||
// watches should watch the same resource.
|
||||
// - The response has the same query string, but in different order. The watch
|
||||
// should still be notified.
|
||||
func (s) TestLDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
|
||||
testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName})
|
||||
}
|
||||
|
||||
// TestRDSFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name
|
||||
// - Two watches with the same query string, but in different order. The two
|
||||
// watches should watch the same resource.
|
||||
// - The response has the same query string, but in different order. The watch
|
||||
// should still be notified.
|
||||
// - Two watches with the same query string, but in different order. The two
|
||||
// watches should watch the same resource.
|
||||
// - The response has the same query string, but in different order. The watch
|
||||
// should still be notified.
|
||||
func (s) TestRDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
|
||||
testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{
|
||||
VirtualHosts: []*xdsresource.VirtualHost{
|
||||
|
|
@ -82,19 +82,19 @@ func (s) TestRDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
|
|||
}
|
||||
|
||||
// TestClusterFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name
|
||||
// - Two watches with the same query string, but in different order. The two
|
||||
// watches should watch the same resource.
|
||||
// - The response has the same query string, but in different order. The watch
|
||||
// should still be notified.
|
||||
// - Two watches with the same query string, but in different order. The two
|
||||
// watches should watch the same resource.
|
||||
// - The response has the same query string, but in different order. The watch
|
||||
// should still be notified.
|
||||
func (s) TestClusterFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
|
||||
testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName})
|
||||
}
|
||||
|
||||
// TestEndpointsFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name
|
||||
// - Two watches with the same query string, but in different order. The two
|
||||
// watches should watch the same resource.
|
||||
// - The response has the same query string, but in different order. The watch
|
||||
// should still be notified.
|
||||
// - Two watches with the same query string, but in different order. The two
|
||||
// watches should watch the same resource.
|
||||
// - The response has the same query string, but in different order. The watch
|
||||
// should still be notified.
|
||||
func (s) TestEndpointsFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
|
||||
testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -428,6 +428,7 @@ func testWatchAfterCache(t *testing.T, typ xdsresource.ResourceType, update inte
|
|||
// - an update is received after a watch()
|
||||
// - another update is received, with one resource removed
|
||||
// - this should trigger callback with resource removed error
|
||||
//
|
||||
// - one more update without the removed resource
|
||||
// - the callback (above) shouldn't receive any update
|
||||
func testResourceRemoved(t *testing.T, typ xdsresource.ResourceType, update1 interface{}, resourceName1 string, update2 interface{}, resourceName2 string) {
|
||||
|
|
|
|||
|
|
@ -158,11 +158,11 @@ const (
|
|||
// filter chains in a single Listener resource. It also contains the default
|
||||
// filter chain specified in the Listener resource. It provides two important
|
||||
// pieces of functionality:
|
||||
// 1. Validate the filter chains in an incoming Listener resource to make sure
|
||||
// that there aren't filter chains which contain the same match criteria.
|
||||
// 2. As part of performing the above validation, it builds an internal data
|
||||
// structure which will be used to look up the matching filter chain at
|
||||
// connection time.
|
||||
// 1. Validate the filter chains in an incoming Listener resource to make sure
|
||||
// that there aren't filter chains which contain the same match criteria.
|
||||
// 2. As part of performing the above validation, it builds an internal data
|
||||
// structure which will be used to look up the matching filter chain at
|
||||
// connection time.
|
||||
//
|
||||
// The logic specified in the documentation around the xDS FilterChainMatch
|
||||
// proto mentions 8 criteria to match on.
|
||||
|
|
|
|||
|
|
@ -208,18 +208,21 @@ func match(domain, host string) (domainMatchType, bool) {
|
|||
// FindBestMatchingVirtualHost returns the virtual host whose domains field best
|
||||
// matches host
|
||||
//
|
||||
// The domains field support 4 different matching pattern types:
|
||||
// - Exact match
|
||||
// - Suffix match (e.g. “*ABC”)
|
||||
// - Prefix match (e.g. “ABC*”)
|
||||
// - Universal match (e.g. “*”)
|
||||
// The domains field support 4 different matching pattern types:
|
||||
//
|
||||
// The best match is defined as:
|
||||
// - A match is better if it’s matching pattern type is better
|
||||
// - Exact match > suffix match > prefix match > universal match
|
||||
// - If two matches are of the same pattern type, the longer match is better
|
||||
// - This is to compare the length of the matching pattern, e.g. “*ABCDE” >
|
||||
// “*ABC”
|
||||
// - Exact match
|
||||
// - Suffix match (e.g. “*ABC”)
|
||||
// - Prefix match (e.g. “ABC*”)
|
||||
// - Universal match (e.g. “*”)
|
||||
//
|
||||
// The best match is defined as:
|
||||
// - A match is better if its matching pattern type is better.
|
||||
// * Exact match > suffix match > prefix match > universal match.
|
||||
//
|
||||
// - If two matches are of the same pattern type, the longer match is
|
||||
// better.
|
||||
// * This is to compare the length of the matching pattern, e.g. “*ABCDE” >
|
||||
// “*ABC”
|
||||
func FindBestMatchingVirtualHost(host string, vHosts []*VirtualHost) *VirtualHost { // Maybe move this crap to client
|
||||
var (
|
||||
matchVh *VirtualHost
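For illustration: given host "foo.bar.com", a virtual host listing the exact domain "foo.bar.com" beats one listing the suffix "*.bar.com", which in turn beats the universal "*"; and between two suffix patterns such as "*.bar.com" and "*.com", the longer "*.bar.com" wins.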
@ -62,12 +62,12 @@ type ServingModeChangeArgs struct {
|
|||
// to inject a bootstrap configuration used by only this server, instead of the
|
||||
// global configuration from the environment variables.
|
||||
//
|
||||
// Testing Only
|
||||
// # Testing Only
|
||||
//
|
||||
// This function should ONLY be used for testing and may not work with some
|
||||
// other features, including the CSDS service.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
|
|||
|
|
@ -375,12 +375,12 @@ func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel,
|
|||
|
||||
// TestServeSuccess tests the successful case of calling Serve().
|
||||
// The following sequence of events happen:
|
||||
// 1. Create a new GRPCServer and call Serve() in a goroutine.
|
||||
// 2. Make sure an xdsClient is created, and an LDS watch is registered.
|
||||
// 3. Push an error response from the xdsClient, and make sure that Serve() does
|
||||
// not exit.
|
||||
// 4. Push a good response from the xdsClient, and make sure that Serve() on the
|
||||
// underlying grpc.Server is called.
|
||||
// 1. Create a new GRPCServer and call Serve() in a goroutine.
|
||||
// 2. Make sure an xdsClient is created, and an LDS watch is registered.
|
||||
// 3. Push an error response from the xdsClient, and make sure that Serve() does
|
||||
// not exit.
|
||||
// 4. Push a good response from the xdsClient, and make sure that Serve() on the
|
||||
// underlying grpc.Server is called.
|
||||
func (s) TestServeSuccess(t *testing.T) {
|
||||
fs, clientCh, cleanup := setupOverrides()
|
||||
defer cleanup()
|
||||
|
|
|
|||
|
|
@ -81,12 +81,12 @@ func init() {
|
|||
// the supported environment variables. The resolver.Builder is meant to be
|
||||
// used in conjunction with the grpc.WithResolvers DialOption.
|
||||
//
|
||||
// Testing Only
|
||||
// # Testing Only
|
||||
//
|
||||
// This function should ONLY be used for testing and may not work with some
|
||||
// other features, including the CSDS service.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
|
|
|||