mirror of https://github.com/grpc/grpc-go.git
envconfig: remove env vars for on-by-default features (#6749)
This commit is contained in:
parent
c76d75f4f9
commit
8190d883e0
@@ -28,17 +28,11 @@ import (
var (
	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
	// AdvertiseCompressors is set if registered compressors should be advertised
	// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
	AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
	// RingHashCap indicates the maximum ring size, which defaults to 4096
	// entries but may be overridden by setting the environment variable
	// "GRPC_RING_HASH_CAP". This does not override the default bounds
	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
	// PickFirstLBConfig is set if we should support configuration of the
	// pick_first LB policy.
	PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true)
	// LeastRequestLB is set if we should support the least_request_experimental
	// LB policy, which can be enabled by setting the environment variable
	// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
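For context, a minimal sketch of how helpers like boolFromEnv and uint64FromEnv could be implemented; this is consistent with how the variables above behave, but the real envconfig helpers may differ in detail:

package envconfig

import (
	"os"
	"strconv"
	"strings"
)

// boolFromEnv returns def when the variable is unset; an on-by-default
// feature is only disabled by an explicit "false", and an off-by-default
// feature is only enabled by an explicit "true".
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

// uint64FromEnv parses the variable as a uint64 and clamps it to [min, max],
// falling back to def when unset or unparsable.
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}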
@@ -50,46 +50,7 @@ var (
	//
	// When both bootstrap FileName and FileContent are set, FileName is used.
	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
	// XDSRingHash indicates whether ring hash support is enabled, which can be
	// disabled by setting the environment variable
	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
	XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
	// XDSClientSideSecurity is used to control processing of security
	// configuration on the client-side.
	//
	// Note that there is no env var protection for the server-side because we
	// have a brand new API on the server-side and users explicitly need to use
	// the new API to get security integration on the server.
	XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
	// XDSAggregateAndDNS indicates whether processing of aggregated cluster and
	// DNS cluster is enabled, which can be disabled by setting the environment
	// variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
	// to "false".
	XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)

	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
	// which can be disabled by setting the environment variable
	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
	XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
	// XDSOutlierDetection indicates whether outlier detection support is
	// enabled, which can be disabled by setting the environment variable
	// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
	XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
	// XDSFederation indicates whether federation support is enabled, which can
	// be enabled by setting the environment variable
	// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
	XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)

	// XDSRLS indicates whether processing of Cluster Specifier plugins and
	// support for the RLS Cluster Specifier is enabled, which can be disabled by
	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
	// "false".
	XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)

	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
	// XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
	// can be disabled by setting the environment variable
	// "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
	XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
)
@@ -20,8 +20,6 @@ package grpcutil

import (
	"strings"

	"google.golang.org/grpc/internal/envconfig"
)

// RegisteredCompressorNames holds names of the registered compressors.
@@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool {
// RegisteredCompressors returns a string of registered compressor names
// separated by comma.
func RegisteredCompressors() string {
	if !envconfig.AdvertiseCompressors {
		return ""
	}
	return strings.Join(RegisteredCompressorNames, ",")
}
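The comma-joined string is what the client advertises to peers (presumably ending up in the grpc-accept-encoding header). A trivial, standalone sketch of the same join, using a local name list as a hypothetical stand-in for the real registry:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical stand-in for grpcutil.RegisteredCompressorNames.
	names := []string{"gzip", "snappy"}
	fmt.Println(strings.Join(names, ",")) // gzip,snappy
}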
@@ -20,27 +20,13 @@ package grpcutil

import (
	"testing"

	"google.golang.org/grpc/internal/envconfig"
)

func TestRegisteredCompressors(t *testing.T) {
	defer func(c []string) { RegisteredCompressorNames = c }(RegisteredCompressorNames)
	defer func(v bool) { envconfig.AdvertiseCompressors = v }(envconfig.AdvertiseCompressors)
	RegisteredCompressorNames = []string{"gzip", "snappy"}
	tests := []struct {
		desc    string
		enabled bool
		want    string
	}{
		{desc: "compressor_ad_disabled", enabled: false, want: ""},
		{desc: "compressor_ad_enabled", enabled: true, want: "gzip,snappy"},
	}
	for _, tt := range tests {
		envconfig.AdvertiseCompressors = tt.enabled
		compressors := RegisteredCompressors()
		if compressors != tt.want {
			t.Fatalf("Unexpected compressors got:%s, want:%s", compressors, tt.want)
		}
	if got, want := RegisteredCompressors(), "gzip,snappy"; got != want {
		t.Fatalf("Unexpected compressors got:%s, want:%s", got, want)

	}
}
@@ -116,12 +116,9 @@ func Contents(opts Options) ([]byte, error) {
		cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "ignore_resource_deletion")
	}

	auths := make(map[string]authority)
	if envconfig.XDSFederation {
		// This will end up using the top-level server list for new style
		// resources with empty authority.
		auths[""] = authority{}
	}
	// This will end up using the top-level server list for new style
	// resources with empty authority.
	auths := map[string]authority{"": {}}
	for n, auURI := range opts.Authorities {
		auths[n] = authority{XdsServers: []server{{
			ServerURI: auURI,
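This hunk replaces a federation-gated insertion of the default (empty-named) authority with an unconditional map literal. A standalone sketch of the resulting shape, using simplified stand-in types rather than the real bootstrap types:

package main

import "fmt"

// Simplified stand-ins for the server/authority types in this file.
type server struct{ ServerURI string }
type authority struct{ XdsServers []server }

func main() {
	// The default authority (empty key) is now always present, so new-style
	// resource names without an authority fall back to the top-level server
	// list.
	auths := map[string]authority{"": {}}
	auths["xds.example.com"] = authority{XdsServers: []server{{ServerURI: "dns:///xds.example.com"}}}
	fmt.Println(len(auths)) // 2
}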
14 pickfirst.go
@@ -25,7 +25,6 @@ import (

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/envconfig"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/grpcrand"
	"google.golang.org/grpc/internal/pretty"
@@ -65,19 +64,6 @@ type pfConfig struct {
}

func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
	if !envconfig.PickFirstLBConfig {
		// Prior to supporting loadbalancing configuration, the pick_first LB
		// policy did not implement the balancer.ConfigParser interface. This
		// meant that if a non-empty configuration was passed to it, the service
		// config unmarshaling code would throw a warning log, but would
		// continue using the pick_first LB policy. The code below ensures the
		// same behavior is retained if the env var is not set.
		if string(js) != "{}" {
			logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
		}
		return nil, nil
	}

	var cfg pfConfig
	if err := json.Unmarshal(js, &cfg); err != nil {
		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
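After this change the config is always unmarshaled. A minimal, self-contained sketch of that parse step, assuming a struct shaped like pfConfig (the real type lives in this package and may have more fields):

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical mirror of the pfConfig field relevant here.
type pfConfig struct {
	ShuffleAddressList bool `json:"shuffleAddressList"`
}

func main() {
	js := []byte(`{"shuffleAddressList": true}`)
	var cfg pfConfig
	if err := json.Unmarshal(js, &cfg); err != nil {
		fmt.Println("unable to unmarshal LB policy config:", err)
		return
	}
	fmt.Println(cfg.ShuffleAddressList) // true
}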
@@ -31,7 +31,6 @@ import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/stubserver"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
@@ -408,23 +407,6 @@ func (s) TestUnregisteredSetSendCompressorFailure(t *testing.T) {
	})
}

func (s) TestUnadvertisedSetSendCompressorFailure(t *testing.T) {
	// Disable client compressor advertisement.
	defer func(b bool) { envconfig.AdvertiseCompressors = b }(envconfig.AdvertiseCompressors)
	envconfig.AdvertiseCompressors = false

	resCompressor := "gzip"
	wantErr := status.Error(codes.Unknown, "unable to set send compressor: client does not support compressor \"gzip\"")

	t.Run("unary", func(t *testing.T) {
		testUnarySetSendCompressorFailure(t, resCompressor, wantErr)
	})

	t.Run("stream", func(t *testing.T) {
		testStreamSetSendCompressorFailure(t, resCompressor, wantErr)
	})
}

func testUnarySetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) {
	ss := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
@@ -33,7 +33,6 @@ import (
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/grpcrand"
	"google.golang.org/grpc/internal/stubserver"
	"google.golang.org/grpc/internal/testutils"
@@ -376,8 +375,6 @@ func (s) TestPickFirst_StickyTransientFailure(t *testing.T) {

// Tests the PF LB policy with shuffling enabled.
func (s) TestPickFirst_ShuffleAddressList(t *testing.T) {
	defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig)
	envconfig.PickFirstLBConfig = true
	const serviceConfig = `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}`

	// Install a shuffler that always reverses two entries.
@@ -429,45 +426,6 @@ func (s) TestPickFirst_ShuffleAddressList(t *testing.T) {
	}
}

// Tests the PF LB policy with the environment variable support of address list
// shuffling disabled.
func (s) TestPickFirst_ShuffleAddressListDisabled(t *testing.T) {
	defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig)
	envconfig.PickFirstLBConfig = false
	const serviceConfig = `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}`

	// Install a shuffler that always reverses two entries.
	origShuf := grpcrand.Shuffle
	defer func() { grpcrand.Shuffle = origShuf }()
	grpcrand.Shuffle = func(n int, f func(int, int)) {
		if n != 2 {
			t.Errorf("Shuffle called with n=%v; want 2", n)
			return
		}
		f(0, 1) // reverse the two addresses
	}

	// Set up our backends.
	cc, r, backends := setupPickFirst(t, 2)
	addrs := stubBackendsToResolverAddrs(backends)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	// Send a config with shuffling enabled. This will reverse the addresses,
	// so we should connect to backend 1 if shuffling is supported. However
	// with it disabled at the start of the test, we will connect to backend 0
	// instead.
	shufState := resolver.State{
		ServiceConfig: parseServiceConfig(t, r, serviceConfig),
		Addresses:     []resolver.Address{addrs[0], addrs[1]},
	}
	r.UpdateState(shufState)
	if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil {
		t.Fatal(err)
	}
}

// Test config parsing with the env var turned on and off for various scenarios.
func (s) TestPickFirst_ParseConfig_Success(t *testing.T) {
	// Install a shuffler that always reverses two entries.
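These tests work because grpcrand.Shuffle is a package-level function variable, which makes the randomness injectable. A self-contained sketch of that seam, with hypothetical names standing in for internal/grpcrand:

package main

import (
	"fmt"
	"math/rand"
)

// Shuffle is a package-level variable (as in internal/grpcrand), so tests can
// swap in a deterministic implementation.
var Shuffle = rand.Shuffle

func main() {
	// Replace the shuffler with one that always reverses two entries.
	orig := Shuffle
	defer func() { Shuffle = orig }()
	Shuffle = func(n int, swap func(int, int)) {
		if n == 2 {
			swap(0, 1)
		}
	}

	addrs := []string{"backend-0", "backend-1"}
	Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
	fmt.Println(addrs) // [backend-1 backend-0]
}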
@@ -483,37 +441,16 @@ func (s) TestPickFirst_ParseConfig_Success(t *testing.T) {

	tests := []struct {
		name          string
		envVar        bool
		serviceConfig string
		wantFirstAddr bool
	}{
		{
			name:   "env var disabled with empty pickfirst config",
			envVar: false,
			name:          "empty pickfirst config",
			serviceConfig: `{"loadBalancingConfig": [{"pick_first":{}}]}`,
			wantFirstAddr: true,
		},
		{
			name:   "env var disabled with non-empty good pickfirst config",
			envVar: false,
			serviceConfig: `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}`,
			wantFirstAddr: true,
		},
		{
			name:   "env var disabled with non-empty bad pickfirst config",
			envVar: false,
			serviceConfig: `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": 666 }}]}`,
			wantFirstAddr: true,
		},
		{
			name:   "env var enabled with empty pickfirst config",
			envVar: true,
			serviceConfig: `{"loadBalancingConfig": [{"pick_first":{}}]}`,
			wantFirstAddr: true,
		},
		{
			name:   "env var enabled with empty good pickfirst config",
			envVar: true,
			name:          "empty good pickfirst config",
			serviceConfig: `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}`,
			wantFirstAddr: false,
		},
@@ -521,11 +458,6 @@ func (s) TestPickFirst_ParseConfig_Success(t *testing.T) {

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Set the env var as specified by the test table.
			origPickFirstLBConfig := envconfig.PickFirstLBConfig
			envconfig.PickFirstLBConfig = test.envVar
			defer func() { envconfig.PickFirstLBConfig = origPickFirstLBConfig }()

			// Set up our backends.
			cc, r, backends := setupPickFirst(t, 2)
			addrs := stubBackendsToResolverAddrs(backends)
@@ -555,10 +487,6 @@ func (s) TestPickFirst_ParseConfig_Success(t *testing.T) {

// Test config parsing for a bad service config.
func (s) TestPickFirst_ParseConfig_Failure(t *testing.T) {
	origPickFirstLBConfig := envconfig.PickFirstLBConfig
	envconfig.PickFirstLBConfig = true
	defer func() { envconfig.PickFirstLBConfig = origPickFirstLBConfig }()

	// Service config should fail with the below config. Name resolvers are
	// expected to perform this parsing before they push the parsed service
	// config to the channel.
@@ -25,7 +25,6 @@ import (

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/stubserver"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/internal/testutils/xds/e2e"
@@ -84,12 +83,6 @@ func ringhashCluster(clusterName, edsServiceName string) *v3clusterpb.Cluster {
// propagated to pick the ring_hash policy. It doesn't test the affinity
// behavior in ring_hash policy.
func (s) TestClientSideAffinitySanityCheck(t *testing.T) {
	defer func() func() {
		old := envconfig.XDSRingHash
		envconfig.XDSRingHash = true
		return func() { envconfig.XDSRingHash = old }
	}()()

	managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup1()
@@ -93,11 +93,6 @@ func clusterWithLBConfiguration(t *testing.T, clusterName, edsServiceName string
// first) child load balancing policy, and asserts the correct distribution
// based on the locality weights and the endpoint picking policy specified.
func (s) TestWrrLocality(t *testing.T) {
	oldCustomLBSupport := envconfig.XDSCustomLBPolicy
	envconfig.XDSCustomLBPolicy = true
	defer func() {
		envconfig.XDSCustomLBPolicy = oldCustomLBSupport
	}()
	oldLeastRequestLBSupport := envconfig.LeastRequestLB
	envconfig.LeastRequestLB = true
	defer func() {
@@ -29,7 +29,6 @@ import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/stubserver"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/internal/testutils/xds/bootstrap"
@@ -54,10 +53,6 @@ import (
// - CDS: old style, no authority (default authority)
// - EDS: new style, in a different authority
func (s) TestClientSideFederation(t *testing.T) {
	oldXDSFederation := envconfig.XDSFederation
	envconfig.XDSFederation = true
	defer func() { envconfig.XDSFederation = oldXDSFederation }()

	// Start a management server as the default authority.
	serverDefaultAuth, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
@@ -150,10 +145,6 @@ func (s) TestClientSideFederation(t *testing.T) {
// in the bootstrap configuration. The test verifies that RPCs on the ClientConn
// fail with an appropriate error.
func (s) TestFederation_UnknownAuthorityInDialTarget(t *testing.T) {
	oldXDSFederation := envconfig.XDSFederation
	envconfig.XDSFederation = true
	defer func() { envconfig.XDSFederation = oldXDSFederation }()

	// Setting up the management server is not *really* required for this test
	// case. All we need is a bootstrap configuration which does not contain the
	// authority mentioned in the dial target. But setting up the management
@@ -209,10 +200,6 @@ func (s) TestFederation_UnknownAuthorityInDialTarget(t *testing.T) {
// with an authority which is not specified in the bootstrap configuration. The
// test verifies that RPCs fail with an appropriate error.
func (s) TestFederation_UnknownAuthorityInReceivedResponse(t *testing.T) {
	oldXDSFederation := envconfig.XDSFederation
	envconfig.XDSFederation = true
	defer func() { envconfig.XDSFederation = oldXDSFederation }()

	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)
@@ -26,7 +26,6 @@ import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/stubserver"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/internal/testutils/rls"
@@ -100,13 +99,8 @@ func (s) TestRLSinxDS(t *testing.T) {
}

func testRLSinxDS(t *testing.T, lbPolicy e2e.LoadBalancingPolicy) {
	oldRLS := envconfig.XDSRLS
	envconfig.XDSRLS = true
	internal.RegisterRLSClusterSpecifierPluginForTesting()
	defer func() {
		envconfig.XDSRLS = oldRLS
		internal.UnregisterRLSClusterSpecifierPluginForTesting()
	}()
	defer internal.UnregisterRLSClusterSpecifierPluginForTesting()

	// Set up all components and configuration necessary - management server,
	// xDS resolver, fake RLS Server, and xDS configuration which specifies an
@@ -35,7 +35,6 @@ import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/internal/testutils/xds/e2e"
	"google.golang.org/grpc/status"
@@ -60,11 +59,6 @@ import (
// (NonForwardingAction), and the RPC's matching those routes should proceed as
// normal.
func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) {
	oldRBAC := envconfig.XDSRBAC
	envconfig.XDSRBAC = true
	defer func() {
		envconfig.XDSRBAC = oldRBAC
	}()
	managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup1()
@@ -408,11 +402,6 @@ func serverListenerWithRBACHTTPFilters(t *testing.T, host string, port uint32, r
// as normal and certain RPC's are denied by the RBAC HTTP Filter which gets
// called by hooked xds interceptors.
func (s) TestRBACHTTPFilter(t *testing.T) {
	oldRBAC := envconfig.XDSRBAC
	envconfig.XDSRBAC = true
	defer func() {
		envconfig.XDSRBAC = oldRBAC
	}()
	internal.RegisterRBACHTTPFilterForTesting()
	defer internal.UnregisterRBACHTTPFilterForTesting()
	tests := []struct {
@@ -682,18 +671,6 @@ func (s) TestRBACHTTPFilter(t *testing.T) {
				t.Fatalf("UnaryCall() returned err with status: %v, wantStatusUnaryCall: %v", err, test.wantStatusUnaryCall)
			}

			// Toggle the RBAC Env variable off, this should disable RBAC and allow any RPC's through (will not go through
			// routing or processed by HTTP Filters and thus will never get denied by RBAC).
			envconfig.XDSRBAC = false
			if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK {
				t.Fatalf("EmptyCall() returned err with status: %v, once RBAC is disabled all RPC's should proceed as normal", status.Code(err))
			}
			if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.OK {
				t.Fatalf("UnaryCall() returned err with status: %v, once RBAC is disabled all RPC's should proceed as normal", status.Code(err))
			}
			// Toggle RBAC back on for next iterations.
			envconfig.XDSRBAC = true

			if test.wantAuthzOutcomes != nil {
				if diff := cmp.Diff(lb.authzDecisionStat, test.wantAuthzOutcomes); diff != "" {
					t.Fatalf("authorization decisions do not match\ndiff (-got +want):\n%s", diff)
@@ -827,13 +804,6 @@ func serverListenerWithBadRouteConfiguration(t *testing.T, host string, port uin
}

func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) {
	// Turn RBAC support on.
	oldRBAC := envconfig.XDSRBAC
	envconfig.XDSRBAC = true
	defer func() {
		envconfig.XDSRBAC = oldRBAC
	}()

	managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup1()
@@ -883,63 +853,6 @@ func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) {
	}
}

func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) {
	// Turn RBAC support off.
	oldRBAC := envconfig.XDSRBAC
	envconfig.XDSRBAC = false
	defer func() {
		envconfig.XDSRBAC = oldRBAC
	}()

	managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup1()

	lis, cleanup2 := setupGRPCServer(t, bootstrapContents)
	defer cleanup2()

	host, port, err := hostPortFromListener(lis)
	if err != nil {
		t.Fatalf("failed to retrieve host and port of server: %v", err)
	}
	const serviceName = "my-service-fallback"

	// The inbound listener needs a route table that will never match on a VH,
	// and thus shouldn't allow incoming RPC's to proceed.
	resources := e2e.DefaultClientResources(e2e.ResourceParams{
		DialTarget: serviceName,
		NodeID:     nodeID,
		Host:       host,
		Port:       port,
		SecLevel:   e2e.SecurityLevelNone,
	})
	// This bad route configuration shouldn't affect incoming RPC's from
	// proceeding as normal, as the configuration shouldn't be parsed due to the
	// RBAC Environment variable not being set to true.
	inboundLis := serverListenerWithBadRouteConfiguration(t, host, port)
	resources.Listeners = append(resources.Listeners, inboundLis)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Setup the management server with client and server-side resources.
	if err := managementServer.Update(ctx, resources); err != nil {
		t.Fatal(err)
	}

	cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver))
	if err != nil {
		t.Fatalf("failed to dial local test server: %v", err)
	}
	defer cc.Close()

	client := testgrpc.NewTestServiceClient(cc)
	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK {
		t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err))
	}
	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.OK {
		t.Fatalf("UnaryCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err))
	}
}

type statAuditLogger struct {
	authzDecisionStat map[bool]int // Map to hold counts of authorization decisions
	lastEvent         *audit.Event // Field to store last received event
@@ -133,16 +133,12 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts
		return nil, fmt.Errorf("failed to start xDS client: %v", err)
	}

	// Create and return an xDS resolver.
	t.URL.Scheme = xdsName
	if envconfig.XDSFederation {
		t = resolver.Target{
			URL: url.URL{
				Scheme: xdsName,
				Host:   c2pAuthority,
				Path:   t.URL.Path,
			},
		}
	t = resolver.Target{
		URL: url.URL{
			Scheme: xdsName,
			Host:   c2pAuthority,
			Path:   t.URL.Path,
		},
	}
	xdsR, err := resolver.Get(xdsName).Build(t, cc, opts)
	if err != nil {
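With the federation guard gone, the target is always rewritten to carry the c2p authority. A standalone sketch of that URL rewrite; the scheme and authority values below are hypothetical stand-ins for xdsName and c2pAuthority:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical stand-ins for xdsName and c2pAuthority.
	const xdsName = "xds"
	const c2pAuthority = "traffic-director-c2p.xds.googleapis.com"

	orig := url.URL{Scheme: "google-c2p", Path: "/some-service"}
	// Rewrite: keep the path, switch to the xds scheme, and pin the authority.
	rewritten := url.URL{Scheme: xdsName, Host: c2pAuthority, Path: orig.Path}
	fmt.Println(rewritten.String()) // xds://traffic-director-c2p.xds.googleapis.com/some-service
}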
@@ -197,11 +193,7 @@ func newNode(zone string, ipv6Capable bool) *v3corepb.Node {

// runDirectPath returns whether this resolver should use direct path.
//
// direct path is enabled if this client is running on GCE, and the normal xDS
// is not used (bootstrap env vars are not set) or federation is enabled.
// direct path is enabled if this client is running on GCE.
func runDirectPath() bool {
	if !onGCE() {
		return false
	}
	return envconfig.XDSFederation || envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == ""
	return onGCE()
}
@@ -29,7 +29,6 @@ import (
	"google.golang.org/grpc/credentials/tls/certprovider"
	"google.golang.org/grpc/internal/balancer/nop"
	xdsinternal "google.golang.org/grpc/internal/credentials/xds"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/internal/pretty"
@@ -636,20 +635,18 @@ func (b *cdsBalancer) generateDMsForCluster(name string, depth int, dms []cluste
			DNSHostname: cluster.DNSHostName,
		}
	}
	if envconfig.XDSOutlierDetection {
		odJSON := cluster.OutlierDetection
		// "In the cds LB policy, if the outlier_detection field is not set in
		// the Cluster resource, a "no-op" outlier_detection config will be
		// generated in the corresponding DiscoveryMechanism config, with all
		// fields unset." - A50
		if odJSON == nil {
			// This will pick up top level defaults in Cluster Resolver
			// ParseConfig, but sre and fpe will be nil still so still a
			// "no-op" config.
			odJSON = json.RawMessage(`{}`)
		}
		dm.OutlierDetection = odJSON
	odJSON := cluster.OutlierDetection
	// "In the cds LB policy, if the outlier_detection field is not set in
	// the Cluster resource, a "no-op" outlier_detection config will be
	// generated in the corresponding DiscoveryMechanism config, with all
	// fields unset." - A50
	if odJSON == nil {
		// This will pick up top level defaults in Cluster Resolver
		// ParseConfig, but sre and fpe will be nil still so still a
		// "no-op" config.
		odJSON = json.RawMessage(`{}`)
	}
	dm.OutlierDetection = odJSON

	return append(dms, dm), true, nil
}
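The "no-op" behavior above relies on an empty JSON object unmarshaling to a config with every field unset. A minimal sketch, assuming a simplified config shape (the real outlier detection LBConfig has more fields, but the principle is the same):

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical, simplified outlier detection config.
type odConfig struct {
	SuccessRateEjection       *struct{} `json:"successRateEjection,omitempty"`
	FailurePercentageEjection *struct{} `json:"failurePercentageEjection,omitempty"`
}

func main() {
	var cfg odConfig
	// `{}` unmarshals with both ejection algorithms nil, i.e. a "no-op"
	// outlier detection config per gRFC A50.
	if err := json.Unmarshal(json.RawMessage(`{}`), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.SuccessRateEjection == nil, cfg.FailurePercentageEjection == nil) // true true
}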
@@ -32,7 +32,6 @@ import (
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/balancer/nop"
	"google.golang.org/grpc/internal/buffer"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/internal/pretty"
@@ -118,20 +117,18 @@ func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, err
		return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(j), err)
	}

	if envconfig.XDSOutlierDetection {
		for i, dm := range cfg.DiscoveryMechanisms {
			lbCfg, err := odParser.ParseConfig(dm.OutlierDetection)
			if err != nil {
				return nil, fmt.Errorf("error parsing Outlier Detection config %v: %v", dm.OutlierDetection, err)
			}
			odCfg, ok := lbCfg.(*outlierdetection.LBConfig)
			if !ok {
				// Shouldn't happen, Parser built at build time with Outlier Detection
				// builder pulled from gRPC LB Registry.
				return nil, fmt.Errorf("odParser returned config with unexpected type %T: %v", lbCfg, lbCfg)
			}
			cfg.DiscoveryMechanisms[i].outlierDetection = *odCfg
	for i, dm := range cfg.DiscoveryMechanisms {
		lbCfg, err := odParser.ParseConfig(dm.OutlierDetection)
		if err != nil {
			return nil, fmt.Errorf("error parsing Outlier Detection config %v: %v", dm.OutlierDetection, err)
		}
		odCfg, ok := lbCfg.(*outlierdetection.LBConfig)
		if !ok {
			// Shouldn't happen, Parser built at build time with Outlier Detection
			// builder pulled from gRPC LB Registry.
			return nil, fmt.Errorf("odParser returned config with unexpected type %T: %v", lbCfg, lbCfg)
		}
		cfg.DiscoveryMechanisms[i].outlierDetection = *odCfg
	}
	if err := json.Unmarshal(cfg.XDSLBPolicy, &cfg.xdsLBPolicy); err != nil {
		// This will never occur, valid configuration is emitted from the xDS
@@ -24,7 +24,6 @@ import (
	"sort"

	"google.golang.org/grpc/balancer/weightedroundrobin"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/hierarchy"
	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
	"google.golang.org/grpc/resolver"
@@ -98,47 +97,27 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi
			}
			retConfig.Priorities = append(retConfig.Priorities, names...)
			retAddrs = append(retAddrs, addrs...)
			var odCfgs map[string]*outlierdetection.LBConfig
			if envconfig.XDSOutlierDetection {
				odCfgs = convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection)
				for n, c := range odCfgs {
					retConfig.Children[n] = &priority.Child{
						Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c},
						// Ignore all re-resolution from EDS children.
						IgnoreReresolutionRequests: true,
					}
				}
				continue
			}
			for n, c := range configs {
			odCfgs := convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection)
			for n, c := range odCfgs {
				retConfig.Children[n] = &priority.Child{
					Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: c},
					Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c},
					// Ignore all re-resolution from EDS children.
					IgnoreReresolutionRequests: true,
				}

			}
			continue
		case DiscoveryMechanismTypeLogicalDNS:
			name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism)
			retConfig.Priorities = append(retConfig.Priorities, name)
			retAddrs = append(retAddrs, addrs...)
			var odCfg *outlierdetection.LBConfig
			if envconfig.XDSOutlierDetection {
				odCfg = makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection)
				retConfig.Children[name] = &priority.Child{
					Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg},
					// Do not ignore re-resolution from DNS children; they will trigger
					// DNS to re-resolve.
					IgnoreReresolutionRequests: false,
				}
				continue
			}
			odCfg := makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection)
			retConfig.Children[name] = &priority.Child{
				Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config},
				Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg},
				// Do not ignore re-resolution from DNS children; they will trigger
				// DNS to re-resolve.
				IgnoreReresolutionRequests: false,
			}
			continue
		}
	}
	return retConfig, retAddrs, nil
@@ -36,7 +36,6 @@ import (
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
	"google.golang.org/grpc/internal/buffer"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/grpcrand"
	"google.golang.org/grpc/internal/grpcsync"
@@ -55,9 +54,7 @@ var (
const Name = "outlier_detection_experimental"

func init() {
	if envconfig.XDSOutlierDetection {
		balancer.Register(bb{})
	}
	balancer.Register(bb{})
}

type bb struct{}
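Registration now happens unconditionally in init(), so the policy is always resolvable by name. A self-contained sketch of this registry pattern, with hypothetical names mirroring the shape of balancer.Register:

package main

import "fmt"

// Hypothetical minimal balancer registry.
type Builder interface{ Name() string }

var registry = map[string]Builder{}

func Register(b Builder) { registry[b.Name()] = b }

type bb struct{}

func (bb) Name() string { return "outlier_detection_experimental" }

// init runs at program start, so the policy is always available by name;
// previously an env var guard could leave it unregistered.
func init() { Register(bb{}) }

func main() {
	_, ok := registry["outlier_detection_experimental"]
	fmt.Println(ok) // true
}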
@@ -27,7 +27,6 @@ import (
	"github.com/golang/protobuf/ptypes"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/envconfig"
	rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1"
	"google.golang.org/grpc/xds/internal/clusterspecifier"
	"google.golang.org/protobuf/encoding/protojson"
@@ -35,9 +34,7 @@ import (
)

func init() {
	if envconfig.XDSRLS {
		clusterspecifier.Register(rls{})
	}
	clusterspecifier.Register(rls{})

	// TODO: Remove these once the RLS env var is removed.
	internal.RegisterRLSClusterSpecifierPluginForTesting = func() {
@@ -28,7 +28,6 @@ import (
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/internal/xds/rbac"
	"google.golang.org/grpc/xds/internal/httpfilter"
@@ -39,9 +38,7 @@ import (
)

func init() {
	if envconfig.XDSRBAC {
		httpfilter.Register(builder{})
	}
	httpfilter.Register(builder{})

	// TODO: Remove these once the RBAC env var is removed.
	internal.RegisterRBACHTTPFilterForTesting = func() {
@@ -27,7 +27,6 @@ import (
	"github.com/golang/protobuf/proto"
	"github.com/google/uuid"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal/envconfig"
	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/internal/testutils/xds/e2e"
@@ -113,14 +112,6 @@ func (testClusterSpecifierPlugin) ParseClusterSpecifierConfig(cfg proto.Message)
// The test also verifies that a change in the cluster specifier plugin config
// results in an appropriate change in the service config pushed by the resolver.
func (s) TestResolverClusterSpecifierPlugin(t *testing.T) {
	// Env var GRPC_EXPERIMENTAL_XDS_RLS_LB controls whether the xDS client
	// allows routes with cluster specifier plugin as their route action.
	oldRLS := envconfig.XDSRLS
	envconfig.XDSRLS = true
	defer func() {
		envconfig.XDSRLS = oldRLS
	}()

	// Spin up an xDS management server for the test.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
@@ -211,14 +202,6 @@ func (s) TestResolverClusterSpecifierPlugin(t *testing.T) {
// their corresponding configurations remain in service config if RPCs are in
// flight.
func (s) TestXDSResolverDelayedOnCommittedCSP(t *testing.T) {
	// Env var GRPC_EXPERIMENTAL_XDS_RLS_LB controls whether the xDS client
	// allows routes with cluster specifier plugin as their route action.
	oldRLS := envconfig.XDSRLS
	envconfig.XDSRLS = true
	defer func() {
		envconfig.XDSRLS = oldRLS
	}()

	// Spin up an xDS management server for the test.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
@@ -29,7 +29,6 @@ import (

	xxhash "github.com/cespare/xxhash/v2"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/grpcrand"
	"google.golang.org/grpc/internal/grpcutil"
	iresolver "google.golang.org/grpc/internal/resolver"
@@ -183,10 +182,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP
	}

	lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name)
	// Request Hashes are only applicable for a Ring Hash LB.
	if envconfig.XDSRingHash {
		lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies))
	}
	lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies))

	config := &iresolver.RPCConfig{
		// Communicate to the LB policy the chosen cluster and request hash, if Ring Hash LB policy.
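SetRequestHash presumably stashes the computed hash in the RPC's context for the ring hash picker to read later. A generic sketch of that context-plumbing pattern, with hypothetical key and helper names (not the real ringhash API):

package main

import (
	"context"
	"fmt"
)

type requestHashKey struct{}

// setRequestHash and getRequestHash are hypothetical stand-ins for the
// setter used above and the picker-side accessor.
func setRequestHash(ctx context.Context, h uint64) context.Context {
	return context.WithValue(ctx, requestHashKey{}, h)
}

func getRequestHash(ctx context.Context) uint64 {
	h, _ := ctx.Value(requestHashKey{}).(uint64)
	return h
}

func main() {
	ctx := setRequestHash(context.Background(), 0xdeadbeef)
	fmt.Printf("%#x\n", getRequestHash(ctx)) // 0xdeadbeef
}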
@@ -35,7 +35,6 @@ import (
	"google.golang.org/grpc/credentials/insecure"
	xdscreds "google.golang.org/grpc/credentials/xds"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/grpcsync"
	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/internal/testutils"
@@ -159,11 +158,6 @@ func (s) TestResolverBuilder_DifferentBootstrapConfigs(t *testing.T) {
// Test builds an xDS resolver and verifies that the resource name specified in
// the discovery request matches expectations.
func (s) TestResolverResourceName(t *testing.T) {
	// Federation support is required when new style names are used.
	oldXDSFederation := envconfig.XDSFederation
	envconfig.XDSFederation = true
	defer func() { envconfig.XDSFederation = oldXDSFederation }()

	tests := []struct {
		name                         string
		listenerResourceNameTemplate string
@@ -482,10 +476,6 @@ func (s) TestResolverGoodServiceUpdate(t *testing.T) {
// specifying to generate a hash. The configSelector generated should
// successfully generate a Hash.
func (s) TestResolverRequestHash(t *testing.T) {
	oldRH := envconfig.XDSRingHash
	envconfig.XDSRingHash = true
	defer func() { envconfig.XDSRingHash = oldRH }()

	// Spin up an xDS management server for the test.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
@@ -33,7 +33,6 @@ import (
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/grpclog"
	internalbackoff "google.golang.org/grpc/internal/backoff"
	"google.golang.org/grpc/internal/envconfig"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
@@ -259,9 +258,6 @@ func (l *listenerWrapper) Accept() (net.Conn, error) {
			conn.Close()
			continue
		}
		if !envconfig.XDSRBAC {
			return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil
		}
		var rc xdsresource.RouteConfigUpdate
		if fc.InlineRouteConfig != nil {
			rc = *fc.InlineRouteConfig
@@ -369,10 +365,8 @@ func (l *listenerWrapper) handleLDSUpdate(update xdsresource.ListenerUpdate) {
	// Server's state to ServingModeNotServing. That prevents new connections
	// from being accepted, whereas here we simply want the clients to reconnect
	// to get the updated configuration.
	if envconfig.XDSRBAC {
		if l.drainCallback != nil {
			l.drainCallback(l.Listener.Addr())
		}
	if l.drainCallback != nil {
		l.drainCallback(l.Listener.Addr())
	}
	l.rdsHandler.updateRouteNamesToWatch(ilc.FilterChains.RouteConfigNames)
	// If there are no dynamic RDS Configurations still needed to be received
@@ -499,18 +499,10 @@ func newConfigFromContents(data []byte) (*Config, error) {
				return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
			}
		case "client_default_listener_resource_name_template":
			if !envconfig.XDSFederation {
				logger.Warningf("Bootstrap field %v is not supported when Federation is disabled", k)
				continue
			}
			if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil {
				return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
			}
		case "authorities":
			if !envconfig.XDSFederation {
				logger.Warningf("Bootstrap field %v is not supported when Federation is disabled", k)
				continue
			}
			if err := json.Unmarshal(v, &config.Authorities); err != nil {
				return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
			}
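The switch above dispatches on top-level bootstrap JSON keys via map[string]json.RawMessage. A minimal, runnable sketch of that dispatch pattern, assuming a trimmed-down Config rather than the real bootstrap type:

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical, trimmed-down bootstrap config.
type Config struct {
	ClientDefaultListenerResourceNameTemplate string
	Authorities                               map[string]json.RawMessage
}

func newConfigFromContents(data []byte) (*Config, error) {
	var jsonData map[string]json.RawMessage
	if err := json.Unmarshal(data, &jsonData); err != nil {
		return nil, fmt.Errorf("xds: failed to parse bootstrap config: %v", err)
	}
	config := &Config{}
	for k, v := range jsonData {
		switch k {
		case "client_default_listener_resource_name_template":
			if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil {
				return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed: %v", string(v), k, err)
			}
		case "authorities":
			if err := json.Unmarshal(v, &config.Authorities); err != nil {
				return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed: %v", string(v), k, err)
			}
		}
	}
	return config, nil
}

func main() {
	cfg, err := newConfigFromContents([]byte(`{"client_default_listener_resource_name_template": "%s", "authorities": {}}`))
	fmt.Println(cfg.ClientDefaultListenerResourceNameTemplate, err) // %s <nil>
}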
@@ -975,10 +975,6 @@ func TestNewConfigWithFederation(t *testing.T) {
		},
	}

	oldFederationSupport := envconfig.XDSFederation
	envconfig.XDSFederation = true
	defer func() { envconfig.XDSFederation = oldFederationSupport }()

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			testNewConfigWithFileNameEnv(t, test.name, test.wantErr, test.wantConfig)
@@ -65,8 +65,6 @@ var (
// Returns two listeners used by the default and non-default management servers
// respectively, and the xDS client and its close function.
func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time.Duration) (*testutils.ListenerWrapper, *testutils.ListenerWrapper, xdsclient.XDSClient, func()) {
	overrideFedEnvVar(t)

	// Create listener wrappers which notify onto a channel whenever a new
	// connection is accepted. We use this to track the number of transports
	// used by the xDS client.
@@ -165,7 +165,6 @@ func (s) TestCDSWatch(t *testing.T) {

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			overrideFedEnvVar(t)
			mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
			defer cleanup()

@@ -293,7 +292,6 @@ func (s) TestCDSWatch_TwoWatchesForSameResourceName(t *testing.T) {

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			overrideFedEnvVar(t)
			mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
			defer cleanup()

@@ -374,7 +372,6 @@
// the management server containing both resources results in the invocation of
// all watch callbacks.
func (s) TestCDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

@@ -445,7 +442,6 @@
// watch callback is invoked with the contents from the cache, instead of a
// request being sent to the management server.
func (s) TestCDSWatch_ResourceCaching(t *testing.T) {
	overrideFedEnvVar(t)
	firstRequestReceived := false
	firstAckReceived := grpcsync.NewEvent()
	secondRequestReceived := grpcsync.NewEvent()

@@ -534,7 +530,6 @@
// verifies that the watch callback is invoked with an error once the
// watchExpiryTimer fires.
func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)

@@ -573,7 +568,6 @@
// verifies that the behavior associated with the expiry timer (i.e, callback
// invocation with error) does not take place.
func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)

@@ -640,7 +634,6 @@
// callback associated with that resource. It should not result in the
// invocation of the watch callback associated with the deleted resource.
func (s) TesCDSWatch_ResourceRemoved(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

@@ -745,7 +738,6 @@
// server is NACK'ed by the xdsclient. The test verifies that the error is
// propagated to the watcher.
func (s) TestCDSWatch_NACKError(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

@@ -792,7 +784,6 @@
// to the valid resource receive the update, while watchers corresponding to the
// invalid resource receive an error.
func (s) TestCDSWatch_PartialValid(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

@@ -862,7 +853,6 @@
// expected to wait for the watch timeout to expire before concluding that the
// resource does not exist on the server
func (s) TestCDSWatch_PartialResponse(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()
@@ -202,7 +202,6 @@ func (s) TestEDSWatch(t *testing.T) {

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			overrideFedEnvVar(t)
			mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
			defer cleanup()

@@ -370,7 +369,6 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) {

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			overrideFedEnvVar(t)
			mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
			defer cleanup()

@@ -452,7 +450,6 @@
//
// The test is run with both old and new style names.
func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

@@ -528,7 +525,6 @@
// watch callback is invoked with the contents from the cache, instead of a
// request being sent to the management server.
func (s) TestEDSWatch_ResourceCaching(t *testing.T) {
	overrideFedEnvVar(t)
	firstRequestReceived := false
	firstAckReceived := grpcsync.NewEvent()
	secondRequestReceived := grpcsync.NewEvent()

@@ -628,7 +624,6 @@
// verifies that the watch callback is invoked with an error once the
// watchExpiryTimer fires.
func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)

@@ -667,7 +662,6 @@
// verifies that the behavior associated with the expiry timer (i.e, callback
// invocation with error) does not take place.
func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)

@@ -737,7 +731,6 @@
// server is NACK'ed by the xdsclient. The test verifies that the error is
// propagated to the watcher.
func (s) TestEDSWatch_NACKError(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

@@ -784,7 +777,6 @@
// to the valid resource receive the update, while watchers corresponding to the
// invalid resource receive an error.
func (s) TestEDSWatch_PartialValid(t *testing.T) {
	overrideFedEnvVar(t)
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()
@@ -44,8 +44,6 @@ const testNonDefaultAuthority = "non-default-authority"
// Returns the management server associated with the non-default authority, the
// nodeID to use, and the xDS client.
func setupForFederationWatchersTest(t *testing.T) (*e2e.ManagementServer, string, xdsclient.XDSClient) {
	overrideFedEnvVar(t)

	// Start a management server as the default authority.
	serverDefaultAuthority, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
@@ -29,7 +29,6 @@ import (
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/google/uuid"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/internal/testutils"
@@ -49,12 +48,6 @@ import (
	_ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter.
)

func overrideFedEnvVar(t *testing.T) {
	oldFed := envconfig.XDSFederation
	envconfig.XDSFederation = true
	t.Cleanup(func() { envconfig.XDSFederation = oldFed })
}

type s struct {
	grpctest.Tester
}
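overrideFedEnvVar (removed here) is an instance of the save/override/restore pattern via t.Cleanup. A generic, self-contained sketch of the pattern, with a hypothetical flag variable standing in for an envconfig variable:

package envtest

import "testing"

// featureEnabled is a hypothetical package-level flag standing in for a
// variable such as envconfig.XDSFederation.
var featureEnabled = false

// overrideFeature flips the flag for one test and registers a cleanup so the
// original value is restored even if the test fails.
func overrideFeature(t *testing.T, v bool) {
	t.Helper()
	old := featureEnabled
	featureEnabled = v
	t.Cleanup(func() { featureEnabled = old })
}

func TestWithFeatureOn(t *testing.T) {
	overrideFeature(t, true)
	if !featureEnabled {
		t.Fatal("feature should be enabled for this test")
	}
}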
@ -194,7 +187,6 @@ func (s) TestLDSWatch(t *testing.T) {
|
|||
|
||||
for _, test := range tests {
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
overrideFedEnvVar(t)
|
||||
mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -324,7 +316,6 @@ func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) {
|
|||
|
||||
for _, test := range tests {
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
overrideFedEnvVar(t)
|
||||
mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -410,7 +401,6 @@ func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) {
|
|||
//
|
||||
// The test is run with both old and new style names.
|
||||
func (s) TestLDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
|
||||
overrideFedEnvVar(t)
|
||||
mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -482,7 +472,6 @@ func (s) TestLDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
|
|||
// watch callback is invoked with the contents from the cache, instead of a
|
||||
// request being sent to the management server.
|
||||
func (s) TestLDSWatch_ResourceCaching(t *testing.T) {
|
||||
overrideFedEnvVar(t)
|
||||
firstRequestReceived := false
|
||||
firstAckReceived := grpcsync.NewEvent()
|
||||
secondRequestReceived := grpcsync.NewEvent()
|
||||
|
|
@@ -575,7 +564,6 @@ func (s) TestLDSWatch_ResourceCaching(t *testing.T) {
// verifies that the watch callback is invoked with an error once the
// watchExpiryTimer fires.
func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
    if err != nil {
        t.Fatalf("Failed to spin up the xDS management server: %v", err)

@@ -616,7 +604,6 @@ func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
// verifies that the behavior associated with the expiry timer (i.e, callback
// invocation with error) does not take place.
func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
    if err != nil {
        t.Fatalf("Failed to spin up the xDS management server: %v", err)

@@ -686,7 +673,6 @@ func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
//
// The test is run with both old and new style names.
func (s) TestLDSWatch_ResourceRemoved(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
    defer cleanup()

@@ -794,7 +780,6 @@ func (s) TestLDSWatch_ResourceRemoved(t *testing.T) {
// server is NACK'ed by the xdsclient. The test verifies that the error is
// propagated to the watcher.
func (s) TestLDSWatch_NACKError(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
    defer cleanup()

@@ -843,7 +828,6 @@ func (s) TestLDSWatch_NACKError(t *testing.T) {
// to the valid resource receive the update, while watchers corresponding to the
// invalid resource receive an error.
func (s) TestLDSWatch_PartialValid(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
    defer cleanup()

@@ -918,7 +902,6 @@ func (s) TestLDSWatch_PartialValid(t *testing.T) {
// expected to wait for the watch timeout to expire before concluding that the
// resource does not exist on the server
func (s) TestLDSWatch_PartialResponse(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
    defer cleanup()
@@ -51,8 +51,6 @@ var (
// a resource, and more watches are registered from the first watch's callback.
// The test verifies that this scenario does not lead to a deadlock.
func (s) TestWatchCallAnotherWatch(t *testing.T) {
    overrideFedEnvVar(t)

    // Start an xDS management server and set the option to allow it to respond
    // to requests which only specify a subset of the configured resources.
    mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true})

@@ -152,8 +150,6 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) {
//
// It also verifies the same behavior holds after a stream restart.
func (s) TestNodeProtoSentOnlyInFirstRequest(t *testing.T) {
    overrideFedEnvVar(t)

    // Create a restartable listener which can close existing connections.
    l, err := testutils.LocalTCPListener()
    if err != nil {
@@ -175,7 +175,6 @@ func (s) TestRDSWatch(t *testing.T) {

    for _, test := range tests {
        t.Run(test.desc, func(t *testing.T) {
            overrideFedEnvVar(t)
            mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
            defer cleanup()

@@ -345,7 +344,6 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) {

    for _, test := range tests {
        t.Run(test.desc, func(t *testing.T) {
            overrideFedEnvVar(t)
            mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
            defer cleanup()

@@ -431,7 +429,6 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) {
//
// The test is run with both old and new style names.
func (s) TestRDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
    defer cleanup()

@@ -513,7 +510,6 @@ func (s) TestRDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
// watch callback is invoked with the contents from the cache, instead of a
// request being sent to the management server.
func (s) TestRDSWatch_ResourceCaching(t *testing.T) {
    overrideFedEnvVar(t)
    firstRequestReceived := false
    firstAckReceived := grpcsync.NewEvent()
    secondRequestReceived := grpcsync.NewEvent()

@@ -616,7 +612,6 @@ func (s) TestRDSWatch_ResourceCaching(t *testing.T) {
// verifies that the watch callback is invoked with an error once the
// watchExpiryTimer fires.
func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
    if err != nil {
        t.Fatalf("Failed to spin up the xDS management server: %v", err)

@@ -658,7 +653,6 @@ func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
// verifies that the behavior associated with the expiry timer (i.e, callback
// invocation with error) does not take place.
func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
    if err != nil {
        t.Fatalf("Failed to spin up the xDS management server: %v", err)

@@ -730,7 +724,6 @@ func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
// server is NACK'ed by the xdsclient. The test verifies that the error is
// propagated to the watcher.
func (s) TestRDSWatch_NACKError(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
    defer cleanup()

@@ -779,7 +772,6 @@ func (s) TestRDSWatch_NACKError(t *testing.T) {
// to the valid resource receive the update, while watchers corresponding to the
// invalid resource receive an error.
func (s) TestRDSWatch_PartialValid(t *testing.T) {
    overrideFedEnvVar(t)
    mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
    defer cleanup()
@@ -67,9 +67,6 @@ const (
)

func convertRingHashProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
    if !envconfig.XDSRingHash {
        return nil, nil
    }
    rhProto := &v3ringhashpb.RingHash{}
    if err := proto.Unmarshal(rawProto, rhProto); err != nil {
        return nil, fmt.Errorf("failed to unmarshal resource: %v", err)

@@ -103,9 +100,6 @@ type pfConfig struct {
}

func convertPickFirstProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
    if !envconfig.PickFirstLBConfig {
        return nil, nil
    }
    pfProto := &v3pickfirstpb.PickFirst{}
    if err := proto.Unmarshal(rawProto, pfProto); err != nil {
        return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
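With the guards removed, these converters always translate the xDS LB-policy protos into service-config JSON. A sketch of driving the ring_hash conversion through the registry (the exact JSON shape is inferred from the test expectations that follow; xdslbregistry is internal to grpc-go, so the import is illustrative):

package main

import (
    "fmt"

    v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
    v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
    v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3"
    "google.golang.org/protobuf/types/known/anypb"
    "google.golang.org/protobuf/types/known/wrapperspb"

    "google.golang.org/grpc/xds/internal/xdslbregistry"
)

func main() {
    rh, err := anypb.New(&v3ringhashpb.RingHash{
        HashFunction:    v3ringhashpb.RingHash_XX_HASH,
        MinimumRingSize: wrapperspb.UInt64(10),
        MaximumRingSize: wrapperspb.UInt64(100),
    })
    if err != nil {
        panic(err)
    }
    policy := &v3clusterpb.LoadBalancingPolicy{
        Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{{
            TypedExtensionConfig: &v3corepb.TypedExtensionConfig{TypedConfig: rh},
        }},
    }
    cfg, err := xdslbregistry.ConvertToServiceConfig(policy, 0)
    // Expected shape, per the tests below:
    // [{"ring_hash_experimental": {"minRingSize": 10, "maxRingSize": 100}}]
    fmt.Println(string(cfg), err)
}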
@@ -79,8 +79,6 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) {
        name       string
        policy     *v3clusterpb.LoadBalancingPolicy
        wantConfig string // JSON config
        rhDisabled bool
        pfDisabled bool
        lrEnabled  bool
    }{
        {

@@ -180,30 +178,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) {
            wantConfig: `[{"round_robin": {}}]`,
        },
        {
            name: "ring_hash_disabled_rh_rr_use_first_supported",
            policy: &v3clusterpb.LoadBalancingPolicy{
                Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{
                    {
                        TypedExtensionConfig: &v3corepb.TypedExtensionConfig{
                            TypedConfig: testutils.MarshalAny(t, &v3ringhashpb.RingHash{
                                HashFunction:    v3ringhashpb.RingHash_XX_HASH,
                                MinimumRingSize: wrapperspb.UInt64(10),
                                MaximumRingSize: wrapperspb.UInt64(100),
                            }),
                        },
                    },
                    {
                        TypedExtensionConfig: &v3corepb.TypedExtensionConfig{
                            TypedConfig: testutils.MarshalAny(t, &v3roundrobinpb.RoundRobin{}),
                        },
                    },
                },
            },
            wantConfig: `[{"round_robin": {}}]`,
            rhDisabled: true,
        },
        {
            name: "pick_first_enabled_pf_rr_use_pick_first",
            name: "pf_rr_use_pick_first",
            policy: &v3clusterpb.LoadBalancingPolicy{
                Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{
                    {

@@ -332,18 +307,10 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) {

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            if test.rhDisabled {
                defer func(old bool) { envconfig.XDSRingHash = old }(envconfig.XDSRingHash)
                envconfig.XDSRingHash = false
            }
            if test.lrEnabled {
                defer func(old bool) { envconfig.LeastRequestLB = old }(envconfig.LeastRequestLB)
                envconfig.LeastRequestLB = true
            }
            if test.pfDisabled {
                defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig)
                envconfig.PickFirstLBConfig = false
            }
            rawJSON, err := xdslbregistry.ConvertToServiceConfig(test.policy, 0)
            if err != nil {
                t.Fatalf("ConvertToServiceConfig(%s) failed: %v", pretty.ToJSON(test.policy), err)
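The one-line restore idiom in that loop, defer func(old bool) { ... }(envconfig.Flag), works because a deferred call's arguments are evaluated immediately at the defer statement: the current flag value is captured before the test mutates it, and written back when the function returns. A standalone sketch of the semantics:

package main

import "fmt"

var featureFlag = true

func toggleForScope() {
    // The argument is evaluated now, capturing the current value; the
    // deferred body runs on return and writes that value back.
    defer func(old bool) { featureFlag = old }(featureFlag)
    featureFlag = false
    fmt.Println("inside:", featureFlag) // inside: false
}

func main() {
    toggleForScope()
    fmt.Println("after:", featureFlag) // after: true
}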
@@ -27,7 +27,6 @@ import (
    v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes"
    "google.golang.org/grpc/internal/envconfig"
    "google.golang.org/grpc/internal/resolver"
    "google.golang.org/grpc/xds/internal/httpfilter"
    "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"

@@ -629,9 +628,6 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error)
            // TODO: Implement terminal filter logic, as per A36.
            filterChain.HTTPFilters = filters
            seenHCM = true
            if !envconfig.XDSRBAC {
                continue
            }
            switch hcm.RouteSpecifier.(type) {
            case *v3httppb.HttpConnectionManager_Rds:
                if hcm.GetRds().GetConfigSource().GetAds() == nil {
@@ -37,7 +37,6 @@ import (
    "google.golang.org/protobuf/types/known/anypb"
    "google.golang.org/protobuf/types/known/wrapperspb"

    "google.golang.org/grpc/internal/envconfig"
    iresolver "google.golang.org/grpc/internal/resolver"
    "google.golang.org/grpc/internal/testutils"
    "google.golang.org/grpc/internal/testutils/xds/e2e"

@@ -529,11 +528,6 @@ func (s) TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) {
// TestNewFilterChainImpl_Success_RouteUpdate tests the construction of the
// filter chain with valid HTTP Filters present.
func (s) TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) {
    oldRBAC := envconfig.XDSRBAC
    envconfig.XDSRBAC = true
    defer func() {
        envconfig.XDSRBAC = oldRBAC
    }()
    tests := []struct {
        name string
        lis  *v3listenerpb.Listener

@@ -769,11 +763,6 @@ func (s) TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) {
// TestNewFilterChainImpl_Failure_BadRouteUpdate verifies cases where the Route
// Update in the filter chain are invalid.
func (s) TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) {
    oldRBAC := envconfig.XDSRBAC
    envconfig.XDSRBAC = true
    defer func() {
        envconfig.XDSRBAC = oldRBAC
    }()
    tests := []struct {
        name string
        lis  *v3listenerpb.Listener

@@ -981,11 +970,6 @@ func (s) TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) {
// TestNewFilterChainImpl_Success_HTTPFilters tests the construction of the
// filter chain with valid HTTP Filters present.
func (s) TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) {
    oldRBAC := envconfig.XDSRBAC
    envconfig.XDSRBAC = true
    defer func() {
        envconfig.XDSRBAC = oldRBAC
    }()
    tests := []struct {
        name string
        lis  *v3listenerpb.Listener

@@ -1304,11 +1288,6 @@ func (s) TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) {
// TestNewFilterChainImpl_Success_SecurityConfig verifies cases where the
// security configuration in the filter chain contains valid data.
func (s) TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) {
    oldRBAC := envconfig.XDSRBAC
    envconfig.XDSRBAC = true
    defer func() {
        envconfig.XDSRBAC = oldRBAC
    }()
    tests := []struct {
        desc string
        lis  *v3listenerpb.Listener

@@ -1536,11 +1515,6 @@ func (s) TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) {
// success at config validation time and the filter chains which contains
// unsupported match fields will be skipped at lookup time.
func (s) TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) {
    oldRBAC := envconfig.XDSRBAC
    envconfig.XDSRBAC = true
    defer func() {
        envconfig.XDSRBAC = oldRBAC
    }()
    unspecifiedEntry := &destPrefixEntry{
        srcTypeArr: [3]*sourcePrefixes{
            {

@@ -1706,11 +1680,6 @@ func (s) TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) {
// TestNewFilterChainImpl_Success_AllCombinations verifies different
// combinations of the supported match criteria.
func (s) TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) {
    oldRBAC := envconfig.XDSRBAC
    envconfig.XDSRBAC = true
    defer func() {
        envconfig.XDSRBAC = oldRBAC
    }()
    tests := []struct {
        desc string
        lis  *v3listenerpb.Listener

@@ -2357,11 +2326,6 @@ func (s) TestLookup_Failures(t *testing.T) {
}

func (s) TestLookup_Successes(t *testing.T) {
    oldRBAC := envconfig.XDSRBAC
    envconfig.XDSRBAC = true
    defer func() {
        envconfig.XDSRBAC = oldRBAC
    }()
    lisWithDefaultChain := &v3listenerpb.Listener{
        FilterChains: []*v3listenerpb.FilterChain{
            {
@@ -21,8 +21,6 @@ import (
    "net/url"
    "sort"
    "strings"

    "google.golang.org/grpc/internal/envconfig"
)

// FederationScheme is the scheme of a federation resource name.

@@ -56,10 +54,6 @@ type Name struct {
// The caller can tell if the parsing is successful by checking the returned
// Scheme.
func ParseName(name string) *Name {
    if !envconfig.XDSFederation {
        // Return "" scheme to use the default authority for the server.
        return &Name{ID: name}
    }
    if !strings.Contains(name, "://") {
        // Only the long form URL, with ://, is valid.
        return &Name{ID: name}
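After this change ParseName always attempts the long-form parse. Its observable behavior is pinned down by the test table that follows; as an illustrative sketch (the import path is the internal xdsresource package, so external code cannot actually import it):

package main

import (
    "fmt"

    "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

func main() {
    // Long-form xdstp names split into scheme, authority, type and id;
    // context parameters are sorted by key when re-serialized.
    n := xdsresource.ParseName("xdstp://auth/type/id?b=2&a=1")
    fmt.Println(n.Scheme, n.Authority, n.Type, n.ID) // xdstp auth type id
    fmt.Println(n.String())                          // xdstp://auth/type/id?a=1&b=2

    // Anything without "://" is treated as an old-style opaque name.
    fmt.Println(xdsresource.ParseName("test-resource").ID) // test-resource
}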
@@ -22,69 +22,53 @@ import (

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    "google.golang.org/grpc/internal/envconfig"
)

func TestParseName(t *testing.T) {
    tests := []struct {
        name    string
        env     bool // Whether federation env is set to true.
        in      string
        want    *Name
        wantStr string
    }{
        {
            name:    "env off",
            env:     false,
            in:      "xdstp://auth/type/id",
            want:    &Name{ID: "xdstp://auth/type/id"},
            wantStr: "xdstp://auth/type/id",
        },
        {
            name:    "old style name",
            env:     true,
            in:      "test-resource",
            want:    &Name{ID: "test-resource"},
            wantStr: "test-resource",
        },
        {
            name:    "invalid not url",
            env:     true,
            in:      "a:/b/c",
            want:    &Name{ID: "a:/b/c"},
            wantStr: "a:/b/c",
        },
        {
            name:    "invalid no resource type",
            env:     true,
            in:      "xdstp://auth/id",
            want:    &Name{ID: "xdstp://auth/id"},
            wantStr: "xdstp://auth/id",
        },
        {
            name:    "valid with no authority",
            env:     true,
            in:      "xdstp:///type/id",
            want:    &Name{Scheme: "xdstp", Authority: "", Type: "type", ID: "id"},
            wantStr: "xdstp:///type/id",
        },
        {
            name:    "valid no ctx params",
            env:     true,
            in:      "xdstp://auth/type/id",
            want:    &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id"},
            wantStr: "xdstp://auth/type/id",
        },
        {
            name:    "valid with ctx params",
            env:     true,
            in:      "xdstp://auth/type/id?a=1&b=2",
            want:    &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id", ContextParams: map[string]string{"a": "1", "b": "2"}},
            wantStr: "xdstp://auth/type/id?a=1&b=2",
        },
        {
            name:    "valid with ctx params sorted by keys",
            env:     true,
            in:      "xdstp://auth/type/id?b=2&a=1",
            want:    &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id", ContextParams: map[string]string{"a": "1", "b": "2"}},
            wantStr: "xdstp://auth/type/id?a=1&b=2",

@@ -92,11 +76,6 @@ func TestParseName(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            defer func() func() {
                oldEnv := envconfig.XDSFederation
                envconfig.XDSFederation = tt.env
                return func() { envconfig.XDSFederation = oldEnv }
            }()()
            got := ParseName(tt.in)
            if !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(Name{}, "processingDirective")) {
                t.Errorf("ParseName() = %#v, want %#v", got, tt.want)
@@ -100,19 +100,13 @@ func (s) TestValidateCluster_Success(t *testing.T) {
        },
    })

    origCustomLBSupport := envconfig.XDSCustomLBPolicy
    envconfig.XDSCustomLBPolicy = true
    defer func() {
        envconfig.XDSCustomLBPolicy = origCustomLBSupport
    }()
    defer func(old bool) { envconfig.LeastRequestLB = old }(envconfig.LeastRequestLB)
    envconfig.LeastRequestLB = true
    tests := []struct {
        name             string
        cluster          *v3clusterpb.Cluster
        wantUpdate       xdsresource.ClusterUpdate
        wantLBConfig     *iserviceconfig.BalancerConfig
        customLBDisabled bool
        name         string
        cluster      *v3clusterpb.Cluster
        wantUpdate   xdsresource.ClusterUpdate
        wantLBConfig *iserviceconfig.BalancerConfig
    }{
        {
            name: "happy-case-logical-dns",

@@ -483,53 +477,6 @@ func (s) TestValidateCluster_Success(t *testing.T) {
                },
            },
        },
        {
            name: "custom-lb-env-var-not-set-ignore-load-balancing-policy-use-lb-policy-and-enum",
            cluster: &v3clusterpb.Cluster{
                Name:                 clusterName,
                ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
                EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
                    EdsConfig: &v3corepb.ConfigSource{
                        ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{
                            Ads: &v3corepb.AggregatedConfigSource{},
                        },
                    },
                    ServiceName: serviceName,
                },
                LbPolicy: v3clusterpb.Cluster_RING_HASH,
                LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{
                    RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{
                        MinimumRingSize: wrapperspb.UInt64(20),
                        MaximumRingSize: wrapperspb.UInt64(200),
                    },
                },
                LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{
                    Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{
                        {
                            TypedExtensionConfig: &v3corepb.TypedExtensionConfig{
                                TypedConfig: testutils.MarshalAny(t, &v3ringhashpb.RingHash{
                                    HashFunction:    v3ringhashpb.RingHash_XX_HASH,
                                    MinimumRingSize: wrapperspb.UInt64(10),
                                    MaximumRingSize: wrapperspb.UInt64(100),
                                }),
                            },
                        },
                    },
                },
            },
            wantUpdate: xdsresource.ClusterUpdate{
                ClusterName:    clusterName,
                EDSServiceName: serviceName,
            },
            wantLBConfig: &iserviceconfig.BalancerConfig{
                Name: "ring_hash_experimental",
                Config: &ringhash.LBConfig{
                    MinRingSize: 20,
                    MaxRingSize: 200,
                },
            },
            customLBDisabled: true,
        },
        {
            name: "load-balancing-policy-takes-precedence-over-lb-policy-and-enum",
            cluster: &v3clusterpb.Cluster{

@@ -580,12 +527,6 @@ func (s) TestValidateCluster_Success(t *testing.T) {

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            if test.customLBDisabled {
                envconfig.XDSCustomLBPolicy = false
                defer func() {
                    envconfig.XDSCustomLBPolicy = true
                }()
            }
            update, err := xdsresource.ValidateClusterAndConstructClusterUpdateForTesting(test.cluster)
            if err != nil {
                t.Errorf("validateClusterAndConstructClusterUpdate(%+v) failed: %v", test.cluster, err)
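The removed test case above documented that, with the custom-LB env var off, the enum-based configuration won: Cluster_RING_HASH plus RingHashLbConfig maps onto the ring_hash_experimental policy. A sketch of that correspondence (the service-config shape is taken from the wantLBConfig expectations above):

package main

import (
    v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

// ringHashCluster corresponds, per the test expectations above, to the LB config
// {"ring_hash_experimental": {"minRingSize": 20, "maxRingSize": 200}}.
var ringHashCluster = &v3clusterpb.Cluster{
    LbPolicy: v3clusterpb.Cluster_RING_HASH,
    LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{
        RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{
            MinimumRingSize: wrapperspb.UInt64(20),
            MaximumRingSize: wrapperspb.UInt64(200),
        },
    },
}

func main() { _ = ringHashCluster }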
@@ -87,9 +87,6 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu
    case v3clusterpb.Cluster_ROUND_ROBIN:
        lbPolicy = []byte(`[{"xds_wrr_locality_experimental": {"childPolicy": [{"round_robin": {}}]}}]`)
    case v3clusterpb.Cluster_RING_HASH:
        if !envconfig.XDSRingHash {
            return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
        }
        rhc := cluster.GetRingHashLbConfig()
        if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH {
            return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster)

@@ -132,24 +129,18 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu
    // Process security configuration received from the control plane iff the
    // corresponding environment variable is set.
    var sc *SecurityConfig
    if envconfig.XDSClientSideSecurity {
        var err error
        if sc, err = securityConfigFromCluster(cluster); err != nil {
            return ClusterUpdate{}, err
        }
    if sc, err = securityConfigFromCluster(cluster); err != nil {
        return ClusterUpdate{}, err
    }

    // Process outlier detection received from the control plane iff the
    // corresponding environment variable is set.
    var od json.RawMessage
    if envconfig.XDSOutlierDetection {
        var err error
        if od, err = outlierConfigFromCluster(cluster); err != nil {
            return ClusterUpdate{}, err
        }
    if od, err = outlierConfigFromCluster(cluster); err != nil {
        return ClusterUpdate{}, err
    }

    if cluster.GetLoadBalancingPolicy() != nil && envconfig.XDSCustomLBPolicy {
    if cluster.GetLoadBalancingPolicy() != nil {
        lbPolicy, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy(), 0)
        if err != nil {
            return ClusterUpdate{}, fmt.Errorf("error converting LoadBalancingPolicy %v in response: %+v: %v", cluster.GetLoadBalancingPolicy(), cluster, err)

@@ -201,9 +192,6 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu
        }
        return ret, nil
    case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS:
        if !envconfig.XDSAggregateAndDNS {
            return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
        }
        ret.ClusterType = ClusterTypeLogicalDNS
        dnsHN, err := dnsHostNameFromCluster(cluster)
        if err != nil {

@@ -212,9 +200,6 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu
        ret.DNSHostName = dnsHN
        return ret, nil
    case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate":
        if !envconfig.XDSAggregateAndDNS {
            return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
        }
        clusters := &v3aggregateclusterpb.ClusterConfig{}
        if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil {
            return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
@@ -25,7 +25,6 @@ import (

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    "google.golang.org/grpc/internal/envconfig"
    "google.golang.org/grpc/internal/pretty"
    "google.golang.org/grpc/internal/testutils"
    "google.golang.org/grpc/internal/xds/matcher"

@@ -53,11 +52,6 @@ const (
var emptyUpdate = ClusterUpdate{ClusterName: clusterName, LRSServerConfig: ClusterLRSOff}

func (s) TestValidateCluster_Failure(t *testing.T) {
    oldCustomLBSupport := envconfig.XDSCustomLBPolicy
    envconfig.XDSCustomLBPolicy = true
    defer func() {
        envconfig.XDSCustomLBPolicy = oldCustomLBSupport
    }()
    tests := []struct {
        name    string
        cluster *v3clusterpb.Cluster

@@ -277,12 +271,6 @@ func (s) TestValidateCluster_Failure(t *testing.T) {
        },
    }

    oldAggregateAndDNSSupportEnv := envconfig.XDSAggregateAndDNS
    envconfig.XDSAggregateAndDNS = true
    defer func() { envconfig.XDSAggregateAndDNS = oldAggregateAndDNSSupportEnv }()
    oldRingHashSupport := envconfig.XDSRingHash
    envconfig.XDSRingHash = true
    defer func() { envconfig.XDSRingHash = oldRingHashSupport }()
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            if update, err := validateClusterAndConstructClusterUpdate(test.cluster); err == nil {

@@ -292,54 +280,6 @@ func (s) TestValidateCluster_Failure(t *testing.T) {
    }
}

func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) {
    // Turn off the env var protection for client-side security.
    origClientSideSecurityEnvVar := envconfig.XDSClientSideSecurity
    envconfig.XDSClientSideSecurity = false
    defer func() { envconfig.XDSClientSideSecurity = origClientSideSecurityEnvVar }()

    cluster := &v3clusterpb.Cluster{
        Name:                 clusterName,
        ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS},
        EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{
            EdsConfig: &v3corepb.ConfigSource{
                ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{
                    Ads: &v3corepb.AggregatedConfigSource{},
                },
            },
            ServiceName: serviceName,
        },
        LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN,
        TransportSocket: &v3corepb.TransportSocket{
            Name: "envoy.transport_sockets.tls",
            ConfigType: &v3corepb.TransportSocket_TypedConfig{
                TypedConfig: testutils.MarshalAny(t, &v3tlspb.UpstreamTlsContext{
                    CommonTlsContext: &v3tlspb.CommonTlsContext{
                        ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{
                            ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{
                                InstanceName:    "rootInstance",
                                CertificateName: "rootCert",
                            },
                        },
                    },
                }),
            },
        },
    }
    wantUpdate := ClusterUpdate{
        ClusterName:     clusterName,
        EDSServiceName:  serviceName,
        LRSServerConfig: ClusterLRSOff,
    }
    gotUpdate, err := validateClusterAndConstructClusterUpdate(cluster)
    if err != nil {
        t.Errorf("validateClusterAndConstructClusterUpdate() failed: %v", err)
    }
    if diff := cmp.Diff(wantUpdate, gotUpdate, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" {
        t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, got):\n%s", diff)
    }
}

func (s) TestSecurityConfigFromCommonTLSContextUsingNewFields_ErrorCases(t *testing.T) {
    tests := []struct {
        name string
@@ -26,7 +26,6 @@ import (
    v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
    "github.com/golang/protobuf/proto"
    "github.com/google/go-cmp/cmp"
    "google.golang.org/grpc/internal/envconfig"
    "google.golang.org/grpc/internal/pretty"
    "google.golang.org/grpc/internal/testutils"
    "google.golang.org/grpc/internal/testutils/xds/e2e"

@@ -592,11 +591,6 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
}

func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
    oldRBAC := envconfig.XDSRBAC
    envconfig.XDSRBAC = true
    defer func() {
        envconfig.XDSRBAC = oldRBAC
    }()
    const (
        v3LDSTarget = "grpc/server?xds.resource.listening_address=0.0.0.0:9999"
        testVersion = "test-version-lds-server"
@@ -26,7 +26,6 @@ import (

    "github.com/golang/protobuf/proto"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/internal/envconfig"
    "google.golang.org/grpc/internal/xds/matcher"
    "google.golang.org/grpc/xds/internal/clusterspecifier"
    "google.golang.org/protobuf/types/known/anypb"

@@ -75,13 +74,9 @@ func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, erro
// we are looking for.
func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration) (RouteConfigUpdate, error) {
    vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts()))
    csps := make(map[string]clusterspecifier.BalancerConfig)
    if envconfig.XDSRLS {
        var err error
        csps, err = processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins)
        if err != nil {
            return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err)
        }
    csps, err := processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins)
    if err != nil {
        return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err)
    }
    // cspNames represents all the cluster specifiers referenced by Route
    // Actions - any cluster specifiers not referenced by a Route Action can be

@@ -309,13 +304,11 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif
        action := r.GetRoute()

        // Hash Policies are only applicable for a Ring Hash LB.
        if envconfig.XDSRingHash {
            hp, err := hashPoliciesProtoToSlice(action.HashPolicy)
            if err != nil {
                return nil, nil, err
            }
            route.HashPolicies = hp
        hp, err := hashPoliciesProtoToSlice(action.HashPolicy)
        if err != nil {
            return nil, nil, err
        }
        route.HashPolicies = hp

        switch a := action.GetClusterSpecifier().(type) {
        case *v3routepb.RouteAction_Cluster:

@@ -356,10 +349,6 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif
            // This means that if this env var is not set, we should treat
            // it as if it we didn't know about the cluster_specifier_plugin
            // at all.
            if !envconfig.XDSRLS {
                logger.Warningf("Ignoring route %+v with unsupported route_action field: cluster_specifier_plugin", r)
                continue
            }
            if _, ok := csps[a.ClusterSpecifierPlugin]; !ok {
                // "When processing RouteActions, if any action includes a
                // cluster_specifier_plugin value that is not in

@@ -389,7 +378,6 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif
            route.MaxStreamDuration = &d
        }

        var err error
        route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy())
        if err != nil {
            return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err)
@@ -30,7 +30,6 @@ import (
    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/internal/envconfig"
    "google.golang.org/grpc/internal/pretty"
    "google.golang.org/grpc/internal/testutils"
    "google.golang.org/grpc/internal/xds/matcher"

@@ -148,12 +147,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
            }},
        }
    }
    goodUpdate = RouteConfigUpdate{
        VirtualHosts: []*VirtualHost{{
            Domains: []string{ldsTarget},
            Routes:  nil,
        }},
    }
    goodUpdateWithNormalRoute = RouteConfigUpdate{
        VirtualHosts: []*VirtualHost{
            {

@@ -222,17 +215,11 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
        defaultRetryBackoff = RetryBackoff{BaseInterval: 25 * time.Millisecond, MaxInterval: 250 * time.Millisecond}
    )

    oldRLS := envconfig.XDSRLS
    defer func() {
        envconfig.XDSRLS = oldRLS
    }()

    tests := []struct {
        name       string
        rc         *v3routepb.RouteConfiguration
        wantUpdate RouteConfigUpdate
        wantError  bool
        rlsEnabled bool
    }{
        {
            name: "default-route-match-field-is-nil",

@@ -669,24 +656,21 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
            rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{
                clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, false),
            }, []string{"cspA"}),
            wantError:  true,
            rlsEnabled: true,
            wantError: true,
        },
        {
            name: "error-in-cluster-specifier-plugin-conversion-method",
            rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{
                clusterSpecifierPlugin("cspA", errorClusterSpecifierConfig, false),
            }, []string{"cspA"}),
            wantError:  true,
            rlsEnabled: true,
            wantError: true,
        },
        {
            name: "route-action-that-references-undeclared-cluster-specifier-plugin",
            rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{
                clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false),
            }, []string{"cspA", "cspB"}),
            wantError:  true,
            rlsEnabled: true,
            wantError: true,
        },
        {
            name: "emitted-cluster-specifier-plugins",

@@ -694,7 +678,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
                clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false),
            }, []string{"cspA"}),
            wantUpdate: goodUpdateWithClusterSpecifierPluginA,
            rlsEnabled: true,
        },
        {
            name: "deleted-cluster-specifier-plugins-not-referenced",

@@ -703,21 +686,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
                clusterSpecifierPlugin("cspB", mockClusterSpecifierConfig, false),
            }, []string{"cspA"}),
            wantUpdate: goodUpdateWithClusterSpecifierPluginA,
            rlsEnabled: true,
        },
        {
            name: "ignore-error-in-cluster-specifier-plugin-env-var-off",
            rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{
                clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, false),
            }, []string{}),
            wantUpdate: goodUpdate,
        },
        {
            name: "cluster-specifier-plugin-referenced-env-var-off",
            rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{
                clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false),
            }, []string{"cspA"}),
            wantUpdate: goodUpdate,
        },
        // This tests a scenario where a cluster specifier plugin is not found
        // and is optional. Any routes referencing that not found optional

@@ -729,7 +697,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
                clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, true),
            }, []string{"cspA"}),
            wantUpdate: goodUpdateWithNormalRoute,
            rlsEnabled: true,
        },
        // This tests a scenario where a route has an unsupported cluster
        // specifier. Any routes with an unsupported cluster specifier should be

@@ -739,12 +706,10 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) {
            name:       "unsupported-cluster-specifier-route-should-ignore",
            rc:         goodRouteConfigWithUnsupportedClusterSpecifier,
            wantUpdate: goodUpdateWithNormalRoute,
            rlsEnabled: true,
        },
    }
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            envconfig.XDSRLS = test.rlsEnabled
            gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc)
            if (gotError != nil) != test.wantError ||
                !cmp.Equal(gotUpdate, test.wantUpdate, cmpopts.EquateEmpty(),

@@ -1508,9 +1473,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) {
            return fmt.Sprint(fc)
        }),
    }
    oldRingHashSupport := envconfig.XDSRingHash
    envconfig.XDSRingHash = true
    defer func() { envconfig.XDSRingHash = oldRingHashSupport }()
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, _, err := routesProtoToSlice(tt.routes, nil)

@@ -1621,9 +1583,6 @@ func (s) TestHashPoliciesProtoToSlice(t *testing.T) {
        },
    }

    oldRingHashSupport := envconfig.XDSRingHash
    envconfig.XDSRingHash = true
    defer func() { envconfig.XDSRingHash = oldRingHashSupport }()
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := hashPoliciesProtoToSlice(tt.hashPolicies)
@@ -31,7 +31,6 @@ import (
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/internal"
    "google.golang.org/grpc/internal/buffer"
    "google.golang.org/grpc/internal/envconfig"
    internalgrpclog "google.golang.org/grpc/internal/grpclog"
    "google.golang.org/grpc/internal/grpcsync"
    iresolver "google.golang.org/grpc/internal/resolver"
@@ -395,10 +394,8 @@ func routeAndProcess(ctx context.Context) error {
// xdsUnaryInterceptor is the unary interceptor added to the gRPC server to
// perform any xDS specific functionality on unary RPCs.
func xdsUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
    if envconfig.XDSRBAC {
        if err := routeAndProcess(ctx); err != nil {
            return nil, err
        }
    if err := routeAndProcess(ctx); err != nil {
        return nil, err
    }
    return handler(ctx, req)
}

@@ -406,10 +403,8 @@ func xdsUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo,
// xdsStreamInterceptor is the stream interceptor added to the gRPC server to
// perform any xDS specific functionality on streaming RPCs.
func xdsStreamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
    if envconfig.XDSRBAC {
        if err := routeAndProcess(ss.Context()); err != nil {
            return err
        }
    if err := routeAndProcess(ss.Context()); err != nil {
        return err
    }
    return handler(srv, ss)
}
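For context, interceptors with these signatures attach to a server through the public gRPC API. A hedged sketch of that wiring (the interceptor bodies below are stand-ins, since xdsUnaryInterceptor and xdsStreamInterceptor are unexported and installed by the xDS-enabled server itself):

package main

import (
    "context"

    "google.golang.org/grpc"
)

// Stand-ins with the same shape as the interceptors above: run some
// per-RPC processing, then delegate to the handler.
func unaryIcpt(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
    // routeAndProcess-style work would happen here, now unconditionally.
    return handler(ctx, req)
}

func streamIcpt(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
    return handler(srv, ss)
}

func main() {
    s := grpc.NewServer(
        grpc.ChainUnaryInterceptor(unaryIcpt),
        grpc.ChainStreamInterceptor(streamIcpt),
    )
    defer s.Stop()
}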