xds/federation: update xdsclient to support multi authority (#5042)

Menghan Li 2022-01-04 15:50:50 -08:00 committed by GitHub
parent b069440926
commit afded7231d
20 changed files with 1595 additions and 1711 deletions
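
For orientation, a minimal sketch of the bootstrap shape this commit builds on (field names are taken from the tests in the diff below; the server URIs and the authority name are made up): old-style resource names keep using the top-level server, while "xdstp://<authority>/..." names are routed to the matching entry in Authorities.

var exampleBootstrapConfig = &bootstrap.Config{
	// Non-federated (old style) resource names use the top-level server.
	XDSServer: &bootstrap.ServerConfig{ServerURI: "default-xds-server.example.com"},
	// Resource names such as "xdstp://test-authority/..." use the server
	// configured for that authority instead.
	Authorities: map[string]*bootstrap.Authority{
		"test-authority": {
			XDSServer: &bootstrap.ServerConfig{ServerURI: "authority-xds-server.example.com"},
		},
	},
}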

@ -0,0 +1,228 @@
/*
*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xdsclient
import (
"errors"
"fmt"
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
"google.golang.org/grpc/xds/internal/xdsclient/load"
"google.golang.org/grpc/xds/internal/xdsclient/pubsub"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)
const federationScheme = "xdstp"
// findAuthority returns the authority for this name. If it doesn't already
// exist, one will be created.
//
// Note that this doesn't always create a new authority. Authorities with the
// same config but different names are shared.
//
// The returned unref function must be called when the caller is done using this
// authority, without holding c.authorityMu.
//
// Caller must not hold c.authorityMu.
func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref func(), _ error) {
scheme, authority := n.Scheme, n.Authority
c.authorityMu.Lock()
defer c.authorityMu.Unlock()
if c.done.HasFired() {
return nil, nil, errors.New("the xds-client is closed")
}
config := c.config.XDSServer
if scheme == federationScheme {
cfg, ok := c.config.Authorities[authority]
if !ok {
return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority)
}
config = cfg.XDSServer
}
a, err := c.newAuthority(config)
if err != nil {
return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err)
}
// Every authority returned by this function will be used by a watch, so
// take the ref here.
//
// Note that this must be done while c.authorityMu is held, to avoid the
// race where the authority is returned, but before the watch starts, the
// last existing watch on it is canceled (in another goroutine), causing this
// authority to be removed, so the new watch would start on a removed
// authority.
//
// unref() will be done when the watch is canceled.
a.ref()
return a, func() { c.unrefAuthority(a) }, nil
}
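// exampleWatchCluster is illustrative only (not part of this commit): it shows
// the caller pattern implied by the contract above. Find the authority for the
// resource name, start the watch on it, and call unref only after the watch is
// canceled. The real callers are the WatchCluster/WatchListener/... methods
// updated later in this commit.
func exampleWatchCluster(c *clientImpl, resourceName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) {
	// e.g. resourceName = "xdstp://test-authority/envoy.config.cluster.v3.Cluster/c1"
	n := xdsresource.ParseName(resourceName)
	a, unref, err := c.findAuthority(n)
	if err != nil {
		cb(xdsresource.ClusterUpdate{}, err)
		return func() {}
	}
	cancelF := a.watchCluster(n.String(), cb)
	return func() {
		cancelF() // stop the watch first ...
		unref()   // ... then drop the ref taken by findAuthority.
	}
}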
// newAuthority creates a new authority for the given config. Before doing
// that, it checks the cache to see if an authority for this config already
// exists.
//
// Caller must hold c.authorityMu.
func (c *clientImpl) newAuthority(config *bootstrap.ServerConfig) (_ *authority, retErr error) {
// First check if there's already an authority for this config. If found, it
// means this authority is used by other watches (could be the same
// authority name, or a different authority name but the same server
// config). Return it.
configStr := config.String()
if a, ok := c.authorities[configStr]; ok {
return a, nil
}
// Second check if there's an authority in the idle cache. If found, it
// means this authority was created, but moved to the idle cache because the
// watch was canceled. Move it from idle cache to the authority cache, and
// return.
if old, ok := c.idleAuthorities.Remove(configStr); ok {
oldA, _ := old.(*authority)
if oldA != nil {
c.authorities[configStr] = oldA
return oldA, nil
}
}
// Make a new authority since there's no existing authority for this config.
ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, c.logger)}
defer func() {
if retErr != nil {
ret.close()
}
}()
ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger)
if err != nil {
return nil, err
}
ret.controller = ctr
// Add it to the cache, so it will be reused.
c.authorities[configStr] = ret
return ret, nil
}
// unrefAuthority unrefs the authority. It also moves the authority to the
// idle cache if its ref count drops to 0.
//
// This function doesn't need to be called explicitly. It's called by the
// unref function returned from findAuthority().
//
// Caller must not hold c.authorityMu.
func (c *clientImpl) unrefAuthority(a *authority) {
c.authorityMu.Lock()
defer c.authorityMu.Unlock()
if a.unref() > 0 {
return
}
configStr := a.config.String()
delete(c.authorities, configStr)
c.idleAuthorities.Add(configStr, a, func() {
a.close()
})
}
// authority is a combination of pubsub and the controller for this authority.
//
// Note that it might make sense to use one pubsub for all the resources (for
// all the controllers). One downside is the handling of StoW APIs (LDS/CDS).
// These responses contain all the resources from that control plane, so pubsub
// would need to keep a list of resources from each control plane, to know
// which resources were removed.
type authority struct {
config *bootstrap.ServerConfig
pubsub *pubsub.Pubsub
controller controllerInterface
refCount int
}
// caller must hold parent's authorityMu.
func (a *authority) ref() {
a.refCount++
}
// caller must hold parent's authorityMu.
func (a *authority) unref() int {
a.refCount--
return a.refCount
}
func (a *authority) close() {
if a.pubsub != nil {
a.pubsub.Close()
}
if a.controller != nil {
a.controller.Close()
}
}
func (a *authority) watchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) {
first, cancelF := a.pubsub.WatchListener(serviceName, cb)
if first {
a.controller.AddWatch(xdsresource.ListenerResource, serviceName)
}
return func() {
if cancelF() {
a.controller.RemoveWatch(xdsresource.ListenerResource, serviceName)
}
}
}
func (a *authority) watchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) {
first, cancelF := a.pubsub.WatchRouteConfig(routeName, cb)
if first {
a.controller.AddWatch(xdsresource.RouteConfigResource, routeName)
}
return func() {
if cancelF() {
a.controller.RemoveWatch(xdsresource.RouteConfigResource, routeName)
}
}
}
func (a *authority) watchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) {
first, cancelF := a.pubsub.WatchCluster(clusterName, cb)
if first {
a.controller.AddWatch(xdsresource.ClusterResource, clusterName)
}
return func() {
if cancelF() {
a.controller.RemoveWatch(xdsresource.ClusterResource, clusterName)
}
}
}
func (a *authority) watchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) {
first, cancelF := a.pubsub.WatchEndpoints(clusterName, cb)
if first {
a.controller.AddWatch(xdsresource.EndpointsResource, clusterName)
}
return func() {
if cancelF() {
a.controller.RemoveWatch(xdsresource.EndpointsResource, clusterName)
}
}
}
func (a *authority) reportLoad(server string) (*load.Store, func()) {
return a.controller.ReportLoad(server)
}
func (a *authority) dump(t xdsresource.ResourceType) map[string]xdsresource.UpdateWithMD {
return a.pubsub.Dump(t)
}

@ -0,0 +1,353 @@
/*
*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xdsclient
import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/internal/testutils"
xdstestutils "google.golang.org/grpc/xds/internal/testutils"
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"
"google.golang.org/protobuf/testing/protocmp"
)
var (
serverConfigs = []*bootstrap.ServerConfig{
{
ServerURI: testXDSServer + "0",
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
CredsType: "creds-0",
TransportAPI: version.TransportV2,
NodeProto: xdstestutils.EmptyNodeProtoV2,
},
{
ServerURI: testXDSServer + "1",
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
CredsType: "creds-1",
TransportAPI: version.TransportV3,
NodeProto: xdstestutils.EmptyNodeProtoV3,
},
{
ServerURI: testXDSServer + "2",
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
CredsType: "creds-2",
TransportAPI: version.TransportV2,
NodeProto: xdstestutils.EmptyNodeProtoV2,
},
}
serverConfigCmpOptions = cmp.Options{
cmpopts.IgnoreFields(bootstrap.ServerConfig{}, "Creds"),
protocmp.Transform(),
}
)
// watchAndFetchNewController starts a CDS watch on the client for the given
// resourceName, and tries to receive a new controller from the ctrlCh.
//
// It returns false if there's no controller in the ctrlCh.
func watchAndFetchNewController(t *testing.T, client *clientImpl, resourceName string, ctrlCh *testutils.Channel) (*testController, bool, func()) {
updateCh := testutils.NewChannel()
cancelWatch := client.WatchCluster(resourceName, func(update xdsresource.ClusterUpdate, err error) {
updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
// Clear the item in the watch channel, otherwise the next watch will block.
authority := xdsresource.ParseName(resourceName).Authority
var config *bootstrap.ServerConfig
if authority == "" {
config = client.config.XDSServer
} else {
authConfig, ok := client.config.Authorities[authority]
if !ok {
t.Fatalf("failed to find authority %q", authority)
}
config = authConfig.XDSServer
}
a := client.authorities[config.String()]
if a == nil {
t.Fatalf("authority for %q is not created", authority)
}
ctrlTemp := a.controller.(*testController)
// Clear the channel so the next watch on this controller can proceed.
ctrlTemp.addWatches[xdsresource.ClusterResource].ReceiveOrFail()
cancelWatchRet := func() {
cancelWatch()
ctrlTemp.removeWatches[xdsresource.ClusterResource].ReceiveOrFail()
}
// Try to receive a new controller.
c, ok := ctrlCh.ReceiveOrFail()
if !ok {
return nil, false, cancelWatchRet
}
ctrl := c.(*testController)
return ctrl, true, cancelWatchRet
}
// TestAuthorityDefaultAuthority covers that a watch for an old style resource
// name (one without authority) builds a controller using the top level server
// config.
func (s) TestAuthorityDefaultAuthority(t *testing.T) {
overrideFedEnvVar(t)
ctrlCh := overrideNewController(t)
client, err := newWithConfig(&bootstrap.Config{
XDSServer: serverConfigs[0],
Authorities: map[string]*bootstrap.Authority{testAuthority: {XDSServer: serverConfigs[1]}},
}, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
t.Cleanup(client.Close)
ctrl, ok, _ := watchAndFetchNewController(t, client, testCDSName, ctrlCh)
if !ok {
t.Fatalf("want a new controller to be built, got none")
}
// Want the default server config.
wantConfig := serverConfigs[0]
if diff := cmp.Diff(ctrl.config, wantConfig, serverConfigCmpOptions); diff != "" {
t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff)
}
}
// TestAuthorityNoneDefaultAuthority covers that a watch with a new style
// resource name creates a controller with the corresponding server config.
func (s) TestAuthorityNoneDefaultAuthority(t *testing.T) {
overrideFedEnvVar(t)
ctrlCh := overrideNewController(t)
client, err := newWithConfig(&bootstrap.Config{
XDSServer: serverConfigs[0],
Authorities: map[string]*bootstrap.Authority{testAuthority: {XDSServer: serverConfigs[1]}},
}, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
t.Cleanup(client.Close)
resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil)
ctrl, ok, _ := watchAndFetchNewController(t, client, resourceName, ctrlCh)
if !ok {
t.Fatalf("want a new controller to be built, got none")
}
// Want the server config for this authority.
wantConfig := serverConfigs[1]
if diff := cmp.Diff(ctrl.config, wantConfig, serverConfigCmpOptions); diff != "" {
t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff)
}
}
// TestAuthorityShare covers that
// - a watch with the same authority name doesn't create a new authority
// - a watch with a different authority name but the same authority config
// doesn't create a new authority
func (s) TestAuthorityShare(t *testing.T) {
overrideFedEnvVar(t)
ctrlCh := overrideNewController(t)
client, err := newWithConfig(&bootstrap.Config{
XDSServer: serverConfigs[0],
Authorities: map[string]*bootstrap.Authority{
testAuthority: {XDSServer: serverConfigs[1]},
testAuthority2: {XDSServer: serverConfigs[1]}, // Another authority name, but with the same config.
},
}, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
t.Cleanup(client.Close)
resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil)
ctrl1, ok1, _ := watchAndFetchNewController(t, client, resourceName, ctrlCh)
if !ok1 {
t.Fatalf("want a new controller to be built, got none")
}
// Want the server config for this authority.
wantConfig := serverConfigs[1]
if diff := cmp.Diff(ctrl1.config, wantConfig, serverConfigCmpOptions); diff != "" {
t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff)
}
// Call the watch with the same authority name. This shouldn't create a new
// controller.
resourceNameSameAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil)
ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameSameAuthority, ctrlCh)
if ok2 {
t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config)
}
// Call the watch with a different authority name, but the same server
// config. This shouldn't create a new controller.
resourceNameSameConfig := buildResourceName(xdsresource.ClusterResource, testAuthority2, testCDSName+"1", nil)
if ctrl, ok, _ := watchAndFetchNewController(t, client, resourceNameSameConfig, ctrlCh); ok {
t.Fatalf("an unexpected controller is built with config: %v", ctrl.config)
}
}
// TestAuthorityIdleTimeout covers that
// - authorities are put in a timeout cache when the last watch is canceled
// - idle authorities are not immediately closed. They will be closed after a
// timeout.
func (s) TestAuthorityIdleTimeout(t *testing.T) {
overrideFedEnvVar(t)
ctrlCh := overrideNewController(t)
const idleTimeout = 50 * time.Millisecond
client, err := newWithConfig(&bootstrap.Config{
XDSServer: serverConfigs[0],
Authorities: map[string]*bootstrap.Authority{
testAuthority: {XDSServer: serverConfigs[1]},
},
}, defaultWatchExpiryTimeout, idleTimeout)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
t.Cleanup(client.Close)
resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil)
ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh)
if !ok1 {
t.Fatalf("want a new controller to be built, got none")
}
var cancelWatch2 func()
// Call the watch with the same authority name. This shouldn't create a new
// controller.
resourceNameSameAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil)
ctrl2, ok2, cancelWatch2 := watchAndFetchNewController(t, client, resourceNameSameAuthority, ctrlCh)
if ok2 {
t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config)
}
cancelWatch1()
if ctrl1.done.HasFired() {
t.Fatalf("controller is closed immediately when the watch is canceled, wanted to be put in the idle cache")
}
// Cancel the second watch, should put controller in the idle cache.
cancelWatch2()
if ctrl1.done.HasFired() {
t.Fatalf("controller is closed when the second watch is closed")
}
time.Sleep(idleTimeout * 2)
if !ctrl1.done.HasFired() {
t.Fatalf("controller is not closed after idle timeout")
}
}
// TestAuthorityClientClose covers that the authorities in use and in idle cache
// are all closed when the client is closed.
func (s) TestAuthorityClientClose(t *testing.T) {
overrideFedEnvVar(t)
ctrlCh := overrideNewController(t)
client, err := newWithConfig(&bootstrap.Config{
XDSServer: serverConfigs[0],
Authorities: map[string]*bootstrap.Authority{
testAuthority: {XDSServer: serverConfigs[1]},
},
}, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
t.Cleanup(client.Close)
resourceName := testCDSName
ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh)
if !ok1 {
t.Fatalf("want a new controller to be built, got none")
}
resourceNameWithAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil)
ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameWithAuthority, ctrlCh)
if !ok2 {
t.Fatalf("want a new controller to be built, got none")
}
cancelWatch1()
if ctrl1.done.HasFired() {
t.Fatalf("controller is closed immediately when the watch is canceled, wanted to be put in the idle cache")
}
// Close the client while watch2 is not canceled. ctrl1 is in the idle
// cache, ctrl2 is in use. Both should be closed.
client.Close()
if !ctrl1.done.HasFired() {
t.Fatalf("controller in idle cache is not closed after client is closed")
}
if !ctrl2.done.HasFired() {
t.Fatalf("controller in use is not closed after client is closed")
}
}
// TestAuthorityRevive covers that an authority in the idle cache is revived
// when a new watch is started on it.
func (s) TestAuthorityRevive(t *testing.T) {
overrideFedEnvVar(t)
ctrlCh := overrideNewController(t)
const idleTimeout = 50 * time.Millisecond
client, err := newWithConfig(&bootstrap.Config{
XDSServer: serverConfigs[0],
Authorities: map[string]*bootstrap.Authority{
testAuthority: {XDSServer: serverConfigs[1]},
},
}, defaultWatchExpiryTimeout, idleTimeout)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
t.Cleanup(client.Close)
// Start a watch on the authority, and cancel it. This puts the authority in
// the idle cache.
resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil)
ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh)
if !ok1 {
t.Fatalf("want a new controller to be built, got none")
}
cancelWatch1()
// Start another watch on this authority; it should retrieve the authority
// from the cache instead of creating a new one.
resourceNameWithAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil)
ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameWithAuthority, ctrlCh)
if ok2 {
t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config)
}
// Wait for double the idle timeout; the controller shouldn't be closed,
// since it was revived.
time.Sleep(idleTimeout * 2)
if ctrl1.done.HasFired() {
t.Fatalf("controller that was revived is closed after timeout, want not closed")
}
}

@ -1,65 +0,0 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package xdsclient
import (
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)
// NewListeners is called by the underlying xdsAPIClient when it receives an
// xDS response.
//
// A response can contain multiple resources. They will be parsed and put in a
// map from resource name to the resource content.
func (c *clientImpl) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
c.pubsub.NewListeners(updates, metadata)
}
// NewRouteConfigs is called by the underlying xdsAPIClient when it receives an
// xDS response.
//
// A response can contain multiple resources. They will be parsed and put in a
// map from resource name to the resource content.
func (c *clientImpl) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
c.pubsub.NewRouteConfigs(updates, metadata)
}
// NewClusters is called by the underlying xdsAPIClient when it receives an xDS
// response.
//
// A response can contain multiple resources. They will be parsed and put in a
// map from resource name to the resource content.
func (c *clientImpl) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
c.pubsub.NewClusters(updates, metadata)
}
// NewEndpoints is called by the underlying xdsAPIClient when it receives an
// xDS response.
//
// A response can contain multiple resources. They will be parsed and put in a
// map from resource name to the resource content.
func (c *clientImpl) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
c.pubsub.NewEndpoints(updates, metadata)
}
// NewConnectionError is called by the underlying xdsAPIClient when it receives
// a connection error. The error will be forwarded to all the resource watchers.
func (c *clientImpl) NewConnectionError(err error) {
c.pubsub.NewConnectionError(err)
}

@ -22,12 +22,13 @@ package xdsclient
import (
"fmt"
"sync"
"time"
"google.golang.org/grpc/internal/cache"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
"google.golang.org/grpc/xds/internal/xdsclient/pubsub"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)
@ -42,17 +43,39 @@ type clientImpl struct {
done *grpcsync.Event
config *bootstrap.Config
controller controllerInterface
// authorityMu protects the authority fields. It's necessary because an
// authority is created when it's used.
authorityMu sync.Mutex
// authorities is a map from ServerConfig to authority, so that
// different authority names sharing the same ServerConfig can share
// the same authority.
//
// The key is **ServerConfig.String()**, not the authority name.
//
// An authority is either in authorities, or idleAuthorities,
// never both.
authorities map[string]*authority
// idleAuthorities keeps the authorities that are not used (the last
// watch on them was canceled). They are kept in the cache and will be deleted
// after a timeout. The key is ServerConfig.String().
//
// An authority is either in authorities, or idleAuthorities,
// never both.
idleAuthorities *cache.TimeoutCache
logger *grpclog.PrefixLogger
pubsub *pubsub.Pubsub
logger *grpclog.PrefixLogger
watchExpiryTimeout time.Duration
}
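// sharedAuthorityKey is illustrative only (not part of this commit): it shows
// why the two maps above are keyed by ServerConfig.String(). Two hypothetical
// bootstrap authorities that point at the same management server stringify to
// the same key, so they end up sharing a single authority entry.
func sharedAuthorityKey(c *clientImpl) bool {
	cfgA := c.config.Authorities["authority-a"].XDSServer
	cfgB := c.config.Authorities["authority-b"].XDSServer
	// If the configs stringify identically, both authority names resolve to
	// the same entry, c.authorities[cfgA.String()].
	return cfgA.String() == cfgB.String()
}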
// newWithConfig returns a new xdsClient with the given config.
func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (_ *clientImpl, retErr error) {
func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (_ *clientImpl, retErr error) {
c := &clientImpl{
done: grpcsync.NewEvent(),
config: config,
done: grpcsync.NewEvent(),
config: config,
watchExpiryTimeout: watchExpiryTimeout,
authorities: make(map[string]*authority),
idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout),
}
defer func() {
@ -64,14 +87,6 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (
c.logger = prefixLogger(c)
c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer)
c.pubsub = pubsub.New(watchExpiryTimeout, c.logger)
controller, err := newController(config.XDSServer, c.pubsub, c.updateValidator, c.logger)
if err != nil {
return nil, fmt.Errorf("xds: failed to connect to the control plane: %v", err)
}
c.controller = controller
c.logger.Infof("Created")
return c, nil
}
@ -94,12 +109,14 @@ func (c *clientImpl) Close() {
// Note that Close needs to check for nils even if some of them are always
// set in the constructor. This is because the constructor defers Close() in
// error cases, and the fields might not be set when the error happens.
if c.controller != nil {
c.controller.Close()
}
if c.pubsub != nil {
c.pubsub.Close()
c.authorityMu.Lock()
for _, a := range c.authorities {
a.close()
}
c.idleAuthorities.Clear(true)
c.authorityMu.Unlock()
c.logger.Infof("Shutdown")
}

@ -51,12 +51,15 @@ func Test(t *testing.T) {
}
const (
testXDSServer = "xds-server"
testXDSServer = "xds-server"
testXDSServerAuthority = "xds-server-authority"
testLDSName = "test-lds"
testRDSName = "test-rds"
testCDSName = "test-cds"
testEDSName = "test-eds"
testAuthority = "test-authority"
testAuthority2 = "test-authority-2"
testLDSName = "test-lds"
testRDSName = "test-rds"
testCDSName = "test-cds"
testEDSName = "test-eds"
defaultTestWatchExpiryTimeout = 500 * time.Millisecond
defaultTestTimeout = 5 * time.Second
@ -67,38 +70,47 @@ func newStringP(s string) *string {
return &s
}
func clientOpts(balancerName string, overrideWatchExpiryTimeout bool) (*bootstrap.Config, time.Duration) {
watchExpiryTimeout := defaultWatchExpiryTimeout
if overrideWatchExpiryTimeout {
watchExpiryTimeout = defaultTestWatchExpiryTimeout
}
func clientOpts() *bootstrap.Config {
return &bootstrap.Config{
XDSServer: &bootstrap.ServerConfig{
ServerURI: balancerName,
ServerURI: testXDSServer,
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
NodeProto: xdstestutils.EmptyNodeProtoV2,
},
}, watchExpiryTimeout
Authorities: map[string]*bootstrap.Authority{
testAuthority: {
XDSServer: &bootstrap.ServerConfig{
ServerURI: testXDSServerAuthority,
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
NodeProto: xdstestutils.EmptyNodeProtoV2,
},
},
},
}
}
type testController struct {
// config is the config this controller is created with.
config *bootstrap.ServerConfig
done *grpcsync.Event
addWatches map[xdsresource.ResourceType]*testutils.Channel
removeWatches map[xdsresource.ResourceType]*testutils.Channel
}
func overrideNewController() (*testutils.Channel, func()) {
func overrideNewController(t *testing.T) *testutils.Channel {
origNewController := newController
ch := testutils.NewChannel()
newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (controllerInterface, error) {
ret := newTestController()
ret := newTestController(config)
ch.Send(ret)
return ret, nil
}
return ch, func() { newController = origNewController }
t.Cleanup(func() { newController = origNewController })
return ch
}
func newTestController() *testController {
func newTestController(config *bootstrap.ServerConfig) *testController {
addWatches := map[xdsresource.ResourceType]*testutils.Channel{
xdsresource.ListenerResource: testutils.NewChannel(),
xdsresource.RouteConfigResource: testutils.NewChannel(),
@ -112,6 +124,7 @@ func newTestController() *testController {
xdsresource.EndpointsResource: testutils.NewChannel(),
}
return &testController{
config: config,
done: grpcsync.NewEvent(),
addWatches: addWatches,
removeWatches: removeWatches,
@ -137,22 +150,14 @@ func (c *testController) Close() {
// TestWatchCallAnotherWatch covers the case where watch() is called inline by a
// callback. It makes sure it doesn't cause a deadlock.
func (s) TestWatchCallAnotherWatch(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
// Start a watch for some resource, so that the controller and update
// handlers are built for this authority. The test needs these to make an
// inline watch in a callback.
client, ctrlCh := testClientSetup(t, false)
newWatch(t, client, xdsresource.ClusterResource, "doesnot-matter")
controller, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, "doesnot-matter")
clusterUpdateCh := testutils.NewChannel()
firstTime := true
@ -161,17 +166,17 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) {
// Calls another watch inline, to ensure there's no deadlock.
client.WatchCluster("another-random-name", func(xdsresource.ClusterUpdate, error) {})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); firstTime && err != nil {
if _, err := controller.addWatches[xdsresource.ClusterResource].Receive(ctx); firstTime && err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
firstTime = false
})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
if _, err := controller.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}
@ -179,7 +184,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) {
// The second update needs to be different in the underlying resource proto
// for the watch callback to be invoked.
wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, xdsresource.UpdateMetadata{})
updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, xdsresource.UpdateMetadata{})
if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil {
t.Fatal(err)
}
@ -272,18 +277,21 @@ func (s) TestClientNewSingleton(t *testing.T) {
}
defer func() { bootstrapNewConfig = oldBootstrapNewConfig }()
apiClientCh, cleanup := overrideNewController()
defer cleanup()
ctrlCh := overrideNewController(t)
// The first New(). Should create a Client and a new APIClient.
client, err := newRefCounted()
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
// Call a watch to create the controller.
client.WatchCluster("doesnot-matter", func(update xdsresource.ClusterUpdate, err error) {})
clientImpl := client.clientImpl
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
c, err := ctrlCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
@ -306,7 +314,7 @@ func (s) TestClientNewSingleton(t *testing.T) {
sctx, scancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
defer scancel()
_, err := apiClientCh.Receive(sctx)
_, err := ctrlCh.Receive(sctx)
if err == nil {
client.Close()
t.Fatalf("%d-th call to New() created a new API client", i)
@ -342,7 +350,11 @@ func (s) TestClientNewSingleton(t *testing.T) {
t.Fatalf("failed to create client: %v", err)
}
defer client2.Close()
c2, err := apiClientCh.Receive(ctx)
// Call a watch to create the controller.
client2.WatchCluster("abc", func(update xdsresource.ClusterUpdate, err error) {})
c2, err := ctrlCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}

@ -22,22 +22,42 @@ import (
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)
func mergeMaps(maps []map[string]xdsresource.UpdateWithMD) map[string]xdsresource.UpdateWithMD {
ret := make(map[string]xdsresource.UpdateWithMD)
for _, m := range maps {
for k, v := range m {
ret[k] = v
}
}
return ret
}
func (c *clientImpl) dump(t xdsresource.ResourceType) map[string]xdsresource.UpdateWithMD {
c.authorityMu.Lock()
defer c.authorityMu.Unlock()
maps := make([]map[string]xdsresource.UpdateWithMD, 0, len(c.authorities))
for _, a := range c.authorities {
maps = append(maps, a.dump(t))
}
return mergeMaps(maps)
}
// DumpLDS returns the status and contents of LDS.
func (c *clientImpl) DumpLDS() map[string]xdsresource.UpdateWithMD {
return c.pubsub.Dump(xdsresource.ListenerResource)
return c.dump(xdsresource.ListenerResource)
}
// DumpRDS returns the status and contents of RDS.
func (c *clientImpl) DumpRDS() map[string]xdsresource.UpdateWithMD {
return c.pubsub.Dump(xdsresource.RouteConfigResource)
return c.dump(xdsresource.RouteConfigResource)
}
// DumpCDS returns the status and contents of CDS.
func (c *clientImpl) DumpCDS() map[string]xdsresource.UpdateWithMD {
return c.pubsub.Dump(xdsresource.ClusterResource)
return c.dump(xdsresource.ClusterResource)
}
// DumpEDS returns the status and contents of EDS.
func (c *clientImpl) DumpEDS() map[string]xdsresource.UpdateWithMD {
return c.pubsub.Dump(xdsresource.EndpointsResource)
return c.dump(xdsresource.EndpointsResource)
}

@ -16,7 +16,7 @@
*
*/
package xdsclient_test
package xdsclient
import (
"fmt"
@ -30,8 +30,6 @@ import (
v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/grpc/xds/internal/xdsclient"
"google.golang.org/grpc/xds/internal/xdsclient/pubsub"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
"google.golang.org/protobuf/testing/protocmp"
"google.golang.org/protobuf/types/known/anypb"
@ -44,8 +42,6 @@ import (
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
)
const defaultTestWatchExpiryTimeout = 500 * time.Millisecond
func (s) TestLDSConfigDump(t *testing.T) {
const testVersion = "test-version-lds"
var (
@ -76,7 +72,7 @@ func (s) TestLDSConfigDump(t *testing.T) {
listenerRaws[ldsTargets[i]] = testutils.MarshalAny(listenersT)
}
client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
client, err := NewWithConfigForTesting(&bootstrap.Config{
XDSServer: &bootstrap.ServerConfig{
ServerURI: testXDSServer,
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
@ -87,7 +83,6 @@ func (s) TestLDSConfigDump(t *testing.T) {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
updateHandler := client.(pubsub.UpdateHandler)
// Expected unknown.
if err := compareDump(client.DumpLDS, map[string]xdsresource.UpdateWithMD{}); err != nil {
@ -114,6 +109,7 @@ func (s) TestLDSConfigDump(t *testing.T) {
Raw: r,
}
}
updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "")
updateHandler.NewListeners(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion})
// Expect ACK.
@ -192,7 +188,7 @@ func (s) TestRDSConfigDump(t *testing.T) {
routeRaws[rdsTargets[i]] = testutils.MarshalAny(routeConfigT)
}
client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
client, err := NewWithConfigForTesting(&bootstrap.Config{
XDSServer: &bootstrap.ServerConfig{
ServerURI: testXDSServer,
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
@ -203,7 +199,6 @@ func (s) TestRDSConfigDump(t *testing.T) {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
updateHandler := client.(pubsub.UpdateHandler)
// Expected unknown.
if err := compareDump(client.DumpRDS, map[string]xdsresource.UpdateWithMD{}); err != nil {
@ -230,6 +225,7 @@ func (s) TestRDSConfigDump(t *testing.T) {
Raw: r,
}
}
updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "")
updateHandler.NewRouteConfigs(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion})
// Expect ACK.
@ -308,7 +304,7 @@ func (s) TestCDSConfigDump(t *testing.T) {
clusterRaws[cdsTargets[i]] = testutils.MarshalAny(clusterT)
}
client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
client, err := NewWithConfigForTesting(&bootstrap.Config{
XDSServer: &bootstrap.ServerConfig{
ServerURI: testXDSServer,
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
@ -319,7 +315,6 @@ func (s) TestCDSConfigDump(t *testing.T) {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
updateHandler := client.(pubsub.UpdateHandler)
// Expected unknown.
if err := compareDump(client.DumpCDS, map[string]xdsresource.UpdateWithMD{}); err != nil {
@ -346,6 +341,7 @@ func (s) TestCDSConfigDump(t *testing.T) {
Raw: r,
}
}
updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "")
updateHandler.NewClusters(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion})
// Expect ACK.
@ -410,7 +406,7 @@ func (s) TestEDSConfigDump(t *testing.T) {
endpointRaws[edsTargets[i]] = testutils.MarshalAny(claT)
}
client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
client, err := NewWithConfigForTesting(&bootstrap.Config{
XDSServer: &bootstrap.ServerConfig{
ServerURI: testXDSServer,
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()),
@ -421,7 +417,6 @@ func (s) TestEDSConfigDump(t *testing.T) {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
updateHandler := client.(pubsub.UpdateHandler)
// Expected unknown.
if err := compareDump(client.DumpEDS, map[string]xdsresource.UpdateWithMD{}); err != nil {
@ -448,6 +443,7 @@ func (s) TestEDSConfigDump(t *testing.T) {
Raw: r,
}
}
updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "")
updateHandler.NewEndpoints(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion})
// Expect ACK.

@ -17,7 +17,10 @@
package xdsclient
import "google.golang.org/grpc/xds/internal/xdsclient/load"
import (
"google.golang.org/grpc/xds/internal/xdsclient/load"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)
// ReportLoad starts a load reporting stream to the given server. If the server
// is not an empty string, and is different from the management server, a new
@ -29,5 +32,16 @@ import "google.golang.org/grpc/xds/internal/xdsclient/load"
// It returns a Store for the user to report loads, and a function to cancel
// the load reporting stream.
func (c *clientImpl) ReportLoad(server string) (*load.Store, func()) {
return c.controller.ReportLoad(server)
// TODO: load reporting with federation also needs to find the authority for
// this server first, then report load to it. Currently we always report to
// the default authority. This is needed to avoid a nil pointer panic.
a, unref, err := c.findAuthority(xdsresource.ParseName(""))
if err != nil {
return nil, func() {}
}
store, cancelF := a.reportLoad(server)
return store, func() {
cancelF()
unref()
}
}
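// exampleReportLoad is illustrative only (not part of this commit): start load
// reporting against an LRS server and stop it when done. The server address is
// made up.
func exampleReportLoad(c *clientImpl) {
	store, cancel := c.ReportLoad("lrs-server.example.com:443")
	defer cancel()
	_ = store // loads are recorded into the returned *load.Store
}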

@ -65,13 +65,15 @@ func (s) TestLRSClient(t *testing.T) {
defer xdsC.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
if u, err := fs.NewConnChan.Receive(ctx); err != nil {
t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err)
}
// Reporting to the same address should not create a new ClientConn.
store1, lrsCancel1 := xdsC.ReportLoad(fs.Address)
defer lrsCancel1()
if u, err := fs.NewConnChan.Receive(ctx); err != nil {
t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err)
}
sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
defer sCancel()
if u, err := fs.NewConnChan.Receive(sCtx); err != context.DeadlineExceeded {

@ -28,7 +28,10 @@ import (
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
)
const defaultWatchExpiryTimeout = 15 * time.Second
const (
defaultWatchExpiryTimeout = 15 * time.Second
defaultIdleAuthorityDeleteTimeout = 5 * time.Minute
)
// This is the Client returned by New(). It contains one client implementation,
// and maintains the refcount.
@ -82,7 +85,7 @@ func newRefCounted() (*clientRefCounted, error) {
if err != nil {
return nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err)
}
c, err := newWithConfig(config, defaultWatchExpiryTimeout)
c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
return nil, err
}
@ -114,7 +117,7 @@ func NewWithConfig(config *bootstrap.Config) (XDSClient, error) {
}
// Create the new client implementation.
c, err := newWithConfig(config, defaultWatchExpiryTimeout)
c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
return nil, err
}
@ -144,7 +147,7 @@ func (c *clientRefCounted) Close() {
// Note that this function doesn't set the singleton, so that the testing states
// don't leak.
func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) {
cl, err := newWithConfig(config, watchExpiryTimeout)
cl, err := newWithConfig(config, watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
return nil, err
}
@ -182,7 +185,7 @@ func NewClientWithBootstrapContents(contents []byte) (XDSClient, error) {
return nil, fmt.Errorf("xds: error with bootstrap config: %v", err)
}
cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout)
cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
return nil, err
}

@ -27,31 +27,35 @@ import (
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) {
first, cancelF := c.pubsub.WatchListener(serviceName, cb)
if first {
c.controller.AddWatch(xdsresource.ListenerResource, serviceName)
n := xdsresource.ParseName(serviceName)
a, unref, err := c.findAuthority(n)
if err != nil {
cb(xdsresource.ListenerUpdate{}, err)
return func() {}
}
cancelF := a.watchListener(n.String(), cb)
return func() {
if cancelF() {
c.controller.RemoveWatch(xdsresource.ListenerResource, serviceName)
}
cancelF()
unref()
}
}
// WatchRouteConfig starts a listener watcher for the service..
// WatchRouteConfig starts a listener watcher for the service.
//
// Note that during race (e.g. an xDS response is received while the user is
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) {
first, cancelF := c.pubsub.WatchRouteConfig(routeName, cb)
if first {
c.controller.AddWatch(xdsresource.RouteConfigResource, routeName)
n := xdsresource.ParseName(routeName)
a, unref, err := c.findAuthority(n)
if err != nil {
cb(xdsresource.RouteConfigUpdate{}, err)
return func() {}
}
cancelF := a.watchRouteConfig(n.String(), cb)
return func() {
if cancelF() {
c.controller.RemoveWatch(xdsresource.RouteConfigResource, routeName)
}
cancelF()
unref()
}
}
@ -65,14 +69,16 @@ func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.Rout
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) {
first, cancelF := c.pubsub.WatchCluster(clusterName, cb)
if first {
c.controller.AddWatch(xdsresource.ClusterResource, clusterName)
n := xdsresource.ParseName(clusterName)
a, unref, err := c.findAuthority(n)
if err != nil {
cb(xdsresource.ClusterUpdate{}, err)
return func() {}
}
cancelF := a.watchCluster(n.String(), cb)
return func() {
if cancelF() {
c.controller.RemoveWatch(xdsresource.ClusterResource, clusterName)
}
cancelF()
unref()
}
}
@ -85,13 +91,15 @@ func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.Cluste
// calling cancel()), there's a small window where the callback can be called
// after the watcher is canceled. The caller needs to handle this case.
func (c *clientImpl) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) {
first, cancelF := c.pubsub.WatchEndpoints(clusterName, cb)
if first {
c.controller.AddWatch(xdsresource.EndpointsResource, clusterName)
n := xdsresource.ParseName(clusterName)
a, unref, err := c.findAuthority(n)
if err != nil {
cb(xdsresource.EndpointsUpdate{}, err)
return func() {}
}
cancelF := a.watchEndpoints(n.String(), cb)
return func() {
if cancelF() {
c.controller.RemoveWatch(xdsresource.EndpointsResource, clusterName)
}
cancelF()
unref()
}
}

@ -25,9 +25,6 @@ import (
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/grpc/internal/testutils"
)
// TestClusterWatch covers the cases:
@ -35,285 +32,38 @@ import (
// - an update for another resource name
// - an update is received after cancel()
func (s) TestClusterWatch(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
clusterUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Push an update, with an extra resource for a different resource name.
// Specify a non-nil raw proto in the original resource to ensure that the
// new update is not considered equal to the old one.
newUpdate := wantUpdate
newUpdate.Raw = &anypb.Any{}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
testCDSName: {Update: newUpdate},
"randomName": {},
}, xdsresource.UpdateMetadata{})
if err := verifyClusterUpdate(ctx, clusterUpdateCh, newUpdate, nil); err != nil {
t.Fatal(err)
}
// Cancel watch, and send update again.
cancelWatch()
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err)
}
testWatch(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName)
}
// TestClusterTwoWatchSameResourceName covers the case where an update is received
// after two watch() for the same resource name.
func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
var clusterUpdateChs []*testutils.Channel
var cancelLastWatch func()
const count = 2
for i := 0; i < count; i++ {
clusterUpdateCh := testutils.NewChannel()
clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh)
cancelLastWatch = client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
if i == 0 {
// A new watch is registered on the underlying API client only for
// the first iteration because we are using the same resource name.
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
}
}
wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil {
t.Fatal(err)
}
}
// Cancel the last watch, and send update again. None of the watchers should
// be notified because one has been cancelled, and the other is receiving
// the same update.
cancelLastWatch()
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
func() {
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := clusterUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected ClusterUpdate: %v, %v, want channel recv timeout", u, err)
}
}()
}
// Push a new update and make sure the uncancelled watcher is invoked.
// Specify a non-nil raw proto to ensure that the new update is not
// considered equal to the old one.
newUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName, Raw: &anypb.Any{}}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyClusterUpdate(ctx, clusterUpdateChs[0], newUpdate, nil); err != nil {
t.Fatal(err)
}
testTwoWatchSameResourceName(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName)
}
// TestClusterThreeWatchDifferentResourceName covers the case where an update is
// received after three watch() for different resource names.
func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
// Two watches for the same name.
var clusterUpdateChs []*testutils.Channel
const count = 2
for i := 0; i < count; i++ {
clusterUpdateCh := testutils.NewChannel()
clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh)
client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
if i == 0 {
// A new watch is registered on the underlying API client only for
// the first iteration because we are using the same resource name.
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
}
}
// Third watch for a different name.
clusterUpdateCh2 := testutils.NewChannel()
client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate1 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}
wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
testCDSName + "1": {Update: wantUpdate1},
testCDSName + "2": {Update: wantUpdate2},
}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate1, nil); err != nil {
t.Fatal(err)
}
}
if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil {
t.Fatal(err)
}
testThreeWatchDifferentResourceName(t, xdsresource.ClusterResource,
xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}, testCDSName+"1",
xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}, testCDSName+"2",
)
}
// TestClusterWatchAfterCache covers the case where watch is called after the update
// is in cache.
func (s) TestClusterWatchAfterCache(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
clusterUpdateCh := testutils.NewChannel()
client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
testCDSName: {Update: wantUpdate},
}, xdsresource.UpdateMetadata{})
if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Another watch for the resource in cache.
clusterUpdateCh2 := testutils.NewChannel()
client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if n, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(sCtx); err != context.DeadlineExceeded {
t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err)
}
// New watch should receive the update.
if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Old watch should see nothing.
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err)
}
testWatchAfterCache(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName)
}
// TestClusterWatchExpiryTimer tests the case where the client does not receive
// a CDS response for the request that it sends out. We want the watch callback
// to be invoked with an error once the watchExpiryTimer fires.
func (s) TestClusterWatchExpiryTimer(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, true))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
clusterUpdateCh := testutils.NewChannel()
client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) {
clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
client, _ := testClientSetup(t, true)
clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName)
u, err := clusterUpdateCh.Receive(ctx)
if err != nil {
@@ -329,33 +79,14 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) {
// a CDS response for the request that it sends out. We want no error even
// after expiry timeout.
func (s) TestClusterWatchExpiryTimerStop(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, true))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
clusterUpdateCh := testutils.NewChannel()
client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) {
clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
client, ctrlCh := testClientSetup(t, true)
clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName)
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, testCDSName)
wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
testCDSName: {Update: wantUpdate},
}, xdsresource.UpdateMetadata{})
if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
@@ -377,117 +108,23 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) {
// - one more update without the removed resource
// - the callback (above) shouldn't receive any update
func (s) TestClusterResourceRemoved(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
clusterUpdateCh1 := testutils.NewChannel()
client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
// Another watch for a different name.
clusterUpdateCh2 := testutils.NewChannel()
client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate1 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}
wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
testCDSName + "1": {Update: wantUpdate1},
testCDSName + "2": {Update: wantUpdate2},
}, xdsresource.UpdateMetadata{})
if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1, nil); err != nil {
t.Fatal(err)
}
if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil {
t.Fatal(err)
}
// Send another update to remove resource 1.
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{})
// Watcher 1 should get an error.
if u, err := clusterUpdateCh1.Receive(ctx); err != nil || xdsresource.ErrType(u.(xdsresource.ClusterUpdateErrTuple).Err) != xdsresource.ErrorTypeResourceNotFound {
t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err)
}
// Watcher 2 should not see an update since the resource has not changed.
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := clusterUpdateCh2.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected ClusterUpdate: %v, want receiving from channel timeout", u)
}
// Send another update with resource 2 modified. Specify a non-nil raw proto
// to ensure that the new update is not considered equal to the old one.
wantUpdate2 = xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}}
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{})
// Watcher 1 should not see an update.
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := clusterUpdateCh1.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected Cluster: %v, want receiving from channel timeout", u)
}
// Watcher 2 should get the update.
if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil {
t.Fatal(err)
}
testResourceRemoved(t, xdsresource.ClusterResource,
xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}, testCDSName+"1",
xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}, testCDSName+"2",
)
}
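// A watcher whose resource is removed, as in the test above, sees a callback
// invocation with a resource-not-found error rather than a regular update.
// The sketch below shows how a consumer callback might distinguish that case
// using xdsresource.ErrType; it is illustrative only, not code from this
// package.
func onClusterUpdateSketch(update xdsresource.ClusterUpdate, err error) {
    switch {
    case err == nil:
        // Regular update: act on update.ClusterName and friends.
    case xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound:
        // The management server no longer serves this resource: tear down
        // whatever was built from the previous update.
    default:
        // Other errors (NACKs, connection problems): typically keep using
        // the last good update.
    }
}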
// TestClusterWatchNACKError covers the case that an update is NACK'ed, and the
// watcher should also receive the error.
func (s) TestClusterWatchNACKError(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
clusterUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
defer cancelWatch()
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
client, ctrlCh := testClientSetup(t, false)
clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName)
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, testCDSName)
wantError := fmt.Errorf("testing error")
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {
updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {
Err: wantError,
}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
if err := verifyClusterUpdate(ctx, clusterUpdateCh, xdsresource.ClusterUpdate{}, wantError); err != nil {
@@ -500,57 +137,5 @@ func (s) TestClusterWatchNACKError(t *testing.T) {
// But the watchers with valid resources should receive the update, while those
// with invalid resources should receive an error (a per-resource dispatch
// sketch follows this test).
func (s) TestClusterWatchPartialValid(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
const badResourceName = "bad-resource"
updateChs := make(map[string]*testutils.Channel)
for _, name := range []string{testCDSName, badResourceName} {
clusterUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchCluster(name, func(update xdsresource.ClusterUpdate, err error) {
clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
defer func() {
cancelWatch()
if _, err := apiClient.removeWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want watch to be canceled, got err: %v", err)
}
}()
if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
updateChs[name] = clusterUpdateCh
}
wantError := fmt.Errorf("testing error")
wantError2 := fmt.Errorf("individual error")
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
testCDSName: {Update: xdsresource.ClusterUpdate{ClusterName: testEDSName}},
badResourceName: {Err: wantError2},
}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
// The valid resource should be sent to the watcher.
if err := verifyClusterUpdate(ctx, updateChs[testCDSName], xdsresource.ClusterUpdate{ClusterName: testEDSName}, nil); err != nil {
t.Fatal(err)
}
// The failed watcher should receive an error.
if err := verifyClusterUpdate(ctx, updateChs[badResourceName], xdsresource.ClusterUpdate{}, wantError2); err != nil {
t.Fatal(err)
}
testWatchPartialValid(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName)
}
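// The partial-ACK behavior verified above boils down to a per-resource
// dispatch over the ErrTuple map: resources that failed validation carry their
// own error, while valid resources still carry an update even though the
// response as a whole was NACKed. A hypothetical sketch (notify stands in for
// delivering to the watchers of one resource):
func dispatchClustersSketch(updates map[string]xdsresource.ClusterUpdateErrTuple, notify func(name string, u xdsresource.ClusterUpdate, err error)) {
    for name, tuple := range updates {
        if tuple.Err != nil {
            // Only the watchers of this particular resource see the error.
            notify(name, xdsresource.ClusterUpdate{}, tuple.Err)
            continue
        }
        // Valid resource: its watchers get the update as usual.
        notify(name, tuple.Update, nil)
    }
}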


@@ -25,9 +25,7 @@ import (
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/grpc/internal/testutils"
"google.golang.org/grpc/xds/internal"
)
@@ -53,285 +51,38 @@ var (
// - an update for another resource name (which doesn't trigger callback)
// - an update is received after cancel()
func (s) TestEndpointsWatch(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
endpointsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Push an update, with an extra resource for a different resource name.
// Specify a non-nil raw proto in the original resource to ensure that the
// new update is not considered equal to the old one (see the suppression
// sketch after this test).
newUpdate := wantUpdate
newUpdate.Raw = &anypb.Any{}
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{
testCDSName: {Update: newUpdate},
"randomName": {},
}, xdsresource.UpdateMetadata{})
if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, newUpdate, nil); err != nil {
t.Fatal(err)
}
// Cancel watch, and send update again.
cancelWatch()
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err)
}
testWatch(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName)
}
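// Several tests in this file set a non-nil Raw proto on a new update purely so
// that the client does not consider it identical to the cached one. The
// suppression decision can be sketched as a comparison of the raw resources;
// this helper is hypothetical and assumes "google.golang.org/protobuf/proto"
// is imported.
func endpointsUpdateChangedSketch(prev, next xdsresource.EndpointsUpdate) bool {
    // If the raw protos compare equal, the update is redundant and the
    // watchers are not re-notified.
    return !proto.Equal(prev.Raw, next.Raw)
}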
// TestEndpointsTwoWatchSameResourceName covers the case where an update is received
// after two watch() for the same resource name.
func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
const count = 2
var (
endpointsUpdateChs []*testutils.Channel
cancelLastWatch func()
)
for i := 0; i < count; i++ {
endpointsUpdateCh := testutils.NewChannel()
endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh)
cancelLastWatch = client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
if i == 0 {
// A new watch is registered on the underlying API client only for
// the first iteration because we are using the same resource name.
if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
}
}
wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil {
t.Fatal(err)
}
}
// Cancel the last watch, and send update again. None of the watchers should
// be notified because one has been cancelled, and the other is receiving
// the same update.
cancelLastWatch()
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
func() {
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := endpointsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err)
}
}()
}
// Push a new update and make sure the uncancelled watcher is invoked.
// Specify a non-nil raw proto to ensure that the new update is not
// considered equal to the old one.
newUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}, Raw: &anypb.Any{}}
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[0], newUpdate, nil); err != nil {
t.Fatal(err)
}
testTwoWatchSameResourceName(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName)
}
// TestEndpointsThreeWatchDifferentResourceName covers the case where an update is
// received after three watch() for different resource names.
func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
// Two watches for the same name.
var endpointsUpdateChs []*testutils.Channel
const count = 2
for i := 0; i < count; i++ {
endpointsUpdateCh := testutils.NewChannel()
endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh)
client.WatchEndpoints(testCDSName+"1", func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
if i == 0 {
// A new watch is registered on the underlying API client only for
// the first iteration because we are using the same resource name.
if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
}
}
// Third watch for a different name.
endpointsUpdateCh2 := testutils.NewChannel()
client.WatchEndpoints(testCDSName+"2", func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate1 := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}
wantUpdate2 := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[1]}}
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{
testCDSName + "1": {Update: wantUpdate1},
testCDSName + "2": {Update: wantUpdate2},
}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate1, nil); err != nil {
t.Fatal(err)
}
}
if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate2, nil); err != nil {
t.Fatal(err)
}
testThreeWatchDifferentResourceName(t, xdsresource.EndpointsResource,
xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName+"1",
xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[1]}}, testCDSName+"2",
)
}
// TestEndpointsWatchAfterCache covers the case where watch is called after the update
// is in cache.
func (s) TestEndpointsWatchAfterCache(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
endpointsUpdateCh := testutils.NewChannel()
client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Another watch for the resource in cache.
endpointsUpdateCh2 := testutils.NewChannel()
client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if n, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(sCtx); err != context.DeadlineExceeded {
t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err)
}
// New watch should receive the update.
if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Old watch should see nothing.
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err)
}
testWatchAfterCache(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName)
}
// TestEndpointsWatchExpiryTimer tests the case where the client does not receive
// an EDS response for the request that it sends out. We want the watch callback
// to be invoked with an error once the watchExpiryTimer fires.
func (s) TestEndpointsWatchExpiryTimer(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, true))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
endpointsUpdateCh := testutils.NewChannel()
client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
client, _ := testClientSetup(t, true)
endpointsUpdateCh, _ := newWatch(t, client, xdsresource.EndpointsResource, testCDSName)
u, err := endpointsUpdateCh.Receive(ctx)
if err != nil {
@@ -346,34 +97,14 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) {
// TestEndpointsWatchNACKError covers the case that an update is NACK'ed, and
// the watcher should also receive the error.
func (s) TestEndpointsWatchNACKError(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
endpointsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
defer cancelWatch()
if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
client, ctrlCh := testClientSetup(t, false)
endpointsUpdateCh, _ := newWatch(t, client, xdsresource.EndpointsResource, testCDSName)
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.EndpointsResource, testCDSName)
wantError := fmt.Errorf("testing error")
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
updateHandler.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, xdsresource.EndpointsUpdate{}, wantError); err != nil {
t.Fatal(err)
}
@@ -384,57 +115,5 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) {
// But the watchers with valid resources should receive the update, while those
// with invalid resources should receive an error.
func (s) TestEndpointsWatchPartialValid(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
const badResourceName = "bad-resource"
updateChs := make(map[string]*testutils.Channel)
for _, name := range []string{testCDSName, badResourceName} {
endpointsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchEndpoints(name, func(update xdsresource.EndpointsUpdate, err error) {
endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
defer func() {
cancelWatch()
if _, err := apiClient.removeWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want watch to be canceled, got err: %v", err)
}
}()
if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
updateChs[name] = endpointsUpdateCh
}
wantError := fmt.Errorf("testing error")
wantError2 := fmt.Errorf("individual error")
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{
testCDSName: {Update: xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}},
badResourceName: {Err: wantError2},
}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
// The valid resource should be sent to the watcher.
if err := verifyEndpointsUpdate(ctx, updateChs[testCDSName], xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, nil); err != nil {
t.Fatal(err)
}
// The failed watcher should receive an error.
if err := verifyEndpointsUpdate(ctx, updateChs[badResourceName], xdsresource.EndpointsUpdate{}, wantError2); err != nil {
t.Fatal(err)
}
testWatchPartialValid(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName)
}


@@ -0,0 +1,99 @@
/*
*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xdsclient
import (
"context"
"testing"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)
func overrideFedEnvVar(t *testing.T) {
oldFed := envconfig.XDSFederation
envconfig.XDSFederation = true
t.Cleanup(func() { envconfig.XDSFederation = oldFed })
}
func testFedTwoWatchDifferentContextParameterOrder(t *testing.T, typ xdsresource.ResourceType, update interface{}) {
overrideFedEnvVar(t)
var (
// Two resource names only differ in context parameter __order__.
resourceName1 = buildResourceName(typ, testAuthority, "test-resource-name", nil) + "?a=1&b=2"
resourceName2 = buildResourceName(typ, testAuthority, "test-resource-name", nil) + "?b=2&a=1"
)
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
client, ctrlCh := testClientSetup(t, false)
updateCh, _ := newWatch(t, client, typ, resourceName1)
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, resourceName1)
newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ)
// Start a watch on the second resource name.
updateCh2, _ := newWatchF(client, resourceName2)
// Send an update on the first resource; both watchers should be updated.
newUpdateF(updateHandler, map[string]interface{}{resourceName1: update})
verifyUpdateF(ctx, t, updateCh, update, nil)
verifyUpdateF(ctx, t, updateCh2, update, nil)
}
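// Context parameters in new-style resource names are a set of key/value pairs,
// which is why "?a=1&b=2" and "?b=2&a=1" above must identify the same
// resource. The standalone sketch below illustrates the order-insensitivity
// with the standard library (it assumes the "net/url" and "reflect" imports
// and ignores repeated keys).
func sameContextParamsSketch(q1, q2 string) bool {
    v1, err1 := url.ParseQuery(q1)
    v2, err2 := url.ParseQuery(q2)
    if err1 != nil || err2 != nil {
        return false
    }
    // url.Values is a map keyed by parameter name, so DeepEqual does not care
    // about the order in which the parameters appeared in the string.
    return reflect.DeepEqual(v1, v2)
}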
// TestLDSFedTwoWatchDifferentContextParameterOrder covers the case with new-style resource names
// - Two watches with the same query string, but in different order. The two
// watches should watch the same resource.
// - The response has the same query string, but in different order. The watch
// should still be notified.
func (s) TestLDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName})
}
// TestRDSFedTwoWatchDifferentContextParameterOrder covers the case with new-style resource names
// - Two watches with the same query string, but in different order. The two
// watches should watch the same resource.
// - The response has the same query string, but in different order. The watch
// should still be notified.
func (s) TestRDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{
VirtualHosts: []*xdsresource.VirtualHost{
{
Domains: []string{testLDSName},
Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}},
},
},
})
}
// TestClusterFedTwoWatchDifferentContextParameterOrder covers the case with new-style resource names
// - Two watches with the same query string, but in different order. The two
// watches should watch the same resource.
// - The response has the same query string, but in different order. The watch
// should still be notified.
func (s) TestClusterFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName})
}
// TestEndpointsFedTwoWatchDifferentContextParameterOrder covers the case with new-style resource names
// - Two watches with the same query string, but in different order. The two
// watches should watch the same resource.
// - The response has the same query string, but in different order. The watch
// should still be notified.
func (s) TestEndpointsFedTwoWatchDifferentContextParameterOrder(t *testing.T) {
testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}})
}
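// For reference, the new-style resource names built by these tests follow the
// xdstp scheme, roughly:
//
//	xdstp://{authority}/{resource type}/{resource name}?{context parameters}
//
// so the two names compared in testFedTwoWatchDifferentContextParameterOrder
// share the scheme, authority, type and name, and differ only in the order of
// their "?a=1&b=2" context parameters.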


@@ -35,256 +35,28 @@ import (
// - an update for another resource name
// - an update is received after cancel()
func (s) TestLDSWatch(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
ldsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName}
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Push an update, with an extra resource for a different resource name.
// Specify a non-nil raw proto in the original resource to ensure that the
// new update is not considered equal to the old one.
newUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}}
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{
testLDSName: {Update: newUpdate},
"randomName": {},
}, xdsresource.UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateCh, newUpdate, nil); err != nil {
t.Fatal(err)
}
// Cancel watch, and send update again.
cancelWatch()
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Fatalf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err)
}
testWatch(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName)
}
// TestLDSTwoWatchSameResourceName covers the case where an update is received
// after two watch() for the same resource name.
func (s) TestLDSTwoWatchSameResourceName(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
const count = 2
var (
ldsUpdateChs []*testutils.Channel
cancelLastWatch func()
)
for i := 0; i < count; i++ {
ldsUpdateCh := testutils.NewChannel()
ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh)
cancelLastWatch = client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
if i == 0 {
// A new watch is registered on the underlying API client only for
// the first iteration because we are using the same resource name.
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
}
}
wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName}
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil {
t.Fatal(err)
}
}
// Cancel the last watch, and send update again. None of the watchers should
// be notified because one has been cancelled, and the other is receiving
// the same update.
cancelLastWatch()
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
func() {
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := ldsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err)
}
}()
}
// Push a new update and make sure the uncancelled watcher is invoked.
// Specify a non-nil raw proto to ensure that the new update is not
// considered equal to the old one.
newUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}}
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateChs[0], newUpdate, nil); err != nil {
t.Fatal(err)
}
testTwoWatchSameResourceName(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName)
}
// TestLDSThreeWatchDifferentResourceName covers the case where an update is
// received after three watch() for different resource names.
func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
var ldsUpdateChs []*testutils.Channel
const count = 2
// Two watches for the same name.
for i := 0; i < count; i++ {
ldsUpdateCh := testutils.NewChannel()
ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh)
client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
if i == 0 {
// A new watch is registered on the underlying API client only for
// the first iteration because we are using the same resource name.
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
}
}
// Third watch for a different name.
ldsUpdateCh2 := testutils.NewChannel()
client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate1 := xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"}
wantUpdate2 := xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"}
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{
testLDSName + "1": {Update: wantUpdate1},
testLDSName + "2": {Update: wantUpdate2},
}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate1, nil); err != nil {
t.Fatal(err)
}
}
if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil {
t.Fatal(err)
}
testThreeWatchDifferentResourceName(t, xdsresource.ListenerResource,
xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"}, testLDSName+"1",
xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"}, testLDSName+"2",
)
}
// TestLDSWatchAfterCache covers the case where watch is called after the update
// is in cache.
func (s) TestLDSWatchAfterCache(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
ldsUpdateCh := testutils.NewChannel()
client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName}
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Another watch for the resource in cache.
ldsUpdateCh2 := testutils.NewChannel()
client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if n, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(sCtx); err != context.DeadlineExceeded {
t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err)
}
// New watch should receive the update.
if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Old watch should see nothing.
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err)
}
testWatchAfterCache(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName)
}
// TestLDSResourceRemoved covers the cases:
@@ -294,116 +66,23 @@ func (s) TestLDSWatchAfterCache(t *testing.T) {
// - one more update without the removed resource
// - the callback (above) shouldn't receive any update
func (s) TestLDSResourceRemoved(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
ldsUpdateCh1 := testutils.NewChannel()
client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
// Another watch for a different name.
ldsUpdateCh2 := testutils.NewChannel()
client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate1 := xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "1"}
wantUpdate2 := xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "2"}
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{
testLDSName + "1": {Update: wantUpdate1},
testLDSName + "2": {Update: wantUpdate2},
}, xdsresource.UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1, nil); err != nil {
t.Fatal(err)
}
if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil {
t.Fatal(err)
}
// Send another update to remove resource 1.
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{})
// Watcher 1 should get an error.
if u, err := ldsUpdateCh1.Receive(ctx); err != nil || xdsresource.ErrType(u.(xdsresource.ListenerUpdateErrTuple).Err) != xdsresource.ErrorTypeResourceNotFound {
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err)
}
// Watcher 2 should not see an update since the resource has not changed.
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := ldsUpdateCh2.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u)
}
// Send another update with resource 2 modified. Specify a non-nil raw proto
// to ensure that the new update is not considered equal to the old one.
wantUpdate2 = xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "2", Raw: &anypb.Any{}}
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{})
// Watcher 1 should not see an update.
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := ldsUpdateCh1.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u)
}
// Watcher 2 should get the update.
if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil {
t.Fatal(err)
}
testResourceRemoved(t, xdsresource.ListenerResource,
xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"}, testLDSName+"1",
xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"}, testLDSName+"2",
)
}
// TestListenerWatchNACKError covers the case that an update is NACK'ed, and the
// watcher should also receive the error.
func (s) TestListenerWatchNACKError(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
ldsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
defer cancelWatch()
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
client, ctrlCh := testClientSetup(t, false)
ldsUpdateCh, _ := newWatch(t, client, xdsresource.ListenerResource, testLDSName)
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ListenerResource, testLDSName)
wantError := fmt.Errorf("testing error")
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
if err := verifyListenerUpdate(ctx, ldsUpdateCh, xdsresource.ListenerUpdate{}, wantError); err != nil {
t.Fatal(err)
}
@@ -414,88 +93,17 @@ func (s) TestListenerWatchNACKError(t *testing.T) {
// But the watchers with valid resources should receive the update, while those
// with invalid resources should receive an error.
func (s) TestListenerWatchPartialValid(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
const badResourceName = "bad-resource"
updateChs := make(map[string]*testutils.Channel)
for _, name := range []string{testLDSName, badResourceName} {
ldsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchListener(name, func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
defer func() {
cancelWatch()
if _, err := apiClient.removeWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want watch to be canceled, got err: %v", err)
}
}()
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
updateChs[name] = ldsUpdateCh
}
wantError := fmt.Errorf("testing error")
wantError2 := fmt.Errorf("individual error")
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{
testLDSName: {Update: xdsresource.ListenerUpdate{RouteConfigName: testEDSName}},
badResourceName: {Err: wantError2},
}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
// The valid resource should be sent to the watcher.
if err := verifyListenerUpdate(ctx, updateChs[testLDSName], xdsresource.ListenerUpdate{RouteConfigName: testEDSName}, nil); err != nil {
t.Fatal(err)
}
// The failed watcher should receive an error.
if err := verifyListenerUpdate(ctx, updateChs[badResourceName], xdsresource.ListenerUpdate{}, wantError2); err != nil {
t.Fatal(err)
}
testWatchPartialValid(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName)
}
// TestListenerWatch_RedundantUpdateSupression tests scenarios where an update
// with an unmodified resource is suppressed, and one with a modified resource is not.
func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
ldsUpdateCh := testutils.NewChannel()
client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) {
ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
client, ctrlCh := testClientSetup(t, false)
ldsUpdateCh, _ := newWatch(t, client, xdsresource.ListenerResource, testLDSName)
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ListenerResource, testLDSName)
basicListener := testutils.MarshalAny(&v3listenerpb.Listener{
Name: testLDSName,
@@ -582,7 +190,7 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) {
},
}
for _, test := range tests {
client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, xdsresource.UpdateMetadata{})
updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, xdsresource.UpdateMetadata{})
if test.wantCallback {
if err := verifyListenerUpdate(ctx, ldsUpdateCh, test.update, nil); err != nil {
t.Fatal(err)


@@ -23,11 +23,7 @@ import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/grpc/internal/testutils"
)
// TestRDSWatch covers the cases:
@@ -35,324 +31,76 @@ import (
// - an update for another resource name (which doesn't trigger callback)
// - an update is received after cancel()
func (s) TestRDSWatch(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
rdsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) {
rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
wantUpdate := xdsresource.RouteConfigUpdate{
testWatch(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{
VirtualHosts: []*xdsresource.VirtualHost{
{
Domains: []string{testLDSName},
Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}},
},
},
}
client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}
// Push an update, with an extra resource for a different resource name.
// Specify a non-nil raw proto in the original resource to ensure that the
// new update is not considered equal to the old one.
newUpdate := wantUpdate
newUpdate.Raw = &anypb.Any{}
client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{
testRDSName: {Update: newUpdate},
"randomName": {},
}, xdsresource.UpdateMetadata{})
if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, newUpdate, nil); err != nil {
t.Fatal(err)
}
// Cancel watch, and send update again.
cancelWatch()
client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err)
}
}, testRDSName)
}
// TestRDSTwoWatchSameResourceName covers the case where an update is received
// after two watch() for the same resource name.
func (s) TestRDSTwoWatchSameResourceName(t *testing.T) {
apiClientCh, cleanup := overrideNewController()
defer cleanup()
client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testController)
const count = 2
var (
rdsUpdateChs []*testutils.Channel
cancelLastWatch func()
)
for i := 0; i < count; i++ {
rdsUpdateCh := testutils.NewChannel()
rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh)
cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) {
rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err})
})
if i == 0 {
// A new watch is registered on the underlying API client only for
// the first iteration because we are using the same resource name.
if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
}
}
wantUpdate := xdsresource.RouteConfigUpdate{
testTwoWatchSameResourceName(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{
VirtualHosts: []*xdsresource.VirtualHost{
{
Domains: []string{testLDSName},
Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}},
},
},
}
client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil {
t.Fatal(err)
}
}
// Cancel the last watch, and send update again. None of the watchers should
// be notified because one has been cancelled, and the other is receiving
// the same update.
cancelLastWatch()
client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{})
for i := 0; i < count; i++ {
func() {
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := rdsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err)
}
}()
}
// Push a new update and make sure the uncancelled watcher is invoked.
// Specify a non-nil raw proto to ensure that the new update is not
// considered equal to the old one.
newUpdate := wantUpdate
newUpdate.Raw = &anypb.Any{}
client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{})
if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[0], newUpdate, nil); err != nil {
t.Fatal(err)
}
}, testRDSName)
}
// TestRDSThreeWatchDifferentResourceName covers the case where an update is
// received after three watch() for different resource names.
func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) {
	testThreeWatchDifferentResourceName(t, xdsresource.RouteConfigResource,
		xdsresource.RouteConfigUpdate{
			VirtualHosts: []*xdsresource.VirtualHost{
				{
					Domains: []string{testLDSName},
					Routes:  []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "1": {Weight: 1}}}},
				},
			},
		}, testRDSName+"1",
		xdsresource.RouteConfigUpdate{
			VirtualHosts: []*xdsresource.VirtualHost{
				{
					Domains: []string{testLDSName},
					Routes:  []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "2": {Weight: 1}}}},
				},
			},
		}, testRDSName+"2",
	)
}
// TestRDSWatchAfterCache covers the case where watch is called after the update
// is in cache.
func (s) TestRDSWatchAfterCache(t *testing.T) {
	testWatchAfterCache(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{
		VirtualHosts: []*xdsresource.VirtualHost{
			{
				Domains: []string{testLDSName},
				Routes:  []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}},
			},
		},
	}, testRDSName)
}
// TestRouteWatchNACKError covers the case that an update is NACK'ed, and the
// watcher should also receive the error.
func (s) TestRouteWatchNACKError(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	client, ctrlCh := testClientSetup(t, false)
	rdsUpdateCh, _ := newWatch(t, client, xdsresource.RouteConfigResource, testRDSName)
	_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.RouteConfigResource, testRDSName)
	wantError := fmt.Errorf("testing error")
	updateHandler.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
	if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, xdsresource.RouteConfigUpdate{}, wantError); err != nil {
		t.Fatal(err)
	}
@ -363,63 +111,12 @@ func (s) TestRouteWatchNACKError(t *testing.T) {
// But the watchers with valid resources should receive the update, those with
// invalid resources should receive an error.
func (s) TestRouteWatchPartialValid(t *testing.T) {
	testWatchPartialValid(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{
		VirtualHosts: []*xdsresource.VirtualHost{
			{
				Domains: []string{testLDSName},
				Routes:  []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}},
			},
		},
	}, testRDSName)
}

@ -0,0 +1,614 @@
/*
*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xdsclient
import (
"context"
"fmt"
"testing"
"google.golang.org/grpc/internal/testutils"
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
"google.golang.org/grpc/xds/internal/xdsclient/pubsub"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"
"google.golang.org/protobuf/types/known/anypb"
)
// testClientSetup sets up the client and controller for the test. It returns a
// newly created client, and a channel on which new controllers will be sent.
func testClientSetup(t *testing.T, overrideWatchExpiryTimeout bool) (*clientImpl, *testutils.Channel) {
t.Helper()
ctrlCh := overrideNewController(t)
watchExpiryTimeout := defaultWatchExpiryTimeout
if overrideWatchExpiryTimeout {
watchExpiryTimeout = defaultTestWatchExpiryTimeout
}
client, err := newWithConfig(clientOpts(), watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
t.Cleanup(client.Close)
return client, ctrlCh
}
// newWatch starts a new watch on the client.
func newWatch(t *testing.T, client *clientImpl, typ xdsresource.ResourceType, resourceName string) (updateCh *testutils.Channel, cancelWatch func()) {
newWatchF, _, _ := typeToTestFuncs(typ)
updateCh, cancelWatch = newWatchF(client, resourceName)
t.Cleanup(cancelWatch)
if u, ok := updateCh.ReceiveOrFail(); ok {
t.Fatalf("received unexpected update immediately after watch: %+v", u)
}
return
}
// getControllerAndPubsub returns the controller and pubsub for the given
// type+resourceName from the client.
func getControllerAndPubsub(ctx context.Context, t *testing.T, client *clientImpl, ctrlCh *testutils.Channel, typ xdsresource.ResourceType, resourceName string) (*testController, pubsub.UpdateHandler) {
c, err := ctrlCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
ctrl := c.(*testController)
if _, err := ctrl.addWatches[typ].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
updateHandler := findPubsubForTest(t, client, xdsresource.ParseName(resourceName).Authority)
return ctrl, updateHandler
}
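// Editorial sketch (not part of this change): the helpers above are expected
// to compose in a per-type test roughly as follows; the Listener type and the
// "example-listener" name are illustrative only.
//
//	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
//	defer cancel()
//	client, ctrlCh := testClientSetup(t, false)
//	updateCh, _ := newWatch(t, client, xdsresource.ListenerResource, "example-listener")
//	_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ListenerResource, "example-listener")
//	updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{
//		"example-listener": {Update: xdsresource.ListenerUpdate{RouteConfigName: "example-route"}},
//	}, xdsresource.UpdateMetadata{})
//	// The watcher registered via newWatch should now see the update on updateCh.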
// findPubsubForTest returns the pubsub for the given authority, to send updates
// to. If authority is "", the default is returned. If the authority is not
// found, the test will fail.
func findPubsubForTest(t *testing.T, c *clientImpl, authority string) pubsub.UpdateHandler {
t.Helper()
var config *bootstrap.ServerConfig
if authority == "" {
config = c.config.XDSServer
} else {
authConfig, ok := c.config.Authorities[authority]
if !ok {
t.Fatalf("failed to find authority %q", authority)
}
config = authConfig.XDSServer
}
a := c.authorities[config.String()]
if a == nil {
t.Fatalf("authority for %q is not created", authority)
}
return a.pubsub
}
var (
newLDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) {
updateCh := testutils.NewChannel()
cancelLastWatch := client.WatchListener(resourceName, func(update xdsresource.ListenerUpdate, err error) {
updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err})
})
return updateCh, cancelLastWatch
}
newLDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) {
wantUpdates := map[string]xdsresource.ListenerUpdateErrTuple{}
for n, u := range updates {
wantUpdate := u.(xdsresource.ListenerUpdate)
wantUpdates[n] = xdsresource.ListenerUpdateErrTuple{Update: wantUpdate}
}
updateHandler.NewListeners(wantUpdates, xdsresource.UpdateMetadata{})
}
verifyLDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) {
t.Helper()
wantUpdate := update.(xdsresource.ListenerUpdate)
if err := verifyListenerUpdate(ctx, updateCh, wantUpdate, err); err != nil {
t.Fatal(err)
}
}
newRDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) {
updateCh := testutils.NewChannel()
cancelLastWatch := client.WatchRouteConfig(resourceName, func(update xdsresource.RouteConfigUpdate, err error) {
updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err})
})
return updateCh, cancelLastWatch
}
newRDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) {
wantUpdates := map[string]xdsresource.RouteConfigUpdateErrTuple{}
for n, u := range updates {
wantUpdate := u.(xdsresource.RouteConfigUpdate)
wantUpdates[n] = xdsresource.RouteConfigUpdateErrTuple{Update: wantUpdate}
}
updateHandler.NewRouteConfigs(wantUpdates, xdsresource.UpdateMetadata{})
}
verifyRDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) {
t.Helper()
wantUpdate := update.(xdsresource.RouteConfigUpdate)
if err := verifyRouteConfigUpdate(ctx, updateCh, wantUpdate, err); err != nil {
t.Fatal(err)
}
}
newCDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) {
updateCh := testutils.NewChannel()
cancelLastWatch := client.WatchCluster(resourceName, func(update xdsresource.ClusterUpdate, err error) {
updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err})
})
return updateCh, cancelLastWatch
}
newCDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) {
wantUpdates := map[string]xdsresource.ClusterUpdateErrTuple{}
for n, u := range updates {
wantUpdate := u.(xdsresource.ClusterUpdate)
wantUpdates[n] = xdsresource.ClusterUpdateErrTuple{Update: wantUpdate}
}
updateHandler.NewClusters(wantUpdates, xdsresource.UpdateMetadata{})
}
verifyCDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) {
t.Helper()
wantUpdate := update.(xdsresource.ClusterUpdate)
if err := verifyClusterUpdate(ctx, updateCh, wantUpdate, err); err != nil {
t.Fatal(err)
}
}
newEDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) {
updateCh := testutils.NewChannel()
cancelLastWatch := client.WatchEndpoints(resourceName, func(update xdsresource.EndpointsUpdate, err error) {
updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err})
})
return updateCh, cancelLastWatch
}
newEDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) {
wantUpdates := map[string]xdsresource.EndpointsUpdateErrTuple{}
for n, u := range updates {
wantUpdate := u.(xdsresource.EndpointsUpdate)
wantUpdates[n] = xdsresource.EndpointsUpdateErrTuple{Update: wantUpdate}
}
updateHandler.NewEndpoints(wantUpdates, xdsresource.UpdateMetadata{})
}
verifyEDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) {
t.Helper()
wantUpdate := update.(xdsresource.EndpointsUpdate)
if err := verifyEndpointsUpdate(ctx, updateCh, wantUpdate, err); err != nil {
t.Fatal(err)
}
}
)
func typeToTestFuncs(typ xdsresource.ResourceType) (
newWatchF func(client *clientImpl, resourceName string) (*testutils.Channel, func()),
newUpdateF func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}),
verifyUpdateF func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error),
) {
switch typ {
case xdsresource.ListenerResource:
newWatchF = newLDSWatchF
newUpdateF = newLDSUpdateF
verifyUpdateF = verifyLDSUpdateF
case xdsresource.RouteConfigResource:
newWatchF = newRDSWatchF
newUpdateF = newRDSUpdateF
verifyUpdateF = verifyRDSUpdateF
case xdsresource.ClusterResource:
newWatchF = newCDSWatchF
newUpdateF = newCDSUpdateF
verifyUpdateF = verifyCDSUpdateF
case xdsresource.EndpointsResource:
newWatchF = newEDSWatchF
newUpdateF = newEDSUpdateF
verifyUpdateF = verifyEDSUpdateF
}
return
}
func buildResourceName(typ xdsresource.ResourceType, auth, id string, ctxParams map[string]string) string {
var typS string
switch typ {
case xdsresource.ListenerResource:
typS = version.V3ListenerType
case xdsresource.RouteConfigResource:
typS = version.V3RouteConfigType
case xdsresource.ClusterResource:
typS = version.V3ClusterType
case xdsresource.EndpointsResource:
typS = version.V3EndpointsType
}
return (&xdsresource.Name{
Scheme: "xdstp",
Authority: auth,
Type: typS,
ID: id,
ContextParams: ctxParams,
}).String()
}
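// Editorial note (not part of this change): with the testAuthority constant
// used elsewhere in these tests, a call such as
//
//	buildResourceName(xdsresource.ListenerResource, testAuthority, "foo", nil)
//
// is expected to produce an xdstp-style name along the lines of
//
//	"xdstp://<testAuthority>/envoy.config.listener.v3.Listener/foo"
//
// with any context parameters rendered as a query string by
// xdsresource.Name.String().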
// testWatch covers the cases:
// - an update is received after a watch()
// - an update for another resource name
// - an update is received after cancel()
func testWatch(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) {
overrideFedEnvVar(t)
for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} {
t.Run(rName, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
client, ctrlCh := testClientSetup(t, false)
updateCh, cancelWatch := newWatch(t, client, typ, rName)
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName)
_, newUpdateF, verifyUpdateF := typeToTestFuncs(typ)
// Send an update, and check the result.
newUpdateF(updateHandler, map[string]interface{}{rName: update})
verifyUpdateF(ctx, t, updateCh, update, nil)
// Push an update, with an extra resource for a different resource name.
// Specify a non-nil raw proto in the original resource to ensure that the
// new update is not considered equal to the old one.
var newUpdate interface{}
switch typ {
case xdsresource.ListenerResource:
newU := update.(xdsresource.ListenerUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.RouteConfigResource:
newU := update.(xdsresource.RouteConfigUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.ClusterResource:
newU := update.(xdsresource.ClusterUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.EndpointsResource:
newU := update.(xdsresource.EndpointsUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
}
newUpdateF(updateHandler, map[string]interface{}{rName: newUpdate})
verifyUpdateF(ctx, t, updateCh, newUpdate, nil)
// Cancel watch, and send update again.
cancelWatch()
newUpdateF(updateHandler, map[string]interface{}{rName: update})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err)
}
})
}
}
// testTwoWatchSameResourceName covers the case where an update is
// received after two watch() for the same resource name.
func testTwoWatchSameResourceName(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) {
overrideFedEnvVar(t)
for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} {
t.Run(rName, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
client, ctrlCh := testClientSetup(t, false)
			updateCh, _ := newWatch(t, client, typ, rName)
			_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName)
			newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ)
			updateChs := []*testutils.Channel{updateCh}
			var cancelLastWatch func()
			const count = 1
			for i := 0; i < count; i++ {
				var updateCh *testutils.Channel
				updateCh, cancelLastWatch = newWatchF(client, rName)
				updateChs = append(updateChs, updateCh)
			}
			newUpdateF(updateHandler, map[string]interface{}{rName: update})
for i := 0; i < count+1; i++ {
verifyUpdateF(ctx, t, updateChs[i], update, nil)
}
// Cancel the last watch, and send update again. None of the watchers should
// be notified because one has been cancelled, and the other is receiving
// the same update.
cancelLastWatch()
			newUpdateF(updateHandler, map[string]interface{}{rName: update})
for i := 0; i < count+1; i++ {
func() {
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := updateChs[i].Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err)
}
}()
}
// Push a new update and make sure the uncancelled watcher is invoked.
// Specify a non-nil raw proto to ensure that the new update is not
// considered equal to the old one.
var newUpdate interface{}
switch typ {
case xdsresource.ListenerResource:
newU := update.(xdsresource.ListenerUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.RouteConfigResource:
newU := update.(xdsresource.RouteConfigUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.ClusterResource:
newU := update.(xdsresource.ClusterUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.EndpointsResource:
newU := update.(xdsresource.EndpointsUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
}
			newUpdateF(updateHandler, map[string]interface{}{rName: newUpdate})
verifyUpdateF(ctx, t, updateCh, newUpdate, nil)
})
}
}
// testThreeWatchDifferentResourceName starts two watches for name1, and one
// watch for name2. This test verifies that two watches for name1 receive the
// same update, and name2 watch receives a different update.
func testThreeWatchDifferentResourceName(t *testing.T, typ xdsresource.ResourceType, update1 interface{}, resourceName1 string, update2 interface{}, resourceName2 string) {
overrideFedEnvVar(t)
for _, rName := range [][]string{
{resourceName1, resourceName2},
{buildResourceName(typ, testAuthority, resourceName1, nil), buildResourceName(typ, testAuthority, resourceName2, nil)},
} {
t.Run(rName[0], func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
client, ctrlCh := testClientSetup(t, false)
updateCh, _ := newWatch(t, client, typ, rName[0])
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0])
newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ)
// Two watches for the same name.
updateChs := []*testutils.Channel{updateCh}
const count = 1
for i := 0; i < count; i++ {
var updateCh *testutils.Channel
updateCh, _ = newWatchF(client, rName[0])
updateChs = append(updateChs, updateCh)
}
// Third watch for a different name.
updateCh2, _ := newWatchF(client, rName[1])
newUpdateF(updateHandler, map[string]interface{}{
rName[0]: update1,
rName[1]: update2,
})
// The first several watches for the same resource should all
// receive the first update.
for i := 0; i < count+1; i++ {
verifyUpdateF(ctx, t, updateChs[i], update1, nil)
}
// The last watch for the different resource should receive the
// second update.
verifyUpdateF(ctx, t, updateCh2, update2, nil)
})
}
}
// testWatchAfterCache covers the case where watch is called after the update is
// in cache.
func testWatchAfterCache(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) {
overrideFedEnvVar(t)
for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} {
t.Run(rName, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
client, ctrlCh := testClientSetup(t, false)
updateCh, _ := newWatch(t, client, typ, rName)
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName)
newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ)
newUpdateF(updateHandler, map[string]interface{}{rName: update})
verifyUpdateF(ctx, t, updateCh, update, nil)
// Another watch for the resource in cache.
updateCh2, _ := newWatchF(client, rName)
// New watch should receive the update.
verifyUpdateF(ctx, t, updateCh2, update, nil)
// Old watch should see nothing.
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded {
t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err)
}
})
}
}
// testResourceRemoved covers the cases:
// - an update is received after a watch()
// - another update is received, with one resource removed
// - this should trigger callback with resource removed error
// - one more update without the removed resource
// - the callback (above) shouldn't receive any update
func testResourceRemoved(t *testing.T, typ xdsresource.ResourceType, update1 interface{}, resourceName1 string, update2 interface{}, resourceName2 string) {
overrideFedEnvVar(t)
for _, rName := range [][]string{
{resourceName1, resourceName2},
{buildResourceName(typ, testAuthority, resourceName1, nil), buildResourceName(typ, testAuthority, resourceName2, nil)},
} {
t.Run(rName[0], func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
client, ctrlCh := testClientSetup(t, false)
updateCh, _ := newWatch(t, client, typ, rName[0])
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0])
newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ)
// Another watch for a different name.
updateCh2, _ := newWatchF(client, rName[1])
newUpdateF(updateHandler, map[string]interface{}{
rName[0]: update1,
rName[1]: update2,
})
verifyUpdateF(ctx, t, updateCh, update1, nil)
verifyUpdateF(ctx, t, updateCh2, update2, nil)
// Send another update to remove resource 1.
newUpdateF(updateHandler, map[string]interface{}{
rName[1]: update2,
})
// Watcher 1 should get an error.
if u, err := updateCh.Receive(ctx); err != nil {
t.Errorf("failed to receive update: %v", err)
} else {
var gotErr error
switch typ {
case xdsresource.ListenerResource:
newU := u.(xdsresource.ListenerUpdateErrTuple)
gotErr = newU.Err
case xdsresource.RouteConfigResource:
newU := u.(xdsresource.RouteConfigUpdateErrTuple)
gotErr = newU.Err
case xdsresource.ClusterResource:
newU := u.(xdsresource.ClusterUpdateErrTuple)
gotErr = newU.Err
case xdsresource.EndpointsResource:
newU := u.(xdsresource.EndpointsUpdateErrTuple)
gotErr = newU.Err
}
if xdsresource.ErrType(gotErr) != xdsresource.ErrorTypeResourceNotFound {
					t.Errorf("unexpected update: %v, error receiving from channel: %v, want update with error resource not found", u, err)
}
}
// Watcher 2 should not see an update since the resource has not changed.
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := updateCh2.Receive(sCtx); err != context.DeadlineExceeded {
				t.Errorf("unexpected update: %v, want receiving from channel timeout", u)
}
// Send another update with resource 2 modified. Specify a non-nil raw proto
// to ensure that the new update is not considered equal to the old one.
var newUpdate interface{}
switch typ {
case xdsresource.ListenerResource:
newU := update2.(xdsresource.ListenerUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.RouteConfigResource:
newU := update2.(xdsresource.RouteConfigUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.ClusterResource:
newU := update2.(xdsresource.ClusterUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
case xdsresource.EndpointsResource:
newU := update2.(xdsresource.EndpointsUpdate)
newU.Raw = &anypb.Any{}
newUpdate = newU
}
newUpdateF(updateHandler, map[string]interface{}{
rName[1]: newUpdate,
})
// Watcher 1 should not see an update.
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded {
				t.Errorf("unexpected update: %v, want receiving from channel timeout", u)
}
// Watcher 2 should get the update.
verifyUpdateF(ctx, t, updateCh2, newUpdate, nil)
})
}
}
// testWatchPartialValid covers the case that a response contains both
// valid and invalid resources. This response will be NACK'ed by the xdsclient.
// But the watchers with valid resources should receive the update, those with
// invalid resources should receive an error.
func testWatchPartialValid(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) {
overrideFedEnvVar(t)
const badResourceName = "bad-resource"
for _, rName := range [][]string{
{resourceName, badResourceName},
{buildResourceName(typ, testAuthority, resourceName, nil), buildResourceName(typ, testAuthority, badResourceName, nil)},
} {
t.Run(rName[0], func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
client, ctrlCh := testClientSetup(t, false)
updateCh, _ := newWatch(t, client, typ, rName[0])
_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0])
newWatchF, _, verifyUpdateF := typeToTestFuncs(typ)
updateChs := map[string]*testutils.Channel{
rName[0]: updateCh,
}
for _, name := range []string{rName[1]} {
updateChT, _ := newWatchF(client, rName[1])
updateChs[name] = updateChT
}
wantError := fmt.Errorf("testing error")
wantError2 := fmt.Errorf("individual error")
switch typ {
case xdsresource.ListenerResource:
updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{
rName[0]: {Update: update.(xdsresource.ListenerUpdate)},
rName[1]: {Err: wantError2},
}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
case xdsresource.RouteConfigResource:
updateHandler.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{
rName[0]: {Update: update.(xdsresource.RouteConfigUpdate)},
rName[1]: {Err: wantError2},
}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
case xdsresource.ClusterResource:
updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
rName[0]: {Update: update.(xdsresource.ClusterUpdate)},
rName[1]: {Err: wantError2},
}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
case xdsresource.EndpointsResource:
updateHandler.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{
rName[0]: {Update: update.(xdsresource.EndpointsUpdate)},
rName[1]: {Err: wantError2},
}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
}
// The valid resource should be sent to the watcher.
verifyUpdateF(ctx, t, updateChs[rName[0]], update, nil)
// The failed watcher should receive an error.
verifyUpdateF(ctx, t, updateChs[rName[1]], update, wantError2)
})
}
}

@ -32,5 +32,3 @@ type s struct {
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
const testXDSServer = "xds-server"

@ -64,6 +64,7 @@ func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadat
switch ret2 := ret.(type) {
case map[string]ListenerUpdateErrTuple:
name, update, err := unmarshalListenerResource(r, opts.UpdateValidator, opts.Logger)
name = ParseName(name).String()
if err == nil {
ret2[name] = ListenerUpdateErrTuple{Update: update}
continue
@ -78,6 +79,7 @@ func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadat
ret2[name] = ListenerUpdateErrTuple{Err: err}
case map[string]RouteConfigUpdateErrTuple:
name, update, err := unmarshalRouteConfigResource(r, opts.Logger)
name = ParseName(name).String()
if err == nil {
ret2[name] = RouteConfigUpdateErrTuple{Update: update}
continue
@ -92,6 +94,7 @@ func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadat
ret2[name] = RouteConfigUpdateErrTuple{Err: err}
case map[string]ClusterUpdateErrTuple:
name, update, err := unmarshalClusterResource(r, opts.UpdateValidator, opts.Logger)
name = ParseName(name).String()
if err == nil {
ret2[name] = ClusterUpdateErrTuple{Update: update}
continue
@ -106,6 +109,7 @@ func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadat
ret2[name] = ClusterUpdateErrTuple{Err: err}
case map[string]EndpointsUpdateErrTuple:
name, update, err := unmarshalEndpointsResource(r, opts.Logger)
name = ParseName(name).String()
if err == nil {
ret2[name] = EndpointsUpdateErrTuple{Update: update}
continue
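// Editorial note (not part of this change): the ParseName(name).String()
// round-trips added above are intended to canonicalize resource names before
// they are used as map keys, so a resource delivered under an xdstp:// name
// reaches watchers regardless of superficial differences in the name the
// server sent. Illustratively (exact canonical form depends on
// xdsresource.Name.String()):
//
//	ParseName("xdstp://auth/envoy.config.cluster.v3.Cluster/c1?b=2&a=1").String()
//	// expected to yield a canonical ordering of context params, e.g.
//	// "xdstp://auth/envoy.config.cluster.v3.Cluster/c1?a=1&b=2"
//
// while a plain (non-xdstp) name should pass through unchanged.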

@ -35,17 +35,29 @@ const (
// Resource URLs. We need to be able to accept either version of the resource
// regardless of the version of the transport protocol in use.
const (
	googleapiPrefix = "type.googleapis.com/"
V2ListenerType = "envoy.api.v2.Listener"
V2RouteConfigType = "envoy.api.v2.RouteConfiguration"
V2ClusterType = "envoy.api.v2.Cluster"
V2EndpointsType = "envoy.api.v2.ClusterLoadAssignment"
V2ListenerURL = googleapiPrefix + V2ListenerType
V2RouteConfigURL = googleapiPrefix + V2RouteConfigType
V2ClusterURL = googleapiPrefix + V2ClusterType
V2EndpointsURL = googleapiPrefix + V2EndpointsType
V2HTTPConnManagerURL = googleapiPrefix + "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"
V3ListenerType = "envoy.config.listener.v3.Listener"
V3RouteConfigType = "envoy.config.route.v3.RouteConfiguration"
V3ClusterType = "envoy.config.cluster.v3.Cluster"
V3EndpointsType = "envoy.config.endpoint.v3.ClusterLoadAssignment"
V3ListenerURL = googleapiPrefix + V3ListenerType
V3RouteConfigURL = googleapiPrefix + V3RouteConfigType
V3ClusterURL = googleapiPrefix + V3ClusterType
V3EndpointsURL = googleapiPrefix + V3EndpointsType
V3HTTPConnManagerURL = googleapiPrefix + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
V3UpstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"
V3DownstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"
)
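// Editorial note (not part of this change): the new *Type constants are the
// type URLs with the googleapiPrefix stripped, i.e. the bare proto message
// names used as the type segment of xdstp:// resource names. For example:
//
//	V3ListenerURL == googleapiPrefix + V3ListenerType
//	strings.TrimPrefix(V3ListenerURL, googleapiPrefix) == V3ListenerType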