mirror of https://github.com/grpc/grpc-go.git
xds: update xdsclient NACK to keep valid resources (#4743)
commit 16cf65612e
parent 43e8fd4f69
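The diff below replaces bare update maps (e.g. map[string]ListenerUpdate) with per-resource (update, error) tuples, so a response that NACKs one resource can still deliver the valid ones. A minimal, self-contained sketch of the tuple shape, reconstructed from its uses in this diff — the stand-in ListenerUpdate field is hypothetical; the real struct carries more fields:

package main

import "fmt"

// Stand-in for the per-type update structs defined in the xdsclient package;
// the real ListenerUpdate has more fields (Raw, HTTPFilters, ...).
type ListenerUpdate struct{ RouteConfigName string }

// ListenerUpdateErrTuple mirrors the shape used throughout the diff below:
// for each resource name, exactly one of Update or Err is meaningful. The
// package defines analogous tuples for routes, clusters, and endpoints.
type ListenerUpdateErrTuple struct {
    Update ListenerUpdate
    Err    error
}

func main() {
    good := ListenerUpdateErrTuple{Update: ListenerUpdate{RouteConfigName: "route-1"}}
    bad := ListenerUpdateErrTuple{Err: fmt.Errorf("resource failed validation")}
    fmt.Println(good.Update.RouteConfigName, bad.Err)
}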
@@ -211,22 +211,28 @@ func TestCSDS(t *testing.T) {
    }

    const nackResourceIdx = 0
    var (
        nackListeners = append([]*v3listenerpb.Listener{}, listeners...)
        nackRoutes = append([]*v3routepb.RouteConfiguration{}, routes...)
        nackClusters = append([]*v3clusterpb.Cluster{}, clusters...)
        nackEndpoints = append([]*v3endpointpb.ClusterLoadAssignment{}, endpoints...)
    )
    nackListeners[0] = &v3listenerpb.Listener{Name: ldsTargets[nackResourceIdx], ApiListener: &v3listenerpb.ApiListener{}} // 0 will be nacked. 1 will stay the same.
    nackRoutes[0] = &v3routepb.RouteConfiguration{
        Name: rdsTargets[nackResourceIdx], VirtualHosts: []*v3routepb.VirtualHost{{Routes: []*v3routepb.Route{{}}}},
    }
    nackClusters[0] = &v3clusterpb.Cluster{
        Name: cdsTargets[nackResourceIdx], ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC},
    }
    nackEndpoints[0] = &v3endpointpb.ClusterLoadAssignment{
        ClusterName: edsTargets[nackResourceIdx], Endpoints: []*v3endpointpb.LocalityLbEndpoints{{}},
    }
    if err := mgmServer.Update(ctx, e2e.UpdateOptions{
        NodeID: nodeID,
        Listeners: []*v3listenerpb.Listener{
            {Name: ldsTargets[nackResourceIdx], ApiListener: &v3listenerpb.ApiListener{}}, // 0 will be nacked. 1 will stay the same.
        },
        Routes: []*v3routepb.RouteConfiguration{
            {Name: rdsTargets[nackResourceIdx], VirtualHosts: []*v3routepb.VirtualHost{{
                Routes: []*v3routepb.Route{{}},
            }}},
        },
        Clusters: []*v3clusterpb.Cluster{
            {Name: cdsTargets[nackResourceIdx], ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}},
        },
        Endpoints: []*v3endpointpb.ClusterLoadAssignment{
            {ClusterName: edsTargets[nackResourceIdx], Endpoints: []*v3endpointpb.LocalityLbEndpoints{{}}},
        },
        NodeID: nodeID,
        Listeners: nackListeners,
        Routes: nackRoutes,
        Clusters: nackClusters,
        Endpoints: nackEndpoints,
        SkipValidation: true,
    }); err != nil {
        t.Fatal(err)

@@ -490,7 +496,6 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco
        ackVersion = "1"
        nackVersion = "2"
    )

    if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil {
        return fmt.Errorf("failed to send: %v", err)
    }

@@ -514,13 +519,14 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco
        configDump := &v3adminpb.ListenersConfigDump_DynamicListener{
            Name: ldsTargets[i],
            ActiveState: &v3adminpb.ListenersConfigDump_DynamicListenerState{
                VersionInfo: ackVersion,
                VersionInfo: nackVersion,
                Listener: listenerAnys[i],
                LastUpdated: nil,
            },
            ClientStatus: v3adminpb.ClientResourceStatus_ACKED,
        }
        if i == nackResourceIdx {
            configDump.ActiveState.VersionInfo = ackVersion
            configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED
            configDump.ErrorState = &v3adminpb.UpdateFailureState{
                Details: "blahblah",

@@ -540,12 +546,13 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco
    var wantRoutes []*v3adminpb.RoutesConfigDump_DynamicRouteConfig
    for i := range rdsTargets {
        configDump := &v3adminpb.RoutesConfigDump_DynamicRouteConfig{
            VersionInfo: ackVersion,
            VersionInfo: nackVersion,
            RouteConfig: routeAnys[i],
            LastUpdated: nil,
            ClientStatus: v3adminpb.ClientResourceStatus_ACKED,
        }
        if i == nackResourceIdx {
            configDump.VersionInfo = ackVersion
            configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED
            configDump.ErrorState = &v3adminpb.UpdateFailureState{
                Details: "blahblah",

@@ -564,12 +571,13 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco
    var wantCluster []*v3adminpb.ClustersConfigDump_DynamicCluster
    for i := range cdsTargets {
        configDump := &v3adminpb.ClustersConfigDump_DynamicCluster{
            VersionInfo: ackVersion,
            VersionInfo: nackVersion,
            Cluster: clusterAnys[i],
            LastUpdated: nil,
            ClientStatus: v3adminpb.ClientResourceStatus_ACKED,
        }
        if i == nackResourceIdx {
            configDump.VersionInfo = ackVersion
            configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED
            configDump.ErrorState = &v3adminpb.UpdateFailureState{
                Details: "blahblah",

@@ -589,12 +597,13 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco
    var wantEndpoint []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig
    for i := range cdsTargets {
        configDump := &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{
            VersionInfo: ackVersion,
            VersionInfo: nackVersion,
            EndpointConfig: endpointAnys[i],
            LastUpdated: nil,
            ClientStatus: v3adminpb.ClientResourceStatus_ACKED,
        }
        if i == nackResourceIdx {
            configDump.VersionInfo = ackVersion
            configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED
            configDump.ErrorState = &v3adminpb.UpdateFailureState{
                Details: "blahblah",
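The assertions above encode the per-resource rule this commit introduces. A standalone sketch of that rule — the function and string statuses are illustrative, not part of the grpc-go API:

package main

import "fmt"

// expectedStatus captures what the CSDS assertions above check: within a
// single NACKed response, only the invalid resource keeps its previous ACKed
// version and turns NACKED; every valid resource advances to the response's
// version and stays ACKED.
func expectedStatus(i, nackIdx int, ackVersion, nackVersion string) (version, status string) {
    if i == nackIdx {
        return ackVersion, "NACKED" // previous good version is retained
    }
    return nackVersion, "ACKED" // valid resources move to the new version
}

func main() {
    for i := 0; i < 2; i++ {
        v, s := expectedStatus(i, 0, "1", "2")
        fmt.Printf("resource %d: version=%s status=%s\n", i, v, s)
    }
}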
@@ -76,15 +76,17 @@ func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) {
//
// A response can contain multiple resources. They will be parsed and put in a
// map from resource name to the resource content.
func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata UpdateMetadata) {
func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, metadata UpdateMetadata) {
    c.mu.Lock()
    defer c.mu.Unlock()

    c.ldsVersion = metadata.Version
    if metadata.ErrState != nil {
        // On NACK, update overall version to the NACKed resp.
        c.ldsVersion = metadata.ErrState.Version
        for name := range updates {
            if s, ok := c.ldsWatchers[name]; ok {
    }
    for name, uErr := range updates {
        if s, ok := c.ldsWatchers[name]; ok {
            if uErr.Err != nil {
                // On error, keep previous version for each resource. But update
                // status and error.
                mdCopy := c.ldsMD[name]

@@ -92,25 +94,27 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata Up
                mdCopy.Status = metadata.Status
                c.ldsMD[name] = mdCopy
                for wi := range s {
                    wi.newError(metadata.ErrState.Err)
                    wi.newError(uErr.Err)
                }
                continue
            }
        }
        return
    }

    // If no error received, the status is ACK.
    c.ldsVersion = metadata.Version
    for name, update := range updates {
        if s, ok := c.ldsWatchers[name]; ok {
            // Only send the update if this is not an error.
            // If the resource is valid, send the update.
            for wi := range s {
                wi.newUpdate(update)
                wi.newUpdate(uErr.Update)
            }
            // Sync cache.
            c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update))
            c.ldsCache[name] = update
            c.ldsMD[name] = metadata
            c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
            c.ldsCache[name] = uErr.Update
            // Set status to ACK, and clear error state. The metadata might be a
            // NACK metadata because some other resources in the same response
            // are invalid.
            mdCopy := metadata
            mdCopy.Status = ServiceStatusACKed
            mdCopy.ErrState = nil
            if metadata.ErrState != nil {
                mdCopy.Version = metadata.ErrState.Version
            }
            c.ldsMD[name] = mdCopy
        }
    }
    // Resources not in the new update were removed by the server, so delete

@@ -137,15 +141,18 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata Up
//
// A response can contain multiple resources. They will be parsed and put in a
// map from resource name to the resource content.
func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metadata UpdateMetadata) {
func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTuple, metadata UpdateMetadata) {
    c.mu.Lock()
    defer c.mu.Unlock()

    // If no error received, the status is ACK.
    c.rdsVersion = metadata.Version
    if metadata.ErrState != nil {
        // On NACK, update overall version to the NACKed resp.
        c.rdsVersion = metadata.ErrState.Version
        for name := range updates {
            if s, ok := c.rdsWatchers[name]; ok {
    }
    for name, uErr := range updates {
        if s, ok := c.rdsWatchers[name]; ok {
            if uErr.Err != nil {
                // On error, keep previous version for each resource. But update
                // status and error.
                mdCopy := c.rdsMD[name]

@@ -153,25 +160,27 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metad
                mdCopy.Status = metadata.Status
                c.rdsMD[name] = mdCopy
                for wi := range s {
                    wi.newError(metadata.ErrState.Err)
                    wi.newError(uErr.Err)
                }
                continue
            }
        }
        return
    }

    // If no error received, the status is ACK.
    c.rdsVersion = metadata.Version
    for name, update := range updates {
        if s, ok := c.rdsWatchers[name]; ok {
            // Only send the update if this is not an error.
            // If the resource is valid, send the update.
            for wi := range s {
                wi.newUpdate(update)
                wi.newUpdate(uErr.Update)
            }
            // Sync cache.
            c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update))
            c.rdsCache[name] = update
            c.rdsMD[name] = metadata
            c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
            c.rdsCache[name] = uErr.Update
            // Set status to ACK, and clear error state. The metadata might be a
            // NACK metadata because some other resources in the same response
            // are invalid.
            mdCopy := metadata
            mdCopy.Status = ServiceStatusACKed
            mdCopy.ErrState = nil
            if metadata.ErrState != nil {
                mdCopy.Version = metadata.ErrState.Version
            }
            c.rdsMD[name] = mdCopy
        }
    }
}

@@ -181,15 +190,17 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metad
//
// A response can contain multiple resources. They will be parsed and put in a
// map from resource name to the resource content.
func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata UpdateMetadata) {
func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metadata UpdateMetadata) {
    c.mu.Lock()
    defer c.mu.Unlock()

    c.cdsVersion = metadata.Version
    if metadata.ErrState != nil {
        // On NACK, update overall version to the NACKed resp.
        c.cdsVersion = metadata.ErrState.Version
        for name := range updates {
            if s, ok := c.cdsWatchers[name]; ok {
    }
    for name, uErr := range updates {
        if s, ok := c.cdsWatchers[name]; ok {
            if uErr.Err != nil {
                // On error, keep previous version for each resource. But update
                // status and error.
                mdCopy := c.cdsMD[name]

@@ -197,25 +208,29 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata Upda
                mdCopy.Status = metadata.Status
                c.cdsMD[name] = mdCopy
                for wi := range s {
                    wi.newError(metadata.ErrState.Err)
                    // Send the watcher the individual error, instead of the
                    // overall combined error from the metadata.ErrState.
                    wi.newError(uErr.Err)
                }
                continue
            }
        }
        return
    }

    // If no error received, the status is ACK.
    c.cdsVersion = metadata.Version
    for name, update := range updates {
        if s, ok := c.cdsWatchers[name]; ok {
            // Only send the update if this is not an error.
            // If the resource is valid, send the update.
            for wi := range s {
                wi.newUpdate(update)
                wi.newUpdate(uErr.Update)
            }
            // Sync cache.
            c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update))
            c.cdsCache[name] = update
            c.cdsMD[name] = metadata
            c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
            c.cdsCache[name] = uErr.Update
            // Set status to ACK, and clear error state. The metadata might be a
            // NACK metadata because some other resources in the same response
            // are invalid.
            mdCopy := metadata
            mdCopy.Status = ServiceStatusACKed
            mdCopy.ErrState = nil
            if metadata.ErrState != nil {
                mdCopy.Version = metadata.ErrState.Version
            }
            c.cdsMD[name] = mdCopy
        }
    }
    // Resources not in the new update were removed by the server, so delete

@@ -242,15 +257,17 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata Upda
//
// A response can contain multiple resources. They will be parsed and put in a
// map from resource name to the resource content.
func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata UpdateMetadata) {
func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, metadata UpdateMetadata) {
    c.mu.Lock()
    defer c.mu.Unlock()

    c.edsVersion = metadata.Version
    if metadata.ErrState != nil {
        // On NACK, update overall version to the NACKed resp.
        c.edsVersion = metadata.ErrState.Version
        for name := range updates {
            if s, ok := c.edsWatchers[name]; ok {
    }
    for name, uErr := range updates {
        if s, ok := c.edsWatchers[name]; ok {
            if uErr.Err != nil {
                // On error, keep previous version for each resource. But update
                // status and error.
                mdCopy := c.edsMD[name]

@@ -258,25 +275,29 @@ func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata U
                mdCopy.Status = metadata.Status
                c.edsMD[name] = mdCopy
                for wi := range s {
                    wi.newError(metadata.ErrState.Err)
                    // Send the watcher the individual error, instead of the
                    // overall combined error from the metadata.ErrState.
                    wi.newError(uErr.Err)
                }
                continue
            }
        }
        return
    }

    // If no error received, the status is ACK.
    c.edsVersion = metadata.Version
    for name, update := range updates {
        if s, ok := c.edsWatchers[name]; ok {
            // Only send the update if this is not an error.
            // If the resource is valid, send the update.
            for wi := range s {
                wi.newUpdate(update)
                wi.newUpdate(uErr.Update)
            }
            // Sync cache.
            c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update))
            c.edsCache[name] = update
            c.edsMD[name] = metadata
            c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
            c.edsCache[name] = uErr.Update
            // Set status to ACK, and clear error state. The metadata might be a
            // NACK metadata because some other resources in the same response
            // are invalid.
            mdCopy := metadata
            mdCopy.Status = ServiceStatusACKed
            mdCopy.ErrState = nil
            if metadata.ErrState != nil {
                mdCopy.Version = metadata.ErrState.Version
            }
            c.edsMD[name] = mdCopy
        }
    }
}
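A condensed, runnable sketch of the per-resource loop that the four handlers above now share — types are simplified stand-ins for the xdsclient ones, and the printed messages are illustrative:

package main

import (
    "errors"
    "fmt"
)

// Simplified stand-ins for the xdsclient types used above.
type ListenerUpdate struct{ RouteConfigName string }
type ListenerUpdateErrTuple struct {
    Update ListenerUpdate
    Err    error
}

// handle mirrors the new per-resource behavior: an entry with Err set only
// notifies watchers of the error and leaves the cached value alone; a valid
// entry is delivered and cached even when the response as a whole was NACKed.
func handle(updates map[string]ListenerUpdateErrTuple, cache map[string]ListenerUpdate) {
    for name, uErr := range updates {
        if uErr.Err != nil {
            fmt.Printf("%s: NACKed (%v); previous cached value kept\n", name, uErr.Err)
            continue
        }
        cache[name] = uErr.Update
        fmt.Printf("%s: ACKed; cache updated\n", name)
    }
}

func main() {
    cache := map[string]ListenerUpdate{}
    handle(map[string]ListenerUpdateErrTuple{
        "bad-listener":  {Err: errors.New("validation failed")},
        "good-listener": {Update: ListenerUpdate{RouteConfigName: "route-1"}},
    }, cache)
    fmt.Println("cached:", cache)
}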
@@ -1187,7 +1187,7 @@ func (s) TestUnmarshalCluster(t *testing.T) {
    tests := []struct {
        name string
        resources []*anypb.Any
        wantUpdate map[string]ClusterUpdate
        wantUpdate map[string]ClusterUpdateErrTuple
        wantMD UpdateMetadata
        wantErr bool
    }{

@@ -1199,7 +1199,7 @@ func (s) TestUnmarshalCluster(t *testing.T) {
                Version: testVersion,
                ErrState: &UpdateErrorMetadata{
                    Version: testVersion,
                    Err: errPlaceHolder,
                    Err: cmpopts.AnyError,
                },
            },
            wantErr: true,

@@ -1217,7 +1217,7 @@ func (s) TestUnmarshalCluster(t *testing.T) {
                Version: testVersion,
                ErrState: &UpdateErrorMetadata{
                    Version: testVersion,
                    Err: errPlaceHolder,
                    Err: cmpopts.AnyError,
                },
            },
            wantErr: true,

@@ -1230,13 +1230,15 @@ func (s) TestUnmarshalCluster(t *testing.T) {
                    ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC},
                }),
            },
            wantUpdate: map[string]ClusterUpdate{"test": {}},
            wantUpdate: map[string]ClusterUpdateErrTuple{
                "test": {Err: cmpopts.AnyError},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusNACKed,
                Version: testVersion,
                ErrState: &UpdateErrorMetadata{
                    Version: testVersion,
                    Err: errPlaceHolder,
                    Err: cmpopts.AnyError,
                },
            },
            wantErr: true,

@@ -1244,12 +1246,12 @@ func (s) TestUnmarshalCluster(t *testing.T) {
        {
            name: "v2 cluster",
            resources: []*anypb.Any{v2ClusterAny},
            wantUpdate: map[string]ClusterUpdate{
                v2ClusterName: {
            wantUpdate: map[string]ClusterUpdateErrTuple{
                v2ClusterName: {Update: ClusterUpdate{
                    ClusterName: v2ClusterName,
                    EDSServiceName: v2Service, EnableLRS: true,
                    Raw: v2ClusterAny,
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -1259,12 +1261,12 @@ func (s) TestUnmarshalCluster(t *testing.T) {
        {
            name: "v3 cluster",
            resources: []*anypb.Any{v3ClusterAny},
            wantUpdate: map[string]ClusterUpdate{
                v3ClusterName: {
            wantUpdate: map[string]ClusterUpdateErrTuple{
                v3ClusterName: {Update: ClusterUpdate{
                    ClusterName: v3ClusterName,
                    EDSServiceName: v3Service, EnableLRS: true,
                    Raw: v3ClusterAny,
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -1274,17 +1276,17 @@ func (s) TestUnmarshalCluster(t *testing.T) {
        {
            name: "multiple clusters",
            resources: []*anypb.Any{v2ClusterAny, v3ClusterAny},
            wantUpdate: map[string]ClusterUpdate{
                v2ClusterName: {
            wantUpdate: map[string]ClusterUpdateErrTuple{
                v2ClusterName: {Update: ClusterUpdate{
                    ClusterName: v2ClusterName,
                    EDSServiceName: v2Service, EnableLRS: true,
                    Raw: v2ClusterAny,
                },
                v3ClusterName: {
                }},
                v3ClusterName: {Update: ClusterUpdate{
                    ClusterName: v3ClusterName,
                    EDSServiceName: v3Service, EnableLRS: true,
                    Raw: v3ClusterAny,
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -1303,25 +1305,25 @@ func (s) TestUnmarshalCluster(t *testing.T) {
                }),
                v3ClusterAny,
            },
            wantUpdate: map[string]ClusterUpdate{
                v2ClusterName: {
            wantUpdate: map[string]ClusterUpdateErrTuple{
                v2ClusterName: {Update: ClusterUpdate{
                    ClusterName: v2ClusterName,
                    EDSServiceName: v2Service, EnableLRS: true,
                    Raw: v2ClusterAny,
                },
                v3ClusterName: {
                }},
                v3ClusterName: {Update: ClusterUpdate{
                    ClusterName: v3ClusterName,
                    EDSServiceName: v3Service, EnableLRS: true,
                    Raw: v3ClusterAny,
                },
                "bad": {},
                }},
                "bad": {Err: cmpopts.AnyError},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusNACKed,
                Version: testVersion,
                ErrState: &UpdateErrorMetadata{
                    Version: testVersion,
                    Err: errPlaceHolder,
                    Err: cmpopts.AnyError,
                },
            },
            wantErr: true,
@@ -134,14 +134,14 @@ type loadReportingOptions struct {
// resource updates from an APIClient for a specific version.
type UpdateHandler interface {
    // NewListeners handles updates to xDS listener resources.
    NewListeners(map[string]ListenerUpdate, UpdateMetadata)
    NewListeners(map[string]ListenerUpdateErrTuple, UpdateMetadata)
    // NewRouteConfigs handles updates to xDS RouteConfiguration resources.
    NewRouteConfigs(map[string]RouteConfigUpdate, UpdateMetadata)
    NewRouteConfigs(map[string]RouteConfigUpdateErrTuple, UpdateMetadata)
    // NewClusters handles updates to xDS Cluster resources.
    NewClusters(map[string]ClusterUpdate, UpdateMetadata)
    NewClusters(map[string]ClusterUpdateErrTuple, UpdateMetadata)
    // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely
    // referred to as Endpoints) resources.
    NewEndpoints(map[string]EndpointsUpdate, UpdateMetadata)
    NewEndpoints(map[string]EndpointsUpdateErrTuple, UpdateMetadata)
    // NewConnectionError handles connection errors from the xDS stream. The
    // error will be reported to all the resource watchers.
    NewConnectionError(err error)
@@ -62,19 +62,11 @@ const (
var (
    cmpOpts = cmp.Options{
        cmpopts.EquateEmpty(),
        cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()),
        cmp.Comparer(func(a, b time.Time) bool { return true }),
        cmp.Comparer(func(x, y error) bool {
            if x == nil || y == nil {
                return x == nil && y == nil
            }
            return x.Error() == y.Error()
        }),
        protocmp.Transform(),
    }

    // When comparing NACK UpdateMetadata, we only care if error is nil, but not
    // the details in error.
    errPlaceHolder = fmt.Errorf("error whose details don't matter")
    cmpOptsIgnoreDetails = cmp.Options{
        cmp.Comparer(func(a, b time.Time) bool { return true }),
        cmp.Comparer(func(x, y error) bool {
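The options above swap a hand-rolled error comparer for go-cmp's error support, and cmpopts.AnyError then stands in for "any non-nil error" in expected values. A minimal standalone illustration (the result wrapper type is hypothetical):

package main

import (
    "errors"
    "fmt"

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
)

type result struct{ Err error }

func main() {
    got := result{Err: errors.New("lds nack error")}
    want := result{Err: cmpopts.AnyError}
    // With EquateErrors in effect, cmpopts.AnyError compares equal to any
    // non-nil error, so the assertion ignores the error text.
    fmt.Println(cmp.Equal(got, want, cmpopts.EquateErrors()))      // true
    fmt.Println(cmp.Equal(result{}, want, cmpopts.EquateErrors())) // false
}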
@@ -170,7 +162,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) {
    clusterUpdateCh := testutils.NewChannel()
    firstTime := true
    client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
        clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
        clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err})
        // Calls another watch inline, to ensure there's no deadlock.
        client.WatchCluster("another-random-name", func(ClusterUpdate, error) {})

@@ -184,13 +176,13 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) {
    }

    wantUpdate := ClusterUpdate{ClusterName: testEDSName}
    client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
    client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
    if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
        t.Fatal(err)
    }

    wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"}
    client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2}, UpdateMetadata{})
    client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, UpdateMetadata{})
    if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil {
        t.Fatal(err)
    }

@@ -201,15 +193,15 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want
    if err != nil {
        return fmt.Errorf("timeout when waiting for listener update: %v", err)
    }
    gotUpdate := u.(ldsUpdateErr)
    gotUpdate := u.(ListenerUpdateErrTuple)
    if wantErr != nil {
        if gotUpdate.err != wantErr {
            return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr)
        if gotUpdate.Err != wantErr {
            return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr)
        }
        return nil
    }
    if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) {
        return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate)
    if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate) {
        return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate)
    }
    return nil
}

@@ -219,15 +211,15 @@ func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, w
    if err != nil {
        return fmt.Errorf("timeout when waiting for route configuration update: %v", err)
    }
    gotUpdate := u.(rdsUpdateErr)
    gotUpdate := u.(RouteConfigUpdateErrTuple)
    if wantErr != nil {
        if gotUpdate.err != wantErr {
            return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr)
        if gotUpdate.Err != wantErr {
            return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr)
        }
        return nil
    }
    if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) {
        return fmt.Errorf("unexpected route config update: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate)
    if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate) {
        return fmt.Errorf("unexpected route config update: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate)
    }
    return nil
}

@@ -237,15 +229,15 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU
    if err != nil {
        return fmt.Errorf("timeout when waiting for cluster update: %v", err)
    }
    gotUpdate := u.(clusterUpdateErr)
    gotUpdate := u.(ClusterUpdateErrTuple)
    if wantErr != nil {
        if gotUpdate.err != wantErr {
            return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr)
        if gotUpdate.Err != wantErr {
            return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr)
        }
        return nil
    }
    if !cmp.Equal(gotUpdate.u, wantUpdate) {
        return fmt.Errorf("unexpected clusterUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate)
    if !cmp.Equal(gotUpdate.Update, wantUpdate) {
        return fmt.Errorf("unexpected clusterUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate)
    }
    return nil
}

@@ -255,15 +247,15 @@ func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wan
    if err != nil {
        return fmt.Errorf("timeout when waiting for endpoints update: %v", err)
    }
    gotUpdate := u.(endpointsUpdateErr)
    gotUpdate := u.(EndpointsUpdateErrTuple)
    if wantErr != nil {
        if gotUpdate.err != wantErr {
            return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr)
        if gotUpdate.Err != wantErr {
            return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr)
        }
        return nil
    }
    if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate, cmpopts.EquateEmpty()) {
        return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate)
    if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, cmpopts.EquateEmpty()) {
        return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate)
    }
    return nil
}
@@ -101,16 +101,16 @@ func (s) TestLDSConfigDump(t *testing.T) {
        t.Fatalf(err.Error())
    }

    update0 := make(map[string]xdsclient.ListenerUpdate)
    update0 := make(map[string]xdsclient.ListenerUpdateErrTuple)
    want0 := make(map[string]xdsclient.UpdateWithMD)
    for n, r := range listenerRaws {
        update0[n] = xdsclient.ListenerUpdate{Raw: r}
        update0[n] = xdsclient.ListenerUpdateErrTuple{Update: xdsclient.ListenerUpdate{Raw: r}}
        want0[n] = xdsclient.UpdateWithMD{
            MD: xdsclient.UpdateMetadata{Version: testVersion},
            MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion},
            Raw: r,
        }
    }
    updateHandler.NewListeners(update0, xdsclient.UpdateMetadata{Version: testVersion})
    updateHandler.NewListeners(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion})

    // Expect ACK.
    if err := compareDump(client.DumpLDS, testVersion, want0); err != nil {

@@ -120,10 +120,12 @@ func (s) TestLDSConfigDump(t *testing.T) {
    const nackVersion = "lds-version-nack"
    var nackErr = fmt.Errorf("lds nack error")
    updateHandler.NewListeners(
        map[string]xdsclient.ListenerUpdate{
            ldsTargets[0]: {},
        map[string]xdsclient.ListenerUpdateErrTuple{
            ldsTargets[0]: {Err: nackErr},
            ldsTargets[1]: {Update: xdsclient.ListenerUpdate{Raw: listenerRaws[ldsTargets[1]]}},
        },
        xdsclient.UpdateMetadata{
            Status: xdsclient.ServiceStatusNACKed,
            ErrState: &xdsclient.UpdateErrorMetadata{
                Version: nackVersion,
                Err: nackErr,

@@ -137,6 +139,7 @@ func (s) TestLDSConfigDump(t *testing.T) {
    // message, as well as the NACK error.
    wantDump[ldsTargets[0]] = xdsclient.UpdateWithMD{
        MD: xdsclient.UpdateMetadata{
            Status: xdsclient.ServiceStatusNACKed,
            Version: testVersion,
            ErrState: &xdsclient.UpdateErrorMetadata{
                Version: nackVersion,

@@ -147,7 +150,7 @@ func (s) TestLDSConfigDump(t *testing.T) {
    }

    wantDump[ldsTargets[1]] = xdsclient.UpdateWithMD{
        MD: xdsclient.UpdateMetadata{Version: testVersion},
        MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion},
        Raw: listenerRaws[ldsTargets[1]],
    }
    if err := compareDump(client.DumpLDS, nackVersion, wantDump); err != nil {

@@ -212,16 +215,16 @@ func (s) TestRDSConfigDump(t *testing.T) {
        t.Fatalf(err.Error())
    }

    update0 := make(map[string]xdsclient.RouteConfigUpdate)
    update0 := make(map[string]xdsclient.RouteConfigUpdateErrTuple)
    want0 := make(map[string]xdsclient.UpdateWithMD)
    for n, r := range routeRaws {
        update0[n] = xdsclient.RouteConfigUpdate{Raw: r}
        update0[n] = xdsclient.RouteConfigUpdateErrTuple{Update: xdsclient.RouteConfigUpdate{Raw: r}}
        want0[n] = xdsclient.UpdateWithMD{
            MD: xdsclient.UpdateMetadata{Version: testVersion},
            MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion},
            Raw: r,
        }
    }
    updateHandler.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Version: testVersion})
    updateHandler.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion})

    // Expect ACK.
    if err := compareDump(client.DumpRDS, testVersion, want0); err != nil {

@@ -231,10 +234,12 @@ func (s) TestRDSConfigDump(t *testing.T) {
    const nackVersion = "rds-version-nack"
    var nackErr = fmt.Errorf("rds nack error")
    updateHandler.NewRouteConfigs(
        map[string]xdsclient.RouteConfigUpdate{
            rdsTargets[0]: {},
        map[string]xdsclient.RouteConfigUpdateErrTuple{
            rdsTargets[0]: {Err: nackErr},
            rdsTargets[1]: {Update: xdsclient.RouteConfigUpdate{Raw: routeRaws[rdsTargets[1]]}},
        },
        xdsclient.UpdateMetadata{
            Status: xdsclient.ServiceStatusNACKed,
            ErrState: &xdsclient.UpdateErrorMetadata{
                Version: nackVersion,
                Err: nackErr,

@@ -248,6 +253,7 @@ func (s) TestRDSConfigDump(t *testing.T) {
    // message, as well as the NACK error.
    wantDump[rdsTargets[0]] = xdsclient.UpdateWithMD{
        MD: xdsclient.UpdateMetadata{
            Status: xdsclient.ServiceStatusNACKed,
            Version: testVersion,
            ErrState: &xdsclient.UpdateErrorMetadata{
                Version: nackVersion,

@@ -257,7 +263,7 @@ func (s) TestRDSConfigDump(t *testing.T) {
        Raw: routeRaws[rdsTargets[0]],
    }
    wantDump[rdsTargets[1]] = xdsclient.UpdateWithMD{
        MD: xdsclient.UpdateMetadata{Version: testVersion},
        MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion},
        Raw: routeRaws[rdsTargets[1]],
    }
    if err := compareDump(client.DumpRDS, nackVersion, wantDump); err != nil {

@@ -323,16 +329,16 @@ func (s) TestCDSConfigDump(t *testing.T) {
        t.Fatalf(err.Error())
    }

    update0 := make(map[string]xdsclient.ClusterUpdate)
    update0 := make(map[string]xdsclient.ClusterUpdateErrTuple)
    want0 := make(map[string]xdsclient.UpdateWithMD)
    for n, r := range clusterRaws {
        update0[n] = xdsclient.ClusterUpdate{Raw: r}
        update0[n] = xdsclient.ClusterUpdateErrTuple{Update: xdsclient.ClusterUpdate{Raw: r}}
        want0[n] = xdsclient.UpdateWithMD{
            MD: xdsclient.UpdateMetadata{Version: testVersion},
            MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion},
            Raw: r,
        }
    }
    updateHandler.NewClusters(update0, xdsclient.UpdateMetadata{Version: testVersion})
    updateHandler.NewClusters(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion})

    // Expect ACK.
    if err := compareDump(client.DumpCDS, testVersion, want0); err != nil {

@@ -342,10 +348,12 @@ func (s) TestCDSConfigDump(t *testing.T) {
    const nackVersion = "cds-version-nack"
    var nackErr = fmt.Errorf("cds nack error")
    updateHandler.NewClusters(
        map[string]xdsclient.ClusterUpdate{
            cdsTargets[0]: {},
        map[string]xdsclient.ClusterUpdateErrTuple{
            cdsTargets[0]: {Err: nackErr},
            cdsTargets[1]: {Update: xdsclient.ClusterUpdate{Raw: clusterRaws[cdsTargets[1]]}},
        },
        xdsclient.UpdateMetadata{
            Status: xdsclient.ServiceStatusNACKed,
            ErrState: &xdsclient.UpdateErrorMetadata{
                Version: nackVersion,
                Err: nackErr,

@@ -359,6 +367,7 @@ func (s) TestCDSConfigDump(t *testing.T) {
    // message, as well as the NACK error.
    wantDump[cdsTargets[0]] = xdsclient.UpdateWithMD{
        MD: xdsclient.UpdateMetadata{
            Status: xdsclient.ServiceStatusNACKed,
            Version: testVersion,
            ErrState: &xdsclient.UpdateErrorMetadata{
                Version: nackVersion,

@@ -368,7 +377,7 @@ func (s) TestCDSConfigDump(t *testing.T) {
        Raw: clusterRaws[cdsTargets[0]],
    }
    wantDump[cdsTargets[1]] = xdsclient.UpdateWithMD{
        MD: xdsclient.UpdateMetadata{Version: testVersion},
        MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion},
        Raw: clusterRaws[cdsTargets[1]],
    }
    if err := compareDump(client.DumpCDS, nackVersion, wantDump); err != nil {

@@ -420,16 +429,16 @@ func (s) TestEDSConfigDump(t *testing.T) {
        t.Fatalf(err.Error())
    }

    update0 := make(map[string]xdsclient.EndpointsUpdate)
    update0 := make(map[string]xdsclient.EndpointsUpdateErrTuple)
    want0 := make(map[string]xdsclient.UpdateWithMD)
    for n, r := range endpointRaws {
        update0[n] = xdsclient.EndpointsUpdate{Raw: r}
        update0[n] = xdsclient.EndpointsUpdateErrTuple{Update: xdsclient.EndpointsUpdate{Raw: r}}
        want0[n] = xdsclient.UpdateWithMD{
            MD: xdsclient.UpdateMetadata{Version: testVersion},
            MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion},
            Raw: r,
        }
    }
    updateHandler.NewEndpoints(update0, xdsclient.UpdateMetadata{Version: testVersion})
    updateHandler.NewEndpoints(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion})

    // Expect ACK.
    if err := compareDump(client.DumpEDS, testVersion, want0); err != nil {

@@ -439,10 +448,12 @@ func (s) TestEDSConfigDump(t *testing.T) {
    const nackVersion = "eds-version-nack"
    var nackErr = fmt.Errorf("eds nack error")
    updateHandler.NewEndpoints(
        map[string]xdsclient.EndpointsUpdate{
            edsTargets[0]: {},
        map[string]xdsclient.EndpointsUpdateErrTuple{
            edsTargets[0]: {Err: nackErr},
            edsTargets[1]: {Update: xdsclient.EndpointsUpdate{Raw: endpointRaws[edsTargets[1]]}},
        },
        xdsclient.UpdateMetadata{
            Status: xdsclient.ServiceStatusNACKed,
            ErrState: &xdsclient.UpdateErrorMetadata{
                Version: nackVersion,
                Err: nackErr,

@@ -456,6 +467,7 @@ func (s) TestEDSConfigDump(t *testing.T) {
    // message, as well as the NACK error.
    wantDump[edsTargets[0]] = xdsclient.UpdateWithMD{
        MD: xdsclient.UpdateMetadata{
            Status: xdsclient.ServiceStatusNACKed,
            Version: testVersion,
            ErrState: &xdsclient.UpdateErrorMetadata{
                Version: nackVersion,

@@ -465,7 +477,7 @@ func (s) TestEDSConfigDump(t *testing.T) {
        Raw: endpointRaws[edsTargets[0]],
    }
    wantDump[edsTargets[1]] = xdsclient.UpdateWithMD{
        MD: xdsclient.UpdateMetadata{Version: testVersion},
        MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion},
        Raw: endpointRaws[edsTargets[1]],
    }
    if err := compareDump(client.DumpEDS, nackVersion, wantDump); err != nil {
@@ -30,6 +30,7 @@ import (
    anypb "github.com/golang/protobuf/ptypes/any"
    wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    "google.golang.org/grpc/internal/testutils"
    "google.golang.org/grpc/xds/internal"
    "google.golang.org/grpc/xds/internal/version"

@@ -137,7 +138,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) {
    tests := []struct {
        name string
        resources []*anypb.Any
        wantUpdate map[string]EndpointsUpdate
        wantUpdate map[string]EndpointsUpdateErrTuple
        wantMD UpdateMetadata
        wantErr bool
    }{

@@ -149,7 +150,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) {
                Version: testVersion,
                ErrState: &UpdateErrorMetadata{
                    Version: testVersion,
                    Err: errPlaceHolder,
                    Err: cmpopts.AnyError,
                },
            },
            wantErr: true,

@@ -167,7 +168,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) {
                Version: testVersion,
                ErrState: &UpdateErrorMetadata{
                    Version: testVersion,
                    Err: errPlaceHolder,
                    Err: cmpopts.AnyError,
                },
            },
            wantErr: true,

@@ -180,13 +181,13 @@ func (s) TestUnmarshalEndpoints(t *testing.T) {
                clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil)
                return clab0.Build()
            }())},
            wantUpdate: map[string]EndpointsUpdate{"test": {}},
            wantUpdate: map[string]EndpointsUpdateErrTuple{"test": {Err: cmpopts.AnyError}},
            wantMD: UpdateMetadata{
                Status: ServiceStatusNACKed,
                Version: testVersion,
                ErrState: &UpdateErrorMetadata{
                    Version: testVersion,
                    Err: errPlaceHolder,
                    Err: cmpopts.AnyError,
                },
            },
            wantErr: true,

@@ -194,8 +195,8 @@ func (s) TestUnmarshalEndpoints(t *testing.T) {
        {
            name: "v3 endpoints",
            resources: []*anypb.Any{v3EndpointsAny},
            wantUpdate: map[string]EndpointsUpdate{
                "test": {
            wantUpdate: map[string]EndpointsUpdateErrTuple{
                "test": {Update: EndpointsUpdate{
                    Drops: nil,
                    Localities: []Locality{
                        {

@@ -220,7 +221,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) {
                        },
                    },
                    Raw: v3EndpointsAny,
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -239,8 +240,8 @@ func (s) TestUnmarshalEndpoints(t *testing.T) {
                return clab0.Build()
            }()),
            },
            wantUpdate: map[string]EndpointsUpdate{
                "test": {
            wantUpdate: map[string]EndpointsUpdateErrTuple{
                "test": {Update: EndpointsUpdate{
                    Drops: nil,
                    Localities: []Locality{
                        {

@@ -265,15 +266,15 @@ func (s) TestUnmarshalEndpoints(t *testing.T) {
                        },
                    },
                    Raw: v3EndpointsAny,
                },
                "bad": {},
                }},
                "bad": {Err: cmpopts.AnyError},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusNACKed,
                Version: testVersion,
                ErrState: &UpdateErrorMetadata{
                    Version: testVersion,
                    Err: errPlaceHolder,
                    Err: cmpopts.AnyError,
                },
            },
            wantErr: true,
@@ -29,6 +29,7 @@ import (
    "github.com/golang/protobuf/proto"
    spb "github.com/golang/protobuf/ptypes/struct"
    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    "google.golang.org/protobuf/types/known/durationpb"

    "google.golang.org/grpc/internal/testutils"

@@ -176,7 +177,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        Version: testVersion,
        ErrState: &UpdateErrorMetadata{
            Version: testVersion,
            Err: errPlaceHolder,
            Err: cmpopts.AnyError,
        },
    }
)

@@ -184,7 +185,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
    tests := []struct {
        name string
        resources []*anypb.Any
        wantUpdate map[string]ListenerUpdate
        wantUpdate map[string]ListenerUpdateErrTuple
        wantMD UpdateMetadata
        wantErr bool
    }{

@@ -214,7 +215,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                }(),
            },
            },
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },

@@ -226,7 +227,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                ApiListener: testutils.MarshalAny(&v2xdspb.Listener{}),
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },

@@ -242,7 +243,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                }),
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },

@@ -256,7 +257,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                }),
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },

@@ -279,7 +280,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                }),
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },

@@ -293,8 +294,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 with no filters",
            resources: []*anypb.Any{v3LisWithFilters()},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()},
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -322,15 +323,15 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                }),
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },
        {
            name: "v3 with custom filter",
            resources: []*anypb.Any{v3LisWithFilters(customFilter)},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{
                    RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second,
                    HTTPFilters: []HTTPFilter{
                        {

@@ -341,7 +342,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                        routerFilter,
                    },
                    Raw: v3LisWithFilters(customFilter),
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -351,8 +352,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 with custom filter in typed struct",
            resources: []*anypb.Any{v3LisWithFilters(typedStructFilter)},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{
                    RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second,
                    HTTPFilters: []HTTPFilter{
                        {

@@ -363,7 +364,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                        routerFilter,
                    },
                    Raw: v3LisWithFilters(typedStructFilter),
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -373,8 +374,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 with optional custom filter",
            resources: []*anypb.Any{v3LisWithFilters(customOptionalFilter)},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{
                    RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second,
                    HTTPFilters: []HTTPFilter{
                        {

@@ -385,7 +386,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                        routerFilter,
                    },
                    Raw: v3LisWithFilters(customOptionalFilter),
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -395,15 +396,15 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 with two filters with same name",
            resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter)},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },
        {
            name: "v3 with two filters - same type different name",
            resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter2)},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{
                    RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second,
                    HTTPFilters: []HTTPFilter{{
                        Name: "customFilter",

@@ -417,7 +418,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                        routerFilter,
                    },
                    Raw: v3LisWithFilters(customFilter, customFilter2),
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -427,20 +428,20 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 with server-only filter",
            resources: []*anypb.Any{v3LisWithFilters(serverOnlyCustomFilter)},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },
        {
            name: "v3 with optional server-only filter",
            resources: []*anypb.Any{v3LisWithFilters(serverOnlyOptionalCustomFilter)},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{
                    RouteConfigName: v3RouteConfigName,
                    MaxStreamDuration: time.Second,
                    Raw: v3LisWithFilters(serverOnlyOptionalCustomFilter),
                    HTTPFilters: routerFilterList,
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -450,8 +451,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 with client-only filter",
            resources: []*anypb.Any{v3LisWithFilters(clientOnlyCustomFilter)},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{
                    RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second,
                    HTTPFilters: []HTTPFilter{
                        {

@@ -461,7 +462,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                        },
                        routerFilter},
                    Raw: v3LisWithFilters(clientOnlyCustomFilter),
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -471,34 +472,34 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 with err filter",
            resources: []*anypb.Any{v3LisWithFilters(errFilter)},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },
        {
            name: "v3 with optional err filter",
            resources: []*anypb.Any{v3LisWithFilters(errOptionalFilter)},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },
        {
            name: "v3 with unknown filter",
            resources: []*anypb.Any{v3LisWithFilters(unknownFilter)},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: true,
        },
        {
            name: "v3 with unknown filter (optional)",
            resources: []*anypb.Any{v3LisWithFilters(unknownOptionalFilter)},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{
                    RouteConfigName: v3RouteConfigName,
                    MaxStreamDuration: time.Second,
                    HTTPFilters: routerFilterList,
                    Raw: v3LisWithFilters(unknownOptionalFilter),
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -508,8 +509,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v2 listener resource",
            resources: []*anypb.Any{v2Lis},
            wantUpdate: map[string]ListenerUpdate{
                v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis},
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -519,8 +520,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 listener resource",
            resources: []*anypb.Any{v3LisWithFilters()},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()},
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -530,8 +531,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "v3 listener with inline route configuration",
            resources: []*anypb.Any{v3LisWithInlineRoute},
            wantUpdate: map[string]ListenerUpdate{
                v3LDSTarget: {
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v3LDSTarget: {Update: ListenerUpdate{
                    InlineRouteConfig: &RouteConfigUpdate{
                        VirtualHosts: []*VirtualHost{{
                            Domains: []string{v3LDSTarget},

@@ -540,7 +541,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                    MaxStreamDuration: time.Second,
                    Raw: v3LisWithInlineRoute,
                    HTTPFilters: routerFilterList,
                },
                }},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -550,9 +551,9 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
        {
            name: "multiple listener resources",
            resources: []*anypb.Any{v2Lis, v3LisWithFilters()},
            wantUpdate: map[string]ListenerUpdate{
                v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis},
                v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList},
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}},
                v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}},
            },
            wantMD: UpdateMetadata{
                Status: ServiceStatusACKed,

@@ -573,10 +574,10 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) {
                }}),
                v3LisWithFilters(),
            },
            wantUpdate: map[string]ListenerUpdate{
                v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis},
                v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList},
                "bad": {},
            wantUpdate: map[string]ListenerUpdateErrTuple{
                v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}},
                v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}},
                "bad": {Err: cmpopts.AnyError},
            },
            wantMD: errMD,
            wantErr: true,

@@ -857,7 +858,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
        Version: testVersion,
        ErrState: &UpdateErrorMetadata{
            Version: testVersion,
            Err: errPlaceHolder,
            Err: cmpopts.AnyError,
        },
    }
)

@@ -865,7 +866,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
    tests := []struct {
        name string
        resources []*anypb.Any
        wantUpdate map[string]ListenerUpdate
        wantUpdate map[string]ListenerUpdateErrTuple
        wantMD UpdateMetadata
        wantErr string
    }{

@@ -877,7 +878,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                {Name: "listener-filter-1"},
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "unsupported field 'listener_filters'",
        },

@@ -887,14 +888,14 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                Name: v3LDSTarget,
                UseOriginalDst: &wrapperspb.BoolValue{Value: true},
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "unsupported field 'use_original_dst'",
        },
        {
            name: "no address field",
            resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{Name: v3LDSTarget})},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "no address field in LDS response",
        },

@@ -904,7 +905,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                Name: v3LDSTarget,
                Address: &v3corepb.Address{},
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "no socket_address field in LDS response",
        },

@@ -920,7 +921,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                },
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "no supported filter chains and no default filter chain",
        },

@@ -935,7 +936,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                },
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "missing HttpConnectionManager filter",
        },

@@ -957,7 +958,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                },
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "missing name field in filter",
        },

@@ -996,7 +997,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                },
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "duplicate filter name",
        },

@@ -1023,7 +1024,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                },
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "http filters list is empty",
        },

@@ -1051,7 +1052,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                },
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "is a terminal filter but it is not last in the filter chain",
        },

@@ -1079,7 +1080,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                },
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "is not a terminal filter",
        },

@@ -1100,7 +1101,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
                },
                },
            })},
            wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
            wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
            wantMD: errMD,
            wantErr: "unsupported config_type",

@@ -1124,7 +1125,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
||||
|
@ -1124,7 +1125,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "multiple filter chains with overlapping matching rules are defined",
|
||||
},
|
||||
|
@ -1147,7 +1148,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "unsupported network filter",
|
||||
},
|
||||
|
@ -1173,7 +1174,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "failed unmarshaling of network filter",
|
||||
},
|
||||
|
@ -1192,7 +1193,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "transport_socket field has unexpected name",
|
||||
},
|
||||
|
@ -1214,7 +1215,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "transport_socket field has unexpected typeURL",
|
||||
},
|
||||
|
@ -1239,7 +1240,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "failed to unmarshal DownstreamTlsContext in LDS response",
|
||||
},
|
||||
|
@ -1261,7 +1262,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext",
|
||||
},
|
||||
|
@ -1291,15 +1292,15 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "validation context contains unexpected type",
|
||||
},
|
||||
{
|
||||
name: "empty transport socket",
|
||||
resources: []*anypb.Any{listenerEmptyTransportSocket},
|
||||
wantUpdate: map[string]ListenerUpdate{
|
||||
v3LDSTarget: {
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{
|
||||
v3LDSTarget: {Update: ListenerUpdate{
|
||||
InboundListenerCfg: &InboundListenerConfig{
|
||||
Address: "0.0.0.0",
|
||||
Port: "9999",
|
||||
|
@ -1325,7 +1326,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Raw: listenerEmptyTransportSocket,
|
||||
},
|
||||
}},
|
||||
},
|
||||
wantMD: UpdateMetadata{
|
||||
Status: ServiceStatusACKed,
|
||||
|
@ -1358,7 +1359,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set",
|
||||
},
|
||||
|
@ -1388,7 +1389,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set",
|
||||
},
|
||||
|
@ -1412,15 +1413,15 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})},
|
||||
wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}},
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}},
|
||||
wantMD: errMD,
|
||||
wantErr: "security configuration on the server-side does not contain identity certificate provider instance name",
|
||||
},
|
||||
{
|
||||
name: "happy case with no validation context using deprecated fields",
|
||||
resources: []*anypb.Any{listenerNoValidationContextDeprecatedFields},
|
||||
wantUpdate: map[string]ListenerUpdate{
|
||||
v3LDSTarget: {
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{
|
||||
v3LDSTarget: {Update: ListenerUpdate{
|
||||
InboundListenerCfg: &InboundListenerConfig{
|
||||
Address: "0.0.0.0",
|
||||
Port: "9999",
|
||||
|
@ -1458,7 +1459,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Raw: listenerNoValidationContextDeprecatedFields,
|
||||
},
|
||||
}},
|
||||
},
|
||||
wantMD: UpdateMetadata{
|
||||
Status: ServiceStatusACKed,
|
||||
|
@ -1468,8 +1469,8 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
{
|
||||
name: "happy case with no validation context using new fields",
|
||||
resources: []*anypb.Any{listenerNoValidationContextNewFields},
|
||||
wantUpdate: map[string]ListenerUpdate{
|
||||
v3LDSTarget: {
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{
|
||||
v3LDSTarget: {Update: ListenerUpdate{
|
||||
InboundListenerCfg: &InboundListenerConfig{
|
||||
Address: "0.0.0.0",
|
||||
Port: "9999",
|
||||
|
@ -1507,7 +1508,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Raw: listenerNoValidationContextNewFields,
|
||||
},
|
||||
}},
|
||||
},
|
||||
wantMD: UpdateMetadata{
|
||||
Status: ServiceStatusACKed,
|
||||
|
@ -1517,8 +1518,8 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
{
|
||||
name: "happy case with validation context provider instance with deprecated fields",
|
||||
resources: []*anypb.Any{listenerWithValidationContextDeprecatedFields},
|
||||
wantUpdate: map[string]ListenerUpdate{
|
||||
v3LDSTarget: {
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{
|
||||
v3LDSTarget: {Update: ListenerUpdate{
|
||||
InboundListenerCfg: &InboundListenerConfig{
|
||||
Address: "0.0.0.0",
|
||||
Port: "9999",
|
||||
|
@ -1562,7 +1563,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Raw: listenerWithValidationContextDeprecatedFields,
|
||||
},
|
||||
}},
|
||||
},
|
||||
wantMD: UpdateMetadata{
|
||||
Status: ServiceStatusACKed,
|
||||
|
@ -1572,8 +1573,8 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
{
|
||||
name: "happy case with validation context provider instance with new fields",
|
||||
resources: []*anypb.Any{listenerWithValidationContextNewFields},
|
||||
wantUpdate: map[string]ListenerUpdate{
|
||||
v3LDSTarget: {
|
||||
wantUpdate: map[string]ListenerUpdateErrTuple{
|
||||
v3LDSTarget: {Update: ListenerUpdate{
|
||||
InboundListenerCfg: &InboundListenerConfig{
|
||||
Address: "0.0.0.0",
|
||||
Port: "9999",
|
||||
|
@ -1617,7 +1618,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Raw: listenerWithValidationContextNewFields,
|
||||
},
|
||||
}},
|
||||
},
|
||||
wantMD: UpdateMetadata{
|
||||
Status: ServiceStatusACKed,
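The recurring change in the hunks above is the move from plain update maps (map[string]ListenerUpdate) to error-tuple maps (map[string]ListenerUpdateErrTuple). A minimal sketch of what such a tuple presumably looks like, inferred only from the {Update: ...} and {Err: ...} composite literals in this diff; the real definitions live in the xdsclient package and may differ:

    // Hypothetical shape, inferred from the composite literals above. At most
    // one of the two fields is expected to be set: Update when the resource
    // passed validation, Err when that one resource was rejected.
    type ListenerUpdateErrTuple struct {
    	Update ListenerUpdate
    	Err    error
    }

The same pattern recurs below as RouteConfigUpdateErrTuple, ClusterUpdateErrTuple, and EndpointsUpdateErrTuple.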
@ -668,7 +668,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
	tests := []struct {
		name string
		resources []*anypb.Any
		wantUpdate map[string]RouteConfigUpdate
		wantUpdate map[string]RouteConfigUpdateErrTuple
		wantMD UpdateMetadata
		wantErr bool
	}{
@ -680,7 +680,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
				Version: testVersion,
				ErrState: &UpdateErrorMetadata{
					Version: testVersion,
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantErr: true,
@ -698,7 +698,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
				Version: testVersion,
				ErrState: &UpdateErrorMetadata{
					Version: testVersion,
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantErr: true,
@ -713,8 +713,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
		{
			name: "v2 routeConfig resource",
			resources: []*anypb.Any{v2RouteConfig},
			wantUpdate: map[string]RouteConfigUpdate{
				v2RouteConfigName: {
			wantUpdate: map[string]RouteConfigUpdateErrTuple{
				v2RouteConfigName: {Update: RouteConfigUpdate{
					VirtualHosts: []*VirtualHost{
						{
							Domains: []string{uninterestingDomain},
@ -730,7 +730,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
						},
					},
					Raw: v2RouteConfig,
				},
				}},
			},
			wantMD: UpdateMetadata{
				Status: ServiceStatusACKed,
@ -740,8 +740,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
		{
			name: "v3 routeConfig resource",
			resources: []*anypb.Any{v3RouteConfig},
			wantUpdate: map[string]RouteConfigUpdate{
				v3RouteConfigName: {
			wantUpdate: map[string]RouteConfigUpdateErrTuple{
				v3RouteConfigName: {Update: RouteConfigUpdate{
					VirtualHosts: []*VirtualHost{
						{
							Domains: []string{uninterestingDomain},
@ -757,7 +757,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
						},
					},
					Raw: v3RouteConfig,
				},
				}},
			},
			wantMD: UpdateMetadata{
				Status: ServiceStatusACKed,
@ -767,8 +767,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
		{
			name: "multiple routeConfig resources",
			resources: []*anypb.Any{v2RouteConfig, v3RouteConfig},
			wantUpdate: map[string]RouteConfigUpdate{
				v3RouteConfigName: {
			wantUpdate: map[string]RouteConfigUpdateErrTuple{
				v3RouteConfigName: {Update: RouteConfigUpdate{
					VirtualHosts: []*VirtualHost{
						{
							Domains: []string{uninterestingDomain},
@ -784,8 +784,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
						},
					},
					Raw: v3RouteConfig,
				},
				v2RouteConfigName: {
				}},
				v2RouteConfigName: {Update: RouteConfigUpdate{
					VirtualHosts: []*VirtualHost{
						{
							Domains: []string{uninterestingDomain},
@ -801,7 +801,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
						},
					},
					Raw: v2RouteConfig,
				},
				}},
			},
			wantMD: UpdateMetadata{
				Status: ServiceStatusACKed,
@ -822,8 +822,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
				}}}}}),
				v3RouteConfig,
			},
			wantUpdate: map[string]RouteConfigUpdate{
				v3RouteConfigName: {
			wantUpdate: map[string]RouteConfigUpdateErrTuple{
				v3RouteConfigName: {Update: RouteConfigUpdate{
					VirtualHosts: []*VirtualHost{
						{
							Domains: []string{uninterestingDomain},
@ -839,8 +839,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
						},
					},
					Raw: v3RouteConfig,
				},
				v2RouteConfigName: {
				}},
				v2RouteConfigName: {Update: RouteConfigUpdate{
					VirtualHosts: []*VirtualHost{
						{
							Domains: []string{uninterestingDomain},
@ -856,15 +856,15 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) {
						},
					},
					Raw: v2RouteConfig,
				},
				"bad": {},
				}},
				"bad": {Err: cmpopts.AnyError},
			},
			wantMD: UpdateMetadata{
				Status: ServiceStatusNACKed,
				Version: testVersion,
				ErrState: &UpdateErrorMetadata{
					Version: testVersion,
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantErr: true,
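The "multiple routeConfig resources" table above encodes the new NACK semantics: one invalid entry ("bad") flips the whole update to ServiceStatusNACKed, yet the valid v2 and v3 route configs still carry their parsed updates. A rough sketch of the per-resource validation loop this implies; every name here is illustrative, not the actual grpc-go implementation:

    // Hypothetical sketch: validate each resource independently so a single
    // bad entry cannot discard its valid siblings. parseRouteConfig is an
    // assumed helper returning the resource name, the parsed update, and a
    // per-resource validation error.
    func unmarshalRouteConfigs(resources []*anypb.Any) (map[string]RouteConfigUpdateErrTuple, error) {
    	ret := make(map[string]RouteConfigUpdateErrTuple)
    	var nErrs int
    	for _, r := range resources {
    		name, update, err := parseRouteConfig(r)
    		if err != nil {
    			ret[name] = RouteConfigUpdateErrTuple{Err: err} // record the failure per resource
    			nErrs++
    			continue
    		}
    		ret[name] = RouteConfigUpdateErrTuple{Update: update} // keep the valid resource
    	}
    	if nErrs > 0 {
    		return ret, fmt.Errorf("%d resource(s) failed validation", nErrs) // drives the NACK metadata
    	}
    	return ret, nil
    }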
@ -25,6 +25,7 @@ import (
	xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	anypb "github.com/golang/protobuf/ptypes/any"
	"github.com/google/go-cmp/cmp/cmpopts"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/xds/internal/version"
	"google.golang.org/grpc/xds/internal/xdsclient"
@ -100,7 +101,7 @@ func (s) TestCDSHandleResponse(t *testing.T) {
		name string
		cdsResponse *xdspb.DiscoveryResponse
		wantErr bool
		wantUpdate map[string]xdsclient.ClusterUpdate
		wantUpdate map[string]xdsclient.ClusterUpdateErrTuple
		wantUpdateMD xdsclient.UpdateMetadata
		wantUpdateErr bool
	}{
@ -113,7 +114,7 @@ func (s) TestCDSHandleResponse(t *testing.T) {
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -127,7 +128,7 @@ func (s) TestCDSHandleResponse(t *testing.T) {
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -148,8 +149,8 @@ func (s) TestCDSHandleResponse(t *testing.T) {
			name: "one-uninteresting-cluster",
			cdsResponse: goodCDSResponse2,
			wantErr: false,
			wantUpdate: map[string]xdsclient.ClusterUpdate{
				goodClusterName2: {ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2},
			wantUpdate: map[string]xdsclient.ClusterUpdateErrTuple{
				goodClusterName2: {Update: xdsclient.ClusterUpdate{ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -161,8 +162,8 @@ func (s) TestCDSHandleResponse(t *testing.T) {
			name: "one-good-cluster",
			cdsResponse: goodCDSResponse1,
			wantErr: false,
			wantUpdate: map[string]xdsclient.ClusterUpdate{
				goodClusterName1: {ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1},
			wantUpdate: map[string]xdsclient.ClusterUpdateErrTuple{
				goodClusterName1: {Update: xdsclient.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
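Note that in the NACK rows above wantUpdateErr stays false: the NACK is recorded in the metadata, not surfaced as a watcher-level failure. A hedged sketch of how such metadata might be assembled, consistent with the fields visible in this diff (illustrative helper, not an actual grpc-go function):

    // Hypothetical helper: on NACK, Version keeps the previously ACKed
    // version while ErrState records the rejected version and its error.
    func nackMetadata(ackedVersion, rejectedVersion string, err error) xdsclient.UpdateMetadata {
    	return xdsclient.UpdateMetadata{
    		Status:  xdsclient.ServiceStatusNACKed,
    		Version: ackedVersion,
    		ErrState: &xdsclient.UpdateErrorMetadata{
    			Version: rejectedVersion,
    			Err:     err,
    		},
    	}
    }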
@ -21,7 +21,6 @@ package v2
import (
	"context"
	"errors"
	"fmt"
	"testing"
	"time"

@ -285,9 +284,6 @@ var (
		},
		TypeUrl: version.V2RouteConfigURL,
	}
	// An place holder error. When comparing UpdateErrorMetadata, we only check
	// if error is nil, and don't compare error content.
	errPlaceHolder = fmt.Errorf("err place holder")
)

type watchHandleTestcase struct {
@ -305,7 +301,7 @@ type testUpdateReceiver struct {
	f func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata)
}

func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdate, metadata xdsclient.UpdateMetadata) {
func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdateErrTuple, metadata xdsclient.UpdateMetadata) {
	dd := make(map[string]interface{})
	for k, v := range d {
		dd[k] = v
@ -313,7 +309,7 @@ func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdate,
	t.newUpdate(xdsclient.ListenerResource, dd, metadata)
}

func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigUpdate, metadata xdsclient.UpdateMetadata) {
func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigUpdateErrTuple, metadata xdsclient.UpdateMetadata) {
	dd := make(map[string]interface{})
	for k, v := range d {
		dd[k] = v
@ -321,7 +317,7 @@ func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigU
	t.newUpdate(xdsclient.RouteConfigResource, dd, metadata)
}

func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdate, metadata xdsclient.UpdateMetadata) {
func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdateErrTuple, metadata xdsclient.UpdateMetadata) {
	dd := make(map[string]interface{})
	for k, v := range d {
		dd[k] = v
@ -329,7 +325,7 @@ func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdate, m
	t.newUpdate(xdsclient.ClusterResource, dd, metadata)
}

func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdate, metadata xdsclient.UpdateMetadata) {
func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdateErrTuple, metadata xdsclient.UpdateMetadata) {
	dd := make(map[string]interface{})
	for k, v := range d {
		dd[k] = v
@ -367,27 +363,27 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) {
		if rType == test.rType {
			switch test.rType {
			case xdsclient.ListenerResource:
				dd := make(map[string]xdsclient.ListenerUpdate)
				dd := make(map[string]xdsclient.ListenerUpdateErrTuple)
				for n, u := range d {
					dd[n] = u.(xdsclient.ListenerUpdate)
					dd[n] = u.(xdsclient.ListenerUpdateErrTuple)
				}
				gotUpdateCh.Send(updateErr{dd, md, nil})
			case xdsclient.RouteConfigResource:
				dd := make(map[string]xdsclient.RouteConfigUpdate)
				dd := make(map[string]xdsclient.RouteConfigUpdateErrTuple)
				for n, u := range d {
					dd[n] = u.(xdsclient.RouteConfigUpdate)
					dd[n] = u.(xdsclient.RouteConfigUpdateErrTuple)
				}
				gotUpdateCh.Send(updateErr{dd, md, nil})
			case xdsclient.ClusterResource:
				dd := make(map[string]xdsclient.ClusterUpdate)
				dd := make(map[string]xdsclient.ClusterUpdateErrTuple)
				for n, u := range d {
					dd[n] = u.(xdsclient.ClusterUpdate)
					dd[n] = u.(xdsclient.ClusterUpdateErrTuple)
				}
				gotUpdateCh.Send(updateErr{dd, md, nil})
			case xdsclient.EndpointsResource:
				dd := make(map[string]xdsclient.EndpointsUpdate)
				dd := make(map[string]xdsclient.EndpointsUpdateErrTuple)
				for n, u := range d {
					dd[n] = u.(xdsclient.EndpointsUpdate)
					dd[n] = u.(xdsclient.EndpointsUpdateErrTuple)
				}
				gotUpdateCh.Send(updateErr{dd, md, nil})
			}
@ -437,7 +433,7 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) {
		cmpopts.EquateEmpty(), protocmp.Transform(),
		cmpopts.IgnoreFields(xdsclient.UpdateMetadata{}, "Timestamp"),
		cmpopts.IgnoreFields(xdsclient.UpdateErrorMetadata{}, "Timestamp"),
		cmp.Comparer(func(x, y error) bool { return (x == nil) == (y == nil) }),
		cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()),
	}
	uErr, err := gotUpdateCh.Receive(ctx)
	if err == context.DeadlineExceeded {
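The comparer swap in the hunk just above is the mechanical heart of these test changes: the old cmp.Comparer only checked that both errors were nil or both non-nil, while cmp.FilterValues(..., cmpopts.EquateErrors()) matches errors through errors.Is, which is what makes the cmpopts.AnyError sentinel in the expected structs work. A standalone go-cmp illustration (plain go-cmp usage, not grpc-go code):

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/google/go-cmp/cmp"
    	"github.com/google/go-cmp/cmp/cmpopts"
    )

    type result struct{ Err error }

    func main() {
    	got := result{Err: errors.New("rds: resource failed validation")}
    	// cmpopts.AnyError compares equal to any non-nil error once
    	// cmpopts.EquateErrors is installed.
    	fmt.Println(cmp.Equal(got, result{Err: cmpopts.AnyError}, cmpopts.EquateErrors())) // true
    	// A nil error does not match AnyError.
    	fmt.Println(cmp.Equal(result{}, result{Err: cmpopts.AnyError}, cmpopts.EquateErrors())) // false
    }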
@ -668,7 +664,7 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) {

	if v, err := callbackCh.Receive(ctx); err != nil {
		t.Fatal("Timeout when expecting LDS update")
	} else if _, ok := v.(xdsclient.ListenerUpdate); !ok {
	} else if _, ok := v.(xdsclient.ListenerUpdateErrTuple); !ok {
		t.Fatalf("Expect an LDS update from watcher, got %v", v)
	}
}
@ -24,6 +24,7 @@ import (

	v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	anypb "github.com/golang/protobuf/ptypes/any"
	"github.com/google/go-cmp/cmp/cmpopts"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/xds/internal"
	xtestutils "google.golang.org/grpc/xds/internal/testutils"
@ -75,7 +76,7 @@ func (s) TestEDSHandleResponse(t *testing.T) {
		name string
		edsResponse *v2xdspb.DiscoveryResponse
		wantErr bool
		wantUpdate map[string]xdsclient.EndpointsUpdate
		wantUpdate map[string]xdsclient.EndpointsUpdateErrTuple
		wantUpdateMD xdsclient.UpdateMetadata
		wantUpdateErr bool
	}{
@ -88,7 +89,7 @@ func (s) TestEDSHandleResponse(t *testing.T) {
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -102,7 +103,7 @@ func (s) TestEDSHandleResponse(t *testing.T) {
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -112,8 +113,8 @@ func (s) TestEDSHandleResponse(t *testing.T) {
			name: "one-uninterestring-assignment",
			edsResponse: goodEDSResponse2,
			wantErr: false,
			wantUpdate: map[string]xdsclient.EndpointsUpdate{
				"not-goodEDSName": {
			wantUpdate: map[string]xdsclient.EndpointsUpdateErrTuple{
				"not-goodEDSName": {Update: xdsclient.EndpointsUpdate{
					Localities: []xdsclient.Locality{
						{
							Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}},
@ -123,7 +124,7 @@ func (s) TestEDSHandleResponse(t *testing.T) {
						},
					},
					Raw: marshaledGoodCLA2,
				},
				}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -135,8 +136,8 @@ func (s) TestEDSHandleResponse(t *testing.T) {
			name: "one-good-assignment",
			edsResponse: goodEDSResponse1,
			wantErr: false,
			wantUpdate: map[string]xdsclient.EndpointsUpdate{
				goodEDSName: {
			wantUpdate: map[string]xdsclient.EndpointsUpdateErrTuple{
				goodEDSName: {Update: xdsclient.EndpointsUpdate{
					Localities: []xdsclient.Locality{
						{
							Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}},
@ -152,7 +153,7 @@ func (s) TestEDSHandleResponse(t *testing.T) {
						},
					},
					Raw: marshaledGoodCLA1,
				},
				}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -23,6 +23,7 @@ import (
	"time"

	v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	"github.com/google/go-cmp/cmp/cmpopts"

	"google.golang.org/grpc/xds/internal/xdsclient"
)
@ -35,7 +36,7 @@ func (s) TestLDSHandleResponse(t *testing.T) {
		name string
		ldsResponse *v2xdspb.DiscoveryResponse
		wantErr bool
		wantUpdate map[string]xdsclient.ListenerUpdate
		wantUpdate map[string]xdsclient.ListenerUpdateErrTuple
		wantUpdateMD xdsclient.UpdateMetadata
		wantUpdateErr bool
	}{
@ -48,7 +49,7 @@ func (s) TestLDSHandleResponse(t *testing.T) {
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -62,7 +63,7 @@ func (s) TestLDSHandleResponse(t *testing.T) {
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -74,13 +75,13 @@ func (s) TestLDSHandleResponse(t *testing.T) {
			name: "no-apiListener-in-response",
			ldsResponse: noAPIListenerLDSResponse,
			wantErr: true,
			wantUpdate: map[string]xdsclient.ListenerUpdate{
				goodLDSTarget1: {},
			wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{
				goodLDSTarget1: {Err: cmpopts.AnyError},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -90,8 +91,8 @@ func (s) TestLDSHandleResponse(t *testing.T) {
			name: "one-good-listener",
			ldsResponse: goodLDSResponse1,
			wantErr: false,
			wantUpdate: map[string]xdsclient.ListenerUpdate{
				goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1},
			wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{
				goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -104,9 +105,9 @@ func (s) TestLDSHandleResponse(t *testing.T) {
			name: "multiple-good-listener",
			ldsResponse: ldsResponseWithMultipleResources,
			wantErr: false,
			wantUpdate: map[string]xdsclient.ListenerUpdate{
				goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1},
				goodLDSTarget2: {RouteConfigName: goodRouteName1, Raw: marshaledListener2},
			wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{
				goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}},
				goodLDSTarget2: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -120,14 +121,14 @@ func (s) TestLDSHandleResponse(t *testing.T) {
			name: "good-bad-ugly-listeners",
			ldsResponse: goodBadUglyLDSResponse,
			wantErr: true,
			wantUpdate: map[string]xdsclient.ListenerUpdate{
				goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1},
				goodLDSTarget2: {},
			wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{
				goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}},
				goodLDSTarget2: {Err: cmpopts.AnyError},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -137,8 +138,8 @@ func (s) TestLDSHandleResponse(t *testing.T) {
			name: "one-uninteresting-listener",
			ldsResponse: goodLDSResponse2,
			wantErr: false,
			wantUpdate: map[string]xdsclient.ListenerUpdate{
				goodLDSTarget2: {RouteConfigName: goodRouteName1, Raw: marshaledListener2},
			wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{
				goodLDSTarget2: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -24,6 +24,7 @@ import (
	"time"

	xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	"github.com/google/go-cmp/cmp/cmpopts"

	"google.golang.org/grpc/xds/internal/testutils/fakeserver"
	"google.golang.org/grpc/xds/internal/xdsclient"
@ -49,7 +50,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
		name string
		rdsResponse *xdspb.DiscoveryResponse
		wantErr bool
		wantUpdate map[string]xdsclient.RouteConfigUpdate
		wantUpdate map[string]xdsclient.RouteConfigUpdateErrTuple
		wantUpdateMD xdsclient.UpdateMetadata
		wantUpdateErr bool
	}{
@ -62,7 +63,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -76,7 +77,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusNACKed,
				ErrState: &xdsclient.UpdateErrorMetadata{
					Err: errPlaceHolder,
					Err: cmpopts.AnyError,
				},
			},
			wantUpdateErr: false,
@ -88,11 +89,11 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
			name: "no-virtual-hosts-in-response",
			rdsResponse: noVirtualHostsInRDSResponse,
			wantErr: false,
			wantUpdate: map[string]xdsclient.RouteConfigUpdate{
				goodRouteName1: {
			wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{
				goodRouteName1: {Update: xdsclient.RouteConfigUpdate{
					VirtualHosts: nil,
					Raw: marshaledNoVirtualHostsRouteConfig,
				},
				}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -104,8 +105,8 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
			name: "one-uninteresting-route-config",
			rdsResponse: goodRDSResponse2,
			wantErr: false,
			wantUpdate: map[string]xdsclient.RouteConfigUpdate{
				goodRouteName2: {
			wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{
				goodRouteName2: {Update: xdsclient.RouteConfigUpdate{
					VirtualHosts: []*xdsclient.VirtualHost{
						{
							Domains: []string{uninterestingDomain},
@ -122,7 +123,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
						},
					},
					Raw: marshaledGoodRouteConfig2,
				},
				}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -134,8 +135,8 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
			name: "one-good-route-config",
			rdsResponse: goodRDSResponse1,
			wantErr: false,
			wantUpdate: map[string]xdsclient.RouteConfigUpdate{
				goodRouteName1: {
			wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{
				goodRouteName1: {Update: xdsclient.RouteConfigUpdate{
					VirtualHosts: []*xdsclient.VirtualHost{
						{
							Domains: []string{uninterestingDomain},
@ -152,7 +153,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) {
						},
					},
					Raw: marshaledGoodRouteConfig1,
				},
				}},
			},
			wantUpdateMD: xdsclient.UpdateMetadata{
				Status: xdsclient.ServiceStatusACKed,
@ -28,11 +28,6 @@ import (
	"google.golang.org/grpc/internal/testutils"
)

type clusterUpdateErr struct {
	u ClusterUpdate
	err error
}

// TestClusterWatch covers the cases:
// - an update is received after a watch()
// - an update for another resource name
@ -57,21 +52,21 @@ func (s) TestClusterWatch(t *testing.T) {

	clusterUpdateCh := testutils.NewChannel()
	cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
		clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
		clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
	}

	wantUpdate := ClusterUpdate{ClusterName: testEDSName}
	client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
		t.Fatal(err)
	}

	// Another update, with an extra resource for a different resource name.
	client.NewClusters(map[string]ClusterUpdate{
		testCDSName: wantUpdate,
	client.NewClusters(map[string]ClusterUpdateErrTuple{
		testCDSName: {Update: wantUpdate},
		"randomName": {},
	}, UpdateMetadata{})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
@ -80,7 +75,7 @@ func (s) TestClusterWatch(t *testing.T) {

	// Cancel watch, and send update again.
	cancelWatch()
	client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
	if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
@ -115,7 +110,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {
		clusterUpdateCh := testutils.NewChannel()
		clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh)
		cancelLastWatch = client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
			clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
			clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err})
		})

		if i == 0 {
@ -128,7 +123,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {
	}

	wantUpdate := ClusterUpdate{ClusterName: testEDSName}
	client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	for i := 0; i < count; i++ {
		if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil {
			t.Fatal(err)
@ -137,7 +132,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {

	// Cancel the last watch, and send update again.
	cancelLastWatch()
	client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	for i := 0; i < count-1; i++ {
		if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil {
			t.Fatal(err)
@ -178,7 +173,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {
		clusterUpdateCh := testutils.NewChannel()
		clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh)
		client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) {
			clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
			clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err})
		})

		if i == 0 {
@ -193,7 +188,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {
	// Third watch for a different name.
	clusterUpdateCh2 := testutils.NewChannel()
	client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) {
		clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err})
		clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
@ -201,9 +196,9 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {

	wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"}
	wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"}
	client.NewClusters(map[string]ClusterUpdate{
		testCDSName + "1": wantUpdate1,
		testCDSName + "2": wantUpdate2,
	client.NewClusters(map[string]ClusterUpdateErrTuple{
		testCDSName + "1": {Update: wantUpdate1},
		testCDSName + "2": {Update: wantUpdate2},
	}, UpdateMetadata{})

	for i := 0; i < count; i++ {
@ -238,15 +233,15 @@ func (s) TestClusterWatchAfterCache(t *testing.T) {

	clusterUpdateCh := testutils.NewChannel()
	client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
		clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
		clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
	}

	wantUpdate := ClusterUpdate{ClusterName: testEDSName}
	client.NewClusters(map[string]ClusterUpdate{
		testCDSName: wantUpdate,
	client.NewClusters(map[string]ClusterUpdateErrTuple{
		testCDSName: {Update: wantUpdate},
	}, UpdateMetadata{})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
		t.Fatal(err)
@ -255,7 +250,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) {
	// Another watch for the resource in cache.
	clusterUpdateCh2 := testutils.NewChannel()
	client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
		clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err})
		clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err})
	})
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
@ -299,7 +294,7 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) {

	clusterUpdateCh := testutils.NewChannel()
	client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) {
		clusterUpdateCh.Send(clusterUpdateErr{u: u, err: err})
		clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: u, Err: err})
	})
	if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
@ -309,9 +304,9 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) {
	if err != nil {
		t.Fatalf("timeout when waiting for cluster update: %v", err)
	}
	gotUpdate := u.(clusterUpdateErr)
	if gotUpdate.err == nil || !cmp.Equal(gotUpdate.u, ClusterUpdate{}) {
		t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, nil)", gotUpdate.u, gotUpdate.err)
	gotUpdate := u.(ClusterUpdateErrTuple)
	if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, ClusterUpdate{}) {
		t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err)
	}
}

@ -338,15 +333,15 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) {

	clusterUpdateCh := testutils.NewChannel()
	client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) {
		clusterUpdateCh.Send(clusterUpdateErr{u: u, err: err})
		clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: u, Err: err})
	})
	if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
	}

	wantUpdate := ClusterUpdate{ClusterName: testEDSName}
	client.NewClusters(map[string]ClusterUpdate{
		testCDSName: wantUpdate,
	client.NewClusters(map[string]ClusterUpdateErrTuple{
		testCDSName: {Update: wantUpdate},
	}, UpdateMetadata{})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
		t.Fatal(err)
@ -386,7 +381,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) {

	clusterUpdateCh1 := testutils.NewChannel()
	client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) {
		clusterUpdateCh1.Send(clusterUpdateErr{u: update, err: err})
		clusterUpdateCh1.Send(ClusterUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
@ -395,7 +390,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) {
	// Another watch for a different name.
	clusterUpdateCh2 := testutils.NewChannel()
	client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) {
		clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err})
		clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
@ -403,9 +398,9 @@ func (s) TestClusterResourceRemoved(t *testing.T) {

	wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"}
	wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"}
	client.NewClusters(map[string]ClusterUpdate{
		testCDSName + "1": wantUpdate1,
		testCDSName + "2": wantUpdate2,
	client.NewClusters(map[string]ClusterUpdateErrTuple{
		testCDSName + "1": {Update: wantUpdate1},
		testCDSName + "2": {Update: wantUpdate2},
	}, UpdateMetadata{})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1, nil); err != nil {
		t.Fatal(err)
@ -415,10 +410,10 @@ func (s) TestClusterResourceRemoved(t *testing.T) {
	}

	// Send another update to remove resource 1.
	client.NewClusters(map[string]ClusterUpdate{testCDSName + "2": wantUpdate2}, UpdateMetadata{})
	client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{})

	// Watcher 1 should get an error.
	if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(clusterUpdateErr).err) != ErrorTypeResourceNotFound {
	if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ClusterUpdateErrTuple).Err) != ErrorTypeResourceNotFound {
		t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err)
	}

@ -428,7 +423,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) {
	}

	// Send one more update without resource 1.
	client.NewClusters(map[string]ClusterUpdate{testCDSName + "2": wantUpdate2}, UpdateMetadata{})
	client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{})

	// Watcher 1 should not see an update.
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
@ -465,7 +460,7 @@ func (s) TestClusterWatchNACKError(t *testing.T) {

	clusterUpdateCh := testutils.NewChannel()
	cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {
		clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})
		clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err})
	})
	defer cancelWatch()
	if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
@ -473,8 +468,70 @@ func (s) TestClusterWatchNACKError(t *testing.T) {
	}

	wantError := fmt.Errorf("testing error")
	client.NewClusters(map[string]ClusterUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh, ClusterUpdate{}, nil); err != nil {
	client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {
		Err: wantError,
	}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh, ClusterUpdate{}, wantError); err != nil {
		t.Fatal(err)
	}
}

// TestClusterWatchPartialValid covers the case that a response contains both
// valid and invalid resources. This response will be NACK'ed by the xdsclient.
// But the watchers with valid resources should receive the update, those with
// invalid resources should receive an error.
func (s) TestClusterWatchPartialValid(t *testing.T) {
	apiClientCh, cleanup := overrideNewAPIClient()
	defer cleanup()

	client, err := newWithConfig(clientOpts(testXDSServer, false))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	c, err := apiClientCh.Receive(ctx)
	if err != nil {
		t.Fatalf("timeout when waiting for API client to be created: %v", err)
	}
	apiClient := c.(*testAPIClient)

	const badResourceName = "bad-resource"
	updateChs := make(map[string]*testutils.Channel)

	for _, name := range []string{testCDSName, badResourceName} {
		clusterUpdateCh := testutils.NewChannel()
		cancelWatch := client.WatchCluster(name, func(update ClusterUpdate, err error) {
			clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err})
		})
		defer func() {
			cancelWatch()
			if _, err := apiClient.removeWatches[ClusterResource].Receive(ctx); err != nil {
				t.Fatalf("want watch to be canceled, got err: %v", err)
			}
		}()
		if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {
			t.Fatalf("want new watch to start, got error %v", err)
		}
		updateChs[name] = clusterUpdateCh
	}

	wantError := fmt.Errorf("testing error")
	wantError2 := fmt.Errorf("individual error")
	client.NewClusters(map[string]ClusterUpdateErrTuple{
		testCDSName: {Update: ClusterUpdate{ClusterName: testEDSName}},
		badResourceName: {Err: wantError2},
	}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})

	// The valid resource should be sent to the watcher.
	if err := verifyClusterUpdate(ctx, updateChs[testCDSName], ClusterUpdate{ClusterName: testEDSName}, nil); err != nil {
		t.Fatal(err)
	}

	// The failed watcher should receive an error.
	if err := verifyClusterUpdate(ctx, updateChs[badResourceName], ClusterUpdate{}, wantError2); err != nil {
		t.Fatal(err)
	}
}
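TestClusterWatchPartialValid above pins down the watcher-facing half of this commit: after a partially invalid response, watchers of valid resources still get their updates while the watcher of the rejected resource gets that resource's own error, even though the response as a whole is NACKed. A hypothetical sketch of the dispatch this behavior implies; field and method names are illustrative, not the actual client internals:

    // Hypothetical per-resource dispatch: deliver tuple.Err only to the
    // watchers of the rejected resource, and tuple.Update everywhere else.
    func (c *clientImpl) newClusters(updates map[string]ClusterUpdateErrTuple) {
    	for name, tuple := range updates {
    		watchers := c.clusterWatchers[name] // assumed watcher registry
    		if tuple.Err != nil {
    			for _, w := range watchers {
    				w(ClusterUpdate{}, tuple.Err) // rejected resource: error only
    			}
    			continue
    		}
    		c.clusterCache[name] = tuple.Update // valid resource stays cached even on NACK
    		for _, w := range watchers {
    			w(tuple.Update, nil)
    		}
    	}
    }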
@ -46,11 +46,6 @@ var (
	}
)

type endpointsUpdateErr struct {
	u EndpointsUpdate
	err error
}

// TestEndpointsWatch covers the cases:
// - an update is received after a watch()
// - an update for another resource name (which doesn't trigger callback)
@ -75,20 +70,20 @@ func (s) TestEndpointsWatch(t *testing.T) {

	endpointsUpdateCh := testutils.NewChannel()
	cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
		endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
		endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
	}

	wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}}
	client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil {
		t.Fatal(err)
	}

	// Another update for a different resource name.
	client.NewEndpoints(map[string]EndpointsUpdate{"randomName": {}}, UpdateMetadata{})
	client.NewEndpoints(map[string]EndpointsUpdateErrTuple{"randomName": {}}, UpdateMetadata{})
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
	if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
@ -97,7 +92,7 @@ func (s) TestEndpointsWatch(t *testing.T) {

	// Cancel watch, and send update again.
	cancelWatch()
	client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
	if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
@ -134,7 +129,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) {
		endpointsUpdateCh := testutils.NewChannel()
		endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh)
		cancelLastWatch = client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
			endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
			endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
		})

		if i == 0 {
@ -147,7 +142,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) {
	}

	wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}}
	client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	for i := 0; i < count; i++ {
		if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil {
			t.Fatal(err)
@ -156,7 +151,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) {

	// Cancel the last watch, and send update again.
	cancelLastWatch()
	client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	for i := 0; i < count-1; i++ {
		if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil {
			t.Fatal(err)
@ -197,7 +192,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) {
		endpointsUpdateCh := testutils.NewChannel()
		endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh)
		client.WatchEndpoints(testCDSName+"1", func(update EndpointsUpdate, err error) {
			endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
			endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
		})

		if i == 0 {
@ -212,7 +207,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) {
	// Third watch for a different name.
	endpointsUpdateCh2 := testutils.NewChannel()
	client.WatchEndpoints(testCDSName+"2", func(update EndpointsUpdate, err error) {
		endpointsUpdateCh2.Send(endpointsUpdateErr{u: update, err: err})
		endpointsUpdateCh2.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
@ -220,9 +215,9 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) {

	wantUpdate1 := EndpointsUpdate{Localities: []Locality{testLocalities[0]}}
	wantUpdate2 := EndpointsUpdate{Localities: []Locality{testLocalities[1]}}
	client.NewEndpoints(map[string]EndpointsUpdate{
		testCDSName + "1": wantUpdate1,
		testCDSName + "2": wantUpdate2,
	client.NewEndpoints(map[string]EndpointsUpdateErrTuple{
		testCDSName + "1": {Update: wantUpdate1},
		testCDSName + "2": {Update: wantUpdate2},
	}, UpdateMetadata{})

	for i := 0; i < count; i++ {
@ -257,14 +252,14 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) {

	endpointsUpdateCh := testutils.NewChannel()
	client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
		endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
		endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
	}

	wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}}
	client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{})
	client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{})
	if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil {
		t.Fatal(err)
	}
@ -272,7 +267,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) {
	// Another watch for the resource in cache.
	endpointsUpdateCh2 := testutils.NewChannel()
	client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
		endpointsUpdateCh2.Send(endpointsUpdateErr{u: update, err: err})
		endpointsUpdateCh2.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
	})
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
@ -316,7 +311,7 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) {

	endpointsUpdateCh := testutils.NewChannel()
	client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
		endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
		endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
	})
	if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil {
		t.Fatalf("want new watch to start, got error %v", err)
@ -326,9 +321,9 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) {
	if err != nil {
		t.Fatalf("timeout when waiting for endpoints update: %v", err)
	}
	gotUpdate := u.(endpointsUpdateErr)
	if gotUpdate.err == nil || !cmp.Equal(gotUpdate.u, EndpointsUpdate{}) {
		t.Fatalf("unexpected endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.u, gotUpdate.err)
	gotUpdate := u.(EndpointsUpdateErrTuple)
	if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, EndpointsUpdate{}) {
		t.Fatalf("unexpected endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err)
	}
}

@ -354,7 +349,7 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) {

	endpointsUpdateCh := testutils.NewChannel()
	cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) {
		endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err})
		endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
	})
	defer cancelWatch()
	if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil {
@ -362,8 +357,68 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) {
	}

	wantError := fmt.Errorf("testing error")
	client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
	client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
	if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, EndpointsUpdate{}, wantError); err != nil {
		t.Fatal(err)
	}
}

// TestEndpointsWatchPartialValid covers the case that a response contains both
// valid and invalid resources. This response will be NACK'ed by the xdsclient.
// But the watchers with valid resources should receive the update, those with
// invalid resources should receive an error.
func (s) TestEndpointsWatchPartialValid(t *testing.T) {
apiClientCh, cleanup := overrideNewAPIClient()
defer cleanup()

client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()

ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testAPIClient)

const badResourceName = "bad-resource"
updateChs := make(map[string]*testutils.Channel)

for _, name := range []string{testCDSName, badResourceName} {
endpointsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchEndpoints(name, func(update EndpointsUpdate, err error) {
endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err})
})
defer func() {
cancelWatch()
if _, err := apiClient.removeWatches[EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want watch to be canceled, got err: %v", err)
}
}()
if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
updateChs[name] = endpointsUpdateCh
}

wantError := fmt.Errorf("testing error")
wantError2 := fmt.Errorf("individual error")
client.NewEndpoints(map[string]EndpointsUpdateErrTuple{
testCDSName: {Update: EndpointsUpdate{Localities: []Locality{testLocalities[0]}}},
badResourceName: {Err: wantError2},
}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})

// The valid resource should be sent to the watcher.
if err := verifyEndpointsUpdate(ctx, updateChs[testCDSName], EndpointsUpdate{Localities: []Locality{testLocalities[0]}}, nil); err != nil {
t.Fatal(err)
}

// The failed watcher should receive an error.
if err := verifyEndpointsUpdate(ctx, updateChs[badResourceName], EndpointsUpdate{}, wantError2); err != nil {
t.Fatal(err)
}
}
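
The test above pins down the behavior this commit introduces: a response containing any invalid resource is NACK'ed as a whole, yet each resource's own result still reaches its watchers. A minimal, self-contained Go sketch of that fan-out idea (hypothetical names throughout; this is not the xdsclient implementation):

package main

import (
	"errors"
	"fmt"
)

// updateErrTuple mirrors the {Update, Err} pairs used above: for any given
// resource exactly one of the two fields is meaningful.
type updateErrTuple struct {
	update string // stand-in for EndpointsUpdate and friends
	err    error
}

// fanOut delivers each resource's own result to its watcher. Valid resources
// get their update even though the response as a whole was NACK'ed.
func fanOut(results map[string]updateErrTuple, watchers map[string]func(string, error)) {
	for name, cb := range watchers {
		if tuple, ok := results[name]; ok {
			cb(tuple.update, tuple.err)
		}
	}
}

func main() {
	watchers := map[string]func(string, error){
		"good-resource": func(u string, err error) { fmt.Println("good-resource:", u, err) },
		"bad-resource":  func(u string, err error) { fmt.Println("bad-resource:", u, err) },
	}
	fanOut(map[string]updateErrTuple{
		"good-resource": {update: "localities[0]"},
		"bad-resource":  {err: errors.New("individual error")},
	}, watchers)
}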
@ -26,11 +26,6 @@ import (
"google.golang.org/grpc/internal/testutils"
)

type ldsUpdateErr struct {
u ListenerUpdate
err error
}

// TestLDSWatch covers the cases:
// - an update is received after a watch()
// - an update for another resource name

@ -55,21 +50,21 @@ func (s) TestLDSWatch(t *testing.T) {

ldsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) {
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}

wantUpdate := ListenerUpdate{RouteConfigName: testRDSName}
client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{})
client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}

// Another update, with an extra resource for a different resource name.
client.NewListeners(map[string]ListenerUpdate{
testLDSName: wantUpdate,
client.NewListeners(map[string]ListenerUpdateErrTuple{
testLDSName: {Update: wantUpdate},
"randomName": {},
}, UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil {

@ -78,7 +73,7 @@ func (s) TestLDSWatch(t *testing.T) {

// Cancel watch, and send update again.
cancelWatch()
client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{})
client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {

@ -116,7 +111,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) {
ldsUpdateCh := testutils.NewChannel()
ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh)
cancelLastWatch = client.WatchListener(testLDSName, func(update ListenerUpdate, err error) {
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})

if i == 0 {

@ -129,7 +124,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) {
}

wantUpdate := ListenerUpdate{RouteConfigName: testRDSName}
client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{})
client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil {
t.Fatal(err)

@ -138,7 +133,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) {

// Cancel the last watch, and send update again.
cancelLastWatch()
client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{})
client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{})
for i := 0; i < count-1; i++ {
if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil {
t.Fatal(err)

@ -180,7 +175,7 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) {
ldsUpdateCh := testutils.NewChannel()
ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh)
client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) {
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})

if i == 0 {

@ -195,7 +190,7 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) {
// Third watch for a different name.
ldsUpdateCh2 := testutils.NewChannel()
client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) {
ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)

@ -203,9 +198,9 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) {

wantUpdate1 := ListenerUpdate{RouteConfigName: testRDSName + "1"}
wantUpdate2 := ListenerUpdate{RouteConfigName: testRDSName + "2"}
client.NewListeners(map[string]ListenerUpdate{
testLDSName + "1": wantUpdate1,
testLDSName + "2": wantUpdate2,
client.NewListeners(map[string]ListenerUpdateErrTuple{
testLDSName + "1": {Update: wantUpdate1},
testLDSName + "2": {Update: wantUpdate2},
}, UpdateMetadata{})

for i := 0; i < count; i++ {

@ -240,14 +235,14 @@ func (s) TestLDSWatchAfterCache(t *testing.T) {

ldsUpdateCh := testutils.NewChannel()
client.WatchListener(testLDSName, func(update ListenerUpdate, err error) {
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}

wantUpdate := ListenerUpdate{RouteConfigName: testRDSName}
client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{})
client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}

@ -255,7 +250,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) {
// Another watch for the resource in cache.
ldsUpdateCh2 := testutils.NewChannel()
client.WatchListener(testLDSName, func(update ListenerUpdate, err error) {
ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()

@ -302,7 +297,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) {

ldsUpdateCh1 := testutils.NewChannel()
client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) {
ldsUpdateCh1.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh1.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)

@ -310,7 +305,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) {
// Another watch for a different name.
ldsUpdateCh2 := testutils.NewChannel()
client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) {
ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)

@ -318,9 +313,9 @@ func (s) TestLDSResourceRemoved(t *testing.T) {

wantUpdate1 := ListenerUpdate{RouteConfigName: testEDSName + "1"}
wantUpdate2 := ListenerUpdate{RouteConfigName: testEDSName + "2"}
client.NewListeners(map[string]ListenerUpdate{
testLDSName + "1": wantUpdate1,
testLDSName + "2": wantUpdate2,
client.NewListeners(map[string]ListenerUpdateErrTuple{
testLDSName + "1": {Update: wantUpdate1},
testLDSName + "2": {Update: wantUpdate2},
}, UpdateMetadata{})
if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1, nil); err != nil {
t.Fatal(err)

@ -330,10 +325,10 @@ func (s) TestLDSResourceRemoved(t *testing.T) {
}

// Send another update to remove resource 1.
client.NewListeners(map[string]ListenerUpdate{testLDSName + "2": wantUpdate2}, UpdateMetadata{})
client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{})

// Watcher 1 should get an error.
if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ldsUpdateErr).err) != ErrorTypeResourceNotFound {
if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ListenerUpdateErrTuple).Err) != ErrorTypeResourceNotFound {
t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err)
}

@ -343,7 +338,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) {
}

// Send one more update without resource 1.
client.NewListeners(map[string]ListenerUpdate{testLDSName + "2": wantUpdate2}, UpdateMetadata{})
client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{})

// Watcher 1 should not see an update.
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)

@ -380,7 +375,7 @@ func (s) TestListenerWatchNACKError(t *testing.T) {

ldsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) {
ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})
defer cancelWatch()
if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil {

@ -388,8 +383,68 @@ func (s) TestListenerWatchNACKError(t *testing.T) {
}

wantError := fmt.Errorf("testing error")
client.NewListeners(map[string]ListenerUpdate{testLDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
if err := verifyListenerUpdate(ctx, ldsUpdateCh, ListenerUpdate{}, wantError); err != nil {
t.Fatal(err)
}
}

// TestListenerWatchPartialValid covers the case that a response contains both
// valid and invalid resources. This response will be NACK'ed by the xdsclient.
// But the watchers with valid resources should receive the update, those with
// invalid resources should receive an error.
func (s) TestListenerWatchPartialValid(t *testing.T) {
apiClientCh, cleanup := overrideNewAPIClient()
defer cleanup()

client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()

ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testAPIClient)

const badResourceName = "bad-resource"
updateChs := make(map[string]*testutils.Channel)

for _, name := range []string{testLDSName, badResourceName} {
ldsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchListener(name, func(update ListenerUpdate, err error) {
ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err})
})
defer func() {
cancelWatch()
if _, err := apiClient.removeWatches[ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want watch to be canceled, got err: %v", err)
}
}()
if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
updateChs[name] = ldsUpdateCh
}

wantError := fmt.Errorf("testing error")
wantError2 := fmt.Errorf("individual error")
client.NewListeners(map[string]ListenerUpdateErrTuple{
testLDSName: {Update: ListenerUpdate{RouteConfigName: testEDSName}},
badResourceName: {Err: wantError2},
}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})

// The valid resource should be sent to the watcher.
if err := verifyListenerUpdate(ctx, updateChs[testLDSName], ListenerUpdate{RouteConfigName: testEDSName}, nil); err != nil {
t.Fatal(err)
}

// The failed watcher should receive an error.
if err := verifyListenerUpdate(ctx, updateChs[badResourceName], ListenerUpdate{}, wantError2); err != nil {
t.Fatal(err)
}
}
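
Every watcher callback in these tests packages its two arguments into a single tuple before sending it on a testutils.Channel, which lets one helper assert on the update and the error together. The verify*Update helpers referenced above live elsewhere in the package; a hedged sketch of their likely shape (error comparison by identity is an assumption, based on the tests sending the exact error values end to end):

import (
	"context"
	"fmt"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/grpc/internal/testutils"
)

// verifyListenerTuple pops one tuple off the channel and checks both fields.
func verifyListenerTuple(ctx context.Context, ch *testutils.Channel, want ListenerUpdateErrTuple) error {
	v, err := ch.Receive(ctx)
	if err != nil {
		return fmt.Errorf("timeout when waiting for a listener update: %v", err)
	}
	got, ok := v.(ListenerUpdateErrTuple)
	if !ok {
		return fmt.Errorf("received %T on channel, want ListenerUpdateErrTuple", v)
	}
	// The tests send the exact error value through the pipeline, so comparing
	// by identity suffices; the update itself is compared structurally.
	if got.Err != want.Err || !cmp.Equal(got.Update, want.Update) {
		return fmt.Errorf("got %+v, want %+v", got, want)
	}
	return nil
}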
@ -28,11 +28,6 @@ import (
"google.golang.org/grpc/internal/testutils"
)

type rdsUpdateErr struct {
u RouteConfigUpdate
err error
}

// TestRDSWatch covers the cases:
// - an update is received after a watch()
// - an update for another resource name (which doesn't trigger callback)

@ -57,7 +52,7 @@ func (s) TestRDSWatch(t *testing.T) {

rdsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) {
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)

@ -71,13 +66,13 @@ func (s) TestRDSWatch(t *testing.T) {
},
},
}
client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{})
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{})
if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}

// Another update for a different resource name.
client.NewRouteConfigs(map[string]RouteConfigUpdate{"randomName": {}}, UpdateMetadata{})
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{"randomName": {}}, UpdateMetadata{})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {

@ -86,7 +81,7 @@ func (s) TestRDSWatch(t *testing.T) {

// Cancel watch, and send update again.
cancelWatch()
client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{})
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{})
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()
if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {

@ -123,7 +118,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) {
rdsUpdateCh := testutils.NewChannel()
rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh)
cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) {
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
})

if i == 0 {

@ -143,7 +138,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) {
},
},
}
client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{})
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{})
for i := 0; i < count; i++ {
if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil {
t.Fatal(err)

@ -152,7 +147,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) {

// Cancel the last watch, and send update again.
cancelLastWatch()
client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{})
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{})
for i := 0; i < count-1; i++ {
if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil {
t.Fatal(err)

@ -193,7 +188,7 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) {
rdsUpdateCh := testutils.NewChannel()
rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh)
client.WatchRouteConfig(testRDSName+"1", func(update RouteConfigUpdate, err error) {
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
})

if i == 0 {

@ -208,7 +203,7 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) {
// Third watch for a different name.
rdsUpdateCh2 := testutils.NewChannel()
client.WatchRouteConfig(testRDSName+"2", func(update RouteConfigUpdate, err error) {
rdsUpdateCh2.Send(rdsUpdateErr{u: update, err: err})
rdsUpdateCh2.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)

@ -230,9 +225,9 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) {
},
},
}
client.NewRouteConfigs(map[string]RouteConfigUpdate{
testRDSName + "1": wantUpdate1,
testRDSName + "2": wantUpdate2,
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{
testRDSName + "1": {Update: wantUpdate1},
testRDSName + "2": {Update: wantUpdate2},
}, UpdateMetadata{})

for i := 0; i < count; i++ {

@ -267,7 +262,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) {

rdsUpdateCh := testutils.NewChannel()
client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) {
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
})
if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)

@ -281,7 +276,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) {
},
},
}
client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{})
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{})
if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil {
t.Fatal(err)
}

@ -289,7 +284,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) {
// Another watch for the resource in cache.
rdsUpdateCh2 := testutils.NewChannel()
client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) {
rdsUpdateCh2.Send(rdsUpdateErr{u: update, err: err})
rdsUpdateCh2.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
})
sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
defer sCancel()

@ -298,7 +293,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) {
}

// New watch should receive the update.
if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, RouteConfigUpdateErrTuple{wantUpdate, nil}, cmp.AllowUnexported(RouteConfigUpdateErrTuple{})) {
t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err)
}

@ -332,7 +327,7 @@ func (s) TestRouteWatchNACKError(t *testing.T) {

rdsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchRouteConfig(testCDSName, func(update RouteConfigUpdate, err error) {
rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
})
defer cancelWatch()
if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil {

@ -340,8 +335,74 @@ func (s) TestRouteWatchNACKError(t *testing.T) {
}

wantError := fmt.Errorf("testing error")
client.NewRouteConfigs(map[string]RouteConfigUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testCDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, RouteConfigUpdate{}, wantError); err != nil {
t.Fatal(err)
}
}

// TestRouteWatchPartialValid covers the case that a response contains both
// valid and invalid resources. This response will be NACK'ed by the xdsclient.
// But the watchers with valid resources should receive the update, those with
// invalid resources should receive an error.
func (s) TestRouteWatchPartialValid(t *testing.T) {
apiClientCh, cleanup := overrideNewAPIClient()
defer cleanup()

client, err := newWithConfig(clientOpts(testXDSServer, false))
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()

ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
c, err := apiClientCh.Receive(ctx)
if err != nil {
t.Fatalf("timeout when waiting for API client to be created: %v", err)
}
apiClient := c.(*testAPIClient)

const badResourceName = "bad-resource"
updateChs := make(map[string]*testutils.Channel)

for _, name := range []string{testRDSName, badResourceName} {
rdsUpdateCh := testutils.NewChannel()
cancelWatch := client.WatchRouteConfig(name, func(update RouteConfigUpdate, err error) {
rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
})
defer func() {
cancelWatch()
if _, err := apiClient.removeWatches[RouteConfigResource].Receive(ctx); err != nil {
t.Fatalf("want watch to be canceled, got err: %v", err)
}
}()
if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil {
t.Fatalf("want new watch to start, got error %v", err)
}
updateChs[name] = rdsUpdateCh
}

wantError := fmt.Errorf("testing error")
wantError2 := fmt.Errorf("individual error")
client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{
testRDSName: {Update: RouteConfigUpdate{VirtualHosts: []*VirtualHost{{
Domains: []string{testLDSName},
Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}},
}}}},
badResourceName: {Err: wantError2},
}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})

// The valid resource should be sent to the watcher.
if err := verifyRouteConfigUpdate(ctx, updateChs[testRDSName], RouteConfigUpdate{VirtualHosts: []*VirtualHost{{
Domains: []string{testLDSName},
Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}},
}}}, nil); err != nil {
t.Fatal(err)
}

// The failed watcher should receive an error.
if err := verifyRouteConfigUpdate(ctx, updateChs[badResourceName], RouteConfigUpdate{}, wantError2); err != nil {
t.Fatal(err)
}
}

@ -58,8 +58,8 @@ const transportSocketName = "envoy.transport_sockets.tls"
// UnmarshalListener processes resources received in an LDS response, validates
// them, and transforms them into a native struct which contains only fields we
// are interested in.
func UnmarshalListener(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdate, UpdateMetadata, error) {
update := make(map[string]ListenerUpdate)
func UnmarshalListener(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) {
update := make(map[string]ListenerUpdateErrTuple)
md, err := processAllResources(version, resources, logger, update)
return update, md, err
}
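
This signature change is the crux of the commit: UnmarshalListener (and its RDS/CDS/EDS siblings below) now returns a tuple map rather than a map of bare updates, so a caller can keep the valid entries even when the response as a whole is NACK'ed. A rough consumption sketch, not taken from the client code (`version`, `resources`, and `logger` assumed in scope):

// updates maps resource name -> {Update, Err}; md carries the ACK/NACK
// metadata, and err is non-nil when the response as a whole is rejected.
updates, md, err := UnmarshalListener(version, resources, logger)
if err != nil {
	// The response will be NACK'ed, but any valid entries in updates
	// remain usable and can still be cached.
}
for name, tuple := range updates {
	if tuple.Err != nil {
		// Invalid resource: surface tuple.Err to this resource's watchers.
		continue
	}
	// Valid resource: deliver tuple.Update to this resource's watchers.
	_ = name
}
_ = md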

@ -296,8 +296,8 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err
// validates them, and transforms them into a native struct which contains only
// fields we are interested in. The provided hostname determines the route
// configuration resources of interest.
func UnmarshalRouteConfig(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdate, UpdateMetadata, error) {
update := make(map[string]RouteConfigUpdate)
func UnmarshalRouteConfig(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) {
update := make(map[string]RouteConfigUpdateErrTuple)
md, err := processAllResources(version, resources, logger, update)
return update, md, err
}

@ -631,8 +631,8 @@ func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logg
// UnmarshalCluster processes resources received in a CDS response, validates
// them, and transforms them into a native struct which contains only fields we
// are interested in.
func UnmarshalCluster(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdate, UpdateMetadata, error) {
update := make(map[string]ClusterUpdate)
func UnmarshalCluster(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) {
update := make(map[string]ClusterUpdateErrTuple)
md, err := processAllResources(version, resources, logger, update)
return update, md, err
}

@ -995,8 +995,8 @@ func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 {
// UnmarshalEndpoints processes resources received in an EDS response,
// validates them, and transforms them into a native struct which contains only
// fields we are interested in.
func UnmarshalEndpoints(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdate, UpdateMetadata, error) {
update := make(map[string]EndpointsUpdate)
func UnmarshalEndpoints(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) {
update := make(map[string]EndpointsUpdateErrTuple)
md, err := processAllResources(version, resources, logger, update)
return update, md, err
}

@ -1090,9 +1090,45 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate,
return ret, nil
}

// ListenerUpdateErrTuple is a tuple with the update and error. It contains the
// results from unmarshal functions. It's used to pass unmarshal results of
// multiple resources together, e.g. in maps like `map[string]{Update,error}`.
type ListenerUpdateErrTuple struct {
Update ListenerUpdate
Err error
}

// RouteConfigUpdateErrTuple is a tuple with the update and error. It contains
// the results from unmarshal functions. It's used to pass unmarshal results of
// multiple resources together, e.g. in maps like `map[string]{Update,error}`.
type RouteConfigUpdateErrTuple struct {
Update RouteConfigUpdate
Err error
}

// ClusterUpdateErrTuple is a tuple with the update and error. It contains the
// results from unmarshal functions. It's used to pass unmarshal results of
// multiple resources together, e.g. in maps like `map[string]{Update,error}`.
type ClusterUpdateErrTuple struct {
Update ClusterUpdate
Err error
}

// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the
// results from unmarshal functions. It's used to pass unmarshal results of
// multiple resources together, e.g. in maps like `map[string]{Update,error}`.
type EndpointsUpdateErrTuple struct {
Update EndpointsUpdate
Err error
}
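
The four tuple types are deliberately parallel: one {Update, Err} pair per resource kind, with exactly one field populated per entry. Building a mixed map, the way the partial-valid tests above do (illustrative names only; `errors` from the standard library):

updates := map[string]EndpointsUpdateErrTuple{
	// A valid resource carries only Update...
	"good-endpoints": {Update: EndpointsUpdate{}},
	// ...while an invalid one carries only Err.
	"bad-endpoints": {Err: errors.New("individual error")},
}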

// processAllResources unmarshals and validates the resources, populates the
// provided ret (a map), and returns metadata and error.
//
// After this function, the ret map will be populated with both valid and
// invalid updates. Invalid resources will have an entry with the key as the
// resource name, value as an empty update.
//
// The type of the resource is determined by the type of ret. E.g.
// map[string]ListenerUpdate means this is for LDS.
func processAllResources(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger, ret interface{}) (UpdateMetadata, error) {

@ -1106,10 +1142,10 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog

for _, r := range resources {
switch ret2 := ret.(type) {
case map[string]ListenerUpdate:
case map[string]ListenerUpdateErrTuple:
name, update, err := unmarshalListenerResource(r, logger)
if err == nil {
ret2[name] = update
ret2[name] = ListenerUpdateErrTuple{Update: update}
continue
}
if name == "" {

@ -1119,11 +1155,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog
perResourceErrors[name] = err
// Add place holder in the map so we know this resource name was in
// the response.
ret2[name] = ListenerUpdate{}
case map[string]RouteConfigUpdate:
ret2[name] = ListenerUpdateErrTuple{Err: err}
case map[string]RouteConfigUpdateErrTuple:
name, update, err := unmarshalRouteConfigResource(r, logger)
if err == nil {
ret2[name] = update
ret2[name] = RouteConfigUpdateErrTuple{Update: update}
continue
}
if name == "" {

@ -1133,11 +1169,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog
perResourceErrors[name] = err
// Add place holder in the map so we know this resource name was in
// the response.
ret2[name] = RouteConfigUpdate{}
case map[string]ClusterUpdate:
ret2[name] = RouteConfigUpdateErrTuple{Err: err}
case map[string]ClusterUpdateErrTuple:
name, update, err := unmarshalClusterResource(r, logger)
if err == nil {
ret2[name] = update
ret2[name] = ClusterUpdateErrTuple{Update: update}
continue
}
if name == "" {

@ -1147,11 +1183,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog
perResourceErrors[name] = err
// Add place holder in the map so we know this resource name was in
// the response.
ret2[name] = ClusterUpdate{}
case map[string]EndpointsUpdate:
ret2[name] = ClusterUpdateErrTuple{Err: err}
case map[string]EndpointsUpdateErrTuple:
name, update, err := unmarshalEndpointsResource(r, logger)
if err == nil {
ret2[name] = update
ret2[name] = EndpointsUpdateErrTuple{Update: update}
continue
}
if name == "" {

@ -1161,7 +1197,7 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog
perResourceErrors[name] = err
// Add place holder in the map so we know this resource name was in
// the response.
ret2[name] = EndpointsUpdate{}
ret2[name] = EndpointsUpdateErrTuple{Err: err}
}
}
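
Each case above repeats the same contract: on a per-resource unmarshal failure the map still gets an entry, now carrying Err instead of the old empty update, so callers can tell an invalid resource apart from one that was absent from the response. Condensed for a single resource kind (a hypothetical helper, not a function from this file):

// recordListenerResult writes the result for one listener resource. An entry
// is written even on failure so downstream code can distinguish "present but
// invalid" from "missing from the response entirely".
func recordListenerResult(ret map[string]ListenerUpdateErrTuple, name string, update ListenerUpdate, err error) {
	if err != nil {
		ret[name] = ListenerUpdateErrTuple{Err: err}
		return
	}
	ret[name] = ListenerUpdateErrTuple{Update: update}
}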