mirror of https://github.com/grpc/grpc-go.git

commit 4cd551eca7 (parent 17e2cbe887)

xds: split routing balancer into config selector + cluster manager (#4083)
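In rough terms, the new flow is: a per-RPC config selector (not part of this diff) resolves the route and records the chosen cluster on the RPC's context, and the new xds_cluster_manager LB policy reads that value in its picker and delegates the pick to the matching child policy. Below is a minimal sketch of the context half, using the SetPickedCluster helper introduced in this commit; the wrapper function and cluster name are illustrative only, and the import assumes the caller lives inside gRPC's own xds code, since the package is internal:

    package example

    import (
        "context"

        "google.golang.org/grpc/xds/internal/balancer/clustermanager"
    )

    // tagRPCWithCluster is a hypothetical helper: a config selector would do
    // something like this for each RPC after matching its route, so that the
    // cluster manager's picker can later read the cluster name back out of
    // the context and hand the pick to that cluster's child balancer.
    func tagRPCWithCluster(ctx context.Context, cluster string) context.Context {
        return clustermanager.SetPickedCluster(ctx, cluster)
    }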
@@ -21,7 +21,7 @@ package balancer

 import (
     _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer"    // Register the CDS balancer
+    _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer
     _ "google.golang.org/grpc/xds/internal/balancer/edsbalancer"    // Register the EDS balancer
     _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer
-    _ "google.golang.org/grpc/xds/internal/balancer/xdsrouting"     // Register the xds_routing balancer
 )
@@ -16,7 +16,7 @@
  *
  */

-package xdsrouting
+package clustermanager

 import (
     "fmt"
@@ -47,8 +47,6 @@ type balancerStateAggregator struct {
     logger *grpclog.PrefixLogger

     mu sync.Mutex
-    // routes, one for each matcher.
-    routes []route
     // If started is false, no updates should be sent to the parent cc. A closed
     // sub-balancer could still send pickers to this aggregator. This makes sure
     // that no updates will be forwarded to parent when the whole balancer group
@@ -71,29 +69,29 @@ func newBalancerStateAggregator(cc balancer.ClientConn, logger *grpclog.PrefixLo

 // Start starts the aggregator. It can be called after Close to restart the
 // aggretator.
-func (rbsa *balancerStateAggregator) start() {
-    rbsa.mu.Lock()
-    defer rbsa.mu.Unlock()
-    rbsa.started = true
+func (bsa *balancerStateAggregator) start() {
+    bsa.mu.Lock()
+    defer bsa.mu.Unlock()
+    bsa.started = true
 }

 // Close closes the aggregator. When the aggregator is closed, it won't call
 // parent ClientConn to update balancer state.
-func (rbsa *balancerStateAggregator) close() {
-    rbsa.mu.Lock()
-    defer rbsa.mu.Unlock()
-    rbsa.started = false
-    rbsa.clearStates()
+func (bsa *balancerStateAggregator) close() {
+    bsa.mu.Lock()
+    defer bsa.mu.Unlock()
+    bsa.started = false
+    bsa.clearStates()
 }

 // add adds a sub-balancer state with weight. It adds a place holder, and waits
 // for the real sub-balancer to update state.
 //
-// This is called when there's a new action.
-func (rbsa *balancerStateAggregator) add(id string) {
-    rbsa.mu.Lock()
-    defer rbsa.mu.Unlock()
-    rbsa.idToPickerState[id] = &subBalancerState{
+// This is called when there's a new child.
+func (bsa *balancerStateAggregator) add(id string) {
+    bsa.mu.Lock()
+    defer bsa.mu.Unlock()
+    bsa.idToPickerState[id] = &subBalancerState{
         // Start everything in CONNECTING, so if one of the sub-balancers
         // reports TransientFailure, the RPCs will still wait for the other
         // sub-balancers.
@@ -108,35 +106,26 @@ func (rbsa *balancerStateAggregator) add(id string) {
 // remove removes the sub-balancer state. Future updates from this sub-balancer,
 // if any, will be ignored.
 //
-// This is called when an action is removed.
-func (rbsa *balancerStateAggregator) remove(id string) {
-    rbsa.mu.Lock()
-    defer rbsa.mu.Unlock()
-    if _, ok := rbsa.idToPickerState[id]; !ok {
+// This is called when a child is removed.
+func (bsa *balancerStateAggregator) remove(id string) {
+    bsa.mu.Lock()
+    defer bsa.mu.Unlock()
+    if _, ok := bsa.idToPickerState[id]; !ok {
         return
     }
     // Remove id and picker from picker map. This also results in future updates
     // for this ID to be ignored.
-    delete(rbsa.idToPickerState, id)
-}
-
-// updateRoutes updates the routes. Note that it doesn't trigger an update to
-// the parent ClientConn. The caller should decide when it's necessary, and call
-// buildAndUpdate.
-func (rbsa *balancerStateAggregator) updateRoutes(newRoutes []route) {
-    rbsa.mu.Lock()
-    defer rbsa.mu.Unlock()
-    rbsa.routes = newRoutes
+    delete(bsa.idToPickerState, id)
 }

 // UpdateState is called to report a balancer state change from sub-balancer.
 // It's usually called by the balancer group.
 //
 // It calls parent ClientConn's UpdateState with the new aggregated state.
-func (rbsa *balancerStateAggregator) UpdateState(id string, state balancer.State) {
-    rbsa.mu.Lock()
-    defer rbsa.mu.Unlock()
-    pickerSt, ok := rbsa.idToPickerState[id]
+func (bsa *balancerStateAggregator) UpdateState(id string, state balancer.State) {
+    bsa.mu.Lock()
+    defer bsa.mu.Unlock()
+    pickerSt, ok := bsa.idToPickerState[id]
     if !ok {
         // All state starts with an entry in pickStateMap. If ID is not in map,
         // it's either removed, or never existed.
@@ -151,18 +140,18 @@ func (rbsa *balancerStateAggregator) UpdateState(id string, state balancer.State
     }
     pickerSt.state = state

-    if !rbsa.started {
+    if !bsa.started {
         return
     }
-    rbsa.cc.UpdateState(rbsa.build())
+    bsa.cc.UpdateState(bsa.build())
 }

 // clearState Reset everything to init state (Connecting) but keep the entry in
 // map (to keep the weight).
 //
-// Caller must hold rbsa.mu.
-func (rbsa *balancerStateAggregator) clearStates() {
-    for _, pState := range rbsa.idToPickerState {
+// Caller must hold bsa.mu.
+func (bsa *balancerStateAggregator) clearStates() {
+    for _, pState := range bsa.idToPickerState {
         pState.state = balancer.State{
             ConnectivityState: connectivity.Connecting,
             Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
@@ -173,19 +162,19 @@ func (rbsa *balancerStateAggregator) clearStates() {

 // buildAndUpdate combines the sub-state from each sub-balancer into one state,
 // and update it to parent ClientConn.
-func (rbsa *balancerStateAggregator) buildAndUpdate() {
-    rbsa.mu.Lock()
-    defer rbsa.mu.Unlock()
-    if !rbsa.started {
+func (bsa *balancerStateAggregator) buildAndUpdate() {
+    bsa.mu.Lock()
+    defer bsa.mu.Unlock()
+    if !bsa.started {
         return
     }
-    rbsa.cc.UpdateState(rbsa.build())
+    bsa.cc.UpdateState(bsa.build())
 }

-// build combines sub-states into one. The picker will do routing pick.
+// build combines sub-states into one. The picker will do a child pick.
 //
-// Caller must hold rbsa.mu.
-func (rbsa *balancerStateAggregator) build() balancer.State {
+// Caller must hold bsa.mu.
+func (bsa *balancerStateAggregator) build() balancer.State {
     // TODO: the majority of this function (and UpdateState) is exactly the same
     // as weighted_target's state aggregator. Try to make a general utility
     // function/struct to handle the logic.
@@ -195,7 +184,7 @@ func (rbsa *balancerStateAggregator) build() balancer.State {
     // function to calculate the aggregated connectivity state as in this
     // function.
     var readyN, connectingN int
-    for _, ps := range rbsa.idToPickerState {
+    for _, ps := range bsa.idToPickerState {
         switch ps.stateToAggregate {
         case connectivity.Ready:
             readyN++
@@ -214,13 +203,13 @@ func (rbsa *balancerStateAggregator) build() balancer.State {
     }

     // The picker's return error might not be consistent with the
-    // aggregatedState. Because for routing, we want to always build picker with
-    // all sub-pickers (not even ready sub-pickers), so even if the overall
-    // state is Ready, pick for certain RPCs can behave like Connecting or
-    // TransientFailure.
-    rbsa.logger.Infof("Child pickers with routes: %s, actions: %+v", rbsa.routes, rbsa.idToPickerState)
+    // aggregatedState. Because for this LB policy, we want to always build
+    // picker with all sub-pickers (not only ready sub-pickers), so even if the
+    // overall state is Ready, pick for certain RPCs can behave like Connecting
+    // or TransientFailure.
+    bsa.logger.Infof("Child pickers: %+v", bsa.idToPickerState)
     return balancer.State{
         ConnectivityState: aggregatedState,
-        Picker:            newPickerGroup(rbsa.routes, rbsa.idToPickerState),
+        Picker:            newPickerGroup(bsa.idToPickerState),
     }
 }
@@ -0,0 +1,143 @@ (new file)
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package clustermanager implements the cluster manager LB policy for xds.
package clustermanager

import (
    "encoding/json"
    "fmt"

    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/grpclog"
    internalgrpclog "google.golang.org/grpc/internal/grpclog"
    "google.golang.org/grpc/internal/hierarchy"
    "google.golang.org/grpc/resolver"
    "google.golang.org/grpc/serviceconfig"
    "google.golang.org/grpc/xds/internal/balancer/balancergroup"
)

const balancerName = "xds_cluster_manager_experimental"

func init() {
    balancer.Register(builder{})
}

type builder struct{}

func (builder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
    b := &bal{}
    b.logger = prefixLogger(b)
    b.stateAggregator = newBalancerStateAggregator(cc, b.logger)
    b.stateAggregator.start()
    b.bg = balancergroup.New(cc, b.stateAggregator, nil, b.logger)
    b.bg.Start()
    b.logger.Infof("Created")
    return b
}

func (builder) Name() string {
    return balancerName
}

func (builder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    return parseConfig(c)
}

type bal struct {
    logger *internalgrpclog.PrefixLogger

    // TODO: make this package not dependent on xds specific code. Same as for
    // weighted target balancer.
    bg              *balancergroup.BalancerGroup
    stateAggregator *balancerStateAggregator

    children map[string]childConfig
}

func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) {
    update := false
    addressesSplit := hierarchy.Group(s.ResolverState.Addresses)

    // Remove sub-pickers and sub-balancers that are not in the new cluster list.
    for name := range b.children {
        if _, ok := newConfig.Children[name]; !ok {
            b.stateAggregator.remove(name)
            b.bg.Remove(name)
            update = true
        }
    }

    // For sub-balancers in the new cluster list,
    // - add to balancer group if it's new,
    // - forward the address/balancer config update.
    for name, newT := range newConfig.Children {
        if _, ok := b.children[name]; !ok {
            // If this is a new sub-balancer, add it to the picker map.
            b.stateAggregator.add(name)
            // Then add to the balancer group.
            b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name))
        }
        // TODO: handle error? How to aggregate errors and return?
        _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{
            ResolverState: resolver.State{
                Addresses:     addressesSplit[name],
                ServiceConfig: s.ResolverState.ServiceConfig,
                Attributes:    s.ResolverState.Attributes,
            },
            BalancerConfig: newT.ChildPolicy.Config,
        })
    }

    b.children = newConfig.Children
    if update {
        b.stateAggregator.buildAndUpdate()
    }
}

func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error {
    newConfig, ok := s.BalancerConfig.(*lbConfig)
    if !ok {
        return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
    }
    b.logger.Infof("update with config %+v, resolver state %+v", s.BalancerConfig, s.ResolverState)

    b.updateChildren(s, newConfig)
    return nil
}

func (b *bal) ResolverError(err error) {
    b.bg.ResolverError(err)
}

func (b *bal) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
    b.bg.UpdateSubConnState(sc, state)
}

func (b *bal) Close() {
    b.stateAggregator.close()
    b.bg.Close()
}

const prefix = "[xds-cluster-manager-lb %p] "

var logger = grpclog.Component("xds")

func prefixLogger(p *bal) *internalgrpclog.PrefixLogger {
    return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p))
}
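A note on how updateChildren above splits addresses among its children: each address is expected to carry a hierarchy path attribute whose first element names the child, and hierarchy.Group buckets addresses by that element so each child balancer only sees its own endpoints. A small sketch under that assumption (the addresses and cluster names are made up, and like the example earlier it assumes code living inside gRPC's xds tree where these internal packages are importable):

    package example

    import (
        "google.golang.org/grpc/internal/hierarchy"
        "google.golang.org/grpc/resolver"
    )

    // splitByCluster tags two addresses with their cluster's hierarchy path and
    // groups them the same way the cluster manager does before forwarding each
    // bucket to the corresponding child balancer.
    func splitByCluster() map[string][]resolver.Address {
        addrs := []resolver.Address{
            hierarchy.Set(resolver.Address{Addr: "10.0.0.1:8080"}, []string{"cds:cluster_1"}),
            hierarchy.Set(resolver.Address{Addr: "10.0.0.2:8080"}, []string{"cds:cluster_2"}),
        }
        return hierarchy.Group(addrs) // keys: "cds:cluster_1" and "cds:cluster_2"
    }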
@@ -16,7 +16,7 @@
  *
  */

-package xdsrouting
+package clustermanager

 import (
     "context"
@@ -27,11 +27,12 @@ import (
     "github.com/google/go-cmp/cmp"
     "google.golang.org/grpc/balancer"
     "google.golang.org/grpc/balancer/roundrobin"
+    "google.golang.org/grpc/codes"
     "google.golang.org/grpc/connectivity"
     "google.golang.org/grpc/internal/grpctest"
     "google.golang.org/grpc/internal/hierarchy"
-    "google.golang.org/grpc/metadata"
     "google.golang.org/grpc/resolver"
+    "google.golang.org/grpc/status"
     "google.golang.org/grpc/xds/internal/balancer/balancergroup"
     "google.golang.org/grpc/xds/internal/testutils"
 )
@@ -94,7 +95,7 @@ func init() {
     for i := 0; i < testBackendAddrsCount; i++ {
         testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i))
     }
-    rtBuilder = balancer.Get(xdsRoutingName)
+    rtBuilder = balancer.Get(balancerName)
     rtParser = rtBuilder.(balancer.ConfigParser)

     balancer.Register(&ignoreAttrsRRBuilder{balancer.Get(roundrobin.Name)})
@@ -106,7 +107,7 @@ func testPick(t *testing.T, p balancer.Picker, info balancer.PickInfo, wantSC ba
     t.Helper()
     for i := 0; i < 5; i++ {
         gotSCSt, err := p.Pick(info)
-        if err != wantErr {
+        if fmt.Sprint(err) != fmt.Sprint(wantErr) {
             t.Fatalf("picker.Pick(%+v), got error %v, want %v", info, err, wantErr)
         }
         if !cmp.Equal(gotSCSt.SubConn, wantSC, cmp.AllowUnexported(testutils.TestSubConn{})) {
@@ -115,19 +116,15 @@ func testPick(t *testing.T, p balancer.Picker, info balancer.PickInfo, wantSC ba
         }
     }
 }

-func TestRouting(t *testing.T) {
+func TestClusterPicks(t *testing.T) {
     cc := testutils.NewTestClientConn(t)
     rtb := rtBuilder.Build(cc, balancer.BuildOptions{})

     configJSON1 := `{
-"Action": {
+"children": {
     "cds:cluster_1":{ "childPolicy": [{"ignore_attrs_round_robin":""}] },
     "cds:cluster_2":{ "childPolicy": [{"ignore_attrs_round_robin":""}] }
-},
-"Route": [
-    {"prefix":"/a/", "action":"cds:cluster_1"},
-    {"prefix":"", "headers":[{"name":"header-1", "exactMatch":"value-1"}], "action":"cds:cluster_2"}
-]
+}
 }`

     config1, err := rtParser.ParseConfig([]byte(configJSON1))
@@ -173,212 +170,39 @@ func TestRouting(t *testing.T) {
         wantErr  error
     }{
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/0"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/1"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
+            },
+            wantSC: m1[wantAddrs[0]],
         },
         {
             pickInfo: balancer.PickInfo{
-                FullMethodName: "/z/y",
-                Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("header-1", "value-1")),
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
             },
             wantSC: m1[wantAddrs[1]],
-            wantErr: nil,
         },
         {
             pickInfo: balancer.PickInfo{
-                FullMethodName: "/z/y",
-                Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("h", "v")),
+                Ctx: SetPickedCluster(context.Background(), "notacluster"),
             },
-            wantSC:  nil,
-            wantErr: errNoMatchedRouteFound,
+            wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "notacluster"`),
         },
     } {
         testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
     }
 }

-// TestRoutingConfigUpdateAddRoute covers the cases the routing balancer
-// receives config update with extra route, but the same actions.
-func TestRoutingConfigUpdateAddRoute(t *testing.T) {
+// TestConfigUpdateAddCluster covers the cases the balancer receives config
+// update with extra clusters.
+func TestConfigUpdateAddCluster(t *testing.T) {
     cc := testutils.NewTestClientConn(t)
     rtb := rtBuilder.Build(cc, balancer.BuildOptions{})

     configJSON1 := `{
-"Action": {
+"children": {
     "cds:cluster_1":{ "childPolicy": [{"ignore_attrs_round_robin":""}] },
     "cds:cluster_2":{ "childPolicy": [{"ignore_attrs_round_robin":""}] }
-},
-"Route": [
-    {"prefix":"/a/", "action":"cds:cluster_1"},
-    {"path":"/z/y", "action":"cds:cluster_2"}
-]
-}`
-
-    config1, err := rtParser.ParseConfig([]byte(configJSON1))
-    if err != nil {
-        t.Fatalf("failed to parse balancer config: %v", err)
-    }
-
-    // Send the config, and an address with hierarchy path ["cluster_1"].
-    wantAddrs := []resolver.Address{
-        {Addr: testBackendAddrStrs[0], Attributes: nil},
-        {Addr: testBackendAddrStrs[1], Attributes: nil},
-    }
-    if err := rtb.UpdateClientConnState(balancer.ClientConnState{
-        ResolverState: resolver.State{Addresses: []resolver.Address{
-            hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
-            hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}),
-        }},
-        BalancerConfig: config1,
-    }); err != nil {
-        t.Fatalf("failed to update ClientConn state: %v", err)
-    }
-
-    m1 := make(map[resolver.Address]balancer.SubConn)
-    // Verify that a subconn is created with the address, and the hierarchy path
-    // in the address is cleared.
-    for range wantAddrs {
-        addrs := <-cc.NewSubConnAddrsCh
-        if len(hierarchy.Get(addrs[0])) != 0 {
-            t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes)
-        }
-        sc := <-cc.NewSubConnCh
-        // Clear the attributes before adding to map.
-        addrs[0].Attributes = nil
-        m1[addrs[0]] = sc
-        rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
-        rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
-    }
-
-    p1 := <-cc.NewPickerCh
-    for _, tt := range []struct {
-        pickInfo balancer.PickInfo
-        wantSC   balancer.SubConn
-        wantErr  error
-    }{
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/0"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/1"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/z/y"},
-            wantSC:   m1[wantAddrs[1]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/c/d"},
-            wantSC:   nil,
-            wantErr:  errNoMatchedRouteFound,
-        },
-    } {
-        testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
-    }
-
-    // A config update with different routes, but the same actions. Expect a
-    // picker update, but no subconn changes.
-    configJSON2 := `{
-"Action": {
-    "cds:cluster_1":{ "childPolicy": [{"ignore_attrs_round_robin":""}] },
-    "cds:cluster_2":{ "childPolicy": [{"ignore_attrs_round_robin":""}] }
-},
-"Route": [
-    {"prefix":"", "headers":[{"name":"header-1", "presentMatch":true}], "action":"cds:cluster_2"},
-    {"prefix":"/a/", "action":"cds:cluster_1"},
-    {"path":"/z/y", "action":"cds:cluster_2"}
-]
-}`
-    config2, err := rtParser.ParseConfig([]byte(configJSON2))
-    if err != nil {
-        t.Fatalf("failed to parse balancer config: %v", err)
-    }
-    // Send update with the same addresses.
-    if err := rtb.UpdateClientConnState(balancer.ClientConnState{
-        ResolverState: resolver.State{Addresses: []resolver.Address{
-            hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
-            hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}),
-        }},
-        BalancerConfig: config2,
-    }); err != nil {
-        t.Fatalf("failed to update ClientConn state: %v", err)
-    }
-
-    // New change to actions, expect no newSubConn.
-    select {
-    case <-time.After(time.Millisecond * 500):
-    case <-cc.NewSubConnCh:
-        addrs := <-cc.NewSubConnAddrsCh
-        t.Fatalf("unexpected NewSubConn with address %v", addrs)
-    }
-
-    p2 := <-cc.NewPickerCh
-    for _, tt := range []struct {
-        pickInfo balancer.PickInfo
-        wantSC   balancer.SubConn
-        wantErr  error
-    }{
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/0"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/1"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/z/y"},
-            wantSC:   m1[wantAddrs[1]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{
-                FullMethodName: "/a/z",
-                Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("header-1", "value-1")),
-            },
-            wantSC:  m1[wantAddrs[1]],
-            wantErr: nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{
-                FullMethodName: "/c/d",
-                Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("h", "v")),
-            },
-            wantSC:  nil,
-            wantErr: errNoMatchedRouteFound,
-        },
-    } {
-        testPick(t, p2, tt.pickInfo, tt.wantSC, tt.wantErr)
-    }
 }

-// TestRoutingConfigUpdateAddRouteAndAction covers the cases the routing
-// balancer receives config update with extra route and actions.
-func TestRoutingConfigUpdateAddRouteAndAction(t *testing.T) {
-    cc := testutils.NewTestClientConn(t)
-    rtb := rtBuilder.Build(cc, balancer.BuildOptions{})
-
-    configJSON1 := `{
-"Action": {
-    "cds:cluster_1":{ "childPolicy": [{"ignore_attrs_round_robin":""}] },
-    "cds:cluster_2":{ "childPolicy": [{"ignore_attrs_round_robin":""}] }
-},
-"Route": [
-    {"prefix":"/a/", "action":"cds:cluster_1"},
-    {"path":"/z/y", "action":"cds:cluster_2"}
-]
 }`

     config1, err := rtParser.ParseConfig([]byte(configJSON1))
@@ -424,24 +248,22 @@ func TestRoutingConfigUpdateAddRouteAndAction(t *testing.T) {
         wantErr  error
     }{
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/0"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
+            },
+            wantSC: m1[wantAddrs[0]],
         },
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/1"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
+            },
+            wantSC: m1[wantAddrs[1]],
         },
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/z/y"},
-            wantSC:   m1[wantAddrs[1]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/c/d"},
-            wantSC:   nil,
-            wantErr:  errNoMatchedRouteFound,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:notacluster"),
+            },
+            wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
         },
     } {
         testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
@@ -450,16 +272,11 @@ func TestRoutingConfigUpdateAddRouteAndAction(t *testing.T) {
     // A config update with different routes, and different actions. Expect a
     // new subconn and a picker update.
     configJSON2 := `{
-"Action": {
+"children": {
     "cds:cluster_1":{ "childPolicy": [{"ignore_attrs_round_robin":""}] },
     "cds:cluster_2":{ "childPolicy": [{"ignore_attrs_round_robin":""}] },
     "cds:cluster_3":{ "childPolicy": [{"ignore_attrs_round_robin":""}] }
-},
-"Route": [
-    {"prefix":"", "headers":[{"name":"header-1", "presentMatch":false, "invertMatch":true}], "action":"cds:cluster_3"},
-    {"prefix":"/a/", "action":"cds:cluster_1"},
-    {"path":"/z/y", "action":"cds:cluster_2"}
-]
+}
 }`
     config2, err := rtParser.ParseConfig([]byte(configJSON2))
     if err != nil {
@@ -504,56 +321,45 @@ func TestRoutingConfigUpdateAddRouteAndAction(t *testing.T) {
         wantErr  error
     }{
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/0"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/1"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/z/y"},
-            wantSC:   m1[wantAddrs[1]],
-            wantErr:  nil,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
+            },
+            wantSC: m1[wantAddrs[0]],
         },
         {
             pickInfo: balancer.PickInfo{
-                FullMethodName: "/a/z",
-                Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("header-1", "value-1")),
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
             },
-            wantSC:  m1[wantAddrs[2]],
-            wantErr: nil,
+            wantSC: m1[wantAddrs[1]],
         },
         {
             pickInfo: balancer.PickInfo{
-                FullMethodName: "/c/d",
-                Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("h", "v")),
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_3"),
             },
-            wantSC:  nil,
-            wantErr: errNoMatchedRouteFound,
+            wantSC: m1[wantAddrs[2]],
+        },
+        {
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:notacluster"),
+            },
+            wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
         },
     } {
         testPick(t, p2, tt.pickInfo, tt.wantSC, tt.wantErr)
     }
 }

-// TestRoutingConfigUpdateDeleteAll covers the cases the routing balancer receives config
-// update with no routes. Pick should fail with details in error.
+// TestRoutingConfigUpdateDeleteAll covers the cases the balancer receives
+// config update with no clusters. Pick should fail with details in error.
 func TestRoutingConfigUpdateDeleteAll(t *testing.T) {
     cc := testutils.NewTestClientConn(t)
     rtb := rtBuilder.Build(cc, balancer.BuildOptions{})

     configJSON1 := `{
-"Action": {
+"children": {
     "cds:cluster_1":{ "childPolicy": [{"ignore_attrs_round_robin":""}] },
     "cds:cluster_2":{ "childPolicy": [{"ignore_attrs_round_robin":""}] }
-},
-"Route": [
-    {"prefix":"/a/", "action":"cds:cluster_1"},
-    {"path":"/z/y", "action":"cds:cluster_2"}
-]
+}
 }`

     config1, err := rtParser.ParseConfig([]byte(configJSON1))
@@ -599,30 +405,28 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) {
         wantErr  error
     }{
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/0"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
+            },
+            wantSC: m1[wantAddrs[0]],
         },
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/1"},
-            wantSC:   m1[wantAddrs[0]],
-            wantErr:  nil,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
+            },
+            wantSC: m1[wantAddrs[1]],
         },
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/z/y"},
-            wantSC:   m1[wantAddrs[1]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/c/d"},
-            wantSC:   nil,
-            wantErr:  errNoMatchedRouteFound,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:notacluster"),
+            },
+            wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
         },
     } {
         testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
     }

-    // A config update with no routes.
+    // A config update with no clusters.
     configJSON2 := `{}`
     config2, err := rtParser.ParseConfig([]byte(configJSON2))
     if err != nil {
@@ -634,7 +438,7 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) {
         t.Fatalf("failed to update ClientConn state: %v", err)
     }

-    // Expect two remove subconn.
+    // Expect two removed subconns.
     for range wantAddrs {
         select {
         case <-time.After(time.Millisecond * 500):
@@ -645,13 +449,13 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) {

     p2 := <-cc.NewPickerCh
     for i := 0; i < 5; i++ {
-        gotSCSt, err := p2.Pick(balancer.PickInfo{})
-        if err != errNoMatchedRouteFound {
-            t.Fatalf("picker.Pick, got %v, %v, want error %v", gotSCSt, err, errNoMatchedRouteFound)
+        gotSCSt, err := p2.Pick(balancer.PickInfo{Ctx: SetPickedCluster(context.Background(), "cds:notacluster")})
+        if fmt.Sprint(err) != status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`).Error() {
+            t.Fatalf("picker.Pick, got %v, %v, want error %v", gotSCSt, err, `unknown cluster selected for RPC: "cds:notacluster"`)
         }
     }

-    // Resend the previous config with routes and actions.
+    // Resend the previous config with clusters
     if err := rtb.UpdateClientConnState(balancer.ClientConnState{
         ResolverState: resolver.State{Addresses: []resolver.Address{
             hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
@@ -685,24 +489,22 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) {
         wantErr  error
     }{
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/0"},
-            wantSC:   m2[wantAddrs[0]],
-            wantErr:  nil,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
+            },
+            wantSC: m2[wantAddrs[0]],
         },
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/a/1"},
-            wantSC:   m2[wantAddrs[0]],
-            wantErr:  nil,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
+            },
+            wantSC: m2[wantAddrs[1]],
         },
         {
-            pickInfo: balancer.PickInfo{FullMethodName: "/z/y"},
-            wantSC:   m2[wantAddrs[1]],
-            wantErr:  nil,
-        },
-        {
-            pickInfo: balancer.PickInfo{FullMethodName: "/c/d"},
-            wantSC:   nil,
-            wantErr:  errNoMatchedRouteFound,
+            pickInfo: balancer.PickInfo{
+                Ctx: SetPickedCluster(context.Background(), "cds:notacluster"),
+            },
+            wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
         },
     } {
         testPick(t, p3, tt.pickInfo, tt.wantSC, tt.wantErr)
@@ -0,0 +1,46 @@ (new file)
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package clustermanager

import (
    "encoding/json"

    internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
    "google.golang.org/grpc/serviceconfig"
)

type childConfig struct {
    // ChildPolicy is the child policy and it's config.
    ChildPolicy *internalserviceconfig.BalancerConfig
}

// lbConfig is the balancer config for xds routing policy.
type lbConfig struct {
    serviceconfig.LoadBalancingConfig
    Children map[string]childConfig
}

func parseConfig(c json.RawMessage) (*lbConfig, error) {
    cfg := &lbConfig{}
    if err := json.Unmarshal(c, cfg); err != nil {
        return nil, err
    }

    return cfg, nil
}
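For a concrete sense of what parseConfig above accepts, here is a hedged sketch: the JSON mirrors the "children"/"childPolicy" shape used in the tests that follow, but the cluster name is invented, and the child policy named in it must already be registered with gRPC's balancer registry for unmarshaling to succeed.

    package clustermanager

    // exampleParse is illustrative only; it parses a minimal cluster_manager
    // config with a single made-up child. parseConfig returns an error if the
    // JSON is malformed or the childPolicy names no registered LB policy.
    func exampleParse() (*lbConfig, error) {
        js := `{"children": {"cds:example_cluster": {"childPolicy": [{"round_robin": {}}]}}}`
        return parseConfig([]byte(js))
    }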
@@ -0,0 +1,144 @@ (new file)
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package clustermanager

import (
    "testing"

    "github.com/google/go-cmp/cmp"
    "google.golang.org/grpc/balancer"
    internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
    _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer"
    _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget"
)

const (
    testJSONConfig = `{
"children":{
    "cds:cluster_1":{
        "childPolicy":[{
            "cds_experimental":{"cluster":"cluster_1"}
        }]
    },
    "weighted:cluster_1_cluster_2_1":{
        "childPolicy":[{
            "weighted_target_experimental":{
                "targets": {
                    "cluster_1" : {
                        "weight":75,
                        "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
                    },
                    "cluster_2" : {
                        "weight":25,
                        "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}]
                    }
                }
            }
        }]
    },
    "weighted:cluster_1_cluster_3_1":{
        "childPolicy":[{
            "weighted_target_experimental":{
                "targets": {
                    "cluster_1": {
                        "weight":99,
                        "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
                    },
                    "cluster_3": {
                        "weight":1,
                        "childPolicy":[{"cds_experimental":{"cluster":"cluster_3"}}]
                    }
                }
            }
        }]
    }
}
}
`

    cdsName = "cds_experimental"
    wtName  = "weighted_target_experimental"
)

var (
    cdsConfigParser = balancer.Get(cdsName).(balancer.ConfigParser)
    cdsConfigJSON1  = `{"cluster":"cluster_1"}`
    cdsConfig1, _   = cdsConfigParser.ParseConfig([]byte(cdsConfigJSON1))

    wtConfigParser = balancer.Get(wtName).(balancer.ConfigParser)
    wtConfigJSON1  = `{
"targets": {
    "cluster_1" : { "weight":75, "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] },
    "cluster_2" : { "weight":25, "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] }
} }`
    wtConfig1, _  = wtConfigParser.ParseConfig([]byte(wtConfigJSON1))
    wtConfigJSON2 = `{
"targets": {
    "cluster_1": { "weight":99, "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] },
    "cluster_3": { "weight":1, "childPolicy":[{"cds_experimental":{"cluster":"cluster_3"}}] }
} }`
    wtConfig2, _ = wtConfigParser.ParseConfig([]byte(wtConfigJSON2))
)

func Test_parseConfig(t *testing.T) {
    tests := []struct {
        name    string
        js      string
        want    *lbConfig
        wantErr bool
    }{
        {
            name:    "empty json",
            js:      "",
            want:    nil,
            wantErr: true,
        },
        {
            name: "OK",
            js:   testJSONConfig,
            want: &lbConfig{
                Children: map[string]childConfig{
                    "cds:cluster_1": {ChildPolicy: &internalserviceconfig.BalancerConfig{
                        Name: cdsName, Config: cdsConfig1},
                    },
                    "weighted:cluster_1_cluster_2_1": {ChildPolicy: &internalserviceconfig.BalancerConfig{
                        Name: wtName, Config: wtConfig1},
                    },
                    "weighted:cluster_1_cluster_3_1": {ChildPolicy: &internalserviceconfig.BalancerConfig{
                        Name: wtName, Config: wtConfig2},
                    },
                },
            },
            wantErr: false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := parseConfig([]byte(tt.js))
            if (err != nil) != tt.wantErr {
                t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if d := cmp.Diff(got, tt.want, cmp.AllowUnexported(lbConfig{})); d != "" {
                t.Errorf("parseConfig() got unexpected result, diff: %v", d)
            }
        })
    }
}
@@ -16,45 +16,55 @@
  *
  */

-package xdsrouting
+package clustermanager

 import (
+    "context"
+
     "google.golang.org/grpc/balancer"
     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/status"
 )

-// pickerGroup contains a list of route matchers and their corresponding
-// pickers. For each pick, the first matched picker is used. If the picker isn't
-// ready, the pick will be queued.
+// pickerGroup contains a list of pickers. If the picker isn't ready, the pick
+// will be queued.
 type pickerGroup struct {
-    routes  []route
     pickers map[string]balancer.Picker
 }

-func newPickerGroup(routes []route, idToPickerState map[string]*subBalancerState) *pickerGroup {
+func newPickerGroup(idToPickerState map[string]*subBalancerState) *pickerGroup {
     pickers := make(map[string]balancer.Picker)
     for id, st := range idToPickerState {
         pickers[id] = st.state.Picker
     }
     return &pickerGroup{
-        routes:  routes,
         pickers: pickers,
     }
 }

-var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found")
-
 func (pg *pickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
-    for _, rt := range pg.routes {
-        if rt.m.match(info) {
-            // action from route is the ID for the sub-balancer to use.
-            p, ok := pg.pickers[rt.action]
-            if !ok {
-                return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-            }
-            return p.Pick(info)
-        }
+    cluster := getPickedCluster(info.Ctx)
+    if p := pg.pickers[cluster]; p != nil {
+        return p.Pick(info)
     }
-    return balancer.PickResult{}, errNoMatchedRouteFound
+    return balancer.PickResult{}, status.Errorf(codes.Unavailable, "unknown cluster selected for RPC: %q", cluster)
+}
+
+type clusterKey struct{}
+
+func getPickedCluster(ctx context.Context) string {
+    cluster, _ := ctx.Value(clusterKey{}).(string)
+    return cluster
+}
+
+// GetPickedClusterForTesting returns the cluster in the context; to be used
+// for testing only.
+func GetPickedClusterForTesting(ctx context.Context) string {
+    return getPickedCluster(ctx)
+}
+
+// SetPickedCluster adds the selected cluster to the context for the
+// xds_cluster_manager LB policy to pick.
+func SetPickedCluster(ctx context.Context, cluster string) context.Context {
+    return context.WithValue(ctx, clusterKey{}, cluster)
 }
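To tie the pieces together: the new picker above no longer inspects the method name or metadata the way the routing picker did; the target cluster is expected to already be in the PickInfo context. A small illustrative wrapper (the function itself is not part of the change, only a sketch of the calling convention):

    package clustermanager

    import (
        "context"

        "google.golang.org/grpc/balancer"
    )

    // pickForCluster shows how callers are expected to drive the new picker: the
    // cluster name travels in the context via SetPickedCluster, and picks for
    // clusters the balancer does not know about fail with codes.Unavailable.
    func pickForCluster(p balancer.Picker, cluster string) (balancer.PickResult, error) {
        info := balancer.PickInfo{Ctx: SetPickedCluster(context.Background(), cluster)}
        return p.Pick(info)
    }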
@@ -1,20 +0,0 @@ (deleted file)
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package xdsrouting implements the routing balancer for xds.
-package xdsrouting
@@ -1,243 +0,0 @@ (deleted file)
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package xdsrouting
-
-import (
-    "encoding/json"
-    "fmt"
-    "regexp"
-
-    "google.golang.org/grpc/balancer"
-    "google.golang.org/grpc/internal/grpclog"
-    "google.golang.org/grpc/internal/hierarchy"
-    "google.golang.org/grpc/resolver"
-    "google.golang.org/grpc/serviceconfig"
-    "google.golang.org/grpc/xds/internal/balancer/balancergroup"
-)
-
-const xdsRoutingName = "xds_routing_experimental"
-
-func init() {
-    balancer.Register(&routingBB{})
-}
-
-type routingBB struct{}
-
-func (rbb *routingBB) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
-    b := &routingBalancer{}
-    b.logger = prefixLogger(b)
-    b.stateAggregator = newBalancerStateAggregator(cc, b.logger)
-    b.stateAggregator.start()
-    b.bg = balancergroup.New(cc, b.stateAggregator, nil, b.logger)
-    b.bg.Start()
-    b.logger.Infof("Created")
-    return b
-}
-
-func (rbb *routingBB) Name() string {
-    return xdsRoutingName
-}
-
-func (rbb *routingBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
-    return parseConfig(c)
-}
-
-type route struct {
-    action string
-    m      *compositeMatcher
-}
-
-func (r route) String() string {
-    return r.m.String() + "->" + r.action
-}
-
-type routingBalancer struct {
-    logger *grpclog.PrefixLogger
-
-    // TODO: make this package not dependent on xds specific code. Same as for
-    // weighted target balancer.
-    bg              *balancergroup.BalancerGroup
-    stateAggregator *balancerStateAggregator
-
-    actions map[string]actionConfig
-    routes  []route
-}
-
-func (rb *routingBalancer) updateActions(s balancer.ClientConnState, newConfig *lbConfig) (needRebuild bool) {
-    addressesSplit := hierarchy.Group(s.ResolverState.Addresses)
-    var rebuildStateAndPicker bool
-
-    // Remove sub-pickers and sub-balancers that are not in the new action list.
-    for name := range rb.actions {
-        if _, ok := newConfig.actions[name]; !ok {
-            rb.stateAggregator.remove(name)
-            rb.bg.Remove(name)
-            // Trigger a state/picker update, because we don't want `ClientConn`
-            // to pick this sub-balancer anymore.
-            rebuildStateAndPicker = true
-        }
-    }
-
-    // For sub-balancers in the new action list,
-    // - add to balancer group if it's new,
-    // - forward the address/balancer config update.
-    for name, newT := range newConfig.actions {
-        if _, ok := rb.actions[name]; !ok {
-            // If this is a new sub-balancer, add weights to the picker map.
-            rb.stateAggregator.add(name)
-            // Then add to the balancer group.
-            rb.bg.Add(name, balancer.Get(newT.ChildPolicy.Name))
-            // Not trigger a state/picker update. Wait for the new sub-balancer
-            // to send its updates.
-        }
-        // Forwards all the update:
-        // - Addresses are from the map after splitting with hierarchy path,
-        // - Top level service config and attributes are the same,
-        // - Balancer config comes from the targets map.
-        //
-        // TODO: handle error? How to aggregate errors and return?
-        _ = rb.bg.UpdateClientConnState(name, balancer.ClientConnState{
-            ResolverState: resolver.State{
-                Addresses:     addressesSplit[name],
-                ServiceConfig: s.ResolverState.ServiceConfig,
-                Attributes:    s.ResolverState.Attributes,
-            },
-            BalancerConfig: newT.ChildPolicy.Config,
-        })
-    }
-
-    rb.actions = newConfig.actions
-    return rebuildStateAndPicker
-}
-
-func routeToMatcher(r routeConfig) (*compositeMatcher, error) {
-    var pathMatcher pathMatcherInterface
-    switch {
-    case r.regex != "":
-        re, err := regexp.Compile(r.regex)
-        if err != nil {
-            return nil, fmt.Errorf("failed to compile regex %q", r.regex)
-        }
-        pathMatcher = newPathRegexMatcher(re)
-    case r.path != "":
-        pathMatcher = newPathExactMatcher(r.path, r.caseInsensitive)
-    default:
-        pathMatcher = newPathPrefixMatcher(r.prefix, r.caseInsensitive)
-    }
-
-    var headerMatchers []headerMatcherInterface
-    for _, h := range r.headers {
-        var matcherT headerMatcherInterface
-        switch {
-        case h.exactMatch != "":
-            matcherT = newHeaderExactMatcher(h.name, h.exactMatch)
-        case h.regexMatch != "":
-            re, err := regexp.Compile(h.regexMatch)
-            if err != nil {
-                return nil, fmt.Errorf("failed to compile regex %q, skipping this matcher", h.regexMatch)
-            }
-            matcherT = newHeaderRegexMatcher(h.name, re)
-        case h.prefixMatch != "":
-            matcherT = newHeaderPrefixMatcher(h.name, h.prefixMatch)
-        case h.suffixMatch != "":
-            matcherT = newHeaderSuffixMatcher(h.name, h.suffixMatch)
-        case h.rangeMatch != nil:
-            matcherT = newHeaderRangeMatcher(h.name, h.rangeMatch.start, h.rangeMatch.end)
-        default:
-            matcherT = newHeaderPresentMatcher(h.name, h.presentMatch)
-        }
-        if h.invertMatch {
-            matcherT = newInvertMatcher(matcherT)
-        }
-        headerMatchers = append(headerMatchers, matcherT)
-    }
-
-    var fractionMatcher *fractionMatcher
-    if r.fraction != nil {
-        fractionMatcher = newFractionMatcher(*r.fraction)
-    }
-    return newCompositeMatcher(pathMatcher, headerMatchers, fractionMatcher), nil
-}
-
-func routesEqual(a, b []route) bool {
-    if len(a) != len(b) {
-        return false
-    }
-    for i := range a {
-        aa := a[i]
-        bb := b[i]
-        if aa.action != bb.action {
-            return false
-        }
-        if !aa.m.equal(bb.m) {
-            return false
-        }
-    }
-    return true
-}
-
-func (rb *routingBalancer) updateRoutes(newConfig *lbConfig) (needRebuild bool, _ error) {
-    var newRoutes []route
-    for _, rt := range newConfig.routes {
-        newMatcher, err := routeToMatcher(rt)
-        if err != nil {
-            return false, err
-        }
-        newRoutes = append(newRoutes, route{action: rt.action, m: newMatcher})
-    }
-    rebuildStateAndPicker := !routesEqual(newRoutes, rb.routes)
-    rb.routes = newRoutes
-
-    if rebuildStateAndPicker {
-        rb.stateAggregator.updateRoutes(rb.routes)
-    }
-    return rebuildStateAndPicker, nil
-}
-
-func (rb *routingBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
-    newConfig, ok := s.BalancerConfig.(*lbConfig)
-    if !ok {
||||||
return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
|
|
||||||
}
|
|
||||||
rb.logger.Infof("update with config %+v, resolver state %+v", s.BalancerConfig, s.ResolverState)
|
|
||||||
|
|
||||||
rebuildForActions := rb.updateActions(s, newConfig)
|
|
||||||
rebuildForRoutes, err := rb.updateRoutes(newConfig)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("xds_routing balancer: failed to update routes: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if rebuildForActions || rebuildForRoutes {
|
|
||||||
rb.stateAggregator.buildAndUpdate()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rb *routingBalancer) ResolverError(err error) {
|
|
||||||
rb.bg.ResolverError(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rb *routingBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
|
||||||
rb.bg.UpdateSubConnState(sc, state)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rb *routingBalancer) Close() {
|
|
||||||
rb.stateAggregator.close()
|
|
||||||
rb.bg.Close()
|
|
||||||
}
|
|
||||||
|
|
@ -1,207 +0,0 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package xdsrouting

import (
	"encoding/json"
	"fmt"

	wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
	"google.golang.org/grpc/serviceconfig"
	xdsclient "google.golang.org/grpc/xds/internal/client"
)
type actionConfig struct {
	// ChildPolicy is the child policy and its config.
	ChildPolicy *internalserviceconfig.BalancerConfig
}

type int64Range struct {
	start, end int64
}

type headerMatcher struct {
	name string
	// matchSpecifiers
	invertMatch bool

	// At most one of the following has a non-default value.
	exactMatch, regexMatch, prefixMatch, suffixMatch string
	rangeMatch                                       *int64Range
	presentMatch                                     bool
}

type routeConfig struct {
	// Path, Prefix and Regex can have at most one set. This is guaranteed by
	// config parsing.
	path, prefix, regex string
	// Indicates if prefix/path matching should be case insensitive. The
	// default is false (case sensitive).
	caseInsensitive bool

	headers  []headerMatcher
	fraction *uint32

	// Action is the name from the action list.
	action string
}

// lbConfig is the balancer config for the xds routing policy.
type lbConfig struct {
	serviceconfig.LoadBalancingConfig
	routes  []routeConfig
	actions map[string]actionConfig
}

// The following structs with `JSON` in the name are temporary structs to
// unmarshal JSON into. The fields will be read into lbConfig, to be used by
// the balancer.

// routeJSON is a temporary struct for JSON unmarshaling.
type routeJSON struct {
	// Path, Prefix and Regex can have at most one non-nil.
	Path, Prefix, Regex *string
	CaseInsensitive     bool
	// Zero or more header matchers.
	Headers       []*xdsclient.HeaderMatcher
	MatchFraction *wrapperspb.UInt32Value
	// Action is the name from the action list.
	Action string
}

// lbConfigJSON is a temporary struct for JSON unmarshaling.
type lbConfigJSON struct {
	Route  []routeJSON
	Action map[string]actionConfig
}

func (jc lbConfigJSON) toLBConfig() *lbConfig {
	var ret lbConfig
	for _, r := range jc.Route {
		var tempR routeConfig
		switch {
		case r.Path != nil:
			tempR.path = *r.Path
		case r.Prefix != nil:
			tempR.prefix = *r.Prefix
		case r.Regex != nil:
			tempR.regex = *r.Regex
		}
		tempR.caseInsensitive = r.CaseInsensitive
		for _, h := range r.Headers {
			var tempHeader headerMatcher
			switch {
			case h.ExactMatch != nil:
				tempHeader.exactMatch = *h.ExactMatch
			case h.RegexMatch != nil:
				tempHeader.regexMatch = *h.RegexMatch
			case h.PrefixMatch != nil:
				tempHeader.prefixMatch = *h.PrefixMatch
			case h.SuffixMatch != nil:
				tempHeader.suffixMatch = *h.SuffixMatch
			case h.RangeMatch != nil:
				tempHeader.rangeMatch = &int64Range{
					start: h.RangeMatch.Start,
					end:   h.RangeMatch.End,
				}
			case h.PresentMatch != nil:
				tempHeader.presentMatch = *h.PresentMatch
			}
			tempHeader.name = h.Name
			if h.InvertMatch != nil {
				tempHeader.invertMatch = *h.InvertMatch
			}
			tempR.headers = append(tempR.headers, tempHeader)
		}
		if r.MatchFraction != nil {
			tempR.fraction = &r.MatchFraction.Value
		}
		tempR.action = r.Action
		ret.routes = append(ret.routes, tempR)
	}
	ret.actions = jc.Action
	return &ret
}

func parseConfig(c json.RawMessage) (*lbConfig, error) {
	var tempConfig lbConfigJSON
	if err := json.Unmarshal(c, &tempConfig); err != nil {
		return nil, err
	}

	// For each route:
	// - at most one of path/prefix/regex is set.
	// - the action is in the action list.

	allRouteActions := make(map[string]bool)
	for _, r := range tempConfig.Route {
		var oneOfCount int
		if r.Path != nil {
			oneOfCount++
		}
		if r.Prefix != nil {
			oneOfCount++
		}
		if r.Regex != nil {
			oneOfCount++
		}
		if oneOfCount != 1 {
			return nil, fmt.Errorf("%d (not exactly one) of path/prefix/regex is set in route %+v", oneOfCount, r)
		}

		for _, h := range r.Headers {
			var oneOfCountH int
			if h.ExactMatch != nil {
				oneOfCountH++
			}
			if h.RegexMatch != nil {
				oneOfCountH++
			}
			if h.PrefixMatch != nil {
				oneOfCountH++
			}
			if h.SuffixMatch != nil {
				oneOfCountH++
			}
			if h.RangeMatch != nil {
				oneOfCountH++
			}
			if h.PresentMatch != nil {
				oneOfCountH++
			}
			if oneOfCountH != 1 {
				return nil, fmt.Errorf("%d (not exactly one) of header matcher specifier is set in route %+v", oneOfCountH, h)
			}
		}

		if _, ok := tempConfig.Action[r.Action]; !ok {
			return nil, fmt.Errorf("action %q from route %+v is not found in action list", r.Action, r)
		}
		allRouteActions[r.Action] = true
	}

	// Verify that each action is used by at least one route.
	for n := range tempConfig.Action {
		if _, ok := allRouteActions[n]; !ok {
			return nil, fmt.Errorf("action %q is not used by any route", n)
		}
	}

	return tempConfig.toLBConfig(), nil
}
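A minimal sketch of how the parseConfig above was exercised; it assumes it sits in the same (now deleted) xdsrouting package, with the cds balancer registered via a blank import as the test file below does. The function name and the JSON literal are illustrative only, not part of the change.

func exampleParse() {
	js := `{
	  "action": {
	    "cds:cluster_1": {"childPolicy": [{"cds_experimental": {"cluster": "cluster_1"}}]}
	  },
	  "route": [{"prefix": "/service_1/", "action": "cds:cluster_1"}]
	}`
	cfg, err := parseConfig([]byte(js))
	if err != nil {
		// Exactly one of path/prefix/regex per route, exactly one header match
		// specifier, and action-list membership are all enforced in parseConfig.
		panic(err)
	}
	fmt.Printf("parsed %d routes and %d actions\n", len(cfg.routes), len(cfg.actions))
}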
@ -1,369 +0,0 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package xdsrouting

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/grpc/balancer"
	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
	_ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer"
	_ "google.golang.org/grpc/xds/internal/balancer/weightedtarget"
)

const (
	testJSONConfig = `{
      "action":{
        "cds:cluster_1":{
          "childPolicy":[{
            "cds_experimental":{"cluster":"cluster_1"}
          }]
        },
        "weighted:cluster_1_cluster_2_1":{
          "childPolicy":[{
            "weighted_target_experimental":{
              "targets": {
                "cluster_1" : {
                  "weight":75,
                  "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
                },
                "cluster_2" : {
                  "weight":25,
                  "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}]
                }
              }
            }
          }]
        },
        "weighted:cluster_1_cluster_3_1":{
          "childPolicy":[{
            "weighted_target_experimental":{
              "targets": {
                "cluster_1": {
                  "weight":99,
                  "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
                },
                "cluster_3": {
                  "weight":1,
                  "childPolicy":[{"cds_experimental":{"cluster":"cluster_3"}}]
                }
              }
            }
          }]
        }
      },

      "route":[{
        "path":"/service_1/method_1",
        "action":"cds:cluster_1"
      },
      {
        "path":"/service_1/method_2",
        "action":"cds:cluster_1"
      },
      {
        "prefix":"/service_2/method_1",
        "action":"weighted:cluster_1_cluster_2_1"
      },
      {
        "prefix":"/service_2",
        "action":"weighted:cluster_1_cluster_2_1"
      },
      {
        "regex":"^/service_2/method_3$",
        "action":"weighted:cluster_1_cluster_3_1"
      }]
    }
`

	testJSONConfigWithAllMatchers = `{
      "action":{
        "cds:cluster_1":{
          "childPolicy":[{
            "cds_experimental":{"cluster":"cluster_1"}
          }]
        },
        "cds:cluster_2":{
          "childPolicy":[{
            "cds_experimental":{"cluster":"cluster_2"}
          }]
        },
        "cds:cluster_3":{
          "childPolicy":[{
            "cds_experimental":{"cluster":"cluster_3"}
          }]
        }
      },

      "route":[{
        "path":"/service_1/method_1",
        "action":"cds:cluster_1"
      },
      {
        "prefix":"/service_2/method_1",
        "action":"cds:cluster_1"
      },
      {
        "regex":"^/service_2/method_3$",
        "action":"cds:cluster_1"
      },
      {
        "prefix":"",
        "headers":[{"name":"header-1", "exactMatch":"value-1", "invertMatch":true}],
        "action":"cds:cluster_2"
      },
      {
        "prefix":"",
        "headers":[{"name":"header-1", "regexMatch":"^value-1$"}],
        "action":"cds:cluster_2"
      },
      {
        "prefix":"",
        "headers":[{"name":"header-1", "rangeMatch":{"start":-1, "end":7}}],
        "action":"cds:cluster_3"
      },
      {
        "prefix":"",
        "headers":[{"name":"header-1", "presentMatch":true}],
        "action":"cds:cluster_3"
      },
      {
        "prefix":"",
        "headers":[{"name":"header-1", "prefixMatch":"value-1"}],
        "action":"cds:cluster_2"
      },
      {
        "prefix":"",
        "headers":[{"name":"header-1", "suffixMatch":"value-1"}],
        "action":"cds:cluster_2"
      },
      {
        "prefix":"",
        "matchFraction":{"value": 31415},
        "action":"cds:cluster_3"
      }]
    }
`

	cdsName = "cds_experimental"
	wtName  = "weighted_target_experimental"
)

var (
	cdsConfigParser = balancer.Get(cdsName).(balancer.ConfigParser)
	cdsConfigJSON1  = `{"cluster":"cluster_1"}`
	cdsConfig1, _   = cdsConfigParser.ParseConfig([]byte(cdsConfigJSON1))
	cdsConfigJSON2  = `{"cluster":"cluster_2"}`
	cdsConfig2, _   = cdsConfigParser.ParseConfig([]byte(cdsConfigJSON2))
	cdsConfigJSON3  = `{"cluster":"cluster_3"}`
	cdsConfig3, _   = cdsConfigParser.ParseConfig([]byte(cdsConfigJSON3))

	wtConfigParser = balancer.Get(wtName).(balancer.ConfigParser)
	wtConfigJSON1  = `{
		"targets": {
			"cluster_1" : { "weight":75, "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] },
			"cluster_2" : { "weight":25, "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] }
		} }`
	wtConfig1, _  = wtConfigParser.ParseConfig([]byte(wtConfigJSON1))
	wtConfigJSON2 = `{
		"targets": {
			"cluster_1": { "weight":99, "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] },
			"cluster_3": { "weight":1, "childPolicy":[{"cds_experimental":{"cluster":"cluster_3"}}] }
		} }`
	wtConfig2, _ = wtConfigParser.ParseConfig([]byte(wtConfigJSON2))
)

func Test_parseConfig(t *testing.T) {
	tests := []struct {
		name    string
		js      string
		want    *lbConfig
		wantErr bool
	}{
		{
			name:    "empty json",
			js:      "",
			want:    nil,
			wantErr: true,
		},
		{
			name: "more than one path matcher", // Path matcher is oneof, so this is an error.
			js: `{
				"Action":{
					"cds:cluster_1":{ "childPolicy":[{ "cds_experimental":{"cluster":"cluster_1"} }]}
				},
				"Route": [{
					"path":"/service_1/method_1",
					"prefix":"/service_1/",
					"action":"cds:cluster_1"
				}]
			}`,
			want:    nil,
			wantErr: true,
		},
		{
			name: "no path matcher",
			js: `{
				"Action":{
					"cds:cluster_1":{ "childPolicy":[{ "cds_experimental":{"cluster":"cluster_1"} }]}
				},
				"Route": [{
					"action":"cds:cluster_1"
				}]
			}`,
			want:    nil,
			wantErr: true,
		},
		{
			name: "route action not found in action list",
			js: `{
				"Action":{},
				"Route": [{
					"path":"/service_1/method_1",
					"action":"cds:cluster_1"
				}]
			}`,
			want:    nil,
			wantErr: true,
		},
		{
			name: "action list contains action not used",
			js: `{
				"Action":{
					"cds:cluster_1":{ "childPolicy":[{ "cds_experimental":{"cluster":"cluster_1"} }]},
					"cds:cluster_not_used":{ "childPolicy":[{ "cds_experimental":{"cluster":"cluster_1"} }]}
				},
				"Route": [{
					"path":"/service_1/method_1",
					"action":"cds:cluster_1"
				}]
			}`,
			want:    nil,
			wantErr: true,
		},
		{
			name: "no header specifier in header matcher",
			js: `{
				"Action":{
					"cds:cluster_1":{ "childPolicy":[{ "cds_experimental":{"cluster":"cluster_1"} }]}
				},
				"Route": [{
					"path":"/service_1/method_1",
					"headers":[{"name":"header-1"}],
					"action":"cds:cluster_1"
				}]
			}`,
			want:    nil,
			wantErr: true,
		},
		{
			name: "more than one header specifier in header matcher",
			js: `{
				"Action":{
					"cds:cluster_1":{ "childPolicy":[{ "cds_experimental":{"cluster":"cluster_1"} }]}
				},
				"Route": [{
					"path":"/service_1/method_1",
					"headers":[{"name":"header-1", "prefixMatch":"a", "suffixMatch":"b"}],
					"action":"cds:cluster_1"
				}]
			}`,
			want:    nil,
			wantErr: true,
		},
		{
			name: "OK with path matchers only",
			js:   testJSONConfig,
			want: &lbConfig{
				routes: []routeConfig{
					{path: "/service_1/method_1", action: "cds:cluster_1"},
					{path: "/service_1/method_2", action: "cds:cluster_1"},
					{prefix: "/service_2/method_1", action: "weighted:cluster_1_cluster_2_1"},
					{prefix: "/service_2", action: "weighted:cluster_1_cluster_2_1"},
					{regex: "^/service_2/method_3$", action: "weighted:cluster_1_cluster_3_1"},
				},
				actions: map[string]actionConfig{
					"cds:cluster_1": {ChildPolicy: &internalserviceconfig.BalancerConfig{
						Name: cdsName, Config: cdsConfig1},
					},
					"weighted:cluster_1_cluster_2_1": {ChildPolicy: &internalserviceconfig.BalancerConfig{
						Name: wtName, Config: wtConfig1},
					},
					"weighted:cluster_1_cluster_3_1": {ChildPolicy: &internalserviceconfig.BalancerConfig{
						Name: wtName, Config: wtConfig2},
					},
				},
			},
			wantErr: false,
		},
		{
			name: "OK with all matchers",
			js:   testJSONConfigWithAllMatchers,
			want: &lbConfig{
				routes: []routeConfig{
					{path: "/service_1/method_1", action: "cds:cluster_1"},
					{prefix: "/service_2/method_1", action: "cds:cluster_1"},
					{regex: "^/service_2/method_3$", action: "cds:cluster_1"},

					{prefix: "", headers: []headerMatcher{{name: "header-1", exactMatch: "value-1", invertMatch: true}}, action: "cds:cluster_2"},
					{prefix: "", headers: []headerMatcher{{name: "header-1", regexMatch: "^value-1$"}}, action: "cds:cluster_2"},
					{prefix: "", headers: []headerMatcher{{name: "header-1", rangeMatch: &int64Range{start: -1, end: 7}}}, action: "cds:cluster_3"},
					{prefix: "", headers: []headerMatcher{{name: "header-1", presentMatch: true}}, action: "cds:cluster_3"},
					{prefix: "", headers: []headerMatcher{{name: "header-1", prefixMatch: "value-1"}}, action: "cds:cluster_2"},
					{prefix: "", headers: []headerMatcher{{name: "header-1", suffixMatch: "value-1"}}, action: "cds:cluster_2"},
					{prefix: "", fraction: newUInt32P(31415), action: "cds:cluster_3"},
				},
				actions: map[string]actionConfig{
					"cds:cluster_1": {ChildPolicy: &internalserviceconfig.BalancerConfig{
						Name: cdsName, Config: cdsConfig1},
					},
					"cds:cluster_2": {ChildPolicy: &internalserviceconfig.BalancerConfig{
						Name: cdsName, Config: cdsConfig2},
					},
					"cds:cluster_3": {ChildPolicy: &internalserviceconfig.BalancerConfig{
						Name: cdsName, Config: cdsConfig3},
					},
				},
			},
			wantErr: false,
		},
	}

	cmpOptions := []cmp.Option{cmp.AllowUnexported(lbConfig{}, routeConfig{}, headerMatcher{}, int64Range{})}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := parseConfig([]byte(tt.js))
			if (err != nil) != tt.wantErr {
				t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !cmp.Equal(got, tt.want, cmpOptions...) {
				t.Errorf("parseConfig() got unexpected result, diff: %v", cmp.Diff(got, tt.want, cmpOptions...))
			}
		})
	}
}

func newUInt32P(i uint32) *uint32 {
	return &i
}
@ -1,176 +0,0 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package xdsrouting

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/xds/internal/testutils"
)

var (
	testPickers = []*testutils.TestConstPicker{
		{SC: testutils.TestSubConns[0]},
		{SC: testutils.TestSubConns[1]},
	}
)

func (s) TestRoutingPickerGroupPick(t *testing.T) {
	tests := []struct {
		name string

		routes  []route
		pickers map[string]*subBalancerState
		info    balancer.PickInfo

		want    balancer.PickResult
		wantErr error
	}{
		{
			name:    "empty",
			wantErr: errNoMatchedRouteFound,
		},
		{
			name: "one route no match",
			routes: []route{
				{m: newCompositeMatcher(newPathPrefixMatcher("/a/", false), nil, nil), action: "action-0"},
			},
			pickers: map[string]*subBalancerState{
				"action-0": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[0],
				}},
			},
			info:    balancer.PickInfo{FullMethodName: "/z/y"},
			wantErr: errNoMatchedRouteFound,
		},
		{
			name: "one route one match",
			routes: []route{
				{m: newCompositeMatcher(newPathPrefixMatcher("/a/", false), nil, nil), action: "action-0"},
			},
			pickers: map[string]*subBalancerState{
				"action-0": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[0],
				}},
			},
			info: balancer.PickInfo{FullMethodName: "/a/b"},
			want: balancer.PickResult{SubConn: testutils.TestSubConns[0]},
		},
		{
			name: "two routes first match",
			routes: []route{
				{m: newCompositeMatcher(newPathPrefixMatcher("/a/", false), nil, nil), action: "action-0"},
				{m: newCompositeMatcher(newPathPrefixMatcher("/z/", false), nil, nil), action: "action-1"},
			},
			pickers: map[string]*subBalancerState{
				"action-0": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[0],
				}},
				"action-1": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[1],
				}},
			},
			info: balancer.PickInfo{FullMethodName: "/a/b"},
			want: balancer.PickResult{SubConn: testutils.TestSubConns[0]},
		},
		{
			name: "two routes second match",
			routes: []route{
				{m: newCompositeMatcher(newPathPrefixMatcher("/a/", false), nil, nil), action: "action-0"},
				{m: newCompositeMatcher(newPathPrefixMatcher("/z/", false), nil, nil), action: "action-1"},
			},
			pickers: map[string]*subBalancerState{
				"action-0": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[0],
				}},
				"action-1": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[1],
				}},
			},
			info: balancer.PickInfo{FullMethodName: "/z/y"},
			want: balancer.PickResult{SubConn: testutils.TestSubConns[1]},
		},
		{
			name: "two routes both match former more specific",
			routes: []route{
				{m: newCompositeMatcher(newPathExactMatcher("/a/b", false), nil, nil), action: "action-0"},
				{m: newCompositeMatcher(newPathPrefixMatcher("/a/", false), nil, nil), action: "action-1"},
			},
			pickers: map[string]*subBalancerState{
				"action-0": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[0],
				}},
				"action-1": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[1],
				}},
			},
			info: balancer.PickInfo{FullMethodName: "/a/b"},
			// First route is a match, so first action is picked.
			want: balancer.PickResult{SubConn: testutils.TestSubConns[0]},
		},
		{
			name: "two routes both match latter more specific",
			routes: []route{
				{m: newCompositeMatcher(newPathPrefixMatcher("/a/", false), nil, nil), action: "action-0"},
				{m: newCompositeMatcher(newPathExactMatcher("/a/b", false), nil, nil), action: "action-1"},
			},
			pickers: map[string]*subBalancerState{
				"action-0": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[0],
				}},
				"action-1": {state: balancer.State{
					ConnectivityState: connectivity.Ready,
					Picker:            testPickers[1],
				}},
			},
			info: balancer.PickInfo{FullMethodName: "/a/b"},
			// First route is a match, so first action is picked, even though
			// second is an exact match.
			want: balancer.PickResult{SubConn: testutils.TestSubConns[0]},
		},
	}
	cmpOpts := []cmp.Option{cmp.AllowUnexported(testutils.TestSubConn{})}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			pg := newPickerGroup(tt.routes, tt.pickers)
			got, err := pg.Pick(tt.info)
			t.Logf("Pick(%+v) = {%+v, %+v}", tt.info, got, err)
			if err != tt.wantErr {
				t.Errorf("Pick() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !cmp.Equal(got, tt.want, cmpOpts...) {
				t.Errorf("Pick() got = %v, want %v, diff %s", got, tt.want, cmp.Diff(got, tt.want, cmpOpts...))
			}
		})
	}

}
@ -16,18 +16,73 @@
 *
 */

-package xdsrouting
+package resolver

import (
	"fmt"
+	"regexp"
	"strings"

-	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal/grpcrand"
	"google.golang.org/grpc/internal/grpcutil"
+	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/metadata"
+	xdsclient "google.golang.org/grpc/xds/internal/client"
)

+func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) {
+	var pathMatcher pathMatcherInterface
+	switch {
+	case r.Regex != nil:
+		re, err := regexp.Compile(*r.Regex)
+		if err != nil {
+			return nil, fmt.Errorf("failed to compile regex %q", *r.Regex)
+		}
+		pathMatcher = newPathRegexMatcher(re)
+	case r.Path != nil:
+		pathMatcher = newPathExactMatcher(*r.Path, r.CaseInsensitive)
+	case r.Prefix != nil:
+		pathMatcher = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive)
+	default:
+		return nil, fmt.Errorf("illegal route: missing path_matcher")
+	}
+
+	var headerMatchers []headerMatcherInterface
+	for _, h := range r.Headers {
+		var matcherT headerMatcherInterface
+		switch {
+		case h.ExactMatch != nil && *h.ExactMatch != "":
+			matcherT = newHeaderExactMatcher(h.Name, *h.ExactMatch)
+		case h.RegexMatch != nil && *h.RegexMatch != "":
+			re, err := regexp.Compile(*h.RegexMatch)
+			if err != nil {
+				return nil, fmt.Errorf("failed to compile regex %q, skipping this matcher", *h.RegexMatch)
+			}
+			matcherT = newHeaderRegexMatcher(h.Name, re)
+		case h.PrefixMatch != nil && *h.PrefixMatch != "":
+			matcherT = newHeaderPrefixMatcher(h.Name, *h.PrefixMatch)
+		case h.SuffixMatch != nil && *h.SuffixMatch != "":
+			matcherT = newHeaderSuffixMatcher(h.Name, *h.SuffixMatch)
+		case h.RangeMatch != nil:
+			matcherT = newHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End)
+		case h.PresentMatch != nil:
+			matcherT = newHeaderPresentMatcher(h.Name, *h.PresentMatch)
+		default:
+			return nil, fmt.Errorf("illegal route: missing header_match_specifier")
+		}
+		if h.InvertMatch != nil && *h.InvertMatch {
+			matcherT = newInvertMatcher(matcherT)
+		}
+		headerMatchers = append(headerMatchers, matcherT)
+	}
+
+	var fractionMatcher *fractionMatcher
+	if r.Fraction != nil {
+		fractionMatcher = newFractionMatcher(*r.Fraction)
+	}
+	return newCompositeMatcher(pathMatcher, headerMatchers, fractionMatcher), nil
+}
+
// compositeMatcher.match returns true if all matchers return true.
type compositeMatcher struct {
	pm pathMatcherInterface
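A sketch of how the conversion added above is meant to be driven. Both packages involved are grpc-internal, so this only illustrates the in-tree flow; the function name, the literal values, and the "context" import are assumptions, not part of the change.

func exampleMatch() bool {
	prefix := "/grpc.service/"
	// An xdsclient.Route with only a prefix matcher; headers and fraction are nil.
	m, err := routeToMatcher(&xdsclient.Route{Prefix: &prefix})
	if err != nil {
		return false
	}
	// The matcher is evaluated against the RPC's method name and its context,
	// which is where outgoing metadata (headers) is read from.
	return m.match(iresolver.RPCInfo{
		Method:  "/grpc.service/Method",
		Context: context.Background(),
	})
}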
@ -39,17 +94,17 @@ func newCompositeMatcher(pm pathMatcherInterface, hms []headerMatcherInterface,
	return &compositeMatcher{pm: pm, hms: hms, fm: fm}
}

-func (a *compositeMatcher) match(info balancer.PickInfo) bool {
-	if a.pm != nil && !a.pm.match(info.FullMethodName) {
+func (a *compositeMatcher) match(info iresolver.RPCInfo) bool {
+	if a.pm != nil && !a.pm.match(info.Method) {
		return false
	}

	// Call headerMatchers even if md is nil, because routes may match
	// non-presence of some headers.
	var md metadata.MD
-	if info.Ctx != nil {
-		md, _ = metadata.FromOutgoingContext(info.Ctx)
-		if extraMD, ok := grpcutil.ExtraMetadata(info.Ctx); ok {
+	if info.Context != nil {
+		md, _ = metadata.FromOutgoingContext(info.Context)
+		if extraMD, ok := grpcutil.ExtraMetadata(info.Context); ok {
			md = metadata.Join(md, extraMD)
			// Remove all binary headers. They are hard to match with. May need
			// to add back if asked by users.
@ -72,35 +127,6 @@ func (a *compositeMatcher) match(info balancer.PickInfo) bool {
	return true
}

-func (a *compositeMatcher) equal(mm *compositeMatcher) bool {
-	if a == mm {
-		return true
-	}
-
-	if a == nil || mm == nil {
-		return false
-	}
-
-	if (a.pm != nil || mm.pm != nil) && (a.pm == nil || !a.pm.equal(mm.pm)) {
-		return false
-	}
-
-	if len(a.hms) != len(mm.hms) {
-		return false
-	}
-	for i := range a.hms {
-		if !a.hms[i].equal(mm.hms[i]) {
-			return false
-		}
-	}
-
-	if (a.fm != nil || mm.fm != nil) && (a.fm == nil || !a.fm.equal(mm.fm)) {
-		return false
-	}
-
-	return true
-}
-
func (a *compositeMatcher) String() string {
	var ret string
	if a.pm != nil {
@ -130,17 +156,6 @@ func (fm *fractionMatcher) match() bool {
	return t <= fm.fraction
}

-func (fm *fractionMatcher) equal(m *fractionMatcher) bool {
-	if fm == m {
-		return true
-	}
-	if fm == nil || m == nil {
-		return false
-	}
-
-	return fm.fraction == m.fraction
-}
-
func (fm *fractionMatcher) String() string {
	return fmt.Sprintf("fraction:%v", fm.fraction)
}
@ -16,7 +16,7 @@
 *
 */

-package xdsrouting
+package resolver

import (
	"fmt"
@ -29,7 +29,6 @@ import (

type headerMatcherInterface interface {
	match(metadata.MD) bool
-	equal(headerMatcherInterface) bool
	String() string
}

@ -62,14 +61,6 @@ func (hem *headerExactMatcher) match(md metadata.MD) bool {
	return v == hem.exact
}

-func (hem *headerExactMatcher) equal(m headerMatcherInterface) bool {
-	mm, ok := m.(*headerExactMatcher)
-	if !ok {
-		return false
-	}
-	return hem.key == mm.key && hem.exact == mm.exact
-}
-
func (hem *headerExactMatcher) String() string {
	return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact)
}
@ -91,14 +82,6 @@ func (hrm *headerRegexMatcher) match(md metadata.MD) bool {
	return hrm.re.MatchString(v)
}

-func (hrm *headerRegexMatcher) equal(m headerMatcherInterface) bool {
-	mm, ok := m.(*headerRegexMatcher)
-	if !ok {
-		return false
-	}
-	return hrm.key == mm.key && hrm.re.String() == mm.re.String()
-}
-
func (hrm *headerRegexMatcher) String() string {
	return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String())
}
@ -123,14 +106,6 @@ func (hrm *headerRangeMatcher) match(md metadata.MD) bool {
	return false
}

-func (hrm *headerRangeMatcher) equal(m headerMatcherInterface) bool {
-	mm, ok := m.(*headerRangeMatcher)
-	if !ok {
-		return false
-	}
-	return hrm.key == mm.key && hrm.start == mm.start && hrm.end == mm.end
-}
-
func (hrm *headerRangeMatcher) String() string {
	return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end)
}
@ -150,14 +125,6 @@ func (hpm *headerPresentMatcher) match(md metadata.MD) bool {
	return present == hpm.present
}

-func (hpm *headerPresentMatcher) equal(m headerMatcherInterface) bool {
-	mm, ok := m.(*headerPresentMatcher)
-	if !ok {
-		return false
-	}
-	return hpm.key == mm.key && hpm.present == mm.present
-}
-
func (hpm *headerPresentMatcher) String() string {
	return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present)
}
@ -179,14 +146,6 @@ func (hpm *headerPrefixMatcher) match(md metadata.MD) bool {
	return strings.HasPrefix(v, hpm.prefix)
}

-func (hpm *headerPrefixMatcher) equal(m headerMatcherInterface) bool {
-	mm, ok := m.(*headerPrefixMatcher)
-	if !ok {
-		return false
-	}
-	return hpm.key == mm.key && hpm.prefix == mm.prefix
-}
-
func (hpm *headerPrefixMatcher) String() string {
	return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix)
}
@ -208,14 +167,6 @@ func (hsm *headerSuffixMatcher) match(md metadata.MD) bool {
	return strings.HasSuffix(v, hsm.suffix)
}

-func (hsm *headerSuffixMatcher) equal(m headerMatcherInterface) bool {
-	mm, ok := m.(*headerSuffixMatcher)
-	if !ok {
-		return false
-	}
-	return hsm.key == mm.key && hsm.suffix == mm.suffix
-}
-
func (hsm *headerSuffixMatcher) String() string {
	return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix)
}
@ -232,14 +183,6 @@ func (i *invertMatcher) match(md metadata.MD) bool {
	return !i.m.match(md)
}

-func (i *invertMatcher) equal(m headerMatcherInterface) bool {
-	mm, ok := m.(*invertMatcher)
-	if !ok {
-		return false
-	}
-	return i.m.equal(mm.m)
-}
-
func (i *invertMatcher) String() string {
	return fmt.Sprintf("invert{%s}", i.m)
}
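The header matchers themselves are unchanged here apart from losing equal(); a small in-package sketch of composing them (the function name, header key, and metadata values are made up):

func exampleHeaderMatch() bool {
	// Invert a prefix match: true whenever x-env does NOT start with "prod".
	m := newInvertMatcher(newHeaderPrefixMatcher("x-env", "prod"))
	md := metadata.Pairs("x-env", "staging-1")
	return m.match(md) // true: the inner prefix matcher fails and the invert flips it.
}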
@ -16,7 +16,7 @@
 *
 */

-package xdsrouting
+package resolver

import (
	"regexp"
@ -16,7 +16,7 @@
 *
 */

-package xdsrouting
+package resolver

import (
	"regexp"
@ -25,7 +25,6 @@ import (

type pathMatcherInterface interface {
	match(path string) bool
-	equal(pathMatcherInterface) bool
	String() string
}

@ -53,14 +52,6 @@ func (pem *pathExactMatcher) match(path string) bool {
	return pem.fullPath == path
}

-func (pem *pathExactMatcher) equal(m pathMatcherInterface) bool {
-	mm, ok := m.(*pathExactMatcher)
-	if !ok {
-		return false
-	}
-	return pem.fullPath == mm.fullPath && pem.caseInsensitive == mm.caseInsensitive
-}
-
func (pem *pathExactMatcher) String() string {
	return "pathExact:" + pem.fullPath
}
@ -89,14 +80,6 @@ func (ppm *pathPrefixMatcher) match(path string) bool {
	return strings.HasPrefix(path, ppm.prefix)
}

-func (ppm *pathPrefixMatcher) equal(m pathMatcherInterface) bool {
-	mm, ok := m.(*pathPrefixMatcher)
-	if !ok {
-		return false
-	}
-	return ppm.prefix == mm.prefix && ppm.caseInsensitive == mm.caseInsensitive
-}
-
func (ppm *pathPrefixMatcher) String() string {
	return "pathPrefix:" + ppm.prefix
}
@ -113,14 +96,6 @@ func (prm *pathRegexMatcher) match(path string) bool {
	return prm.re.MatchString(path)
}

-func (prm *pathRegexMatcher) equal(m pathMatcherInterface) bool {
-	mm, ok := m.(*pathRegexMatcher)
-	if !ok {
-		return false
-	}
-	return prm.re.String() == mm.re.String()
-}
-
func (prm *pathRegexMatcher) String() string {
	return "pathRegex:" + prm.re.String()
}
@ -16,7 +16,7 @@
 *
 */

-package xdsrouting
+package resolver

import (
	"regexp"
@ -16,15 +16,15 @@
 *
 */

-package xdsrouting
+package resolver

import (
	"context"
	"testing"

-	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal/grpcrand"
	"google.golang.org/grpc/internal/grpcutil"
+	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/metadata"
)

@ -33,16 +33,16 @@ func TestAndMatcherMatch(t *testing.T) {
		name string
		pm   pathMatcherInterface
		hm   headerMatcherInterface
-		info balancer.PickInfo
+		info iresolver.RPCInfo
		want bool
	}{
		{
			name: "both match",
			pm:   newPathExactMatcher("/a/b", false),
			hm:   newHeaderExactMatcher("th", "tv"),
-			info: balancer.PickInfo{
-				FullMethodName: "/a/b",
-				Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
+			info: iresolver.RPCInfo{
+				Method:  "/a/b",
+				Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
			},
			want: true,
		},
@ -50,9 +50,9 @@ func TestAndMatcherMatch(t *testing.T) {
			name: "both match with path case insensitive",
			pm:   newPathExactMatcher("/A/B", true),
			hm:   newHeaderExactMatcher("th", "tv"),
-			info: balancer.PickInfo{
-				FullMethodName: "/a/b",
-				Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
+			info: iresolver.RPCInfo{
+				Method:  "/a/b",
+				Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
			},
			want: true,
		},
@ -60,9 +60,9 @@ func TestAndMatcherMatch(t *testing.T) {
			name: "only one match",
			pm:   newPathExactMatcher("/a/b", false),
			hm:   newHeaderExactMatcher("th", "tv"),
-			info: balancer.PickInfo{
-				FullMethodName: "/z/y",
-				Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
+			info: iresolver.RPCInfo{
+				Method:  "/z/y",
+				Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
			},
			want: false,
		},
@ -70,9 +70,9 @@ func TestAndMatcherMatch(t *testing.T) {
			name: "both not match",
			pm:   newPathExactMatcher("/z/y", false),
			hm:   newHeaderExactMatcher("th", "abc"),
-			info: balancer.PickInfo{
-				FullMethodName: "/a/b",
-				Ctx:            metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
+			info: iresolver.RPCInfo{
+				Method:  "/a/b",
+				Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
			},
			want: false,
		},
@ -80,9 +80,9 @@ func TestAndMatcherMatch(t *testing.T) {
			name: "fake header",
			pm:   newPathPrefixMatcher("/", false),
			hm:   newHeaderExactMatcher("content-type", "fake"),
-			info: balancer.PickInfo{
-				FullMethodName: "/a/b",
-				Ctx: grpcutil.WithExtraMetadata(context.Background(), metadata.Pairs(
+			info: iresolver.RPCInfo{
+				Method: "/a/b",
+				Context: grpcutil.WithExtraMetadata(context.Background(), metadata.Pairs(
					"content-type", "fake",
				)),
			},
@ -92,9 +92,9 @@ func TestAndMatcherMatch(t *testing.T) {
			name: "binary header",
			pm:   newPathPrefixMatcher("/", false),
			hm:   newHeaderPresentMatcher("t-bin", true),
-			info: balancer.PickInfo{
-				FullMethodName: "/a/b",
-				Ctx: grpcutil.WithExtraMetadata(
+			info: iresolver.RPCInfo{
+				Method: "/a/b",
+				Context: grpcutil.WithExtraMetadata(
					metadata.NewOutgoingContext(context.Background(), metadata.Pairs("t-bin", "123")), metadata.Pairs(
					"content-type", "fake",
				)),
@ -144,42 +144,3 @@ func TestFractionMatcherMatch(t *testing.T) {
		t.Errorf("match() = %v, want match", matched)
	}
}
-
-func TestCompositeMatcherEqual(t *testing.T) {
-	tests := []struct {
-		name string
-		pm   pathMatcherInterface
-		hms  []headerMatcherInterface
-		fm   *fractionMatcher
-		mm   *compositeMatcher
-		want bool
-	}{
-		{
-			name: "equal",
-			pm:   newPathExactMatcher("/a/b", false),
-			mm:   newCompositeMatcher(newPathExactMatcher("/a/b", false), nil, nil),
-			want: true,
-		},
-		{
-			name: "no path matcher",
-			pm:   nil,
-			mm:   newCompositeMatcher(nil, nil, nil),
-			want: true,
-		},
-		{
-			name: "not equal",
-			pm:   newPathExactMatcher("/a/b", false),
-			fm:   newFractionMatcher(123),
-			mm:   newCompositeMatcher(newPathExactMatcher("/a/b", false), nil, nil),
-			want: false,
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			a := newCompositeMatcher(tt.pm, tt.hms, tt.fm)
-			if got := a.equal(tt.mm); got != tt.want {
-				t.Errorf("equal() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
@ -21,15 +21,18 @@ package resolver
import (
	"encoding/json"
	"fmt"
+	"sync/atomic"

-	wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
-	xdsclient "google.golang.org/grpc/xds/internal/client"
+	"google.golang.org/grpc/codes"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/wrr"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/xds/internal/balancer/clustermanager"
)

const (
	cdsName = "cds_experimental"
-	weightedTargetName = "weighted_target_experimental"
-	xdsRoutingName     = "xds_routing_experimental"
+	xdsClusterManagerName = "xds_cluster_manager_experimental"
)

type serviceConfig struct {
@ -42,74 +45,43 @@ func newBalancerConfig(name string, config interface{}) balancerConfig {
|
||||||
return []map[string]interface{}{{name: config}}
|
return []map[string]interface{}{{name: config}}
|
||||||
}
|
}
|
||||||
|
|
||||||
type weightedCDSBalancerConfig struct {
|
|
||||||
Targets map[string]cdsWithWeight `json:"targets"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type cdsWithWeight struct {
|
|
||||||
Weight uint32 `json:"weight"`
|
|
||||||
ChildPolicy balancerConfig `json:"childPolicy"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type cdsBalancerConfig struct {
|
type cdsBalancerConfig struct {
|
||||||
Cluster string `json:"cluster"`
|
Cluster string `json:"cluster"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type route struct {
|
type xdsChildConfig struct {
|
||||||
Path *string `json:"path,omitempty"`
|
|
||||||
Prefix *string `json:"prefix,omitempty"`
|
|
||||||
Regex *string `json:"regex,omitempty"`
|
|
||||||
CaseInsensitive bool `json:"caseInsensitive"`
|
|
||||||
Headers []*xdsclient.HeaderMatcher `json:"headers,omitempty"`
|
|
||||||
Fraction *wrapperspb.UInt32Value `json:"matchFraction,omitempty"`
|
|
||||||
Action string `json:"action"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type xdsActionConfig struct {
|
|
||||||
ChildPolicy balancerConfig `json:"childPolicy"`
|
ChildPolicy balancerConfig `json:"childPolicy"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type xdsRoutingBalancerConfig struct {
|
type xdsClusterManagerConfig struct {
|
||||||
Action map[string]xdsActionConfig `json:"action"`
|
Children map[string]xdsChildConfig `json:"children"`
|
||||||
Route []*route `json:"route"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *xdsResolver) routesToJSON(routes []*xdsclient.Route) (string, error) {
|
// pruneActiveClusters deletes entries in r.activeClusters with zero
|
||||||
r.updateActions(newActionsFromRoutes(routes))
|
// references.
|
||||||
|
func (r *xdsResolver) pruneActiveClusters() {
|
||||||
// Generate routes.
|
for cluster, ci := range r.activeClusters {
|
||||||
var rts []*route
|
if atomic.LoadInt32(&ci.refCount) == 0 {
|
||||||
for _, rt := range routes {
|
delete(r.activeClusters, cluster)
|
||||||
t := &route{
|
|
||||||
Path: rt.Path,
|
|
||||||
Prefix: rt.Prefix,
|
|
||||||
Regex: rt.Regex,
|
|
||||||
Headers: rt.Headers,
|
|
||||||
CaseInsensitive: rt.CaseInsensitive,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if f := rt.Fraction; f != nil {
|
|
||||||
t.Fraction = &wrapperspb.UInt32Value{Value: *f}
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Action = r.getActionAssignedName(rt.Action)
|
|
||||||
rts = append(rts, t)
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Generate actions.
|
// serviceConfigJSON produces a service config in JSON format representing all
|
||||||
action := make(map[string]xdsActionConfig)
|
// the clusters referenced in activeClusters. This includes clusters with zero
|
||||||
for _, act := range r.actions {
|
// references, so they must be pruned first.
|
||||||
action[act.assignedName] = xdsActionConfig{
|
func serviceConfigJSON(activeClusters map[string]*clusterInfo) (string, error) {
|
||||||
ChildPolicy: weightedClusterToBalancerConfig(act.clustersWithWeights),
|
// Generate children (all entries in activeClusters).
|
||||||
|
children := make(map[string]xdsChildConfig)
|
||||||
|
for cluster := range activeClusters {
|
||||||
|
children[cluster] = xdsChildConfig{
|
||||||
|
ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sc := serviceConfig{
|
sc := serviceConfig{
|
||||||
LoadBalancingConfig: newBalancerConfig(
|
LoadBalancingConfig: newBalancerConfig(
|
||||||
xdsRoutingName, xdsRoutingBalancerConfig{
|
xdsClusterManagerName, xdsClusterManagerConfig{Children: children},
|
||||||
Route: rts,
|
|
||||||
Action: action,
|
|
||||||
},
|
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
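Note: for orientation, the following is an illustrative sketch (not part of the commit) of the service config shape that serviceConfigJSON produces for two active clusters; it mirrors the JSON constants used by the resolver tests later in this diff.

// Illustrative only: the cluster_manager service config generated for two
// active clusters named "cluster_1" and "cluster_2".
const exampleClusterManagerSC = `{"loadBalancingConfig":[{
  "xds_cluster_manager_experimental":{
    "children":{
      "cluster_1":{"childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]},
      "cluster_2":{"childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}]}
    }
  }}]}`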
@@ -120,25 +92,136 @@ func (r *xdsResolver) routesToJSON(routes []*xdsclient.Route) (string, error) {
return string(bs), nil
}

-func weightedClusterToBalancerConfig(wc map[string]uint32) balancerConfig {
+type route struct {
-// Even if WeightedCluster has only one entry, we still use weighted_target
+action wrr.WRR
-// as top level balancer, to avoid switching top policy between CDS and
+m *compositeMatcher // converted from route matchers
-// weighted_target, causing TCP connection to be recreated.
-targets := make(map[string]cdsWithWeight)
-for name, weight := range wc {
-targets[name] = cdsWithWeight{
-Weight: weight,
-ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: name}),
-}
-}
-bc := newBalancerConfig(
-weightedTargetName, weightedCDSBalancerConfig{
-Targets: targets,
-},
-)
-return bc
}

-func (r *xdsResolver) serviceUpdateToJSON(su serviceUpdate) (string, error) {
+func (r route) String() string {
-return r.routesToJSON(su.Routes)
+return r.m.String() + "->" + fmt.Sprint(r.action)
+}

+type configSelector struct {
+r *xdsResolver
+routes []route
+clusters map[string]*clusterInfo
+}

+var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found")

+func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+var action wrr.WRR
+// Loop through routes in order and select first match.
+for _, rt := range cs.routes {
+if rt.m.match(rpcInfo) {
+action = rt.action
+break
+}
+}
+if action == nil {
+return nil, errNoMatchedRouteFound
+}
+cluster, ok := action.Next().(string)
+if !ok {
+return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster)
+}
+// Add a ref to the selected cluster, as this RPC needs this cluster until
+// it is committed.
+ref := &cs.clusters[cluster].refCount
+atomic.AddInt32(ref, 1)
+return &iresolver.RPCConfig{
+// Communicate to the LB policy the chosen cluster.
+Context: clustermanager.SetPickedCluster(rpcInfo.Context, cluster),
+OnCommitted: func() {
+// When the RPC is committed, the cluster is no longer required.
+// Decrease its ref.
+if v := atomic.AddInt32(ref, -1); v == 0 {
+// This entry will be removed from activeClusters when
+// producing the service config for the empty update.
+select {
+case cs.r.updateCh <- suWithError{emptyUpdate: true}:
+default:
+}
+}
+},
+}, nil
+}

+// incRefs increments refs of all clusters referenced by this config selector.
+func (cs *configSelector) incRefs() {
+// Loops over cs.clusters, but these are pointers to entries in
+// activeClusters.
+for _, ci := range cs.clusters {
+atomic.AddInt32(&ci.refCount, 1)
+}
+}

+// decRefs decrements refs of all clusters referenced by this config selector.
+func (cs *configSelector) decRefs() {
+// The resolver's old configSelector may be nil. Handle that here.
+if cs == nil {
+return
+}
+// If any refs drop to zero, we'll need a service config update to delete
+// the cluster.
+needUpdate := false
+// Loops over cs.clusters, but these are pointers to entries in
+// activeClusters.
+for _, ci := range cs.clusters {
+if v := atomic.AddInt32(&ci.refCount, -1); v == 0 {
+needUpdate = true
+}
+}
+// We stop the old config selector immediately after sending a new config
+// selector; we need another update to delete clusters from the config (if
+// we don't have another update pending already).
+if needUpdate {
+select {
+case cs.r.updateCh <- suWithError{emptyUpdate: true}:
+default:
+}
+}
+}

+// A global for testing.
+var newWRR = wrr.NewRandom

+// newConfigSelector creates the config selector for su; may add entries to
+// r.activeClusters for previously-unseen clusters.
+func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, error) {
+cs := &configSelector{
+r: r,
+routes: make([]route, len(su.Routes)),
+clusters: make(map[string]*clusterInfo),
+}

+for i, rt := range su.Routes {
+action := newWRR()
+for cluster, weight := range rt.Action {
+action.Add(cluster, int64(weight))

+// Initialize entries in cs.clusters map, creating entries in
+// r.activeClusters as necessary. Set to zero as they will be
+// incremented by incRefs.
+ci := r.activeClusters[cluster]
+if ci == nil {
+ci = &clusterInfo{refCount: 0}
+r.activeClusters[cluster] = ci
+}
+cs.clusters[cluster] = ci
+}
+cs.routes[i].action = action

+var err error
+cs.routes[i].m, err = routeToMatcher(rt)
+if err != nil {
+return nil, err
+}
+}
+return cs, nil
+}

+type clusterInfo struct {
+// number of references to this cluster; accessed atomically
+refCount int32
}
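Note: as a sketch of how a caller is expected to drive the config selector's ref-counting (SelectConfig takes a ref on the picked cluster, OnCommitted releases it), the fragment below mirrors the test usage added later in this commit. The helper name pickClusterOnce is illustrative, not part of the change; error handling is abbreviated.

// Sketch only: one RPC's pass through the config selector.
func pickClusterOnce(ctx context.Context, cs *configSelector) (string, error) {
	res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx})
	if err != nil {
		return "", err // e.g. errNoMatchedRouteFound
	}
	// The cluster_manager LB policy reads the picked cluster from the context.
	cluster := clustermanager.GetPickedClusterForTesting(res.Context)
	// Committing the RPC releases the ref taken by SelectConfig; once all refs
	// for a cluster reach zero, an empty update prunes it from the config.
	res.OnCommitted()
	return cluster, nil
}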
@@ -1,186 +0,0 @@
-/*
-*
-* Copyright 2020 gRPC authors.
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*
-*/

-package resolver

-import (
-"fmt"
-"math"
-"sort"
-"strconv"

-"google.golang.org/grpc/internal/grpcrand"
-xdsclient "google.golang.org/grpc/xds/internal/client"
-)

-type actionWithAssignedName struct {
-// cluster:weight, "A":40, "B":60
-clustersWithWeights map[string]uint32
-// clusterNames, without weights, sorted and hashed, "A_B_"
-clusterNames string
-// The assigned name, clusters plus a random number, "A_B_1"
-assignedName string
-// randomNumber is the number appended to assignedName.
-randomNumber int64
-}

-// newActionsFromRoutes gets actions from the routes, and turns them into a map
-// keyed by the hash of the clusters.
-//
-// In the returned map, all actions don't have assignedName. The assignedName
-// will be filled in after comparing the new actions with the existing actions,
-// so when a new and old action only diff in weights, the new action can reuse
-// the old action's name.
-//
-// from
-// {B:60, A:40}, {A:30, B:70}, {B:90, C:10}
-//
-// to
-// A40_B60_: {{A:40, B:60}, "A_B_", ""}
-// A30_B70_: {{A:30, B:70}, "A_B_", ""}
-// B90_C10_: {{B:90, C:10}, "B_C_", ""}
-func newActionsFromRoutes(routes []*xdsclient.Route) map[string]actionWithAssignedName {
-newActions := make(map[string]actionWithAssignedName)
-for _, route := range routes {
-var clusterNames []string
-for n := range route.Action {
-clusterNames = append(clusterNames, n)
-}

-// Sort names to be consistent.
-sort.Strings(clusterNames)
-clustersOnly := ""
-clustersWithWeight := ""
-for _, c := range clusterNames {
-// Generates A_B_
-clustersOnly = clustersOnly + c + "_"
-// Generates A40_B60_
-clustersWithWeight = clustersWithWeight + c + strconv.FormatUint(uint64(route.Action[c]), 10) + "_"
-}

-if _, ok := newActions[clustersWithWeight]; !ok {
-newActions[clustersWithWeight] = actionWithAssignedName{
-clustersWithWeights: route.Action,
-clusterNames: clustersOnly,
-}
-}
-}
-return newActions
-}

-// updateActions takes a new map of actions, and updates the existing action map in the resolver.
-//
-// In the old map, all actions have assignedName set.
-// In the new map, all actions have no assignedName.
-//
-// After the update, the action map is updated to have all actions from the new
-// map, with assignedName:
-// - if the new action exists in old, get the old name
-// - if the new action doesn't exist in old
-// - if there is an old action that will be removed, and has the same set of
-// clusters, reuse the old action's name
-// - otherwise, generate a new name
-func (r *xdsResolver) updateActions(newActions map[string]actionWithAssignedName) {
-if r.actions == nil {
-r.actions = make(map[string]actionWithAssignedName)
-}

-// Delete actions from existingActions if they are not in newActions. Keep
-// the removed actions in a map, with key as clusterNames without weights,
-// so their assigned names can be reused.
-existingActions := r.actions
-actionsRemoved := make(map[string][]string)
-for actionHash, act := range existingActions {
-if _, ok := newActions[actionHash]; !ok {
-actionsRemoved[act.clusterNames] = append(actionsRemoved[act.clusterNames], act.assignedName)
-delete(existingActions, actionHash)
-}
-}

-// Find actions in newActions but not in oldActions. Add them, and try to
-// reuse assigned names from actionsRemoved.
-if r.usedActionNameRandomNumber == nil {
-r.usedActionNameRandomNumber = make(map[int64]bool)
-}
-for actionHash, act := range newActions {
-if _, ok := existingActions[actionHash]; !ok {
-if assignedNamed, ok := actionsRemoved[act.clusterNames]; ok {
-// Reuse the first assigned name from actionsRemoved.
-act.assignedName = assignedNamed[0]
-// If there are more names to reuse after this, update the slice
-// in the map. Otherwise, remove the entry from the map.
-if len(assignedNamed) > 1 {
-actionsRemoved[act.clusterNames] = assignedNamed[1:]
-} else {
-delete(actionsRemoved, act.clusterNames)
-}
-existingActions[actionHash] = act
-continue
-}
-// Generate a new name.
-act.randomNumber = r.nextAssignedNameRandomNumber()
-act.assignedName = fmt.Sprintf("%s%d", act.clusterNames, act.randomNumber)
-existingActions[actionHash] = act
-}
-}

-// Delete entry from nextIndex if all actions with the clusters are removed.
-remainingRandomNumbers := make(map[int64]bool)
-for _, act := range existingActions {
-remainingRandomNumbers[act.randomNumber] = true
-}
-r.usedActionNameRandomNumber = remainingRandomNumbers
-}

-var grpcrandInt63n = grpcrand.Int63n

-func (r *xdsResolver) nextAssignedNameRandomNumber() int64 {
-for {
-t := grpcrandInt63n(math.MaxInt32)
-if !r.usedActionNameRandomNumber[t] {
-return t
-}
-}
-}

-// getActionAssignedName hashes the clusters from the action, and find the
-// assigned action name. The assigned action names are kept in r.actions, with
-// the clusters name hash as map key.
-//
-// The assigned action name is not simply the hash. For example, the hash can be
-// "A40_B60_", but the assigned name can be "A_B_0". It's this way so the action
-// can be reused if only weights are changing.
-func (r *xdsResolver) getActionAssignedName(action map[string]uint32) string {
-var clusterNames []string
-for n := range action {
-clusterNames = append(clusterNames, n)
-}
-// Hash cluster names. Sort names to be consistent.
-sort.Strings(clusterNames)
-clustersWithWeight := ""
-for _, c := range clusterNames {
-// Generates hash "A40_B60_".
-clustersWithWeight = clustersWithWeight + c + strconv.FormatUint(uint64(action[c]), 10) + "_"
-}
-// Look in r.actions for the assigned action name.
-if act, ok := r.actions[clustersWithWeight]; ok {
-return act.assignedName
-}
-r.logger.Warningf("no assigned name found for action %v", action)
-return ""
-}
@@ -1,356 +0,0 @@
-/*
-*
-* Copyright 2020 gRPC authors.
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*
-*/

-package resolver

-import (
-"testing"

-"github.com/google/go-cmp/cmp"
-xdsclient "google.golang.org/grpc/xds/internal/client"
-)

-func TestNewActionsFromRoutes(t *testing.T) {
-tests := []struct {
-name string
-routes []*xdsclient.Route
-want map[string]actionWithAssignedName
-}{
-{
-name: "temp",
-routes: []*xdsclient.Route{
-{Action: map[string]uint32{"B": 60, "A": 40}},
-{Action: map[string]uint32{"A": 30, "B": 70}},
-{Action: map[string]uint32{"B": 90, "C": 10}},
-},
-want: map[string]actionWithAssignedName{
-"A40_B60_": {map[string]uint32{"A": 40, "B": 60}, "A_B_", "", 0},
-"A30_B70_": {map[string]uint32{"A": 30, "B": 70}, "A_B_", "", 0},
-"B90_C10_": {map[string]uint32{"B": 90, "C": 10}, "B_C_", "", 0},
-},
-},
-}

-cmpOpts := []cmp.Option{cmp.AllowUnexported(actionWithAssignedName{})}

-for _, tt := range tests {
-t.Run(tt.name, func(t *testing.T) {
-if got := newActionsFromRoutes(tt.routes); !cmp.Equal(got, tt.want, cmpOpts...) {
-t.Errorf("newActionsFromRoutes() got unexpected result, diff %v", cmp.Diff(got, tt.want, cmpOpts...))
-}
-})
-}
-}

-func TestRemoveOrReuseName(t *testing.T) {
-tests := []struct {
-name string
-oldActions map[string]actionWithAssignedName
-oldRandNums map[int64]bool
-newActions map[string]actionWithAssignedName
-wantActions map[string]actionWithAssignedName
-wantRandNums map[int64]bool
-}{
-{
-name: "add same cluster",
-oldActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-},
-oldRandNums: map[int64]bool{
-0: true,
-},
-newActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-},
-"a10_b50_c40_": {
-clustersWithWeights: map[string]uint32{"a": 10, "b": 50, "c": 40},
-clusterNames: "a_b_c_",
-},
-},
-wantActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-"a10_b50_c40_": {
-clustersWithWeights: map[string]uint32{"a": 10, "b": 50, "c": 40},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_1000",
-randomNumber: 1000,
-},
-},
-wantRandNums: map[int64]bool{
-0: true,
-1000: true,
-},
-},
-{
-name: "delete same cluster",
-oldActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-"a10_b50_c40_": {
-clustersWithWeights: map[string]uint32{"a": 10, "b": 50, "c": 40},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_1",
-randomNumber: 1,
-},
-},
-oldRandNums: map[int64]bool{
-0: true,
-1: true,
-},
-newActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-},
-},
-wantActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-},
-wantRandNums: map[int64]bool{
-0: true,
-},
-},
-{
-name: "add new clusters",
-oldActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-},
-oldRandNums: map[int64]bool{
-0: true,
-},
-newActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-},
-"a50_b50_": {
-clustersWithWeights: map[string]uint32{"a": 50, "b": 50},
-clusterNames: "a_b_",
-},
-},
-wantActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-"a50_b50_": {
-clustersWithWeights: map[string]uint32{"a": 50, "b": 50},
-clusterNames: "a_b_",
-assignedName: "a_b_1000",
-randomNumber: 1000,
-},
-},
-wantRandNums: map[int64]bool{
-0: true,
-1000: true,
-},
-},
-{
-name: "reuse",
-oldActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-},
-oldRandNums: map[int64]bool{
-0: true,
-},
-newActions: map[string]actionWithAssignedName{
-"a10_b50_c40_": {
-clustersWithWeights: map[string]uint32{"a": 10, "b": 50, "c": 40},
-clusterNames: "a_b_c_",
-},
-},
-wantActions: map[string]actionWithAssignedName{
-"a10_b50_c40_": {
-clustersWithWeights: map[string]uint32{"a": 10, "b": 50, "c": 40},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-},
-wantRandNums: map[int64]bool{
-0: true,
-},
-},
-{
-name: "add and reuse",
-oldActions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-"a10_b50_c40_": {
-clustersWithWeights: map[string]uint32{"a": 10, "b": 50, "c": 40},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_1",
-randomNumber: 1,
-},
-"a50_b50_": {
-clustersWithWeights: map[string]uint32{"a": 50, "b": 50},
-clusterNames: "a_b_",
-assignedName: "a_b_2",
-randomNumber: 2,
-},
-},
-oldRandNums: map[int64]bool{
-0: true,
-1: true,
-2: true,
-},
-newActions: map[string]actionWithAssignedName{
-"a10_b50_c40_": {
-clustersWithWeights: map[string]uint32{"a": 10, "b": 50, "c": 40},
-clusterNames: "a_b_c_",
-},
-"a30_b30_c40_": {
-clustersWithWeights: map[string]uint32{"a": 30, "b": 30, "c": 40},
-clusterNames: "a_b_c_",
-},
-"c50_d50_": {
-clustersWithWeights: map[string]uint32{"c": 50, "d": 50},
-clusterNames: "c_d_",
-},
-},
-wantActions: map[string]actionWithAssignedName{
-"a10_b50_c40_": {
-clustersWithWeights: map[string]uint32{"a": 10, "b": 50, "c": 40},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_1",
-randomNumber: 1,
-},
-"a30_b30_c40_": {
-clustersWithWeights: map[string]uint32{"a": 30, "b": 30, "c": 40},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-randomNumber: 0,
-},
-"c50_d50_": {
-clustersWithWeights: map[string]uint32{"c": 50, "d": 50},
-clusterNames: "c_d_",
-assignedName: "c_d_1000",
-randomNumber: 1000,
-},
-},
-wantRandNums: map[int64]bool{
-0: true,
-1: true,
-1000: true,
-},
-},
-}
-cmpOpts := []cmp.Option{cmp.AllowUnexported(actionWithAssignedName{})}
-for _, tt := range tests {
-t.Run(tt.name, func(t *testing.T) {
-defer replaceRandNumGenerator(1000)()
-r := &xdsResolver{
-actions: tt.oldActions,
-usedActionNameRandomNumber: tt.oldRandNums,
-}
-r.updateActions(tt.newActions)
-if !cmp.Equal(r.actions, tt.wantActions, cmpOpts...) {
-t.Errorf("removeOrReuseName() got unexpected actions, diff %v", cmp.Diff(r.actions, tt.wantActions, cmpOpts...))
-}
-if !cmp.Equal(r.usedActionNameRandomNumber, tt.wantRandNums) {
-t.Errorf("removeOrReuseName() got unexpected nextIndex, diff %v", cmp.Diff(r.usedActionNameRandomNumber, tt.wantRandNums))
-}
-})
-}
-}

-func TestGetActionAssignedName(t *testing.T) {
-tests := []struct {
-name string
-actions map[string]actionWithAssignedName
-action map[string]uint32
-want string
-}{
-{
-name: "good",
-actions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-},
-},
-action: map[string]uint32{"a": 20, "b": 30, "c": 50},
-want: "a_b_c_0",
-},
-{
-name: "two",
-actions: map[string]actionWithAssignedName{
-"a20_b30_c50_": {
-clustersWithWeights: map[string]uint32{"a": 20, "b": 30, "c": 50},
-clusterNames: "a_b_c_",
-assignedName: "a_b_c_0",
-},
-"c50_d50_": {
-clustersWithWeights: map[string]uint32{"c": 50, "d": 50},
-clusterNames: "c_d_",
-assignedName: "c_d_0",
-},
-},
-action: map[string]uint32{"c": 50, "d": 50},
-want: "c_d_0",
-},
-}
-for _, tt := range tests {
-t.Run(tt.name, func(t *testing.T) {
-r := &xdsResolver{
-actions: tt.actions,
-}
-if got := r.getActionAssignedName(tt.action); got != tt.want {
-t.Errorf("getActionAssignedName() = %v, want %v", got, tt.want)
-}
-})
-}
-}
@@ -22,346 +22,22 @@ import (
"testing"

"github.com/google/go-cmp/cmp"
-"google.golang.org/grpc/internal"
+_ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config
-"google.golang.org/grpc/internal/grpcrand"
-"google.golang.org/grpc/serviceconfig"
-_ "google.golang.org/grpc/xds/internal/balancer/weightedtarget"
-_ "google.golang.org/grpc/xds/internal/balancer/xdsrouting"
-xdsclient "google.golang.org/grpc/xds/internal/client"
)

-const (
+func (s) TestPruneActiveClusters(t *testing.T) {
-testCluster1 = "test-cluster-1"
+r := &xdsResolver{activeClusters: map[string]*clusterInfo{
-testOneClusterOnlyJSON = `{"loadBalancingConfig":[{
+"zero": {refCount: 0},
-"xds_routing_experimental":{
+"one": {refCount: 1},
-"action":{
+"two": {refCount: 2},
-"test-cluster-1_0":{
+"anotherzero": {refCount: 0},
-"childPolicy":[{
+}}
-"weighted_target_experimental":{
+want := map[string]*clusterInfo{
-"targets":{
+"one": {refCount: 1},
-"test-cluster-1":{
+"two": {refCount: 2},
-"weight":1,
-"childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}]
-}
-}}}]
-}
-},
-"route":[{"prefix":"","action":"test-cluster-1_0"}]
-}}]}`
-testWeightedCDSJSON = `{"loadBalancingConfig":[{
-"xds_routing_experimental":{
-"action":{
-"cluster_1_cluster_2_1":{
-"childPolicy":[{
-"weighted_target_experimental":{
-"targets":{
-"cluster_1":{
-"weight":75,
-"childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
-},
-"cluster_2":{
-"weight":25,
-"childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}]
-}
-}}}]
-}
-},
-"route":[{"prefix":"","action":"cluster_1_cluster_2_1"}]
-}}]}`

-testRoutingJSON = `{"loadBalancingConfig":[{
-"xds_routing_experimental": {
-"action":{
-"cluster_1_cluster_2_0":{
-"childPolicy":[{
-"weighted_target_experimental": {
-"targets": {
-"cluster_1" : {
-"weight":75,
-"childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
-},
-"cluster_2" : {
-"weight":25,
-"childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}]
-}
-}
-}
-}]
-}
-},

-"route":[{
-"path":"/service_1/method_1",
-"action":"cluster_1_cluster_2_0"
-}]
-}
-}]}
-`
-testRoutingAllMatchersJSON = `{"loadBalancingConfig":[{
-"xds_routing_experimental": {
-"action":{
-"cluster_1_0":{
-"childPolicy":[{
-"weighted_target_experimental": {
-"targets": {
-"cluster_1" : {
-"weight":1,
-"childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
-}
-}
-}
-}]
-},
-"cluster_2_0":{
-"childPolicy":[{
-"weighted_target_experimental": {
-"targets": {
-"cluster_2" : {
-"weight":1,
-"childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}]
-}
-}
-}
-}]
-},
-"cluster_3_0":{
-"childPolicy":[{
-"weighted_target_experimental": {
-"targets": {
-"cluster_3" : {
-"weight":1,
-"childPolicy":[{"cds_experimental":{"cluster":"cluster_3"}}]
-}
-}
-}
-}]
-}
-},

-"route":[{
-"path":"/service_1/method_1",
-"action":"cluster_1_0"
-},
-{
-"prefix":"/service_2/method_1",
-"caseInsensitive":true,
-"action":"cluster_1_0"
-},
-{
-"regex":"^/service_2/method_3$",
-"action":"cluster_1_0"
-},
-{
-"prefix":"",
-"headers":[{"name":"header-1", "exactMatch":"value-1", "invertMatch":true}],
-"action":"cluster_2_0"
-},
-{
-"prefix":"",
-"headers":[{"name":"header-1", "regexMatch":"^value-1$"}],
-"action":"cluster_2_0"
-},
-{
-"prefix":"",
-"headers":[{"name":"header-1", "rangeMatch":{"start":-1, "end":7}}],
-"action":"cluster_3_0"
-},
-{
-"prefix":"",
-"headers":[{"name":"header-1", "presentMatch":true}],
-"action":"cluster_3_0"
-},
-{
-"prefix":"",
-"headers":[{"name":"header-1", "prefixMatch":"value-1"}],
-"action":"cluster_2_0"
-},
-{
-"prefix":"",
-"headers":[{"name":"header-1", "suffixMatch":"value-1"}],
-"action":"cluster_2_0"
-},
-{
-"prefix":"",
-"matchFraction":{"value": 31415},
-"action":"cluster_3_0"
-}]
-}
-}]}
-`
-)

-func TestRoutesToJSON(t *testing.T) {
-tests := []struct {
-name string
-routes []*xdsclient.Route
-wantJSON string
-wantErr bool
-}{
-{
-name: "one route",
-routes: []*xdsclient.Route{{
-Path: newStringP("/service_1/method_1"),
-Action: map[string]uint32{"cluster_1": 75, "cluster_2": 25},
-}},
-wantJSON: testRoutingJSON,
-wantErr: false,
-},
-{
-name: "all matchers",
-routes: []*xdsclient.Route{
-{
-Path: newStringP("/service_1/method_1"),
-Action: map[string]uint32{"cluster_1": 1},
-},
-{
-Prefix: newStringP("/service_2/method_1"),
-CaseInsensitive: true,
-Action: map[string]uint32{"cluster_1": 1},
-},
-{
-Regex: newStringP("^/service_2/method_3$"),
-Action: map[string]uint32{"cluster_1": 1},
-},
-{
-Prefix: newStringP(""),
-Headers: []*xdsclient.HeaderMatcher{{
-Name: "header-1",
-InvertMatch: newBoolP(true),
-ExactMatch: newStringP("value-1"),
-}},
-Action: map[string]uint32{"cluster_2": 1},
-},
-{
-Prefix: newStringP(""),
-Headers: []*xdsclient.HeaderMatcher{{
-Name: "header-1",
-RegexMatch: newStringP("^value-1$"),
-}},
-Action: map[string]uint32{"cluster_2": 1},
-},
-{
-Prefix: newStringP(""),
-Headers: []*xdsclient.HeaderMatcher{{
-Name: "header-1",
-RangeMatch: &xdsclient.Int64Range{Start: -1, End: 7},
-}},
-Action: map[string]uint32{"cluster_3": 1},
-},
-{
-Prefix: newStringP(""),
-Headers: []*xdsclient.HeaderMatcher{{
-Name: "header-1",
-PresentMatch: newBoolP(true),
-}},
-Action: map[string]uint32{"cluster_3": 1},
-},
-{
-Prefix: newStringP(""),
-Headers: []*xdsclient.HeaderMatcher{{
-Name: "header-1",
-PrefixMatch: newStringP("value-1"),
-}},
-Action: map[string]uint32{"cluster_2": 1},
-},
-{
-Prefix: newStringP(""),
-Headers: []*xdsclient.HeaderMatcher{{
-Name: "header-1",
-SuffixMatch: newStringP("value-1"),
-}},
-Action: map[string]uint32{"cluster_2": 1},
-},
-{
-Prefix: newStringP(""),
-Fraction: newUint32P(31415),
-Action: map[string]uint32{"cluster_3": 1},
-},
-},
-wantJSON: testRoutingAllMatchersJSON,
-wantErr: false,
-},
}
-for _, tt := range tests {
+r.pruneActiveClusters()
-t.Run(tt.name, func(t *testing.T) {
+if d := cmp.Diff(r.activeClusters, want, cmp.AllowUnexported(clusterInfo{})); d != "" {
-// Note this random number function only generates 0. This is
+t.Fatalf("r.activeClusters = %v; want %v\nDiffs: %v", r.activeClusters, want, d)
-// because the test doesn't handle action update, and there's only
-// one action for each cluster bundle.
-//
-// This is necessary so the output is deterministic.
-grpcrandInt63n = func(int64) int64 { return 0 }
-defer func() { grpcrandInt63n = grpcrand.Int63n }()

-gotJSON, err := (&xdsResolver{}).routesToJSON(tt.routes)
-if err != nil {
-t.Errorf("routesToJSON returned error: %v", err)
-return
-}

-gotParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(gotJSON)
-wantParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(tt.wantJSON)

-if !internal.EqualServiceConfigForTesting(gotParsed.Config, wantParsed.Config) {
-t.Errorf("serviceUpdateToJSON() = %v, want %v", gotJSON, tt.wantJSON)
-t.Error("gotParsed: ", cmp.Diff(nil, gotParsed))
-t.Error("wantParsed: ", cmp.Diff(nil, wantParsed))
-}
-})
}
}

-func TestServiceUpdateToJSON(t *testing.T) {
-tests := []struct {
-name string
-su serviceUpdate
-wantJSON string
-wantErr bool
-}{
-{
-name: "routing",
-su: serviceUpdate{
-Routes: []*xdsclient.Route{{
-Path: newStringP("/service_1/method_1"),
-Action: map[string]uint32{"cluster_1": 75, "cluster_2": 25},
-}},
-},
-wantJSON: testRoutingJSON,
-wantErr: false,
-},
-}
-for _, tt := range tests {
-t.Run(tt.name, func(t *testing.T) {
-defer replaceRandNumGenerator(0)()
-gotJSON, err := (&xdsResolver{}).serviceUpdateToJSON(tt.su)
-if err != nil {
-t.Errorf("serviceUpdateToJSON returned error: %v", err)
-return
-}

-gotParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(gotJSON)
-wantParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(tt.wantJSON)

-if !internal.EqualServiceConfigForTesting(gotParsed.Config, wantParsed.Config) {
-t.Errorf("serviceUpdateToJSON() = %v, want %v", gotJSON, tt.wantJSON)
-t.Error("gotParsed: ", cmp.Diff(nil, gotParsed))
-t.Error("wantParsed: ", cmp.Diff(nil, wantParsed))
-}
-})
-}
-}

-// Two updates to the same resolver, test that action names are reused.
-func TestServiceUpdateToJSON_TwoConfig_UpdateActions(t *testing.T) {
-}

-func newStringP(s string) *string {
-return &s
-}

-func newBoolP(b bool) *bool {
-return &b
-}

-func newUint32P(i uint32) *uint32 {
-return &i
-}
@@ -144,6 +144,10 @@ func verifyServiceUpdate(ctx context.Context, updateCh *testutils.Channel, wantU
return nil
}

+func newStringP(s string) *string {
+return &s
+}

// TestServiceWatch covers the cases:
// - an update is received after a watch()
// - an update with routes received
@@ -26,6 +26,7 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"

+iresolver "google.golang.org/grpc/internal/resolver"
xdsclient "google.golang.org/grpc/xds/internal/client"
)

@@ -46,10 +47,11 @@ type xdsResolverBuilder struct{}
// time an xds resolver is built.
func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
r := &xdsResolver{
target: t,
cc: cc,
closed: grpcsync.NewEvent(),
updateCh: make(chan suWithError, 1),
+activeClusters: make(map[string]*clusterInfo),
}
r.logger = prefixLogger((r))
r.logger.Infof("Creating resolver for target: %+v", t)

@@ -88,8 +90,9 @@ type xdsClientInterface interface {
// suWithError wraps the ServiceUpdate and error received through a watch API
// callback, so that it can pushed onto the update channel as a single entity.
type suWithError struct {
su serviceUpdate
-err error
+emptyUpdate bool
+err error
}

// xdsResolver implements the resolver.Resolver interface.

@@ -112,15 +115,11 @@ type xdsResolver struct {
// cancelWatch is the function to cancel the watcher.
cancelWatch func()

-// actions is a map from hash of weighted cluster, to the weighted cluster
+// activeClusters is a map from cluster name to a ref count. Only read or
-// map, and it's assigned name. E.g.
+// written during a service update (synchronous).
-// "A40_B60_": {{A:40, B:60}, "A_B_", "A_B_0"}
+activeClusters map[string]*clusterInfo
-// "A30_B70_": {{A:30, B:70}, "A_B_", "A_B_1"}
-// "B90_C10_": {{B:90, C:10}, "B_C_", "B_C_0"}
+curConfigSelector *configSelector
-actions map[string]actionWithAssignedName
-// usedActionNameRandomNumber contains random numbers that have been used in
-// assigned names, to avoid collision.
-usedActionNameRandomNumber map[int64]bool
}

// run is a long running goroutine which blocks on receiving service updates

@@ -141,23 +140,55 @@ func (r *xdsResolver) run() {
r.cc.UpdateState(resolver.State{
ServiceConfig: r.cc.ParseServiceConfig("{}"),
})
+// Dereference the active config selector, if one exists.
+r.curConfigSelector.decRefs()
+r.curConfigSelector = nil
continue
}
// Send error to ClientConn, and balancers, if error is not
-// resource not found.
+// resource not found. No need to update resolver state if we
+// can keep using the old config.
r.cc.ReportError(update.err)
continue
}
-sc, err := r.serviceUpdateToJSON(update.su)
+var cs *configSelector
+if !update.emptyUpdate {
+// Create the config selector for this update.
+var err error
+if cs, err = r.newConfigSelector(update.su); err != nil {
+r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.target.Endpoint, r.client, err)
+r.cc.ReportError(err)
+continue
+}
+} else {
+// Empty update; use the existing config selector.
+cs = r.curConfigSelector
+}
+// Account for this config selector's clusters.
+cs.incRefs()
+// Delete entries from r.activeClusters with zero references;
+// otherwise serviceConfigJSON will generate a config including
+// them.
+r.pruneActiveClusters()
+// Produce the service config.
+sc, err := serviceConfigJSON(r.activeClusters)
if err != nil {
-r.logger.Warningf("failed to convert update to service config: %v", err)
+// JSON marshal error; should never happen.
+r.logger.Errorf("%v", err)
r.cc.ReportError(err)
+cs.decRefs()
continue
}
r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, sc)
-r.cc.UpdateState(resolver.State{
+// Send the update to the ClientConn.
+state := iresolver.SetConfigSelector(resolver.State{
ServiceConfig: r.cc.ParseServiceConfig(sc),
-})
+}, cs)
+r.cc.UpdateState(state)
+// Decrement references to the old config selector and assign the
+// new one as the current one.
+r.curConfigSelector.decRefs()
+r.curConfigSelector = cs
}
}
}
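Note: as a sketch of the three kinds of entries that can now land on the resolver's update channel after this change (the field names come from the suWithError struct above), the snippet below is illustrative only; in the committed code the emptyUpdate send happens inside a non-blocking select from the config selector's ref-counting paths.

// Sketch only: producers of suWithError after this commit.
r.updateCh <- suWithError{su: su}             // a new service update from the watcher
r.updateCh <- suWithError{err: err}           // a watch error
r.updateCh <- suWithError{emptyUpdate: true}  // regenerate the config only, to prune unused clusters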
@@ -170,7 +201,7 @@ func (r *xdsResolver) handleServiceUpdate(su serviceUpdate, err error) {
// Do not pass updates to the ClientConn once the resolver is closed.
return
}
-r.updateCh <- suWithError{su, err}
+r.updateCh <- suWithError{su: su, err: err}
}

// ResolveNow is a no-op at this point.
@@ -21,6 +21,7 @@ package resolver
import (
"context"
"errors"
+"reflect"
"testing"
"time"

@@ -28,12 +29,16 @@ import (
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpctest"
+iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/testutils"
+"google.golang.org/grpc/internal/wrr"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
_ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config
+"google.golang.org/grpc/xds/internal/balancer/clustermanager"
"google.golang.org/grpc/xds/internal/client"
xdsclient "google.golang.org/grpc/xds/internal/client"
+xdstestutils "google.golang.org/grpc/xds/internal/testutils"
"google.golang.org/grpc/xds/internal/testutils/fakeclient"
)

@@ -276,19 +281,66 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) {
defer replaceRandNumGenerator(0)()

for _, tt := range []struct {
routes []*xdsclient.Route
wantJSON string
+wantClusters map[string]bool
}{
{
-routes: []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{testCluster1: 1}}},
+routes: []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{"test-cluster-1": 1}}},
-wantJSON: testOneClusterOnlyJSON,
+wantJSON: `{"loadBalancingConfig":[{
+"xds_cluster_manager_experimental":{
+"children":{
+"test-cluster-1":{
+"childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}]
+}
+}
+}}]}`,
+wantClusters: map[string]bool{"test-cluster-1": true},
},
{
routes: []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{
"cluster_1": 75,
"cluster_2": 25,
}}},
-wantJSON: testWeightedCDSJSON,
+// This update contains the cluster from the previous update as
+// well as this update, as the previous config selector still
+// references the old cluster when the new one is pushed.
+wantJSON: `{"loadBalancingConfig":[{
+"xds_cluster_manager_experimental":{
+"children":{
+"test-cluster-1":{
+"childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}]
+},
+"cluster_1":{
+"childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
+},
+"cluster_2":{
+"childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}]
+}
+}
+}}]}`,
+wantClusters: map[string]bool{"cluster_1": true, "cluster_2": true},
+},
+{
+routes: []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{
+"cluster_1": 75,
+"cluster_2": 25,
+}}},
+// With this redundant update, the old config selector has been
+// stopped, so there are no more references to the first cluster.
+// Only the second update's clusters should remain.
+wantJSON: `{"loadBalancingConfig":[{
+"xds_cluster_manager_experimental":{
+"children":{
+"cluster_1":{
+"childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}]
+},
+"cluster_2":{
+"childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}]
+}
+}
+}}]}`,
+wantClusters: map[string]bool{"cluster_1": true, "cluster_2": true},
},
} {
// Invoke the watchAPI callback with a good service update and wait for the

@@ -319,6 +371,241 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) {
t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config))
t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config))
}

+cs := iresolver.GetConfigSelector(rState)
+if cs == nil {
+t.Error("received nil config selector")
+continue
+}

+pickedClusters := make(map[string]bool)
+// Odds of picking 75% cluster 100 times in a row: 1 in 3E-13. And
+// with the random number generator stubbed out, we can rely on this
+// to be 100% reproducible.
+for i := 0; i < 100; i++ {
+res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()})
+if err != nil {
+t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err)
+}
+cluster := clustermanager.GetPickedClusterForTesting(res.Context)
+pickedClusters[cluster] = true
+res.OnCommitted()
+}
+if !reflect.DeepEqual(pickedClusters, tt.wantClusters) {
+t.Errorf("Picked clusters: %v; want: %v", pickedClusters, tt.wantClusters)
+}
+}
+}

+func (s) TestXDSResolverWRR(t *testing.T) {
+xdsC := fakeclient.NewClient()
+xdsR, tcc, cancel := testSetup(t, setupOpts{
+xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil },
+})
+defer func() {
+cancel()
+xdsR.Close()
+}()

+ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+defer cancel()
+waitForWatchListener(ctx, t, xdsC, targetStr)
+xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil)
+waitForWatchRouteConfig(ctx, t, xdsC, routeStr)

+defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR)
+newWRR = xdstestutils.NewTestWRR

+// Invoke the watchAPI callback with a good service update and wait for the
+// UpdateState method to be called on the ClientConn.
+xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{
+VirtualHosts: []*xdsclient.VirtualHost{
+{
+Domains: []string{targetStr},
+Routes: []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{
+"A": 5,
+"B": 10,
+}}},
+},
+},
+}, nil)

+gotState, err := tcc.stateCh.Receive(ctx)
+if err != nil {
+t.Fatalf("ClientConn.UpdateState returned error: %v", err)
+}
+rState := gotState.(resolver.State)
+if err := rState.ServiceConfig.Err; err != nil {
+t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err)
+}

+cs := iresolver.GetConfigSelector(rState)
+if cs == nil {
+t.Fatal("received nil config selector")
+}

+picks := map[string]int{}
+for i := 0; i < 30; i++ {
+res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()})
+if err != nil {
+t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err)
+}
+picks[clustermanager.GetPickedClusterForTesting(res.Context)]++
+res.OnCommitted()
+}
+want := map[string]int{"A": 10, "B": 20}
+if !reflect.DeepEqual(picks, want) {
+t.Errorf("picked clusters = %v; want %v", picks, want)
+}
+}

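Note: TestXDSResolverWRR above swaps newWRR for xdstestutils.NewTestWRR so that 30 picks of {A:5, B:10} always yield exactly {A:10, B:20}. The type below is only an illustrative sketch of what such a deterministic weighted-round-robin could look like against the wrr.WRR interface; it is not the implementation in the testutils package.

// Sketch only: emits each item `weight` times before moving to the next.
type testWRR struct {
	items   []interface{}
	weights []int64
	idx     int   // item currently being emitted
	count   int64 // times the current item has been emitted
}

func (w *testWRR) Add(item interface{}, weight int64) {
	w.items = append(w.items, item)
	w.weights = append(w.weights, weight)
}

func (w *testWRR) Next() interface{} {
	it := w.items[w.idx]
	w.count++
	if w.count >= w.weights[w.idx] {
		w.idx = (w.idx + 1) % len(w.items)
		w.count = 0
	}
	return it
}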
||||||
|
// TestXDSResolverDelayedOnCommitted tests that clusters remain in service
|
||||||
|
// config if RPCs are in flight.
|
||||||
|
func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) {
|
||||||
|
xdsC := fakeclient.NewClient()
|
||||||
|
xdsR, tcc, cancel := testSetup(t, setupOpts{
|
||||||
|
xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil },
|
||||||
|
})
|
||||||
|
defer func() {
|
||||||
|
cancel()
|
||||||
|
xdsR.Close()
|
||||||
|
}()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
|
||||||
|
defer cancel()
|
||||||
|
waitForWatchListener(ctx, t, xdsC, targetStr)
|
||||||
|
xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil)
|
||||||
|
waitForWatchRouteConfig(ctx, t, xdsC, routeStr)
|
||||||
|
|
||||||
|
// Invoke the watchAPI callback with a good service update and wait for the
|
||||||
|
// UpdateState method to be called on the ClientConn.
|
||||||
|
xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{
|
||||||
|
VirtualHosts: []*xdsclient.VirtualHost{
|
||||||
|
{
|
||||||
|
Domains: []string{targetStr},
|
||||||
|
Routes: []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{"test-cluster-1": 1}}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
|
||||||
|
gotState, err := tcc.stateCh.Receive(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ClientConn.UpdateState returned error: %v", err)
|
||||||
|
}
|
||||||
|
rState := gotState.(resolver.State)
|
||||||
|
if err := rState.ServiceConfig.Err; err != nil {
|
||||||
|
t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
wantJSON := `{"loadBalancingConfig":[{
|
||||||
|
"xds_cluster_manager_experimental":{
|
||||||
|
"children":{
|
||||||
|
"test-cluster-1":{
|
||||||
|
"childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}}]}`
|
||||||
|
wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON)
|
||||||
|
if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) {
|
||||||
|
t.Errorf("ClientConn.UpdateState received different service config")
|
||||||
|
t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config))
|
||||||
|
t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config))
|
||||||
|
}
|
||||||
|
|
||||||
|
cs := iresolver.GetConfigSelector(rState)
|
||||||
|
if cs == nil {
|
||||||
|
t.Fatal("received nil config selector")
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err)
|
||||||
|
}
	cluster := clustermanager.GetPickedClusterForTesting(res.Context)
	if cluster != "test-cluster-1" {
		t.Fatalf("picked cluster is %q, want %q", cluster, "test-cluster-1")
	}
	// delay res.OnCommitted()

	// Perform TWO updates to ensure the old config selector does not hold a
	// reference to test-cluster-1.
	xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{
		VirtualHosts: []*xdsclient.VirtualHost{
			{
				Domains: []string{targetStr},
				Routes:  []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{"NEW": 1}}},
			},
		},
	}, nil)
	xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{
		VirtualHosts: []*xdsclient.VirtualHost{
			{
				Domains: []string{targetStr},
				Routes:  []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{"NEW": 1}}},
			},
		},
	}, nil)

	tcc.stateCh.Receive(ctx) // Ignore the first update
	gotState, err = tcc.stateCh.Receive(ctx)
	if err != nil {
		t.Fatalf("ClientConn.UpdateState returned error: %v", err)
	}
	rState = gotState.(resolver.State)
	if err := rState.ServiceConfig.Err; err != nil {
		t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err)
	}
	wantJSON2 := `{"loadBalancingConfig":[{
    "xds_cluster_manager_experimental":{
      "children":{
        "test-cluster-1":{
          "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}]
        },
        "NEW":{
          "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}]
        }
      }
    }}]}`
	wantSCParsed2 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON2)
	if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed2.Config) {
		t.Errorf("ClientConn.UpdateState received different service config")
		t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config))
		t.Fatal("want: ", cmp.Diff(nil, wantSCParsed2.Config))
	}

	// Invoke OnCommitted; should lead to a service config update that deletes
	// test-cluster-1.
	res.OnCommitted()

	xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{
		VirtualHosts: []*xdsclient.VirtualHost{
			{
				Domains: []string{targetStr},
				Routes:  []*client.Route{{Prefix: newStringP(""), Action: map[string]uint32{"NEW": 1}}},
			},
		},
	}, nil)
	gotState, err = tcc.stateCh.Receive(ctx)
	if err != nil {
		t.Fatalf("ClientConn.UpdateState returned error: %v", err)
	}
	rState = gotState.(resolver.State)
	if err := rState.ServiceConfig.Err; err != nil {
		t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err)
	}
	wantJSON3 := `{"loadBalancingConfig":[{
    "xds_cluster_manager_experimental":{
      "children":{
        "NEW":{
          "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}]
        }
      }
    }}]}`
	wantSCParsed3 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON3)
	if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed3.Config) {
		t.Errorf("ClientConn.UpdateState received different service config")
		t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config))
		t.Fatal("want: ", cmp.Diff(nil, wantSCParsed3.Config))
	}
}
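The service-config churn exercised above boils down to reference counting: a cluster stays in the generated config while either the latest route update or an uncommitted RPC still points at it. The self-contained sketch below models that bookkeeping; the type and method names are invented for illustration and are not the resolver's actual internals.

package main

import "fmt"

// clusterRefs tracks how many holders (route updates or in-flight RPCs)
// currently reference each cluster.
type clusterRefs struct {
	refs map[string]int
}

func newClusterRefs() *clusterRefs { return &clusterRefs{refs: map[string]int{}} }

// selectCluster records an RPC picking a cluster and returns the function the
// RPC must call once it is committed, mirroring res.OnCommitted() above.
func (c *clusterRefs) selectCluster(name string) (onCommitted func()) {
	c.refs[name]++
	return func() { c.release(name) }
}

// applyRouteUpdate swaps the clusters referenced by the route configuration:
// the new set gains a reference, the previous set loses one.
func (c *clusterRefs) applyRouteUpdate(prev, next []string) {
	for _, name := range next {
		c.refs[name]++
	}
	for _, name := range prev {
		c.release(name)
	}
}

func (c *clusterRefs) release(name string) {
	if c.refs[name]--; c.refs[name] <= 0 {
		delete(c.refs, name)
	}
}

// active returns the clusters that would appear as children of the
// xds_cluster_manager_experimental config.
func (c *clusterRefs) active() []string {
	out := make([]string, 0, len(c.refs))
	for name := range c.refs {
		out = append(out, name)
	}
	return out
}

func main() {
	c := newClusterRefs()
	c.applyRouteUpdate(nil, []string{"test-cluster-1"})
	done := c.selectCluster("test-cluster-1") // RPC picks the cluster, commit delayed

	// The route config moves to NEW, but test-cluster-1 is kept alive by the
	// uncommitted RPC, so both clusters stay in the config.
	c.applyRouteUpdate([]string{"test-cluster-1"}, []string{"NEW"})
	fmt.Println(c.active()) // contains both NEW and test-cluster-1

	// Once the RPC commits, the next route update can finally drop test-cluster-1.
	done()
	c.applyRouteUpdate([]string{"NEW"}, []string{"NEW"})
	fmt.Println(c.active()) // only NEW
}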
@@ -1,3 +1,5 @@
+// +build !386
+
 /*
  *
  * Copyright 2020 gRPC authors.
@@ -16,19 +18,34 @@
  *
  */
 
-package xdsrouting
+// Package xds_test contains e2e tests for xDS use.
+package xds_test
 
 import (
-	"fmt"
+	"context"
+	"testing"
+	"time"
 
-	"google.golang.org/grpc/grpclog"
-	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpctest"
+	testpb "google.golang.org/grpc/test/grpc_testing"
 )
 
-const prefix = "[xds-routing-lb %p] "
+const (
+	defaultTestTimeout = 10 * time.Second
+)
 
-var logger = grpclog.Component("xds")
+type s struct {
+	grpctest.Tester
+}
 
-func prefixLogger(p *routingBalancer) *internalgrpclog.PrefixLogger {
-	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p))
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
+type testService struct {
+	testpb.TestServiceServer
+}
+
+func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) {
+	return &testpb.Empty{}, nil
 }

@@ -18,14 +18,13 @@
  *
  */
 
-// Package xds_test contains e2e tests for xDS use on the server.
+// Package xds_test contains e2e tests for xDS use.
 package xds_test
 
 import (
 	"context"
 	"fmt"
 	"testing"
-	"time"
 
 	"github.com/google/uuid"
 	"google.golang.org/protobuf/proto"
@@ -37,7 +36,6 @@ import (
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
-	"google.golang.org/grpc/internal/grpctest"
 	testpb "google.golang.org/grpc/test/grpc_testing"
 	"google.golang.org/grpc/xds"
 	"google.golang.org/grpc/xds/internal/env"
@@ -46,18 +44,6 @@ import (
 	"google.golang.org/grpc/xds/internal/version"
 )
 
-const (
-	defaultTestTimeout = 10 * time.Second
-)
-
-type s struct {
-	grpctest.Tester
-}
-
-func Test(t *testing.T) {
-	grpctest.RunSubTests(t, s{})
-}
-
 // TestServerSideXDS is an e2e tests for xDS use on the server. This does not
 // use any xDS features because we have not implemented any on the server side.
 func (s) TestServerSideXDS(t *testing.T) {
@@ -148,11 +134,3 @@ func (s) TestServerSideXDS(t *testing.T) {
 		t.Fatalf("rpc EmptyCall() failed: %v", err)
 	}
 }
-
-type testService struct {
-	testpb.TestServiceServer
-}
-
-func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) {
-	return &testpb.Empty{}, nil
-}

@@ -116,7 +116,7 @@ func (tcc *TestClientConn) NewSubConn(a []resolver.Address, o balancer.NewSubCon
 
 // RemoveSubConn removes the SubConn.
 func (tcc *TestClientConn) RemoveSubConn(sc balancer.SubConn) {
-	tcc.logger.Logf("testClientCOnn: RemoveSubConn(%p)", sc)
+	tcc.logger.Logf("testClientConn: RemoveSubConn(%p)", sc)
 	select {
 	case tcc.RemoveSubConnCh <- sc:
 	default:
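For orientation, the sketch below shows how a stub service like the testService added in the e2e test files above is typically driven end to end: register it on a gRPC server, dial it, and issue an EmptyCall. It is a hedged, standalone example that dials the listener directly rather than going through the xDS resolver and management server the real tests use, so the listener address and log output are illustrative only.

package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/test/grpc_testing"
)

// testService embeds the generated TestServiceServer and overrides only
// EmptyCall, the same pattern used by the e2e tests above.
type testService struct {
	testpb.TestServiceServer
}

func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) {
	return &testpb.Empty{}, nil
}

func main() {
	// Serve the stub on an ephemeral local port.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatalf("net.Listen() failed: %v", err)
	}
	srv := grpc.NewServer()
	testpb.RegisterTestServiceServer(srv, &testService{})
	go srv.Serve(lis)
	defer srv.Stop()

	// Dial the server and make a unary RPC against the stub.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("grpc.DialContext() failed: %v", err)
	}
	defer cc.Close()

	if _, err := testpb.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}); err != nil {
		log.Fatalf("rpc EmptyCall() failed: %v", err)
	}
	log.Println("rpc EmptyCall() succeeded")
}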