mirror of https://github.com/grpc/grpc-go.git
balancergroup: remove mentions of locality from comments (#7476)
parent c8716e591a
commit 338595ca57
@@ -99,7 +99,7 @@ func (sbc *subBalancerWrapper) startBalancer() {
 	if sbc.balancer == nil {
 		sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts)
 	}
-	sbc.group.logger.Infof("Creating child policy of type %q for locality %q", sbc.builder.Name(), sbc.id)
+	sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id)
 	sbc.balancer.SwitchTo(sbc.builder)
 	if sbc.ccState != nil {
 		sbc.balancer.UpdateClientConnState(*sbc.ccState)
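The hunk above only rewords the log line, but the surrounding context shows the wrapper's start-up pattern: create the child lazily through a graceful-switch balancer, switch it to the requested builder, and replay any cached ClientConnState. Below is a minimal, self-contained sketch of that cache-and-replay idea; the clientConnState, childBalancer, and subWrapper types are hypothetical stand-ins, not the grpc-go API.

package main

import "fmt"

// clientConnState stands in for balancer.ClientConnState.
type clientConnState struct{ addresses []string }

// childBalancer stands in for the gracefulswitch.Balancer the wrapper drives.
type childBalancer interface {
	UpdateClientConnState(clientConnState) error
}

type loggingChild struct{ name string }

func (c *loggingChild) UpdateClientConnState(s clientConnState) error {
	fmt.Printf("child %q received %d addresses\n", c.name, len(s.addresses))
	return nil
}

// subWrapper caches the latest state so it can be replayed when the child is
// (re)started, mirroring the sbc.ccState replay in startBalancer.
type subWrapper struct {
	id      string
	child   childBalancer
	ccState *clientConnState
}

func (w *subWrapper) startChild() {
	if w.child == nil {
		w.child = &loggingChild{name: w.id} // lazily create the child policy
	}
	// Replay the cached state so a freshly started child does not have to
	// wait for the next resolver update.
	if w.ccState != nil {
		w.child.UpdateClientConnState(*w.ccState)
	}
}

func main() {
	w := &subWrapper{id: "child-a", ccState: &clientConnState{addresses: []string{"10.0.0.1:443"}}}
	w.startChild() // prints: child "child-a" received 1 addresses
}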
@@ -121,14 +121,11 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState)
 	sbc.ccState = &s
 	b := sbc.balancer
 	if b == nil {
-		// This sub-balancer was closed. This should never happen because
-		// sub-balancers are closed when the locality is removed from EDS, or
-		// the balancer group is closed. There should be no further address
-		// updates when either of this happened.
-		//
-		// This will be a common case with priority support, because a
-		// sub-balancer (and the whole balancer group) could be closed because
-		// it's the lower priority, but it can still get address updates.
+		// A sub-balancer is closed when it is removed from the group or the
+		// group is closed as a whole, and is not expected to receive updates
+		// after that. But when used with the priority LB policy a sub-balancer
+		// (and the whole balancer group) could be closed because it's the lower
+		// priority, but it can still get address updates.
 		return nil
 	}
 	return b.UpdateClientConnState(s)
@@ -137,14 +134,11 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState)
 func (sbc *subBalancerWrapper) resolverError(err error) {
 	b := sbc.balancer
 	if b == nil {
-		// This sub-balancer was closed. This should never happen because
-		// sub-balancers are closed when the locality is removed from EDS, or
-		// the balancer group is closed. There should be no further address
-		// updates when either of this happened.
-		//
-		// This will be a common case with priority support, because a
-		// sub-balancer (and the whole balancer group) could be closed because
-		// it's the lower priority, but it can still get address updates.
+		// A sub-balancer is closed when it is removed from the group or the
+		// group is closed as a whole, and is not expected to receive updates
+		// after that. But when used with the priority LB policy a sub-balancer
+		// (and the whole balancer group) could be closed because it's the lower
+		// priority, but it can still get address updates.
 		return
 	}
 	b.ResolverError(err)
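The rewritten comment (here and in updateClientConnState above) explains why a nil child is tolerated: a sub-balancer is closed when it is removed from the group or when the group closes, yet under the priority LB policy a closed child can still receive updates, and those must be dropped rather than dereferenced. A small sketch of that snapshot-and-nil-check guard, using hypothetical wrapper and child types rather than the real ones:

package main

import "fmt"

type addrUpdate struct{ addrs []string }

type child struct{ id string }

func (c *child) update(u addrUpdate) { fmt.Printf("%s delivered %d addrs\n", c.id, len(u.addrs)) }

// wrapper drops updates once its child has been closed, mirroring the nil
// check in updateClientConnState and resolverError.
type wrapper struct {
	child *child // set to nil when the wrapper is closed
}

func (w *wrapper) close() { w.child = nil }

func (w *wrapper) updateClientConnState(u addrUpdate) error {
	c := w.child
	if c == nil {
		// Closed child (e.g. a lower-priority child under the priority LB
		// policy): drop the update instead of dereferencing nil.
		return nil
	}
	c.update(u)
	return nil
}

func main() {
	w := &wrapper{child: &child{id: "p1"}}
	w.updateClientConnState(addrUpdate{addrs: []string{"a"}}) // delivered
	w.close()
	w.updateClientConnState(addrUpdate{addrs: []string{"b"}}) // silently dropped
}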
@@ -302,7 +296,7 @@ func (bg *BalancerGroup) Start() {
 //
 // TODO: Get rid of the existing Add() API and replace it with this.
 func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error {
-	bg.logger.Infof("Adding child policy of type %q for locality %q", balancerName, id)
+	bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id)
 	builder := balancer.Get(balancerName)
 	if builder == nil {
 		return fmt.Errorf("unregistered balancer name %q", balancerName)
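AddWithClientConn resolves the child's policy by name with balancer.Get, which returns nil for a name that was never registered, and turns that into an error. A short sketch of that lookup; checkBuilder is a hypothetical helper, while balancer.Get and the round_robin registration are real grpc-go APIs:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers the "round_robin" builder
)

// checkBuilder mirrors the lookup-and-error step in AddWithClientConn.
func checkBuilder(name string) error {
	if b := balancer.Get(name); b == nil {
		return fmt.Errorf("unregistered balancer name %q", name)
	}
	return nil
}

func main() {
	fmt.Println(checkBuilder("round_robin"))    // <nil>: registered by the blank import
	fmt.Println(checkBuilder("no_such_policy")) // error: unregistered balancer name
}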
@@ -318,7 +312,7 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.
 	if bg.outgoingStarted && bg.deletedBalancerCache != nil {
 		if old, ok := bg.deletedBalancerCache.Remove(id); ok {
 			if bg.logger.V(2) {
-				bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id)
+				bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id)
 				bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
 			}
 
@@ -372,13 +366,13 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) {
 // closed after timeout. Cleanup work (closing sub-balancer and removing
 // subconns) will be done after timeout.
 func (bg *BalancerGroup) Remove(id string) {
-	bg.logger.Infof("Removing child policy for locality %q", id)
+	bg.logger.Infof("Removing child policy for child %q", id)
 
 	bg.outgoingMu.Lock()
 
 	sbToRemove, ok := bg.idToBalancerConfig[id]
 	if !ok {
-		bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id)
+		bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id)
 		bg.outgoingMu.Unlock()
 		return
 	}
@@ -394,13 +388,13 @@ func (bg *BalancerGroup) Remove(id string) {
 
 	if bg.deletedBalancerCache != nil {
 		if bg.logger.V(2) {
-			bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id)
+			bg.logger.Infof("Adding child policy for child %q to the balancer cache", id)
 			bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
 		}
 
 		bg.deletedBalancerCache.Add(id, sbToRemove, func() {
 			if bg.logger.V(2) {
-				bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id)
+				bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id)
 				bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
 			}
 
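Remove does not tear the child down immediately: it parks the sub-balancer in deletedBalancerCache so a quick re-Add can reuse it, and only finalizes it when the cache timeout fires. The sketch below illustrates that park, reclaim-or-expire behavior with a hypothetical timeoutCache; it is not the cache implementation the package actually uses.

package main

import (
	"fmt"
	"sync"
	"time"
)

// timeoutCache parks values for a grace period: Remove reclaims a value
// before the timer fires, otherwise onExpiry runs and the entry is dropped.
type timeoutCache struct {
	mu      sync.Mutex
	timeout time.Duration
	timers  map[string]*time.Timer
	values  map[string]any
}

func newTimeoutCache(d time.Duration) *timeoutCache {
	return &timeoutCache{
		timeout: d,
		timers:  make(map[string]*time.Timer),
		values:  make(map[string]any),
	}
}

// Add parks v under id; onExpiry runs only if nobody reclaims it in time.
func (c *timeoutCache) Add(id string, v any, onExpiry func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.values[id] = v
	c.timers[id] = time.AfterFunc(c.timeout, func() {
		c.mu.Lock()
		_, still := c.timers[id]
		if still {
			delete(c.timers, id)
			delete(c.values, id)
		}
		c.mu.Unlock()
		if still {
			onExpiry()
		}
	})
}

// Remove reclaims a parked value before its timer fires.
func (c *timeoutCache) Remove(id string) (any, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	t, ok := c.timers[id]
	if !ok {
		return nil, false
	}
	t.Stop()
	v := c.values[id]
	delete(c.timers, id)
	delete(c.values, id)
	return v, true
}

func main() {
	c := newTimeoutCache(50 * time.Millisecond)
	c.Add("child-a", "sub-balancer A", func() { fmt.Println("child-a expired") })
	if v, ok := c.Remove("child-a"); ok {
		fmt.Println("reused:", v) // reclaimed before the timeout, like Add reusing a cached child
	}
	time.Sleep(100 * time.Millisecond) // nothing expires; the entry was reclaimed
}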
@@ -541,7 +535,7 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver
 // aggregator will create an aggregated picker and an aggregated connectivity
 // state, then forward to ClientConn.
 func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) {
-	bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state)
+	bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state)
 
 	// Send new state to the aggregator, without holding the incomingMu.
 	// incomingMu is to protect all calls to the parent ClientConn, this update
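updateBalancerState forwards each child's state to an aggregator, which produces one aggregated picker and connectivity state for the parent ClientConn. As an illustration only, here is one common way to fold child connectivity states into a single state (READY wins, then CONNECTING, then IDLE, then TRANSIENT_FAILURE); this is an assumed rule for the sketch, not necessarily the exact one this package's aggregator applies.

package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// aggregate folds per-child connectivity states into one overall state.
func aggregate(states []connectivity.State) connectivity.State {
	var ready, connecting, idle bool
	for _, s := range states {
		switch s {
		case connectivity.Ready:
			ready = true
		case connectivity.Connecting:
			connecting = true
		case connectivity.Idle:
			idle = true
		}
	}
	switch {
	case ready:
		return connectivity.Ready
	case connecting:
		return connectivity.Connecting
	case idle:
		return connectivity.Idle
	default:
		return connectivity.TransientFailure
	}
}

func main() {
	states := []connectivity.State{connectivity.Connecting, connectivity.Ready}
	fmt.Println(aggregate(states)) // READY
}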