mirror of https://github.com/tikv/client-go.git
internal/locate: remove use of errors.Trace (#332)
Signed-off-by: disksing <i@disksing.com>
parent b074cb9ad3
commit 3e9bd8b941
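The change below is mechanical but worth spelling out. client-go uses the pkg/errors-compatible github.com/pingcap/errors package, where errors.WithStack(err) annotates err with the caller's stack trace (and returns nil for a nil err) and errors.Cause peels annotations back off. The convention this commit adopts: wrap exactly once, where an error first enters client-go (a PD RPC, a gRPC dial, a backoff failure), then propagate with a plain `return err` instead of re-wrapping with errors.Trace at every level. A minimal sketch of that convention, using github.com/pkg/errors and hypothetical function names:

package main

import (
	"fmt"

	"github.com/pkg/errors" // pingcap/errors exposes the same API
)

// fetchFromPD is a hypothetical stand-in for a PD client call.
func fetchFromPD() error {
	return fmt.Errorf("pd unavailable")
}

// loadRegion wraps exactly once, where the error enters our code.
func loadRegion() error {
	if err := fetchFromPD(); err != nil {
		return errors.WithStack(err)
	}
	return nil
}

// locateKey propagates the already-wrapped error as-is; the old
// errors.Trace habit would have stacked a second trace on top.
func locateKey() error {
	if err := loadRegion(); err != nil {
		return err
	}
	return nil
}

func main() {
	err := locateKey()
	fmt.Printf("%v\n", err)  // message only
	fmt.Printf("%+v\n", err) // message plus the single recorded stack
}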
@@ -86,13 +86,13 @@ func (c *CodecPDClient) ScanRegions(ctx context.Context, startKey []byte, endKey
 
 	regions, err := c.Client.ScanRegions(ctx, startKey, endKey, limit)
 	if err != nil {
-		return nil, errors.Trace(err)
+		return nil, errors.WithStack(err)
 	}
 	for _, region := range regions {
 		if region != nil {
 			err = decodeRegionMetaKeyInPlace(region.Meta)
 			if err != nil {
-				return nil, errors.Trace(err)
+				return nil, err
 			}
 		}
 	}
@@ -101,14 +101,14 @@ func (c *CodecPDClient) ScanRegions(ctx context.Context, startKey []byte, endKey
 
 func processRegionResult(region *pd.Region, err error) (*pd.Region, error) {
 	if err != nil {
-		return nil, errors.Trace(err)
+		return nil, errors.WithStack(err)
 	}
 	if region == nil || region.Meta == nil {
 		return nil, nil
 	}
 	err = decodeRegionMetaKeyInPlace(region.Meta)
 	if err != nil {
-		return nil, errors.Trace(err)
+		return nil, err
 	}
 	return region, nil
 }
@@ -132,14 +132,14 @@ func decodeRegionMetaKeyInPlace(r *metapb.Region) error {
 	if len(r.StartKey) != 0 {
 		_, decoded, err := codec.DecodeBytes(r.StartKey, nil)
 		if err != nil {
-			return &decodeError{err}
+			return errors.WithStack(&decodeError{err})
 		}
 		r.StartKey = decoded
 	}
 	if len(r.EndKey) != 0 {
 		_, decoded, err := codec.DecodeBytes(r.EndKey, nil)
 		if err != nil {
-			return &decodeError{err}
+			return errors.WithStack(&decodeError{err})
 		}
 		r.EndKey = decoded
 	}
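In decodeRegionMetaKeyInPlace the wrapped value is a custom error type rather than a plain error. errors.WithStack keeps the concrete type reachable through errors.Cause, so callers can still type-assert on the decode failure while gaining a stack trace. A sketch under the assumption that decodeError simply embeds the underlying error (the real unexported type lives in internal/locate):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// decodeError is an assumption about the unexported type in
// internal/locate: a thin wrapper that embeds the decode failure.
type decodeError struct{ error }

func decodeKey() error {
	return errors.WithStack(&decodeError{fmt.Errorf("bad key prefix")})
}

func main() {
	err := decodeKey()
	// errors.Cause peels off the WithStack annotation, so the
	// concrete type is still detectable by callers.
	_, isDecodeErr := errors.Cause(err).(*decodeError)
	fmt.Println(isDecodeErr) // true
}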
@@ -151,14 +151,14 @@ func decodeRegionMetaKeyWithShallowCopy(r *metapb.Region) (*metapb.Region, error
 	if len(r.StartKey) != 0 {
 		_, decoded, err := codec.DecodeBytes(r.StartKey, nil)
 		if err != nil {
-			return nil, errors.Trace(err)
+			return nil, err
 		}
 		nr.StartKey = decoded
 	}
 	if len(r.EndKey) != 0 {
 		_, decoded, err := codec.DecodeBytes(r.EndKey, nil)
 		if err != nil {
-			return nil, errors.Trace(err)
+			return nil, err
 		}
 		nr.EndKey = decoded
 	}
@@ -918,7 +918,7 @@ func (c *RegionCache) LocateRegionByID(bo *retry.Backoffer, regionID uint64) (*K
 
 	r, err := c.loadRegionByID(bo, regionID)
 	if err != nil {
-		return nil, errors.Trace(err)
+		return nil, err
 	}
 
 	c.mu.Lock()
@@ -944,7 +944,7 @@ func (c *RegionCache) GroupKeysByRegion(bo *retry.Backoffer, keys [][]byte, filt
 			var err error
 			lastLoc, err = c.LocateKey(bo, k)
 			if err != nil {
-				return nil, first, errors.Trace(err)
+				return nil, first, err
 			}
 			if filter != nil && filter(k, lastLoc.StartKey) {
 				continue
@@ -964,7 +964,7 @@ func (c *RegionCache) ListRegionIDsInKeyRange(bo *retry.Backoffer, startKey, end
 	for {
 		curRegion, err := c.LocateKey(bo, startKey)
 		if err != nil {
-			return nil, errors.Trace(err)
+			return nil, err
 		}
 		regionIDs = append(regionIDs, curRegion.Region.id)
 		if curRegion.Contains(endKey) {
@@ -981,7 +981,7 @@ func (c *RegionCache) LoadRegionsInKeyRange(bo *retry.Backoffer, startKey, endKe
 	for {
 		batchRegions, err = c.BatchLoadRegionsWithKeyRange(bo, startKey, endKey, defaultRegionsPerBatch)
 		if err != nil {
-			return nil, errors.Trace(err)
+			return nil, err
 		}
 		if len(batchRegions) == 0 {
 			// should never happen
@@ -1025,7 +1025,7 @@ func (c *RegionCache) BatchLoadRegionsWithKeyRange(bo *retry.Backoffer, startKey
 func (c *RegionCache) BatchLoadRegionsFromKey(bo *retry.Backoffer, startKey []byte, count int) ([]byte, error) {
 	regions, err := c.BatchLoadRegionsWithKeyRange(bo, startKey, nil, count)
 	if err != nil {
-		return nil, errors.Trace(err)
+		return nil, err
 	}
 	return regions[len(regions)-1].EndKey(), nil
 }
@@ -1238,7 +1238,7 @@ func (c *RegionCache) loadRegion(bo *retry.Backoffer, key []byte, isEndKey bool)
 	if backoffErr != nil {
 		err := bo.Backoff(retry.BoPDRPC, backoffErr)
 		if err != nil {
-			return nil, errors.Trace(err)
+			return nil, errors.WithStack(err)
 		}
 	}
 	var reg *pd.Region
@@ -1297,7 +1297,7 @@ func (c *RegionCache) loadRegionByID(bo *retry.Backoffer, regionID uint64) (*Reg
 	if backoffErr != nil {
 		err := bo.Backoff(retry.BoPDRPC, backoffErr)
 		if err != nil {
-			return nil, errors.Trace(err)
+			return nil, errors.WithStack(err)
 		}
 	}
 	reg, err := c.pdClient.GetRegionByID(ctx, regionID)
@@ -1350,7 +1350,7 @@ func (c *RegionCache) scanRegions(bo *retry.Backoffer, startKey, endKey []byte,
 	if backoffErr != nil {
 		err := bo.Backoff(retry.BoPDRPC, backoffErr)
 		if err != nil {
-			return nil, errors.Trace(err)
+			return nil, errors.WithStack(err)
 		}
 	}
 	regionsInfo, err := c.pdClient.ScanRegions(ctx, startKey, endKey, limit)
@@ -1905,8 +1905,8 @@ func (s *Store) initResolve(bo *retry.Backoffer, c *RegionCache) (addr string, e
 	} else {
 		metrics.RegionCacheCounterWithGetStoreOK.Inc()
 	}
-	if bo.GetCtx().Err() != nil && errors.Cause(bo.GetCtx().Err()) == context.Canceled {
-		return
+	if err := bo.GetCtx().Err(); err != nil && errors.Cause(err) == context.Canceled {
+		return "", errors.WithStack(err)
 	}
 	if err != nil && !isStoreNotFoundError(err) {
 		// TODO: more refine PD error status handle.
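The initResolve hunk above also fixes a silent failure: the old code bare-returned on a cancelled context, yielding an empty address and a nil error, while the new code surfaces the cancellation as a stack-annotated error. The shape of that check, sketched with a hypothetical resolver:

package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

// resolve sketches the initResolve pattern: report context
// cancellation as an explicit, stack-annotated error instead of a
// silent bare return. The body is illustrative only.
func resolve(ctx context.Context) (addr string, err error) {
	if err := ctx.Err(); err != nil && errors.Cause(err) == context.Canceled {
		return "", errors.WithStack(err)
	}
	return "store-addr", nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err := resolve(ctx)
	fmt.Println(errors.Cause(err) == context.Canceled) // true
}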
@@ -2228,7 +2228,7 @@ func createKVHealthClient(ctx context.Context, addr string) (*grpc.ClientConn, h
 	if len(cfg.Security.ClusterSSLCA) != 0 {
 		tlsConfig, err := cfg.Security.ToTLSConfig()
 		if err != nil {
-			return nil, nil, errors.Trace(err)
+			return nil, nil, errors.WithStack(err)
 		}
 		opt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
 	}
@@ -2256,7 +2256,7 @@ func createKVHealthClient(ctx context.Context, addr string) (*grpc.ClientConn, h
 		}),
 	)
 	if err != nil {
-		return nil, nil, errors.Trace(err)
+		return nil, nil, errors.WithStack(err)
 	}
 	cli := healthpb.NewHealthClient(conn)
 	return conn, cli, nil
@@ -799,7 +799,7 @@ func (s *replicaSelector) onNotLeader(bo *retry.Backoffer, ctx *RPCContext, notL
 		// The region may be during transferring leader.
 		s.state.onNoLeader(s)
 		if err = bo.Backoff(retry.BoRegionScheduling, errors.Errorf("no leader, ctx: %v", ctx)); err != nil {
-			return false, errors.Trace(err)
+			return false, err
 		}
 	} else {
 		s.updateLeader(notLeader.GetLeader())
@@ -979,7 +979,7 @@ func (s *RegionRequestSender) SendReqCtx(
 		var retry bool
 		resp, retry, err = s.sendReqToRegion(bo, rpcCtx, req, timeout)
 		if err != nil {
-			return nil, nil, errors.Trace(err)
+			return nil, nil, err
 		}
 
 		// recheck whether the session/query is killed during the Next()
@@ -1000,12 +1000,12 @@ func (s *RegionRequestSender) SendReqCtx(
 		var regionErr *errorpb.Error
 		regionErr, err = resp.GetRegionError()
 		if err != nil {
-			return nil, nil, errors.Trace(err)
+			return nil, nil, err
 		}
 		if regionErr != nil {
 			retry, err = s.onRegionError(bo, rpcCtx, req, regionErr)
 			if err != nil {
-				return nil, nil, errors.Trace(err)
+				return nil, nil, err
 			}
 			if retry {
 				tryTimes++
@@ -1069,7 +1069,7 @@ func (h *RPCCanceller) CancelAll() {
 
 func (s *RegionRequestSender) sendReqToRegion(bo *retry.Backoffer, rpcCtx *RPCContext, req *tikvrpc.Request, timeout time.Duration) (resp *tikvrpc.Response, retry bool, err error) {
 	if e := tikvrpc.SetContext(req, rpcCtx.Meta, rpcCtx.Peer); e != nil {
-		return nil, false, errors.Trace(e)
+		return nil, false, err
	}
 	// judge the store limit switch.
 	if limit := kv.StoreLimit.Load(); limit > 0 {
@@ -1189,7 +1189,7 @@ func (s *RegionRequestSender) sendReqToRegion(bo *retry.Backoffer, rpcCtx *RPCCo
 		// we need to retry the request. But for context cancel active, for example, limitExec gets the required rows,
 		// we shouldn't retry the request, it will go to backoff and hang in retry logic.
 		if ctx.Err() != nil && errors.Cause(ctx.Err()) == context.Canceled {
-			return nil, false, errors.Trace(ctx.Err())
+			return nil, false, errors.WithStack(ctx.Err())
 		}
 
 		if val, e := util.EvalFailpoint("noRetryOnRpcError"); e == nil {
@@ -1198,7 +1198,7 @@ func (s *RegionRequestSender) sendReqToRegion(bo *retry.Backoffer, rpcCtx *RPCCo
 			}
 		}
 		if e := s.onSendFail(bo, rpcCtx, err); e != nil {
-			return nil, false, errors.Trace(e)
+			return nil, false, err
 		}
 		return nil, true, nil
 	}
@@ -1214,7 +1214,7 @@ func (s *RegionRequestSender) getStoreToken(st *Store, limit int64) error {
 		return nil
 	}
 	metrics.TiKVStoreLimitErrorCounter.WithLabelValues(st.addr, strconv.FormatUint(st.storeID, 10)).Inc()
-	return &tikverr.ErrTokenLimit{StoreID: st.storeID}
+	return errors.WithStack(&tikverr.ErrTokenLimit{StoreID: st.storeID})
 }
 
 func (s *RegionRequestSender) releaseStoreToken(st *Store) {
@@ -1235,14 +1235,14 @@ func (s *RegionRequestSender) onSendFail(bo *retry.Backoffer, ctx *RPCContext, e
 	}
 	// If it failed because the context is cancelled by ourself, don't retry.
 	if errors.Cause(err) == context.Canceled {
-		return errors.Trace(err)
+		return errors.WithStack(err)
 	} else if LoadShuttingDown() > 0 {
-		return tikverr.ErrTiDBShuttingDown
+		return errors.WithStack(tikverr.ErrTiDBShuttingDown)
 	}
 	if status.Code(errors.Cause(err)) == codes.Canceled {
 		select {
 		case <-bo.GetCtx().Done():
-			return errors.Trace(err)
+			return errors.WithStack(err)
 		default:
 			// If we don't cancel, but the error code is Canceled, it must be from grpc remote.
 			// This may happen when tikv is killed and exiting.
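The onSendFail hunk shows the commit's treatment of sentinel errors such as tikverr.ErrTiDBShuttingDown: they now get errors.WithStack at the return site, which means identity comparisons must go through errors.Cause rather than ==. A sketch with a stand-in sentinel (the real one lives in client-go's error package):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// ErrShuttingDown stands in for tikverr.ErrTiDBShuttingDown.
var ErrShuttingDown = errors.New("tidb server shutting down")

func onSendFail() error {
	// Annotate the sentinel with the caller's stack; identity checks
	// must now unwrap via errors.Cause.
	return errors.WithStack(ErrShuttingDown)
}

func main() {
	err := onSendFail()
	fmt.Println(err == ErrShuttingDown)               // false: wrapped
	fmt.Println(errors.Cause(err) == ErrShuttingDown) // true
}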
@@ -1268,7 +1268,7 @@ func (s *RegionRequestSender) onSendFail(bo *retry.Backoffer, ctx *RPCContext, e
 	} else {
 		err = bo.Backoff(retry.BoTiKVRPC, errors.Errorf("send tikv request error: %v, ctx: %v, try next peer later", err, ctx))
 	}
-	return errors.Trace(err)
+	return err
 }
 
 // NeedReloadRegion checks is all peers has sent failed, if so need reload.
@@ -1357,7 +1357,7 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 		// the region from PD.
 		s.regionCache.InvalidateCachedRegionWithReason(ctx.Region, NoLeader)
 		if err = bo.Backoff(retry.BoRegionScheduling, errors.Errorf("not leader: %v, ctx: %v", notLeader, ctx)); err != nil {
-			return false, errors.Trace(err)
+			return false, err
 		}
 		return false, nil
 	} else {
@@ -1396,7 +1396,7 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 		if !retry && s.replicaSelector != nil {
 			s.replicaSelector.invalidateRegion()
 		}
-		return retry, errors.Trace(err)
+		return retry, err
 	}
 
 	if regionErr.GetServerIsBusy() != nil {
@@ -1409,7 +1409,7 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 			err = bo.Backoff(retry.BoTiKVServerBusy, errors.Errorf("server is busy, ctx: %v", ctx))
 		}
 		if err != nil {
-			return false, errors.Trace(err)
+			return false, err
 		}
 		return true, nil
 	}
@@ -1425,7 +1425,7 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 		} else {
 			err = bo.Backoff(retry.BoStaleCmd, errors.Errorf("stale command, ctx: %v", ctx))
 			if err != nil {
-				return false, errors.Trace(err)
+				return false, err
 			}
 		}
 		return true, nil
@@ -1450,7 +1450,7 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 		logutil.BgLogger().Debug("tikv reports `MaxTimestampNotSynced`", zap.Stringer("ctx", ctx))
 		err = bo.Backoff(retry.BoMaxTsNotSynced, errors.Errorf("max timestamp not synced, ctx: %v", ctx))
 		if err != nil {
-			return false, errors.Trace(err)
+			return false, err
 		}
 		return true, nil
 	}
@@ -1463,7 +1463,7 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 			zap.Stringer("ctx", ctx))
 		err = bo.Backoff(retry.BoMaxRegionNotInitialized, errors.Errorf("region not initialized"))
 		if err != nil {
-			return false, errors.Trace(err)
+			return false, err
 		}
 		return true, nil
 	}
@@ -1477,7 +1477,7 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 		// The region can't provide service until split or merge finished, so backoff.
 		err = bo.Backoff(retry.BoRegionScheduling, errors.Errorf("read index not ready, ctx: %v", ctx))
 		if err != nil {
-			return false, errors.Trace(err)
+			return false, err
 		}
 		return true, nil
 	}
@@ -1487,7 +1487,7 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 		// The region is merging and it can't provide service until merge finished, so backoff.
 		err = bo.Backoff(retry.BoRegionScheduling, errors.Errorf("region is merging, ctx: %v", ctx))
 		if err != nil {
-			return false, errors.Trace(err)
+			return false, err
 		}
 		return true, nil
 	}
@@ -1502,9 +1502,9 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
 			zap.Uint64("region-id", regionErr.GetDataIsNotReady().GetRegionId()),
 			zap.Uint64("safe-ts", regionErr.GetDataIsNotReady().GetSafeTs()),
 			zap.Stringer("ctx", ctx))
-		err = bo.Backoff(retry.BoMaxDataNotReady, errors.Errorf("data is not ready"))
+		err = bo.Backoff(retry.BoMaxDataNotReady, errors.New("data is not ready"))
 		if err != nil {
-			return false, errors.Trace(err)
+			return false, err
 		}
 		return true, nil
 	}
@@ -737,7 +737,7 @@ func SetContext(req *Request, region *metapb.Region, peer *metapb.Peer) error {
 	case CmdCheckSecondaryLocks:
 		req.CheckSecondaryLocks().Context = ctx
 	default:
-		return fmt.Errorf("invalid request type %v", req.Type)
+		return errors.Errorf("invalid request type %v", req.Type)
 	}
 	return nil
 }
@@ -900,7 +900,7 @@ func (resp *Response) GetRegionError() (*errorpb.Error, error) {
 		if _, isEmpty := resp.Resp.(*tikvpb.BatchCommandsEmptyResponse); isEmpty {
 			return nil, nil
 		}
-		return nil, fmt.Errorf("invalid response type %v", resp)
+		return nil, errors.Errorf("invalid response type %v", resp)
 	}
 	return err.GetRegionError(), nil
 }
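The tikvrpc hunks swap fmt.Errorf for errors.Errorf. The rendered messages are identical; the difference is that the pkg/errors-style constructor captures a stack trace at creation, which the %+v verb can later print. A quick comparison:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	plain := fmt.Errorf("invalid request type %v", 42)
	rich := errors.Errorf("invalid request type %v", 42)

	// Both render the same one-line message...
	fmt.Printf("%v\n%v\n", plain, rich)
	// ...but only the errors.Errorf value carries a creation-site
	// stack trace under the %+v verb.
	fmt.Printf("%+v\n", rich)
}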