mirror of https://github.com/tikv/client-go.git
internal/client: remove use of errors.Trace (#329)
Signed-off-by: disksing <i@disksing.com>
parent 9921da23d5
commit b074cb9ad3
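The change is mechanical: call sites that previously wrapped errors with errors.Trace now either return the error unchanged (where a caller higher up already attaches a stack) or use errors.WithStack, the pkg/errors-compatible helper. The sketch below is not part of the diff; it is a minimal, self-contained illustration of the convention, assuming the github.com/pingcap/errors package used by this client, with open and dial invented purely for the example.

    package main

    import (
        "fmt"

        "github.com/pingcap/errors"
    )

    // open is a made-up leaf call that fails; it stands in for any dial or RPC.
    func open() error {
        return errors.New("connection refused")
    }

    // dial shows the post-commit convention: attach a stack once with WithStack
    // at the boundary, and let plain `return err` propagate it further up.
    func dial() error {
        if err := open(); err != nil {
            return errors.WithStack(err)
        }
        return nil
    }

    func main() {
        if err := dial(); err != nil {
            // Before this commit the same sites used errors.Trace(err), which is
            // specific to pingcap/errors; WithStack keeps pkg/errors compatibility.
            fmt.Printf("%v\n", err)
        }
    }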
@@ -138,7 +138,7 @@ func (a *connArray) Init(addr string, security config.Security, idleNotify *uint
     if len(security.ClusterSSLCA) != 0 {
         tlsConfig, err := security.ToTLSConfig()
         if err != nil {
-            return errors.Trace(err)
+            return errors.WithStack(err)
         }
         opt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
     }
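For context, the hunk above builds a gRPC transport-credentials dial option from the cluster TLS config. The snippet below is a hedged, self-contained sketch of that pattern using plain google.golang.org/grpc APIs; the address, timeout, and tls.Config contents are illustrative stand-ins for what security.ToTLSConfig() would produce.

    package main

    import (
        "context"
        "crypto/tls"
        "log"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
    )

    func main() {
        // Stand-in for the TLS config returned by security.ToTLSConfig().
        tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12}
        opt := grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))

        ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
        defer cancel()
        // Illustrative address; the real client dials the store address it is given.
        conn, err := grpc.DialContext(ctx, "127.0.0.1:20160", opt)
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }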
@@ -196,7 +196,7 @@ func (a *connArray) Init(addr string, security config.Security, idleNotify *uint
         if err != nil {
             // Cleanup if the initialization fails.
             a.Close()
-            return errors.Trace(err)
+            return errors.WithStack(err)
         }
         a.v[i] = conn

@@ -237,7 +237,7 @@ func (a *connArray) Close() {
     for i, c := range a.v {
         if c != nil {
             err := c.Close()
-            tikverr.Log(errors.Trace(err))
+            tikverr.Log(err)
             a.v[i] = nil
         }
     }
@@ -382,7 +382,7 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R
     enableBatch := req.StoreTp != tikvrpc.TiDB && req.StoreTp != tikvrpc.TiFlash
     connArray, err := c.getConnArray(addr, enableBatch)
     if err != nil {
-        return nil, errors.Trace(err)
+        return nil, err
     }

     start := time.Now()
@@ -447,7 +447,7 @@ func (c *RPCClient) getCopStreamResponse(ctx context.Context, client tikvpb.Tikv
     resp, err := tikvrpc.CallRPC(ctx1, client, req)
     if err != nil {
         cancel()
-        return nil, errors.Trace(err)
+        return nil, err
     }

     // Put the lease object to the timeout channel, so it would be checked periodically.
@@ -463,7 +463,7 @@ func (c *RPCClient) getCopStreamResponse(ctx context.Context, client tikvpb.Tikv
     first, err = copStream.Recv()
     if err != nil {
         if errors.Cause(err) != io.EOF {
-            return nil, errors.Trace(err)
+            return nil, errors.WithStack(err)
         }
         logutil.BgLogger().Debug("copstream returns nothing for the request.")
     }
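The stream helpers treat io.EOF from the first Recv as "the stream produced nothing" rather than as a failure, which is why the comparison goes through errors.Cause to strip any stack wrapper before checking the sentinel. A minimal sketch of that check, assuming github.com/pingcap/errors; recvOnce is a made-up stand-in for copStream.Recv().

    package main

    import (
        "fmt"
        "io"

        "github.com/pingcap/errors"
    )

    // recvOnce stands in for copStream.Recv(); here it simply reports end-of-stream,
    // already wrapped with a stack the way an upstream helper might return it.
    func recvOnce() error {
        return errors.WithStack(io.EOF)
    }

    func main() {
        if err := recvOnce(); err != nil {
            // Cause unwraps the stack wrapper so the sentinel comparison works.
            if errors.Cause(err) != io.EOF {
                fmt.Println("real failure:", err)
                return
            }
            // io.EOF only means the stream returned nothing; the client logs it
            // and carries on instead of propagating an error.
            fmt.Println("empty stream, not an error")
        }
    }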
@@ -482,7 +482,7 @@ func (c *RPCClient) getBatchCopStreamResponse(ctx context.Context, client tikvpb
     resp, err := tikvrpc.CallRPC(ctx1, client, req)
     if err != nil {
         cancel()
-        return nil, errors.Trace(err)
+        return nil, err
     }

     // Put the lease object to the timeout channel, so it would be checked periodically.
@@ -498,7 +498,7 @@ func (c *RPCClient) getBatchCopStreamResponse(ctx context.Context, client tikvpb
     first, err = copStream.Recv()
     if err != nil {
         if errors.Cause(err) != io.EOF {
-            return nil, errors.Trace(err)
+            return nil, errors.WithStack(err)
         }
         logutil.BgLogger().Debug("batch copstream returns nothing for the request.")
     }
@@ -516,7 +516,7 @@ func (c *RPCClient) getMPPStreamResponse(ctx context.Context, client tikvpb.Tikv
     resp, err := tikvrpc.CallRPC(ctx1, client, req)
     if err != nil {
         cancel()
-        return nil, errors.Trace(err)
+        return nil, err
     }

     // Put the lease object to the timeout channel, so it would be checked periodically.
@@ -532,7 +532,7 @@ func (c *RPCClient) getMPPStreamResponse(ctx context.Context, client tikvpb.Tikv
     first, err = copStream.Recv()
     if err != nil {
         if errors.Cause(err) != io.EOF {
-            return nil, errors.Trace(err)
+            return nil, errors.WithStack(err)
         }
     }
     copStream.MPPDataPacket = first
@@ -458,7 +458,7 @@ func (s *batchCommandsStream) recreate(conn *grpc.ClientConn) error {
     }
     streamClient, err := tikvClient.BatchCommands(ctx)
     if err != nil {
-        return errors.Trace(err)
+        return errors.WithStack(err)
     }
     s.Tikv_BatchCommandsClient = streamClient
     return nil
@@ -710,7 +710,7 @@ func (c *batchCommandsClient) recreateStreamingClient(err error, streamClient *b
 func (c *batchCommandsClient) newBatchStream(forwardedHost string) (*batchCommandsStream, error) {
     batchStream := &batchCommandsStream{forwardedHost: forwardedHost}
     if err := batchStream.recreate(c.conn); err != nil {
-        return nil, errors.Trace(err)
+        return nil, err
     }
     return batchStream, nil
 }
@@ -729,7 +729,7 @@ func (c *batchCommandsClient) initBatchClient(forwardedHost string) error {

     streamClient, err := c.newBatchStream(forwardedHost)
     if err != nil {
-        return errors.Trace(err)
+        return err
     }
     if forwardedHost == "" {
         c.client = streamClient
@@ -777,7 +777,7 @@ func sendBatchRequest(
     case <-ctx.Done():
         logutil.BgLogger().Warn("send request is cancelled",
             zap.String("to", addr), zap.String("cause", ctx.Err().Error()))
-        return nil, errors.Trace(ctx.Err())
+        return nil, errors.WithStack(ctx.Err())
     case <-timer.C:
         return nil, errors.SuspendStack(errors.Annotate(context.DeadlineExceeded, "wait sendLoop"))
     }
@@ -786,14 +786,14 @@ func sendBatchRequest(
     select {
     case res, ok := <-entry.res:
         if !ok {
-            return nil, errors.Trace(entry.err)
+            return nil, errors.WithStack(entry.err)
         }
         return tikvrpc.FromBatchCommandsResponse(res)
     case <-ctx.Done():
         atomic.StoreInt32(&entry.canceled, 1)
         logutil.BgLogger().Warn("wait response is cancelled",
             zap.String("to", addr), zap.String("cause", ctx.Err().Error()))
-        return nil, errors.Trace(ctx.Err())
+        return nil, errors.WithStack(ctx.Err())
     case <-timer.C:
         atomic.StoreInt32(&entry.canceled, 1)
         return nil, errors.SuspendStack(errors.Annotate(context.DeadlineExceeded, "wait recvLoop"))
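Both sendBatchRequest hunks follow the same wait pattern: a result channel, caller cancellation, and a timer, with only the cancellation and channel-closed arms switching from errors.Trace to errors.WithStack. Below is a hedged, self-contained sketch of that select, assuming github.com/pingcap/errors; waitResponse, the string result channel, and the timeout value are illustrative, not taken from the client.

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/pingcap/errors"
    )

    // waitResponse blocks until a result arrives, the caller cancels, or the
    // timeout fires, mirroring the three select arms in the diff above.
    func waitResponse(ctx context.Context, res <-chan string, timeout time.Duration) (string, error) {
        timer := time.NewTimer(timeout)
        defer timer.Stop()
        select {
        case r, ok := <-res:
            if !ok {
                // Channel closed without a result: attach a stack once.
                return "", errors.WithStack(errors.New("result channel closed"))
            }
            return r, nil
        case <-ctx.Done():
            // Cancellation: WithStack records where the wait was abandoned.
            return "", errors.WithStack(ctx.Err())
        case <-timer.C:
            // Timeout: the original code annotates DeadlineExceeded and uses
            // SuspendStack to suppress further stack capture on the sentinel.
            return "", errors.SuspendStack(errors.Annotate(context.DeadlineExceeded, "wait response"))
        }
    }

    func main() {
        res := make(chan string, 1)
        res <- "ok"
        r, err := waitResponse(context.Background(), res, time.Second)
        fmt.Println(r, err)
    }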
@@ -105,14 +105,14 @@ func (r reqCollapse) collapse(ctx context.Context, key string, sf *singleflight.
     defer timer.Stop()
     select {
     case <-ctx.Done():
-        err = errors.Trace(ctx.Err())
+        err = errors.WithStack(ctx.Err())
         return
     case <-timer.C:
-        err = errors.Trace(context.DeadlineExceeded)
+        err = errors.WithStack(context.DeadlineExceeded)
         return
     case rs := <-rsC:
         if rs.Err != nil {
-            err = errors.Trace(rs.Err)
+            err = errors.WithStack(rs.Err)
             return
         }
         resp = rs.Val.(*tikvrpc.Response)
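The collapse hunk reads the shared result (rs.Val, rs.Err) of a golang.org/x/sync/singleflight call from the channel rsC, so concurrent identical requests are served by a single execution. Below is a self-contained sketch of that pattern under the same error-wrapping convention; fetch, the key, and the sleep are illustrative stand-ins for the real RPC.

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/pingcap/errors"
        "golang.org/x/sync/singleflight"
    )

    var sf singleflight.Group

    // fetch collapses concurrent callers with the same key onto one execution;
    // every caller receives the shared result through its own channel.
    func fetch(ctx context.Context, key string, timeout time.Duration) (string, error) {
        rsC := sf.DoChan(key, func() (interface{}, error) {
            time.Sleep(10 * time.Millisecond) // stand-in for the real request
            return "value-for-" + key, nil
        })
        timer := time.NewTimer(timeout)
        defer timer.Stop()
        select {
        case <-ctx.Done():
            return "", errors.WithStack(ctx.Err())
        case <-timer.C:
            return "", errors.WithStack(context.DeadlineExceeded)
        case rs := <-rsC:
            if rs.Err != nil {
                return "", errors.WithStack(rs.Err)
            }
            return rs.Val.(string), nil
        }
    }

    func main() {
        v, err := fetch(context.Background(), "k1", time.Second)
        fmt.Println(v, err)
    }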
@@ -1017,7 +1017,7 @@ func CallRPC(ctx context.Context, client tikvpb.TikvClient, req *Request) (*Resp
         return nil, errors.Errorf("invalid request type: %v", req.Type)
     }
     if err != nil {
-        return nil, errors.Trace(err)
+        return nil, errors.WithStack(err)
     }
     return resp, nil
 }