mirror of https://github.com/tikv/client-go.git
commit b3d61828e5 (parent 7f6daad88a)
@@ -35,7 +35,6 @@
 package mocktikv
 
 import (
-    "bytes"
     "context"
     "fmt"
     "math"
@@ -232,26 +231,7 @@ func (c *pdClient) ScatterRegions(ctx context.Context, regionsID []uint64, opts
 }
 
 func (c *pdClient) SplitRegions(ctx context.Context, splitKeys [][]byte, opts ...pd.RegionsOption) (*pdpb.SplitRegionsResponse, error) {
-    regionsID := make([]uint64, 0, len(splitKeys))
-    for i, key := range splitKeys {
-        k := NewMvccKey(key)
-        region, _, _ := c.cluster.GetRegionByKey(k)
-        if bytes.Equal(region.GetStartKey(), key) {
-            continue
-        }
-        if i == 0 {
-            regionsID = append(regionsID, region.Id)
-        }
-        newRegionID, newPeerIDs := c.cluster.AllocID(), c.cluster.AllocIDs(len(region.Peers))
-        newRegion := c.cluster.SplitRaw(region.GetId(), newRegionID, k, newPeerIDs, newPeerIDs[0])
-        regionsID = append(regionsID, newRegion.Id)
-    }
-    response := &pdpb.SplitRegionsResponse{
-        Header:             &pdpb.ResponseHeader{},
-        FinishedPercentage: 100,
-        RegionsId:          regionsID,
-    }
-    return response, nil
+    return nil, nil
 }
 
 func (c *pdClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) {
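In the hunks above, the mocktikv PD client's `SplitRegions` body is reduced to `return nil, nil` (and the now-unused `"bytes"` import is dropped): region splitting no longer goes through the PD API, so the mock stops simulating it. A minimal standalone sketch of what this means for callers, using stand-in types rather than the real `pd.Client` interface:

```go
package main

import "fmt"

// SplitRegionsResponse stands in for pdpb.SplitRegionsResponse.
type SplitRegionsResponse struct{ RegionsId []uint64 }

// mockPD models the stubbed mocktikv client: SplitRegions no longer
// simulates a split and simply reports (nil, nil).
type mockPD struct{}

func (mockPD) SplitRegions(splitKeys [][]byte) (*SplitRegionsResponse, error) {
	return nil, nil
}

func main() {
	resp, err := mockPD{}.SplitRegions([][]byte{[]byte("k")})
	// Callers therefore must nil-check the response; in this commit the
	// real split path moves to TiKV's CmdSplitRegion RPC (hunks below).
	fmt.Println(resp == nil, err == nil) // true true
}
```

The hunks below are the other half of the change: the split request is now sent to TiKV directly as a `CmdSplitRegion` RPC instead of through PD.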
@@ -40,13 +40,18 @@ import (
     "fmt"
     "math"
 
+    "github.com/pingcap/kvproto/pkg/kvrpcpb"
+    "github.com/pingcap/kvproto/pkg/metapb"
     "github.com/pingcap/kvproto/pkg/pdpb"
     "github.com/pkg/errors"
     tikverr "github.com/tikv/client-go/v2/error"
+    "github.com/tikv/client-go/v2/internal/client"
     "github.com/tikv/client-go/v2/internal/kvrpc"
+    "github.com/tikv/client-go/v2/internal/locate"
     "github.com/tikv/client-go/v2/internal/logutil"
     "github.com/tikv/client-go/v2/internal/retry"
     "github.com/tikv/client-go/v2/kv"
+    "github.com/tikv/client-go/v2/tikvrpc"
     "github.com/tikv/client-go/v2/txnkv/rangetask"
     "github.com/tikv/client-go/v2/util"
     pd "github.com/tikv/pd/client"
@@ -59,7 +64,7 @@ func equalRegionStartKey(key, regionStartKey []byte) bool {
     return bytes.Equal(key, regionStartKey)
 }
 
-func (s *KVStore) splitBatchRegionsReq(bo *Backoffer, keys [][]byte, scatter bool, tableID *int64) (*pdpb.SplitRegionsResponse, error) {
+func (s *KVStore) splitBatchRegionsReq(bo *Backoffer, keys [][]byte, scatter bool, tableID *int64) (*tikvrpc.Response, error) {
     // equalRegionStartKey is used to filter split keys.
     // If the split key is equal to the start key of the region, then the key has been split, we need to skip the split key.
     groups, _, err := s.regionCache.GroupKeysByRegion(bo, keys, equalRegionStartKey)
@@ -85,9 +90,9 @@ func (s *KVStore) splitBatchRegionsReq(bo *Backoffer, keys [][]byte, scatter boo
     }
     if len(batches) == 1 {
         resp := s.batchSendSingleRegion(bo, batches[0], scatter, tableID)
-        return resp, err
+        return resp.Response, resp.Error
     }
-    ch := make(chan *pdpb.SplitRegionsResponse, len(batches))
+    ch := make(chan kvrpc.BatchResult, len(batches))
     for _, batch1 := range batches {
         go func(b kvrpc.Batch) {
             backoffer, cancel := bo.Fork()
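The fan-out channel now carries `kvrpc.BatchResult` values instead of `*pdpb.SplitRegionsResponse`. The type's definition is not part of this diff; from its usage here (`kvrpc.BatchResult{Response: resp}`, `batchResp.Error`, and the promoted field access `batchResp.Resp`), it appears to embed `*tikvrpc.Response` alongside a per-batch `Error`. A self-contained model of that inferred shape, with stand-in types rather than the client-go definitions:

```go
package main

import "fmt"

// Response stands in for tikvrpc.Response; Resp carries the typed reply.
type Response struct {
	Resp interface{}
}

// BatchResult models the inferred shape of kvrpc.BatchResult: an embedded
// *Response (so result.Resp is promoted) plus a per-batch error.
type BatchResult struct {
	*Response
	Error error
}

func main() {
	// Each goroutine sends exactly one result, success or failure, so the
	// receiver can drain len(batches) values without extra bookkeeping.
	ch := make(chan BatchResult, 2)
	ch <- BatchResult{Response: &Response{Resp: "split reply"}}
	ch <- BatchResult{Error: fmt.Errorf("context canceled")}
	for i := 0; i < 2; i++ {
		if r := <-ch; r.Error != nil {
			fmt.Println("batch failed:", r.Error)
		} else {
			fmt.Println("batch ok:", r.Resp) // promoted from the embedded Response
		}
	}
}
```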
@@ -97,38 +102,37 @@ func (s *KVStore) splitBatchRegionsReq(bo *Backoffer, keys [][]byte, scatter boo
                 select {
                 case ch <- s.batchSendSingleRegion(backoffer, b, scatter, tableID):
                 case <-bo.GetCtx().Done():
-                    resp := &pdpb.SplitRegionsResponse{}
-                    resp.Header.Error = &pdpb.Error{Message: err.Error()}
-                    ch <- resp
+                    ch <- kvrpc.BatchResult{Error: bo.GetCtx().Err()}
                 }
             }, func(r interface{}) {
                 if r != nil {
-                    resp := &pdpb.SplitRegionsResponse{}
-                    resp.Header.Error = &pdpb.Error{Message: err.Error()}
-                    ch <- resp
+                    ch <- kvrpc.BatchResult{Error: errors.Errorf("%v", r)}
                 }
             })
         }(batch1)
     }
 
-    srResp := &pdpb.SplitRegionsResponse{RegionsId: make([]uint64, len(keys)*2)}
+    srResp := &kvrpcpb.SplitRegionResponse{Regions: make([]*metapb.Region, 0, len(keys)*2)}
     for i := 0; i < len(batches); i++ {
         batchResp := <-ch
-        if batchResp.Header.Error != nil {
-            respErr := errors.New(batchResp.Header.Error.GetMessage())
-            logutil.BgLogger().Info("batch split regions failed", zap.Error(respErr))
+        if batchResp.Error != nil {
+            logutil.BgLogger().Info("batch split regions failed", zap.Error(batchResp.Error))
             if err == nil {
-                err = respErr
+                err = batchResp.Error
             }
         }
 
         // If the split succeeds and the scatter fails, we also need to add the region IDs.
-        srResp.RegionsId = append(srResp.RegionsId, batchResp.RegionsId...)
+        if batchResp.Response != nil {
+            spResp := batchResp.Resp.(*kvrpcpb.SplitRegionResponse)
+            regions := spResp.GetRegions()
+            srResp.Regions = append(srResp.Regions, regions...)
+        }
     }
-    return srResp, err
+    return &tikvrpc.Response{Resp: srResp}, err
 }
 
-func (s *KVStore) batchSendSingleRegion(bo *Backoffer, batch kvrpc.Batch, scatter bool, tableID *int64) *pdpb.SplitRegionsResponse {
+func (s *KVStore) batchSendSingleRegion(bo *Backoffer, batch kvrpc.Batch, scatter bool, tableID *int64) kvrpc.BatchResult {
     if val, err := util.EvalFailpoint("mockSplitRegionTimeout"); err == nil {
         if val.(bool) {
             if _, ok := bo.GetCtx().Deadline(); ok {
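One behavioral fix hidden in the hunk above: the old accumulator was created with `make([]uint64, len(keys)*2)`, a slice of that *length*, so the appended region IDs followed a zero-filled prefix; the replacement `make([]*metapb.Region, 0, len(keys)*2)` pre-sizes *capacity* only. A tiny runnable illustration of the difference:

```go
package main

import "fmt"

func main() {
	// Old pattern: length len(keys)*2, so appends land after a zeroed prefix.
	bad := make([]uint64, 4)
	bad = append(bad, 7)
	fmt.Println(bad) // [0 0 0 0 7]

	// New pattern: zero length, pre-sized capacity; appends start at index 0.
	good := make([]uint64, 0, 4)
	good = append(good, 7)
	fmt.Println(good) // [7]
}
```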
@@ -137,56 +141,80 @@ func (s *KVStore) batchSendSingleRegion(bo *Backoffer, batch kvrpc.Batch, scatte
         }
     }
 
-    resp, err := s.pdClient.SplitRegions(bo.GetCtx(), batch.Keys)
+    req := tikvrpc.NewRequest(tikvrpc.CmdSplitRegion, &kvrpcpb.SplitRegionRequest{
+        SplitKeys: batch.Keys,
+    }, kvrpcpb.Context{
+        Priority: kvrpcpb.CommandPri_Normal,
+    })
+
+    sender := locate.NewRegionRequestSender(s.regionCache, s.GetTiKVClient())
+    resp, err := sender.SendReq(bo, req, batch.RegionID, client.ReadTimeoutShort)
+
+    batchResp := kvrpc.BatchResult{Response: resp}
     if err != nil {
-        resp := &pdpb.SplitRegionsResponse{}
-        resp.Header = &pdpb.ResponseHeader{}
-        resp.Header.Error = &pdpb.Error{Message: err.Error()}
-        return resp
+        batchResp.Error = err
+        return batchResp
     }
-    if resp == nil {
-        return &pdpb.SplitRegionsResponse{
-            Header: &pdpb.ResponseHeader{
-                Error: &pdpb.Error{Message: "empty response"},
-            },
-        }
+    regionErr, err := resp.GetRegionError()
+    if err != nil {
+        batchResp.Error = err
+        return batchResp
+    }
+    if regionErr != nil {
+        err := bo.Backoff(retry.BoRegionMiss, errors.New(regionErr.String()))
+        if err != nil {
+            batchResp.Error = err
+            return batchResp
+        }
+        resp, err = s.splitBatchRegionsReq(bo, batch.Keys, scatter, tableID)
+        batchResp.Response = resp
+        batchResp.Error = err
+        return batchResp
     }
-    regionIDs := resp.GetRegionsId()
-    if len(regionIDs) > 0 {
-        // Divide a region into n, one of them may not need to be scattered,
-        // so n-1 needs to be scattered to other stores.
-        regionIDs = regionIDs[:len(regionIDs)-1]
-    }
+
+    spResp := resp.Resp.(*kvrpcpb.SplitRegionResponse)
+    regions := spResp.GetRegions()
+    if len(regions) > 0 {
+        // Divide a region into n, one of them may not need to be scattered,
+        // so n-1 needs to be scattered to other stores.
+        spResp.Regions = regions[:len(regions)-1]
+    }
+    var newRegionLeft string
+    if len(spResp.Regions) > 0 {
+        newRegionLeft = logutil.Hex(spResp.Regions[0]).String()
+    }
     logutil.BgLogger().Info("batch split regions complete",
         zap.Uint64("batch region ID", batch.RegionID.GetID()),
         zap.String("first at", kv.StrKey(batch.Keys[0])),
-        zap.Int("new region count", len(regionIDs)))
-    if resp.FinishedPercentage != 100 {
-        err = errors.Errorf("Fail to batch split regions, finishedPercentage : %d, batch region ID : %d",
-            resp.FinishedPercentage, batch.RegionID.GetID())
-        resp.Header.Error = &pdpb.Error{Message: err.Error()}
-    }
+        zap.String("first new region left", newRegionLeft),
+        zap.Int("new region count", len(spResp.Regions)))
 
     if !scatter {
-        return resp
+        return batchResp
     }
-    for i, id := range regionIDs {
-        if err = s.scatterRegion(bo, id, tableID); err == nil {
+    for i, r := range spResp.Regions {
+        if err = s.scatterRegion(bo, r.Id, tableID); err == nil {
             logutil.BgLogger().Info("batch split regions, scatter region complete",
                 zap.Uint64("batch region ID", batch.RegionID.GetID()),
-                zap.String("at", kv.StrKey(batch.Keys[i])))
+                zap.String("at", kv.StrKey(batch.Keys[i])),
+                zap.Stringer("new region left", logutil.Hex(r)))
             continue
         }
 
         logutil.BgLogger().Info("batch split regions, scatter region failed",
             zap.Uint64("batch region ID", batch.RegionID.GetID()),
             zap.String("at", kv.StrKey(batch.Keys[i])),
+            zap.Stringer("new region left", logutil.Hex(r)),
             zap.Error(err))
+        if batchResp.Error == nil {
+            batchResp.Error = err
+        }
         if _, ok := err.(*tikverr.ErrPDServerTimeout); ok {
             break
         }
     }
-    return resp
+    return batchResp
 }
 
 const (
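In the rewritten `batchSendSingleRegion` above, a transport error fails the batch immediately, while a region error (a stale region cache) first backs off with `retry.BoRegionMiss` and then re-enters `splitBatchRegionsReq`, so the keys are regrouped against the refreshed cache before retrying. A standalone sketch of that backoff-then-regroup control flow (all names here are stand-ins, not the client-go API):

```go
package main

import (
	"errors"
	"fmt"
)

// splitBatch models the control flow of batchSendSingleRegion: a region
// error backs off and re-enters the top-level grouping step instead of
// resending the same request blindly.
func splitBatch(keys []string, retriesLeft int) error {
	if retriesLeft == 0 {
		return errors.New("backoff budget exhausted")
	}
	if staleRegionCache(retriesLeft) {
		fmt.Println("region error: backing off, regrouping keys, retrying")
		return splitBatch(keys, retriesLeft-1) // stands in for re-entering splitBatchRegionsReq
	}
	fmt.Printf("split request sent for %d keys\n", len(keys))
	return nil
}

// staleRegionCache simulates a region error on the first attempt only.
func staleRegionCache(retriesLeft int) bool { return retriesLeft == 2 }

func main() {
	_ = splitBatch([]string{"a", "b"}, 2)
}
```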
@@ -199,8 +227,11 @@ func (s *KVStore) SplitRegions(ctx context.Context, splitKeys [][]byte, scatter
     bo := retry.NewBackofferWithVars(ctx, int(math.Min(float64(len(splitKeys))*splitRegionBackoff, maxSplitRegionsBackoff)), nil)
     resp, err := s.splitBatchRegionsReq(bo, splitKeys, scatter, tableID)
     regionIDs = make([]uint64, 0, len(splitKeys))
-    if resp != nil {
-        regionIDs = append(regionIDs, resp.GetRegionsId()...)
+    if resp != nil && resp.Resp != nil {
+        spResp := resp.Resp.(*kvrpcpb.SplitRegionResponse)
+        for _, r := range spResp.Regions {
+            regionIDs = append(regionIDs, r.Id)
+        }
         logutil.BgLogger().Info("split regions complete", zap.Int("region count", len(regionIDs)), zap.Uint64s("region IDs", regionIDs))
     }
     return regionIDs, err
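Finally, the caller-facing `SplitRegions` now unwraps the typed payload itself: the guard widens from `resp != nil` to `resp != nil && resp.Resp != nil`, since a failed batch can produce a response wrapper with no payload, and the region IDs are collected from the asserted `*kvrpcpb.SplitRegionResponse`. A standalone sketch of that guard-then-assert pattern (stand-in types again):

```go
package main

import "fmt"

type Response struct{ Resp interface{} }

type Region struct{ Id uint64 }

type SplitRegionResponse struct{ Regions []*Region }

func regionIDs(resp *Response) []uint64 {
	ids := make([]uint64, 0, 2)
	// Guard both the wrapper and its payload before the type assertion;
	// a batch that errored out can leave Resp nil.
	if resp != nil && resp.Resp != nil {
		spResp := resp.Resp.(*SplitRegionResponse)
		for _, r := range spResp.Regions {
			ids = append(ids, r.Id)
		}
	}
	return ids
}

func main() {
	fmt.Println(regionIDs(nil))         // []
	fmt.Println(regionIDs(&Response{})) // []
	fmt.Println(regionIDs(&Response{Resp: &SplitRegionResponse{
		Regions: []*Region{{Id: 11}, {Id: 12}},
	}})) // [11 12]
}
```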