mirror of https://github.com/tikv/client-go.git

txn: introduce pipelined memdb, Flush and BufferBatchGet for pipelined DML (#1114)

* Support pipelined memdb.
* Fix mutex for pipelined memdb.

Signed-off-by: you06 <you1474600@gmail.com>

parent 8c13f6b5a6
commit 8d28d3cd3a
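Before the file-by-file changes, a usage sketch of the pipelined DML flow this commit enables, assembled from the new tests further down. The helper name, the example package, and the error handling are illustrative only; Begin, Set, Flush, FlushWait and Commit are the calls the tests exercise:

package example

import (
	"context"

	"github.com/tikv/client-go/v2/tikv"
)

// pipelinedWrite stages a large batch of writes in the pipelined memdb and
// lets the client flush them to TiKV before the final commit.
// Sketch only: rollback on the error paths is omitted for brevity.
func pipelinedWrite(ctx context.Context, store *tikv.KVStore, kvs map[string][]byte) error {
	txn, err := store.Begin(tikv.WithPipelinedMemDB())
	if err != nil {
		return err
	}
	for k, v := range kvs {
		if err := txn.Set([]byte(k), v); err != nil {
			return err
		}
		// Flush(false) is a hint: it only flushes once the buffer reaches
		// MinFlushKeys/MinFlushSize; Flush(true) forces a flush.
		if _, err := txn.GetMemBuffer().Flush(false); err != nil {
			return err
		}
	}
	// Wait for any in-flight background flush before committing.
	if err := txn.GetMemBuffer().FlushWait(); err != nil {
		return err
	}
	return txn.Commit(ctx)
}

In short, Flush(false) lets the client batch writes until the thresholds are reached, Flush(true) forces an immediate flush, and FlushWait blocks until the in-flight flush finishes.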
@@ -20,7 +20,7 @@ require (
 github.com/opentracing/opentracing-go v1.2.0 // indirect
 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
 github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c // indirect
-github.com/pingcap/kvproto v0.0.0-20231222062942-c0c73f41d0b2 // indirect
+github.com/pingcap/kvproto v0.0.0-20240206021635-05a3758a1d24 // indirect
 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 // indirect
 github.com/pkg/errors v0.9.1 // indirect
 github.com/prometheus/client_golang v1.18.0 // indirect
(The same one-line kvproto bump appears in seven more go.mod files in this commit; those hunks are identical to the one above.)
go.mod (5 lines changed)
@@ -5,6 +5,7 @@ go 1.21
 require (
 github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2
+github.com/docker/go-units v0.5.0
 github.com/gogo/protobuf v1.3.2
 github.com/golang/protobuf v1.5.3
 github.com/google/btree v1.1.2
@@ -14,7 +15,7 @@ require (
 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c
 github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c
 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989
-github.com/pingcap/kvproto v0.0.0-20240109063850-932639606bcf
+github.com/pingcap/kvproto v0.0.0-20240206021635-05a3758a1d24
 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3
 github.com/pkg/errors v0.9.1
 github.com/prometheus/client_golang v1.18.0
@@ -30,7 +31,6 @@ require (
 go.uber.org/zap v1.26.0
 golang.org/x/sync v0.6.0
 google.golang.org/grpc v1.62.0
-google.golang.org/protobuf v1.32.0
 )

 require (
@@ -56,6 +56,7 @@ require (
 google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
 google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect
+google.golang.org/protobuf v1.32.0 // indirect
 gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 gopkg.in/yaml.v3 v3.0.1 // indirect
 )
go.sum (6 lines changed)
@@ -22,6 +22,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -74,8 +76,8 @@ github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c h1:CgbKAHto5CQgW
 github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew=
 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E=
 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
-github.com/pingcap/kvproto v0.0.0-20240109063850-932639606bcf h1:n3FMveYjc2VuETjo6YhmsgkDx0P/yLJTvk96BJdCq6Y=
-github.com/pingcap/kvproto v0.0.0-20240109063850-932639606bcf/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8=
+github.com/pingcap/kvproto v0.0.0-20240206021635-05a3758a1d24 h1:pdA3DvkChrIp91JQO89ICT1x/SemOAm7vC848acr5Ik=
+github.com/pingcap/kvproto v0.0.0-20240206021635-05a3758a1d24/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8=
 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw=
 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -2466,10 +2466,10 @@ func (s *testCommitterSuite) TestFlagsInMemBufferMutations() {
 	})

 	// Create memBufferMutations object and add keys with flags to it.
-	mutations := transaction.NewMemBufferMutationsProbe(db.Len(), db)
+	mutations := transaction.NewMemBufferMutationsProbe(db.Len(), db.GetMemDB())

 	forEachCase(func(op kvrpcpb.Op, key []byte, value []byte, i int, isPessimisticLock, assertExist, assertNotExist bool) {
-		handle := db.IterWithFlags(key, nil).Handle()
+		handle := db.GetMemDB().IterWithFlags(key, nil).Handle()
 		mutations.Push(op, isPessimisticLock, assertExist, assertNotExist, false, handle)
 	})

@@ -2499,8 +2499,8 @@ func (s *testCommitterSuite) TestExtractKeyExistsErr() {
 	txn.GetMemBuffer().UpdateFlags([]byte("de"), kv.DelPresumeKeyNotExists)
 	err = committer.PrewriteAllMutations(context.Background())
 	s.ErrorContains(err, "existErr")
-	s.True(txn.GetMemBuffer().TryLock())
-	txn.GetMemBuffer().Unlock()
+	s.True(txn.GetMemBuffer().GetMemDB().TryLock())
+	txn.GetMemBuffer().GetMemDB().Unlock()
 }

 func (s *testCommitterSuite) TestKillSignal() {
@@ -6,12 +6,12 @@ require (
 github.com/ninedraft/israce v0.0.3
 github.com/pingcap/errors v0.11.5-0.20231212100244-799fae176cfb
 github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c
-github.com/pingcap/kvproto v0.0.0-20240109063850-932639606bcf
-github.com/pingcap/tidb v1.1.0-beta.0.20240131080924-732fa8c98695
+github.com/pingcap/kvproto v0.0.0-20240206021635-05a3758a1d24
+github.com/pingcap/tidb v1.1.0-beta.0.20240126041650-de177d85b19e
 github.com/pkg/errors v0.9.1
 github.com/stretchr/testify v1.8.4
 github.com/tidwall/gjson v1.14.1
-github.com/tikv/client-go/v2 v2.0.8-0.20240125030910-e6f5a45b002e
+github.com/tikv/client-go/v2 v2.0.8-0.20240205071126-11cb7985f0ec
 github.com/tikv/pd/client v0.0.0-20240126020320-567c7d43a008
 go.uber.org/goleak v1.3.0
 )
@@ -51,7 +51,7 @@ require (
 github.com/google/btree v1.1.2 // indirect
 github.com/google/uuid v1.6.0 // indirect
 github.com/gorilla/mux v1.8.0 // indirect
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
 github.com/influxdata/tdigest v0.0.1 // indirect
 github.com/jellydator/ttlcache/v3 v3.0.1 // indirect
 github.com/klauspost/compress v1.17.4 // indirect
@@ -92,14 +92,14 @@ require (
 github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
 github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
 github.com/yusufpapurcu/wmi v1.2.3 // indirect
-go.etcd.io/etcd/api/v3 v3.5.10 // indirect
-go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
-go.etcd.io/etcd/client/v3 v3.5.10 // indirect
+go.etcd.io/etcd/api/v3 v3.5.12 // indirect
+go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect
+go.etcd.io/etcd/client/v3 v3.5.12 // indirect
 go.uber.org/atomic v1.11.0 // indirect
 go.uber.org/multierr v1.11.0 // indirect
 go.uber.org/zap v1.26.0 // indirect
 golang.org/x/crypto v0.19.0 // indirect
-golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e // indirect
+golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 // indirect
 golang.org/x/net v0.21.0 // indirect
 golang.org/x/sync v0.6.0 // indirect
 golang.org/x/sys v0.17.0 // indirect
@@ -118,5 +118,7 @@ require (

 replace (
 github.com/go-ldap/ldap/v3 => github.com/YangKeao/ldap/v3 v3.4.5-0.20230421065457-369a3bab1117
+github.com/pingcap/tidb => github.com/you06/tidb v1.1.0-beta.0.20240220121437-9d6b908d9a92
+github.com/pingcap/tidb/pkg/parser => github.com/you06/tidb/pkg/parser v0.0.0-20240220121437-9d6b908d9a92
 github.com/tikv/client-go/v2 => ../
 )
@ -47,8 +47,8 @@ github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3Uu
|
|||
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581 h1:Q/yk4z/cHUVZfgTqtD09qeYBxHwshQAjVRX73qs8UH0=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU=
|
||||
github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 h1:Jz3KVLYY5+JO7rDiX0sAuRGtuv2vG01r17Y9nLMWNUw=
|
||||
github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY=
|
||||
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
|
|
@ -129,8 +129,9 @@ github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ
|
|||
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
|
||||
github.com/dolthub/swiss v0.2.1 h1:gs2osYs5SJkAaH5/ggVJqXQxRXtWshF6uE0lgR/Y3Gw=
|
||||
github.com/dolthub/swiss v0.2.1/go.mod h1:8AhKZZ1HK7g18j7v7k6c5cYIGEZJcPn0ARsai8cUrh0=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
|
|
@ -140,8 +141,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
|||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
|
||||
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
|
||||
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
|
||||
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
|
|
@ -158,8 +159,8 @@ github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkPro
|
|||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
|
||||
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
|
|
@ -188,8 +189,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
|||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
|
||||
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
|
|
@ -248,16 +249,16 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
|
|||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
|
||||
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
|
|
@ -286,8 +287,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
|
|||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df h1:Zrb0IbuLOGHL7nrO2WrcuNWgDTlzFv3zY69QMx4ggQE=
|
||||
github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df/go.mod h1:mAVCUAYtW9NG31eB30umMSLKcDt6mCUWSjoSn5qBh0k=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
|
|
@ -313,7 +314,6 @@ github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo
|
|||
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
|
||||
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
|
|
@ -349,10 +349,10 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
|
|||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
|
||||
|
|
@ -415,18 +415,14 @@ github.com/pingcap/fn v1.0.0/go.mod h1:u9WZ1ZiOD1RpNhcI42RucFh/lBuzTu6rw88a+oF2Z
|
|||
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E=
|
||||
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
|
||||
github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w=
|
||||
github.com/pingcap/kvproto v0.0.0-20240109063850-932639606bcf h1:n3FMveYjc2VuETjo6YhmsgkDx0P/yLJTvk96BJdCq6Y=
|
||||
github.com/pingcap/kvproto v0.0.0-20240109063850-932639606bcf/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8=
|
||||
github.com/pingcap/kvproto v0.0.0-20240206021635-05a3758a1d24 h1:pdA3DvkChrIp91JQO89ICT1x/SemOAm7vC848acr5Ik=
|
||||
github.com/pingcap/kvproto v0.0.0-20240206021635-05a3758a1d24/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8=
|
||||
github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM=
|
||||
github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
|
||||
github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22 h1:2SOzvGvE8beiC1Y4g9Onkvu6UmuBBOeWRGQEjJaT/JY=
|
||||
github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
|
||||
github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 h1:QV6jqlfOkh8hqvEAgwBZa+4bSgO0EeKC7s5c6Luam2I=
|
||||
github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21/go.mod h1:QYnjfA95ZaMefyl1NO8oPtKeb8pYUdnDVhQgf+qdpjM=
|
||||
github.com/pingcap/tidb v1.1.0-beta.0.20240131080924-732fa8c98695 h1:7ELVL+rVzEZrDv4dEC995qHbv9K0dSKm7+wuwotiB8U=
|
||||
github.com/pingcap/tidb v1.1.0-beta.0.20240131080924-732fa8c98695/go.mod h1:rfdtaBHvL6w0uFmxz0BJIBHHUpR/satNS695+def0y8=
|
||||
github.com/pingcap/tidb/pkg/parser v0.0.0-20240111112854-1ad36eb0ef29 h1:OPF0SMFk0O298dzHisYnhotbTcDQC2l+h0Xs7QxUF88=
|
||||
github.com/pingcap/tidb/pkg/parser v0.0.0-20240111112854-1ad36eb0ef29/go.mod h1:yRkiqLFwIqibYg2P7h4bclHjHcJiIFRLKhGRyBcKYus=
|
||||
github.com/pingcap/tipb v0.0.0-20240116032918-9bb28c43bbfc h1:sEp4lbExDfnMX8HXQyhZrhqo2/SgeFY5KOdo5akc8FM=
|
||||
github.com/pingcap/tipb v0.0.0-20240116032918-9bb28c43bbfc/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
|
||||
|
|
@ -456,8 +452,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
|
|||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rivo/uniseg v0.4.6 h1:Sovz9sDSwbOz9tgUy8JpT+KgCkPYJEN/oYzlJiYTNLg=
|
||||
github.com/rivo/uniseg v0.4.6/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
|
|
@ -494,8 +490,8 @@ github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE
|
|||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
|
|
@ -535,8 +531,8 @@ github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0h
|
|||
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg=
|
||||
github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
|
||||
|
|
@ -557,12 +553,16 @@ github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f/go.mod h1:8sd
|
|||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457 h1:tBbuFCtyJNKT+BFAv6qjvTFpVdy97IYNaBwGUXifIUs=
|
||||
github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457/go.mod h1:pheqtXeHQFzxJk45lRQ0UIGIivKnLXvialZSFWs81A8=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
|
||||
github.com/you06/tidb v1.1.0-beta.0.20240220121437-9d6b908d9a92 h1:2n6DSHF79pe9BGKMrZ6bUFdV1AL1BJaC3wbkS3TiL2E=
|
||||
github.com/you06/tidb v1.1.0-beta.0.20240220121437-9d6b908d9a92/go.mod h1:WNewIzAOHkWHcmkh5Yw5HOU/fl9EhX4mBpNVmW6Eydk=
|
||||
github.com/you06/tidb/pkg/parser v0.0.0-20240220121437-9d6b908d9a92 h1:W7CHjUFBHqpCitLUaK1X2gN+AKsX6kgmKZ2D+YGuoHw=
|
||||
github.com/you06/tidb/pkg/parser v0.0.0-20240220121437-9d6b908d9a92/go.mod h1:MWQK6otJgZRI6zcCVPV22U4qE26qOGJnN4fq8XawgBs=
|
||||
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
|
||||
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
|
||||
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
|
||||
|
|
@ -575,44 +575,43 @@ github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFi
|
|||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
||||
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
|
||||
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
|
||||
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
|
||||
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
|
||||
go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
|
||||
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
|
||||
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
|
||||
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
|
||||
go.etcd.io/etcd/tests/v3 v3.5.10 h1:F1pbXwKxwZ58aBT2+CSL/r8WUCAVhob0y1y8OVJ204s=
|
||||
go.etcd.io/etcd/tests/v3 v3.5.10/go.mod h1:vVMWDv9OhopxfJCd+CMI4pih0zUDqlkJj6JcBNlUVXI=
|
||||
go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c=
|
||||
go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
|
||||
go.etcd.io/etcd/client/v2 v2.305.12 h1:0m4ovXYo1CHaA/Mp3X/Fak5sRNIWf01wk/X1/G3sGKI=
|
||||
go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E=
|
||||
go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg=
|
||||
go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.12 h1:OK2fZKI5hX/+BTK76gXSTyZMrbnARyX9S643GenNGb8=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.12/go.mod h1:UVwg/QIMoJncyeb/YxvJBJCE/NEwtHWashqc8A1nj/M=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.12 h1:7r22RufdDsq2z3STjoR7Msz6fYH8tmbkdheGfwJNRmU=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.12/go.mod h1:ERQuZVe79PI6vcC3DlKBukDCLja/L7YMu29B74Iwj4U=
|
||||
go.etcd.io/etcd/server/v3 v3.5.12 h1:EtMjsbfyfkwZuA2JlKOiBfuGkFCekv5H178qjXypbG8=
|
||||
go.etcd.io/etcd/server/v3 v3.5.12/go.mod h1:axB0oCjMy+cemo5290/CutIjoxlfA6KVYKD1w0uue10=
|
||||
go.etcd.io/etcd/tests/v3 v3.5.12 h1:k1fG7+F87Z7zKp57EcjXu9XgOsW0sfp5USqfzmMTIwM=
|
||||
go.etcd.io/etcd/tests/v3 v3.5.12/go.mod h1:CLWdnlr8bWNa8tjkmKFybPz5Ldjh9GuHbYhq1g9vpIo=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
|
||||
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
|
||||
go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
|
||||
go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ=
|
||||
go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
|
||||
go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
|
||||
go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
|
||||
go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
|
||||
go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
|
||||
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
|
||||
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
|
||||
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
|
|
@ -635,8 +634,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
|||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
|
||||
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
|
||||
|
|
@ -653,8 +652,8 @@ golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
|
|||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e h1:723BNChdd0c2Wk6WOE320qGBiPtYx0F0Bbm1kriShfE=
|
||||
golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
|
||||
golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 h1:/RIbNt/Zr7rVhIkQhooTxCxFcdWLGIKnZA4IXNFSrvo=
|
||||
golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
|
|
@ -729,6 +728,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
|
@ -786,12 +786,11 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
|
||||
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
||||
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
google.golang.org/api v0.156.0 h1:yloYcGbBtVYjLKQe4enCunxvwn3s2w/XPrrhVf6MsvQ=
|
||||
google.golang.org/api v0.156.0/go.mod h1:bUSmn4KFO0Q+69zo9CNIDp4Psi6BqM0np0CbzKRSiSY=
|
||||
google.golang.org/api v0.160.0 h1:SEspjXHVqE1m5a1fRy8JFB+5jSu+V0GEDKDghF3ttO4=
|
||||
google.golang.org/api v0.160.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
|
|
@ -879,8 +878,8 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm
|
|||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 h1:ucqkfpjg9WzSUubAO62csmucvxl4/JeW3F4I4909XkM=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 h1:e1sMhtVq9AfcEy8AXNb8eSg6gbzfdpYhoNqnPJa+GzI=
|
||||
@@ -0,0 +1,184 @@
// Copyright 2024 TiKV Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tikv_test

import (
	"bytes"
	"context"
	"strconv"
	"testing"
	"time"

	"github.com/pingcap/failpoint"
	"github.com/pingcap/tidb/pkg/store/mockstore/unistore"
	"github.com/stretchr/testify/suite"
	tikverr "github.com/tikv/client-go/v2/error"
	"github.com/tikv/client-go/v2/testutils"
	"github.com/tikv/client-go/v2/tikv"
)

const (
	MinFlushKeys = 10000
	MinFlushSize = 16 * 1024 * 1024 // 16MB
)

func TestPipelinedMemDB(t *testing.T) {
	suite.Run(t, new(testPipelinedMemDBSuite))
}

type testPipelinedMemDBSuite struct {
	suite.Suite
	cluster testutils.Cluster
	store   *tikv.KVStore
}

func (s *testPipelinedMemDBSuite) SetupTest() {
	if *withTiKV {
		s.store = NewTestStore(s.T())
		return
	}

	client, pdClient, cluster, err := unistore.New("", nil)
	s.Require().Nil(err)

	unistore.BootstrapWithSingleStore(cluster)
	s.cluster = cluster
	store, err := tikv.NewTestTiKVStore(fpClient{Client: &unistoreClientWrapper{client}}, pdClient, nil, nil, 0)
	s.Require().Nil(err)

	s.store = store
}

func (s *testPipelinedMemDBSuite) TearDownTest() {
	s.store.Close()
}

func (s *testPipelinedMemDBSuite) TestPipelinedAndFlush() {
	ctx := context.Background()
	txn, err := s.store.Begin(tikv.WithPipelinedMemDB())
	s.Nil(err)
	txn1, err := s.store.Begin()
	s.Nil(err)
	s.True(txn.IsPipelined())
	for i := 0; i < MinFlushKeys; i++ {
		key := []byte(strconv.Itoa(i))
		value := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
		txn.Set(key, value)
		flushed, err := txn.GetMemBuffer().Flush(false)
		s.Nil(err)
		if i < MinFlushKeys-1 {
			s.False(flushed)
		} else {
			s.True(flushed)
		}
		// txn can always read its own writes
		val, err := txn.Get(ctx, key)
		s.Nil(err)
		s.True(bytes.Equal(value, val))
		// txn1 cannot see it
		_, err = txn1.Get(ctx, key)
		s.True(tikverr.IsErrNotFound(err))
	}

	// commit the txn, then it's visible to other txns.
	s.Nil(txn.Commit(ctx))

	txn1, err = s.store.Begin()
	s.Nil(err)
	defer txn1.Rollback()
	for i := 0; i < MinFlushKeys; i++ {
		key := []byte(strconv.Itoa(i))
		expect := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
		val, err := txn1.Get(ctx, key)
		s.Nil(err)
		s.True(bytes.Equal(expect, val))
	}
}

func (s *testPipelinedMemDBSuite) TestPipelinedMemDBBufferGet() {
	ctx := context.Background()
	txn, err := s.store.Begin(tikv.WithPipelinedMemDB())
	s.Nil(err)
	for i := 0; i < 100; i++ {
		key := []byte(strconv.Itoa(i))
		value := key
		txn.Set(key, value)
		flushed, err := txn.GetMemBuffer().Flush(true)
		s.Nil(err)
		s.True(flushed)
	}
	for i := 0; i < 100; i++ {
		key := []byte(strconv.Itoa(i))
		expect := key
		val, err := txn.GetMemBuffer().Get(ctx, key)
		s.Nil(err)
		s.True(bytes.Equal(val, expect))
	}
	s.Nil(txn.GetMemBuffer().FlushWait())
	s.Nil(txn.Rollback())
}

func (s *testPipelinedMemDBSuite) TestPipelinedFlushBlock() {
	txn, err := s.store.Begin(tikv.WithPipelinedMemDB())
	s.Nil(err)
	txn.Set([]byte("key1"), []byte("value1"))

	s.Nil(failpoint.Enable("tikvclient/beforePipelinedFlush", `pause`))
	flushed, err := txn.GetMemBuffer().Flush(true)
	s.Nil(err)
	s.True(flushed)

	txn.Set([]byte("key2"), []byte("value2"))
	flushReturned := make(chan struct{})
	go func() {
		flushed, err := txn.GetMemBuffer().Flush(true)
		s.Nil(err)
		s.True(flushed)
		close(flushReturned)
	}()

	oneSec := time.After(time.Second)
	select {
	case <-flushReturned:
		s.Fail("Flush should be blocked")
	case <-oneSec:
	}
	s.Nil(failpoint.Disable("tikvclient/beforePipelinedFlush"))
	<-flushReturned
	s.Nil(txn.GetMemBuffer().FlushWait())
}

func (s *testPipelinedMemDBSuite) TestPipelinedSkipFlushedLock() {
	txn, err := s.store.Begin(tikv.WithPipelinedMemDB())
	s.Nil(err)
	txn.Set([]byte("key1"), []byte("value1"))
	flushed, err := txn.GetMemBuffer().Flush(true)
	s.Nil(err)
	s.True(flushed)
	s.Nil(txn.GetMemBuffer().FlushWait())
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	_, err = txn.GetSnapshot().Get(ctx, []byte("key1"))
	s.True(tikverr.IsErrNotFound(err))
	s.Nil(txn.Commit(context.Background()))

	// can see it after commit
	txn, err = s.store.Begin(tikv.WithPipelinedMemDB())
	s.Nil(err)
	defer txn.Rollback()
	val, err := txn.Get(context.Background(), []byte("key1"))
	s.Nil(err)
	s.Equal([]byte("value1"), val)
}
@@ -522,6 +522,14 @@ func (s *mockTikvGrpcServer) CancelDisaggTask(context.Context, *disaggregated.Ca
 	return nil, errors.New("unreachable")
 }

+func (s *mockTikvGrpcServer) KvFlush(context.Context, *kvrpcpb.FlushRequest) (*kvrpcpb.FlushResponse, error) {
+	return nil, errors.New("unreachable")
+}
+
+func (s *mockTikvGrpcServer) KvBufferBatchGet(context.Context, *kvrpcpb.BufferBatchGetRequest) (*kvrpcpb.BufferBatchGetResponse, error) {
+	return nil, errors.New("unreachable")
+}
+
 func (s *testRegionRequestToSingleStoreSuite) TestNoReloadRegionForGrpcWhenCtxCanceled() {
 	// prepare a mock tikv grpc server
 	addr := "localhost:56341"
@@ -86,6 +86,8 @@ type MemDB struct {
 	vlogInvalid bool
 	dirty bool
 	stages []MemDBCheckpoint
+	// when the MemDB is wrapped by an upper RWMutex, we can skip the internal mutex.
+	skipMutex bool
 }

 func newMemDB() *MemDB {
@@ -96,6 +98,7 @@ func newMemDB() *MemDB {
 	db.entrySizeLimit = math.MaxUint64
 	db.bufferSizeLimit = math.MaxUint64
 	db.vlog.memdb = db
+	db.skipMutex = false
 	return db
 }

@@ -103,8 +106,10 @@ func newMemDB() *MemDB {
 // Subsequent writes will be temporarily stored in this new staging buffer.
 // When you think all modifications look good, you can call `Release` to publish all of them to the upper level buffer.
 func (db *MemDB) Staging() int {
-	db.Lock()
-	defer db.Unlock()
+	if !db.skipMutex {
+		db.Lock()
+		defer db.Unlock()
+	}

 	db.stages = append(db.stages, db.vlog.checkpoint())
 	return len(db.stages)
@@ -112,8 +117,10 @@ func (db *MemDB) Staging() int {

 // Release publishes all modifications in the latest staging buffer to the upper level.
 func (db *MemDB) Release(h int) {
-	db.Lock()
-	defer db.Unlock()
+	if !db.skipMutex {
+		db.Lock()
+		defer db.Unlock()
+	}

 	if h != len(db.stages) {
 		// This should never happen in a production environment.
@@ -133,8 +140,10 @@ func (db *MemDB) Release(h int) {
 // Cleanup cleans up the resources referenced by the StagingHandle.
 // If the changes are not published by `Release`, they will be discarded.
 func (db *MemDB) Cleanup(h int) {
-	db.Lock()
-	defer db.Unlock()
+	if !db.skipMutex {
+		db.Lock()
+		defer db.Unlock()
+	}

 	if h > len(db.stages) {
 		return
@@ -311,8 +320,10 @@ func (db *MemDB) Dirty() bool {
 }

 func (db *MemDB) set(key []byte, value []byte, ops ...kv.FlagsOp) error {
-	db.Lock()
-	defer db.Unlock()
+	if !db.skipMutex {
+		db.Lock()
+		defer db.Unlock()
+	}

 	if db.vlogInvalid {
 		// panic for easier debugging.
@@ -877,3 +888,13 @@ func (db *MemDB) SetMemoryFootprintChangeHook(hook func(uint64)) {
 func (db *MemDB) Mem() uint64 {
 	return db.allocator.capacity + db.vlog.capacity
 }
+
+// SetEntrySizeLimit sets the size limit for each entry and the total buffer.
+func (db *MemDB) SetEntrySizeLimit(entryLimit, bufferLimit uint64) {
+	db.entrySizeLimit = entryLimit
+	db.bufferSizeLimit = bufferLimit
+}
+
+func (db *MemDB) setSkipMutex(skip bool) {
+	db.skipMutex = skip
+}
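The new skipMutex field encodes an ownership contract: when a wrapper already serializes every MemDB access behind its own lock, the internal mutex is redundant. A minimal sketch of such an owner, assuming it lives in the same unionstore package (lockedBuffer is a hypothetical type; PipelinedMemDB later in this diff applies the same idea via setSkipMutex):

package unionstore

import "sync"

// lockedBuffer is a hypothetical owner that serializes all access to an
// embedded MemDB behind its own RWMutex, so it can disable the MemDB's
// internal locking with setSkipMutex(true).
type lockedBuffer struct {
	sync.RWMutex
	db *MemDB
}

func newLockedBuffer() *lockedBuffer {
	db := newMemDB()
	// Safe only because every method of lockedBuffer takes the outer lock
	// first; without that guarantee Staging/Release/set would race.
	db.setSkipMutex(true)
	return &lockedBuffer{db: db}
}

func (b *lockedBuffer) Set(key, value []byte) error {
	b.Lock()
	defer b.Unlock()
	return b.db.Set(key, value)
}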
@@ -35,6 +35,8 @@
 package unionstore

 import (
+	"context"
+
 	tikverr "github.com/tikv/client-go/v2/error"
 )

@@ -87,7 +89,7 @@ type memdbSnapGetter struct {
 	cp MemDBCheckpoint
 }

-func (snap *memdbSnapGetter) Get(key []byte) ([]byte, error) {
+func (snap *memdbSnapGetter) Get(ctx context.Context, key []byte) ([]byte, error) {
 	x := snap.db.traverse(key, false)
 	if x.isNull() {
 		return nil, tikverr.ErrNotExist
@ -0,0 +1,343 @@
|
|||
// Copyright 2024 TiKV Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package unionstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/pingcap/errors"
|
||||
tikverr "github.com/tikv/client-go/v2/error"
|
||||
"github.com/tikv/client-go/v2/kv"
|
||||
"github.com/tikv/client-go/v2/metrics"
|
||||
"github.com/tikv/client-go/v2/util"
|
||||
)
|
||||
|
||||
// PipelinedMemDB is a Store which contains
// - a mutable buffer for reads and writes
// - an immutable on-flushing buffer for reads
// Like MemDB, PipelinedMemDB CANNOT be used concurrently.
|
||||
type PipelinedMemDB struct {
|
||||
// Like MemDB, this RWMutex is only used to ensure that memdbSnapGetter.Get will not race with
|
||||
// concurrent memdb.Set, memdb.SetWithFlags, memdb.Delete and memdb.UpdateFlags.
|
||||
sync.RWMutex
|
||||
onFlushing atomic.Bool
|
||||
errCh chan error
|
||||
flushFunc FlushFunc
|
||||
bufferBatchGetter BufferBatchGetter
|
||||
memDB *MemDB
|
||||
flushingMemDB *MemDB // the flushingMemDB is not wrapped by a mutex, because there is no data race in it.
|
||||
len, size int // len and size record the accumulated length and size of the flushed and on-flushing memdbs.
|
||||
generation uint64
|
||||
entryLimit, bufferLimit uint64
|
||||
}
|
||||
|
||||
const (
|
||||
// MinFlushKeys is the minimum number of keys to trigger flush.
|
||||
// A small batch can lead to poor performance and resource waste in random-write workloads.
|
||||
// 10K batch size is large enough to get good performance with random write workloads in tests.
|
||||
MinFlushKeys = 10000
|
||||
// MinFlushSize is the minimum size of MemDB to trigger flush.
|
||||
MinFlushSize = 16 * 1024 * 1024 // 16MB
|
||||
// ForceFlushSizeThreshold is the threshold to force flush MemDB, which controls the max memory consumption of PipelinedMemDB.
|
||||
ForceFlushSizeThreshold = 128 * 1024 * 1024 // 128MB
|
||||
)
|
||||
|
||||
type pipelinedMemDBSkipRemoteBuffer struct{}
|
||||
|
||||
// TODO: skipping the remote buffer via context is too obscure; add a dedicated method that reads only the local buffer.
|
||||
var pipelinedMemDBSkipRemoteBufferKey = pipelinedMemDBSkipRemoteBuffer{}
|
||||
|
||||
// WithPipelinedMemDBSkipRemoteBuffer is used to skip reading remote buffer for saving RPC.
|
||||
func WithPipelinedMemDBSkipRemoteBuffer(ctx context.Context) context.Context {
|
||||
return context.WithValue(ctx, pipelinedMemDBSkipRemoteBufferKey, struct{}{})
|
||||
}
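
A minimal usage sketch (not part of the diff, names assumed): when the context carries this value, Get consults only the local mutable and flushing buffers and no BufferBatchGet RPC is issued.

func getLocalOnly(p *PipelinedMemDB, key []byte) ([]byte, error) {
	// Skip the remote buffer read: only p.memDB and p.flushingMemDB are consulted,
	// and tikverr.ErrNotExist is returned when neither contains the key.
	ctx := WithPipelinedMemDBSkipRemoteBuffer(context.Background())
	return p.Get(ctx, key)
}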
|
||||
|
||||
type FlushFunc func(uint64, *MemDB) error
|
||||
type BufferBatchGetter func(ctx context.Context, keys [][]byte) (map[string][]byte, error)
|
||||
|
||||
func NewPipelinedMemDB(bufferBatchGetter BufferBatchGetter, flushFunc FlushFunc) *PipelinedMemDB {
|
||||
memdb := newMemDB()
|
||||
memdb.setSkipMutex(true)
|
||||
return &PipelinedMemDB{
|
||||
memDB: memdb,
|
||||
errCh: make(chan error, 1),
|
||||
flushFunc: flushFunc,
|
||||
bufferBatchGetter: bufferBatchGetter,
|
||||
generation: 0,
|
||||
// keep entryLimit and bufferLimit the same as the memdb's default values.
|
||||
entryLimit: memdb.entrySizeLimit,
|
||||
bufferLimit: memdb.bufferSizeLimit,
|
||||
}
|
||||
}
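
A hedged usage sketch of the constructor (mirroring the tests later in this diff; the getter and flush callbacks are placeholders, not real implementations):

func examplePipelinedMemDB() error {
	getter := func(ctx context.Context, keys [][]byte) (map[string][]byte, error) {
		return nil, nil // placeholder remote buffer getter
	}
	flush := func(generation uint64, db *MemDB) error {
		return nil // placeholder: a real callback would send Flush RPCs for db's mutations
	}
	p := NewPipelinedMemDB(getter, flush)
	if err := p.Set([]byte("k"), []byte("v")); err != nil {
		return err
	}
	// Flush(true) forces a flush regardless of the key/size thresholds;
	// FlushWait blocks until the background flush goroutine reports its result.
	if _, err := p.Flush(true); err != nil {
		return err
	}
	return p.FlushWait()
}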
|
||||
|
||||
// Dirty returns whether the pipelined buffer is mutated.
|
||||
func (p *PipelinedMemDB) Dirty() bool {
|
||||
return p.memDB.Dirty() || p.len > 0
|
||||
}
|
||||
|
||||
// GetMemDB implements MemBuffer interface.
|
||||
func (p *PipelinedMemDB) GetMemDB() *MemDB {
|
||||
panic("GetMemDB should not be invoked for PipelinedMemDB")
|
||||
}
|
||||
|
||||
// Get returns the value for the given key; it returns tikverr.ErrNotExist if the key does not exist.
|
||||
// The priority of the value is MemBuffer > flushingMemDB > flushed memdbs.
|
||||
func (p *PipelinedMemDB) Get(ctx context.Context, k []byte) ([]byte, error) {
|
||||
v, err := p.memDB.Get(k)
|
||||
if err == nil {
|
||||
return v, nil
|
||||
}
|
||||
if !tikverr.IsErrNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
if p.flushingMemDB != nil {
|
||||
v, err = p.flushingMemDB.Get(k)
|
||||
if err == nil {
|
||||
return v, nil
|
||||
}
|
||||
if !tikverr.IsErrNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if ctx.Value(pipelinedMemDBSkipRemoteBufferKey) != nil {
|
||||
return nil, tikverr.ErrNotExist
|
||||
}
|
||||
// read remote buffer
|
||||
var (
|
||||
dataMap map[string][]byte
|
||||
ok bool
|
||||
)
|
||||
dataMap, err = p.bufferBatchGetter(ctx, [][]byte{k})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v, ok = dataMap[string(k)]
|
||||
if !ok {
|
||||
return nil, tikverr.ErrNotExist
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (p *PipelinedMemDB) GetFlags(k []byte) (kv.KeyFlags, error) {
|
||||
f, err := p.memDB.GetFlags(k)
|
||||
if p.flushingMemDB != nil && tikverr.IsErrNotFound(err) {
|
||||
f, err = p.flushingMemDB.GetFlags(k)
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (p *PipelinedMemDB) UpdateFlags(k []byte, ops ...kv.FlagsOp) {
|
||||
p.memDB.UpdateFlags(k, ops...)
|
||||
}
|
||||
|
||||
// Set sets the value for key k in the MemBuffer.
|
||||
func (p *PipelinedMemDB) Set(key, value []byte) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
return p.memDB.Set(key, value)
|
||||
}
|
||||
|
||||
// SetWithFlags sets the value for key k in the MemBuffer with flags.
|
||||
func (p *PipelinedMemDB) SetWithFlags(key, value []byte, ops ...kv.FlagsOp) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
return p.memDB.SetWithFlags(key, value, ops...)
|
||||
}
|
||||
|
||||
// Delete deletes the key k in the MemBuffer.
|
||||
func (p *PipelinedMemDB) Delete(key []byte) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
return p.memDB.Delete(key)
|
||||
}
|
||||
|
||||
// DeleteWithFlags deletes the key k in the MemBuffer with flags.
|
||||
func (p *PipelinedMemDB) DeleteWithFlags(key []byte, ops ...kv.FlagsOp) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
return p.memDB.DeleteWithFlags(key, ops...)
|
||||
}
|
||||
|
||||
// Flush is called during the execution of a transaction; it flushes when there are enough keys and the ongoing flushingMemDB is done.
// The first returned value indicates whether a flush was triggered.
// The second returned value is the error if there is a failure; the txn should abort on error.
|
||||
// When the mutable memdb is too large, it blocks until the ongoing flush is done.
|
||||
func (p *PipelinedMemDB) Flush(force bool) (bool, error) {
|
||||
if p.flushFunc == nil {
|
||||
return false, errors.New("flushFunc is not provided")
|
||||
}
|
||||
if !force && !p.needFlush() {
|
||||
return false, nil
|
||||
}
|
||||
if p.flushingMemDB != nil {
|
||||
if err := <-p.errCh; err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
p.onFlushing.Store(true)
|
||||
p.flushingMemDB = p.memDB
|
||||
p.len += p.flushingMemDB.Len()
|
||||
p.size += p.flushingMemDB.Size()
|
||||
p.memDB = newMemDB()
|
||||
p.memDB.SetEntrySizeLimit(p.entryLimit, p.bufferLimit)
|
||||
p.memDB.setSkipMutex(true)
|
||||
p.generation++
|
||||
go func(generation uint64) {
|
||||
util.EvalFailpoint("beforePipelinedFlush")
|
||||
metrics.TiKVPipelinedFlushLenHistogram.Observe(float64(p.flushingMemDB.Len()))
|
||||
metrics.TiKVPipelinedFlushSizeHistogram.Observe(float64(p.flushingMemDB.Size()))
|
||||
flushStart := time.Now()
|
||||
err := p.flushFunc(generation, p.flushingMemDB)
|
||||
metrics.TiKVPipelinedFlushDuration.Observe(time.Since(flushStart).Seconds())
|
||||
p.onFlushing.Store(false)
|
||||
// Send the error to errCh after onFlushing status is set to false.
|
||||
// this guarantees the onFlushing.Store(true) in another goroutine's Flush happens after onFlushing.Store(false) in this function.
|
||||
p.errCh <- err
|
||||
}(p.generation)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (p *PipelinedMemDB) needFlush() bool {
|
||||
size := p.memDB.Size()
|
||||
// size < MinFlushSize, do not flush.
|
||||
// MinFlushSize <= size < ForceFlushSizeThreshold && keys < MinFlushKeys, do not flush.
|
||||
// MinFlushSize <= size < ForceFlushSizeThreshold && keys >= MinFlushKeys, flush.
|
||||
// size >= ForceFlushSizeThreshold, flush.
|
||||
if size < MinFlushSize || (p.memDB.Len() < MinFlushKeys && size < ForceFlushSizeThreshold) {
|
||||
return false
|
||||
}
|
||||
if p.onFlushing.Load() && size < ForceFlushSizeThreshold {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// FlushWait waits until all flushing tasks are done and returns the error if there is a failure.
|
||||
func (p *PipelinedMemDB) FlushWait() error {
|
||||
if p.flushingMemDB != nil {
|
||||
err := <-p.errCh
|
||||
// cleanup the flushingMemDB so the next call of FlushWait will not wait for the error channel.
|
||||
p.flushingMemDB = nil
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Iter implements the Retriever interface.
|
||||
func (p *PipelinedMemDB) Iter([]byte, []byte) (Iterator, error) {
|
||||
return nil, errors.New("pipelined memdb does not support Iter")
|
||||
}
|
||||
|
||||
// IterReverse implements the Retriever interface.
|
||||
func (p *PipelinedMemDB) IterReverse([]byte, []byte) (Iterator, error) {
|
||||
return nil, errors.New("pipelined memdb does not support IterReverse")
|
||||
}
|
||||
|
||||
// SetEntrySizeLimit sets the size limit for each entry and total buffer.
|
||||
func (p *PipelinedMemDB) SetEntrySizeLimit(entryLimit, bufferLimit uint64) {
|
||||
p.entryLimit, p.bufferLimit = entryLimit, bufferLimit
|
||||
p.memDB.SetEntrySizeLimit(entryLimit, bufferLimit)
|
||||
}
|
||||
|
||||
func (p *PipelinedMemDB) Len() int {
|
||||
return p.memDB.Len() + p.len
|
||||
}
|
||||
|
||||
func (p *PipelinedMemDB) Size() int {
|
||||
return p.memDB.Size() + p.size
|
||||
}
|
||||
|
||||
func (p *PipelinedMemDB) OnFlushing() bool {
|
||||
return p.onFlushing.Load()
|
||||
}
|
||||
|
||||
// SetMemoryFootprintChangeHook sets the hook for memory footprint change.
|
||||
// TODO: implement this.
|
||||
func (p *PipelinedMemDB) SetMemoryFootprintChangeHook(hook func(uint64)) {}
|
||||
|
||||
// Mem returns the memory usage of MemBuffer.
|
||||
// TODO: implement this.
|
||||
func (p *PipelinedMemDB) Mem() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type errIterator struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *errIterator) Valid() bool { return true }
|
||||
func (e *errIterator) Next() error { return e.err }
|
||||
func (e *errIterator) Key() []byte { return nil }
|
||||
func (e *errIterator) Value() []byte { return nil }
|
||||
func (e *errIterator) Close() {}
|
||||
|
||||
// SnapshotIter implements the MemBuffer interface; it returns an iterator that always yields an error.
|
||||
func (p *PipelinedMemDB) SnapshotIter(k, upperBound []byte) Iterator {
|
||||
return &errIterator{err: errors.New("SnapshotIter is not supported for PipelinedMemDB")}
|
||||
}
|
||||
|
||||
// SnapshotIterReverse implements the MemBuffer interface; it returns an iterator that always yields an error.
|
||||
func (p *PipelinedMemDB) SnapshotIterReverse(k, lowerBound []byte) Iterator {
|
||||
return &errIterator{err: errors.New("SnapshotIter is not supported for PipelinedMemDB")}
|
||||
}
|
||||
|
||||
// The following methods are not implemented for PipelinedMemDB and DO NOT return errors because of the interface limitation.
// They panic when called; the application should not use these methods when PipelinedMemDB is enabled.
|
||||
|
||||
// RemoveFromBuffer implements MemBuffer interface.
|
||||
func (p *PipelinedMemDB) RemoveFromBuffer(key []byte) {
|
||||
panic("RemoveFromBuffer is not supported for PipelinedMemDB")
|
||||
}
|
||||
|
||||
// InspectStage implements MemBuffer interface.
|
||||
func (p *PipelinedMemDB) InspectStage(int, func([]byte, kv.KeyFlags, []byte)) {
|
||||
panic("InspectStage is not supported for PipelinedMemDB")
|
||||
}
|
||||
|
||||
// SnapshotGetter implements MemBuffer interface.
|
||||
func (p *PipelinedMemDB) SnapshotGetter() Getter {
|
||||
panic("SnapshotGetter is not supported for PipelinedMemDB")
|
||||
}
|
||||
|
||||
// Staging is not supported for PipelinedMemDB; it panics when called.
|
||||
func (p *PipelinedMemDB) Staging() int {
|
||||
panic("Staging is not supported for PipelinedMemDB")
|
||||
}
|
||||
|
||||
// Cleanup implements MemBuffer interface.
|
||||
func (p *PipelinedMemDB) Cleanup(int) {
|
||||
panic("Cleanup is not supported for PipelinedMemDB")
|
||||
}
|
||||
|
||||
// Release implements MemBuffer interface.
|
||||
func (p *PipelinedMemDB) Release(int) {
|
||||
panic("Release is not supported for PipelinedMemDB")
|
||||
}
|
||||
|
||||
// Checkpoint implements MemBuffer interface.
|
||||
func (p *PipelinedMemDB) Checkpoint() *MemDBCheckpoint {
|
||||
panic("Checkpoint is not supported for PipelinedMemDB")
|
||||
}
|
||||
|
||||
// RevertToCheckpoint implements MemBuffer interface.
|
||||
func (p *PipelinedMemDB) RevertToCheckpoint(*MemDBCheckpoint) {
|
||||
panic("RevertToCheckpoint is not supported for PipelinedMemDB")
|
||||
}
@ -0,0 +1,299 @@
|
|||
// Copyright 2024 TiKV Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package unionstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
tikverr "github.com/tikv/client-go/v2/error"
|
||||
)
|
||||
|
||||
func emptyBufferBatchGetter(ctx context.Context, keys [][]byte) (map[string][]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func TestPipelinedFlushTrigger(t *testing.T) {
|
||||
avgKeySize := MinFlushSize / MinFlushKeys
|
||||
|
||||
// block the flush goroutine for checking the flushingMemDB status.
|
||||
blockCh := make(chan struct{})
|
||||
defer close(blockCh)
|
||||
// Will not flush when keys number >= MinFlushKeys and size < MinFlushSize
|
||||
memdb := NewPipelinedMemDB(emptyBufferBatchGetter, func(_ uint64, db *MemDB) error {
|
||||
<-blockCh
|
||||
return nil
|
||||
})
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(i))
|
||||
value := make([]byte, avgKeySize-len(key)-1) // (key + value) * MinFlushKeys < MinFlushSize
|
||||
memdb.Set(key, value)
|
||||
flushed, err := memdb.Flush(false)
|
||||
require.False(t, flushed)
|
||||
require.Nil(t, err)
|
||||
require.False(t, memdb.OnFlushing())
|
||||
}
|
||||
require.Equal(t, memdb.memDB.Len(), MinFlushKeys)
|
||||
require.Less(t, memdb.memDB.Size(), MinFlushSize)
|
||||
|
||||
// Will not flush when keys number < MinFlushKeys and size >= MinFlushSize
|
||||
memdb = NewPipelinedMemDB(emptyBufferBatchGetter, func(_ uint64, db *MemDB) error {
|
||||
<-blockCh
|
||||
return nil
|
||||
})
|
||||
for i := 0; i < MinFlushKeys-1; i++ {
|
||||
key := []byte(strconv.Itoa(i))
|
||||
value := make([]byte, avgKeySize-len(key)+1) // (key + value) * (MinFlushKeys - 1) > MinFlushSize
|
||||
memdb.Set(key, value)
|
||||
flushed, err := memdb.Flush(false)
|
||||
require.False(t, flushed)
|
||||
require.Nil(t, err)
|
||||
require.False(t, memdb.OnFlushing())
|
||||
}
|
||||
require.Less(t, memdb.memDB.Len(), MinFlushKeys)
|
||||
require.Greater(t, memdb.memDB.Size(), MinFlushSize)
|
||||
|
||||
// Flush when keys number >= MinFlushKeys and size >= MinFlushSize
|
||||
memdb = NewPipelinedMemDB(emptyBufferBatchGetter, func(_ uint64, db *MemDB) error {
|
||||
<-blockCh
|
||||
return nil
|
||||
})
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(i))
|
||||
value := make([]byte, avgKeySize-len(key)+1) // (key + value) * MinFlushKeys > MinFlushSize
|
||||
memdb.Set(key, value)
|
||||
flushed, err := memdb.Flush(false)
|
||||
require.Nil(t, err)
|
||||
if i == MinFlushKeys-1 {
|
||||
require.True(t, flushed)
|
||||
require.True(t, memdb.OnFlushing())
|
||||
} else {
|
||||
require.False(t, flushed)
|
||||
require.False(t, memdb.OnFlushing())
|
||||
}
|
||||
}
|
||||
require.Equal(t, memdb.memDB.Len(), 0)
|
||||
require.Equal(t, memdb.memDB.Size(), 0)
|
||||
// the flushingMemDB length and size should be added to the total length and size.
|
||||
require.Equal(t, memdb.Len(), MinFlushKeys)
|
||||
require.Equal(t, memdb.Size(), memdb.flushingMemDB.Size())
|
||||
}
|
||||
|
||||
func TestPipelinedFlushSkip(t *testing.T) {
|
||||
blockCh := make(chan struct{})
|
||||
memdb := NewPipelinedMemDB(emptyBufferBatchGetter, func(_ uint64, db *MemDB) error {
|
||||
<-blockCh
|
||||
return nil
|
||||
})
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(i))
|
||||
value := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
|
||||
memdb.Set(key, value)
|
||||
}
|
||||
flushed, err := memdb.Flush(false)
|
||||
require.True(t, flushed)
|
||||
require.Nil(t, err)
|
||||
require.True(t, memdb.OnFlushing())
|
||||
require.Equal(t, memdb.memDB.Len(), 0)
|
||||
require.Equal(t, memdb.memDB.Size(), 0)
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(MinFlushKeys + i))
|
||||
value := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
|
||||
memdb.Set(key, value)
|
||||
}
|
||||
flushed, err = memdb.Flush(false)
|
||||
require.Nil(t, err)
|
||||
// flush is skipped because there is an ongoing flush.
|
||||
require.False(t, flushed)
|
||||
require.Equal(t, memdb.memDB.Len(), MinFlushKeys)
|
||||
close(blockCh)
|
||||
require.Nil(t, memdb.FlushWait())
|
||||
// can flush when the ongoing flush is done.
|
||||
flushed, err = memdb.Flush(false)
|
||||
require.True(t, flushed)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, memdb.memDB.Len(), 0)
|
||||
require.Equal(t, memdb.len, 2*MinFlushKeys)
|
||||
}
|
||||
|
||||
func TestPipelinedFlushBlock(t *testing.T) {
|
||||
blockCh := make(chan struct{})
|
||||
defer close(blockCh)
|
||||
memdb := NewPipelinedMemDB(emptyBufferBatchGetter, func(_ uint64, db *MemDB) error {
|
||||
<-blockCh
|
||||
return nil
|
||||
})
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(i))
|
||||
value := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
|
||||
memdb.Set(key, value)
|
||||
}
|
||||
flushed, err := memdb.Flush(false)
|
||||
require.Nil(t, err)
|
||||
require.True(t, flushed)
|
||||
require.True(t, memdb.OnFlushing())
|
||||
require.Equal(t, memdb.memDB.Len(), 0)
|
||||
require.Equal(t, memdb.memDB.Size(), 0)
|
||||
|
||||
// When size of memdb is greater than ForceFlushSizeThreshold, Flush will be blocked.
|
||||
for i := 0; i < MinFlushKeys-1; i++ {
|
||||
key := []byte(strconv.Itoa(MinFlushKeys + i))
|
||||
value := make([]byte, ForceFlushSizeThreshold/(MinFlushKeys-1)-len(key)+1)
|
||||
memdb.Set(key, value)
|
||||
}
|
||||
require.Greater(t, memdb.memDB.Size(), ForceFlushSizeThreshold)
|
||||
flushReturned := make(chan struct{})
|
||||
oneSec := time.After(time.Second)
|
||||
go func() {
|
||||
flushed, err := memdb.Flush(false)
|
||||
require.Nil(t, err)
|
||||
require.True(t, flushed)
|
||||
close(flushReturned)
|
||||
}()
|
||||
select {
|
||||
case <-flushReturned:
|
||||
require.Fail(t, "Flush should be blocked")
|
||||
case <-oneSec:
|
||||
}
|
||||
require.True(t, memdb.OnFlushing())
|
||||
blockCh <- struct{}{} // first flush done
|
||||
<-flushReturned // second flush start
|
||||
require.True(t, memdb.OnFlushing())
|
||||
}
|
||||
|
||||
func TestPipelinedFlushGet(t *testing.T) {
|
||||
blockCh := make(chan struct{})
|
||||
defer close(blockCh)
|
||||
memdb := NewPipelinedMemDB(emptyBufferBatchGetter, func(_ uint64, db *MemDB) error {
|
||||
<-blockCh
|
||||
return nil
|
||||
})
|
||||
memdb.Set([]byte("key"), []byte("value"))
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(i))
|
||||
value := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
|
||||
memdb.Set(key, value)
|
||||
}
|
||||
value, err := memdb.Get(context.Background(), []byte("key"))
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, value, []byte("value"))
|
||||
flushed, err := memdb.Flush(false)
|
||||
require.True(t, flushed)
|
||||
require.Nil(t, err)
|
||||
require.True(t, memdb.OnFlushing())
|
||||
|
||||
// The key is in the flushing memdb instead of the current mutable memdb.
|
||||
_, err = memdb.memDB.Get([]byte("key"))
|
||||
require.True(t, tikverr.IsErrNotFound(err))
|
||||
// But we still can get the value by PipelinedMemDB.Get.
|
||||
value, err = memdb.Get(context.Background(), []byte("key"))
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, value, []byte("value"))
|
||||
|
||||
// finish the first flush
|
||||
blockCh <- struct{}{}
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(i))
|
||||
value := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
|
||||
memdb.Set(key, value)
|
||||
}
|
||||
flushed, err = memdb.Flush(false)
|
||||
require.Nil(t, err)
|
||||
require.True(t, flushed)
|
||||
require.True(t, memdb.OnFlushing())
|
||||
|
||||
// Now the key is guaranteed to be flushed into the stores; PipelinedMemDB.Get no longer sees it, but a snapshot get should.
|
||||
_, err = memdb.Get(context.Background(), []byte("key"))
|
||||
require.True(t, tikverr.IsErrNotFound(err))
|
||||
}
|
||||
|
||||
func TestPipelinedFlushSize(t *testing.T) {
|
||||
memdb := NewPipelinedMemDB(emptyBufferBatchGetter, func(_ uint64, db *MemDB) error {
|
||||
return nil
|
||||
})
|
||||
size := 0
|
||||
keys := 0
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(i))
|
||||
value := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
|
||||
keys++
|
||||
size += len(key) + len(value)
|
||||
memdb.Set(key, value)
|
||||
require.Equal(t, memdb.Len(), keys)
|
||||
require.Equal(t, memdb.Size(), size)
|
||||
}
|
||||
// keys & size should be accumulated into PipelinedMemDB.
|
||||
flushed, err := memdb.Flush(false)
|
||||
require.Nil(t, err)
|
||||
require.True(t, flushed)
|
||||
require.Equal(t, memdb.memDB.Len(), 0)
|
||||
require.Equal(t, memdb.memDB.Size(), 0)
|
||||
require.Equal(t, memdb.Len(), keys)
|
||||
require.Equal(t, memdb.Size(), size)
|
||||
|
||||
for i := 0; i < MinFlushKeys; i++ {
|
||||
key := []byte(strconv.Itoa(MinFlushKeys + i))
|
||||
value := make([]byte, MinFlushSize/MinFlushKeys-len(key)+1)
|
||||
keys++
|
||||
size += len(key) + len(value)
|
||||
memdb.Set(key, value)
|
||||
require.Equal(t, memdb.Len(), keys)
|
||||
require.Equal(t, memdb.Size(), size)
|
||||
}
|
||||
require.Equal(t, memdb.Len(), keys)
|
||||
require.Equal(t, memdb.Size(), size)
|
||||
// with final flush, keys & size should not be changed.
|
||||
flushed, err = memdb.Flush(true)
|
||||
require.Nil(t, err)
|
||||
require.True(t, flushed)
|
||||
require.Equal(t, memdb.Len(), keys)
|
||||
require.Equal(t, memdb.Size(), size)
|
||||
}
|
||||
|
||||
func TestPipelinedFlushGeneration(t *testing.T) {
|
||||
generationCh := make(chan uint64)
|
||||
memdb := NewPipelinedMemDB(emptyBufferBatchGetter, func(generation uint64, db *MemDB) error {
|
||||
generationCh <- generation
|
||||
return nil
|
||||
})
|
||||
for i := 0; i < 100; i++ {
|
||||
memdb.Set([]byte{uint8(i)}, []byte{uint8(i)})
|
||||
memdb.Flush(true)
|
||||
// generation starts from 1
|
||||
require.Equal(t, <-generationCh, uint64(i+1))
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorIterator(t *testing.T) {
|
||||
iteratorToErr := func(iter Iterator) {
|
||||
for iter.Valid() {
|
||||
err := iter.Next()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
t.Log("iterator does not return error")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
memdb := NewPipelinedMemDB(emptyBufferBatchGetter, func(_ uint64, db *MemDB) error {
|
||||
return nil
|
||||
})
|
||||
iteratorToErr(memdb.SnapshotIter(nil, nil))
|
||||
iteratorToErr(memdb.SnapshotIterReverse(nil, nil))
|
||||
}
@ -36,6 +36,7 @@ package unionstore
|
|||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
|
||||
tikverr "github.com/tikv/client-go/v2/error"
|
||||
"github.com/tikv/client-go/v2/kv"
|
||||
|
|
@ -54,7 +55,7 @@ type Iterator interface {
|
|||
type Getter interface {
|
||||
// Get gets the value for key k from kv store.
|
||||
// If corresponding kv pair does not exist, it returns nil and ErrNotExist.
|
||||
Get(k []byte) ([]byte, error)
|
||||
Get(ctx context.Context, k []byte) ([]byte, error)
|
||||
}
|
||||
|
||||
// uSnapshot defines the interface for the snapshot fetched from KV store.
|
||||
|
|
@ -78,26 +79,26 @@ type uSnapshot interface {
|
|||
// KVUnionStore is an in-memory Store which contains a buffer for write and a
|
||||
// snapshot for read.
|
||||
type KVUnionStore struct {
|
||||
memBuffer *MemDB
|
||||
memBuffer MemBuffer
|
||||
snapshot uSnapshot
|
||||
}
|
||||
|
||||
// NewUnionStore builds a new unionStore.
|
||||
func NewUnionStore(snapshot uSnapshot) *KVUnionStore {
|
||||
func NewUnionStore(memBuffer MemBuffer, snapshot uSnapshot) *KVUnionStore {
|
||||
return &KVUnionStore{
|
||||
snapshot: snapshot,
|
||||
memBuffer: newMemDB(),
|
||||
memBuffer: memBuffer,
|
||||
}
|
||||
}
|
||||
|
||||
// GetMemBuffer return the MemBuffer binding to this unionStore.
|
||||
func (us *KVUnionStore) GetMemBuffer() *MemDB {
|
||||
func (us *KVUnionStore) GetMemBuffer() MemBuffer {
|
||||
return us.memBuffer
|
||||
}
|
||||
|
||||
// Get implements the Retriever interface.
|
||||
func (us *KVUnionStore) Get(ctx context.Context, k []byte) ([]byte, error) {
|
||||
v, err := us.memBuffer.Get(k)
|
||||
v, err := us.memBuffer.Get(ctx, k)
|
||||
if tikverr.IsErrNotFound(err) {
|
||||
v, err = us.snapshot.Get(ctx, k)
|
||||
}
|
||||
|
|
@ -152,6 +153,109 @@ func (us *KVUnionStore) UnmarkPresumeKeyNotExists(k []byte) {
|
|||
|
||||
// SetEntrySizeLimit sets the size limit for each entry and total buffer.
|
||||
func (us *KVUnionStore) SetEntrySizeLimit(entryLimit, bufferLimit uint64) {
|
||||
us.memBuffer.entrySizeLimit = entryLimit
|
||||
us.memBuffer.bufferSizeLimit = bufferLimit
|
||||
if entryLimit == 0 {
|
||||
entryLimit = math.MaxUint64
|
||||
}
|
||||
if bufferLimit == 0 {
|
||||
bufferLimit = math.MaxUint64
|
||||
}
|
||||
us.memBuffer.SetEntrySizeLimit(entryLimit, bufferLimit)
|
||||
}
|
||||
|
||||
// MemBuffer is an interface that stores the mutations written during transaction execution.
|
||||
// It now unifies MemDB and PipelinedMemDB.
|
||||
// The implementations should follow the transaction guarantees:
|
||||
// 1. The transaction should see its own writes.
|
||||
// 2. The latter writes overwrite the earlier writes.
|
||||
type MemBuffer interface {
|
||||
// RLock locks the MemBuffer for shared reading.
|
||||
RLock()
|
||||
// RUnlock unlocks the MemBuffer for shared reading.
|
||||
RUnlock()
|
||||
// Get gets the value for key k from the MemBuffer.
|
||||
Get(context.Context, []byte) ([]byte, error)
|
||||
// GetFlags gets the flags for key k from the MemBuffer.
|
||||
GetFlags([]byte) (kv.KeyFlags, error)
|
||||
// Set sets the value for key k in the MemBuffer.
|
||||
Set([]byte, []byte) error
|
||||
// SetWithFlags sets the value for key k in the MemBuffer with flags.
|
||||
SetWithFlags([]byte, []byte, ...kv.FlagsOp) error
|
||||
// UpdateFlags updates the flags for key k in the MemBuffer.
|
||||
UpdateFlags([]byte, ...kv.FlagsOp)
|
||||
// RemoveFromBuffer removes the key k from the MemBuffer, only used for test.
|
||||
RemoveFromBuffer(key []byte)
|
||||
// Delete deletes the key k in the MemBuffer.
|
||||
Delete([]byte) error
|
||||
// DeleteWithFlags deletes the key k in the MemBuffer with flags.
|
||||
DeleteWithFlags([]byte, ...kv.FlagsOp) error
|
||||
// Iter implements the Retriever interface.
|
||||
Iter([]byte, []byte) (Iterator, error)
|
||||
// IterReverse implements the Retriever interface.
|
||||
IterReverse([]byte, []byte) (Iterator, error)
|
||||
// SnapshotIter returns an Iterator for a snapshot of MemBuffer.
|
||||
SnapshotIter([]byte, []byte) Iterator
|
||||
// SnapshotIterReverse returns a reversed Iterator for a snapshot of MemBuffer.
|
||||
SnapshotIterReverse([]byte, []byte) Iterator
|
||||
// SnapshotGetter returns a Getter for a snapshot of MemBuffer.
|
||||
SnapshotGetter() Getter
|
||||
// InspectStage iterates all buffered keys and values in MemBuffer.
|
||||
InspectStage(handle int, f func([]byte, kv.KeyFlags, []byte))
|
||||
// SetEntrySizeLimit sets the size limit for each entry and total buffer.
|
||||
SetEntrySizeLimit(uint64, uint64)
|
||||
// Dirty returns true if the MemBuffer is NOT read only.
|
||||
Dirty() bool
|
||||
// SetMemoryFootprintChangeHook sets the hook for memory footprint change.
|
||||
SetMemoryFootprintChangeHook(hook func(uint64))
|
||||
// Mem returns the memory usage of MemBuffer.
|
||||
Mem() uint64
|
||||
// Len returns the count of entries in the MemBuffer.
|
||||
Len() int
|
||||
// Size returns the size of the MemBuffer.
|
||||
Size() int
|
||||
// Staging create a new staging buffer inside the MemBuffer.
|
||||
Staging() int
|
||||
// Cleanup cleans up the resources referenced by the StagingHandle.
|
||||
Cleanup(int)
|
||||
// Release publishes all modifications in the latest staging buffer to the upper level.
|
||||
Release(int)
|
||||
// Checkpoint returns the checkpoint of the MemBuffer.
|
||||
Checkpoint() *MemDBCheckpoint
|
||||
// RevertToCheckpoint reverts the MemBuffer to the specified checkpoint.
|
||||
RevertToCheckpoint(*MemDBCheckpoint)
|
||||
// GetMemDB returns the MemDB binding to this MemBuffer.
|
||||
// This method can also be used for bypassing the wrapper of MemDB.
|
||||
GetMemDB() *MemDB
|
||||
// Flush flushes the pipelined memdb when the key count or size reaches the threshold.
// If force is true, it flushes the memdb regardless of the thresholds.
// It returns true when the memdb is flushed, and returns an error when there is any failure.
|
||||
Flush(force bool) (bool, error)
|
||||
// FlushWait waits until the flushing task is done and returns its error.
|
||||
FlushWait() error
|
||||
}
|
||||
|
||||
var (
|
||||
_ MemBuffer = &MemDBWithContext{}
|
||||
_ MemBuffer = &PipelinedMemDB{}
|
||||
)
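
An illustrative sketch (an assumed helper, not part of the change) of selecting an implementation behind the MemBuffer interface:

func newMemBuffer(pipelined bool, getter BufferBatchGetter, flush FlushFunc) MemBuffer {
	if pipelined {
		// Mutations are flushed to TiKV while the transaction is still executing.
		return NewPipelinedMemDB(getter, flush)
	}
	// Plain MemDB adapted to the context-aware Get signature.
	return NewMemDBWithContext()
}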
|
||||
|
||||
// MemDBWithContext wraps MemDB to satisfy the MemBuffer interface.
|
||||
type MemDBWithContext struct {
|
||||
*MemDB
|
||||
}
|
||||
|
||||
func NewMemDBWithContext() *MemDBWithContext {
|
||||
return &MemDBWithContext{MemDB: newMemDB()}
|
||||
}
|
||||
|
||||
func (db *MemDBWithContext) Get(_ context.Context, k []byte) ([]byte, error) {
|
||||
return db.MemDB.Get(k)
|
||||
}
|
||||
|
||||
func (db *MemDBWithContext) Flush(bool) (bool, error) { return false, nil }
|
||||
|
||||
func (db *MemDBWithContext) FlushWait() error { return nil }
|
||||
|
||||
// GetMemDB returns the inner MemDB
|
||||
func (db *MemDBWithContext) GetMemDB() *MemDB {
|
||||
return db.MemDB
|
||||
}
@ -45,7 +45,7 @@ import (
|
|||
func TestUnionStoreGetSet(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
store := newMemDB()
|
||||
us := NewUnionStore(&mockSnapshot{store})
|
||||
us := NewUnionStore(NewMemDBWithContext(), &mockSnapshot{store})
|
||||
|
||||
err := store.Set([]byte("1"), []byte("1"))
|
||||
assert.Nil(err)
|
||||
|
|
@ -64,7 +64,7 @@ func TestUnionStoreGetSet(t *testing.T) {
|
|||
func TestUnionStoreDelete(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
store := newMemDB()
|
||||
us := NewUnionStore(&mockSnapshot{store})
|
||||
us := NewUnionStore(NewMemDBWithContext(), &mockSnapshot{store})
|
||||
|
||||
err := store.Set([]byte("1"), []byte("1"))
|
||||
assert.Nil(err)
|
||||
|
|
@ -83,7 +83,7 @@ func TestUnionStoreDelete(t *testing.T) {
|
|||
func TestUnionStoreSeek(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
store := newMemDB()
|
||||
us := NewUnionStore(&mockSnapshot{store})
|
||||
us := NewUnionStore(NewMemDBWithContext(), &mockSnapshot{store})
|
||||
|
||||
err := store.Set([]byte("1"), []byte("1"))
|
||||
assert.Nil(err)
|
||||
|
|
@ -116,7 +116,7 @@ func TestUnionStoreSeek(t *testing.T) {
|
|||
func TestUnionStoreIterReverse(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
store := newMemDB()
|
||||
us := NewUnionStore(&mockSnapshot{store})
|
||||
us := NewUnionStore(NewMemDBWithContext(), &mockSnapshot{store})
|
||||
|
||||
err := store.Set([]byte("1"), []byte("1"))
|
||||
assert.Nil(err)
@ -105,6 +105,9 @@ var (
|
|||
TiKVStaleReadCounter *prometheus.CounterVec
|
||||
TiKVStaleReadReqCounter *prometheus.CounterVec
|
||||
TiKVStaleReadBytes *prometheus.CounterVec
|
||||
TiKVPipelinedFlushLenHistogram prometheus.Histogram
|
||||
TiKVPipelinedFlushSizeHistogram prometheus.Histogram
|
||||
TiKVPipelinedFlushDuration prometheus.Histogram
|
||||
)
|
||||
|
||||
// Label constants.
|
||||
|
|
@ -736,6 +739,33 @@ func initMetrics(namespace, subsystem string, constLabels prometheus.Labels) {
|
|||
Help: "Counter of stale read requests bytes",
|
||||
}, []string{LblResult, LblDirection})
|
||||
|
||||
TiKVPipelinedFlushLenHistogram = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "pipelined_flush_len",
|
||||
Help: "Bucketed histogram of length of pipelined flushed memdb",
|
||||
Buckets: prometheus.ExponentialBuckets(1000, 2, 16), // 1K ~ 32M
|
||||
})
|
||||
|
||||
TiKVPipelinedFlushSizeHistogram = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "pipelined_flush_size",
|
||||
Help: "Bucketed histogram of size of pipelined flushed memdb",
|
||||
Buckets: prometheus.ExponentialBuckets(16*1024*1024, 1.2, 13), // 16M ~ 142M
|
||||
})
|
||||
|
||||
TiKVPipelinedFlushDuration = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "pipelined_flush_duration",
|
||||
Help: "Flush time of pipelined memdb.",
|
||||
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 28), // 0.5ms ~ 18h
|
||||
})
|
||||
|
||||
initShortcuts()
|
||||
}
|
||||
|
||||
|
|
@ -820,6 +850,9 @@ func RegisterMetrics() {
|
|||
prometheus.MustRegister(TiKVStaleReadCounter)
|
||||
prometheus.MustRegister(TiKVStaleReadReqCounter)
|
||||
prometheus.MustRegister(TiKVStaleReadBytes)
|
||||
prometheus.MustRegister(TiKVPipelinedFlushLenHistogram)
|
||||
prometheus.MustRegister(TiKVPipelinedFlushSizeHistogram)
|
||||
prometheus.MustRegister(TiKVPipelinedFlushDuration)
|
||||
}
|
||||
|
||||
// readCounter reads the value of a prometheus.Counter.
@ -855,6 +855,13 @@ func WithStartTS(startTS uint64) TxnOption {
|
|||
}
|
||||
}
|
||||
|
||||
// WithPipelinedMemDB creates transaction with pipelined memdb.
|
||||
func WithPipelinedMemDB() TxnOption {
|
||||
return func(st *transaction.TxnOptions) {
|
||||
st.PipelinedMemDB = true
|
||||
}
|
||||
}
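
A hedged sketch of how a caller might enable the option; the Begin signature accepting variadic TxnOption values and its return type are assumed from the surrounding client API:

func beginPipelinedTxn(store *tikv.KVStore) (*transaction.KVTxn, error) {
	// The transaction buffers mutations in a PipelinedMemDB and flushes
	// them to TiKV during execution instead of keeping them all in memory.
	return store.Begin(tikv.WithPipelinedMemDB())
}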
|
||||
|
||||
// TODO: remove once tidb and br are ready
|
||||
|
||||
// KVTxn contains methods to interact with a TiKV transaction.
@ -52,5 +52,10 @@ type Iterator = unionstore.Iterator
|
|||
// If there are persistent flags associated with a key, we keep the key in the node without a value.
|
||||
type MemDB = unionstore.MemDB
|
||||
|
||||
// MemBuffer is the interface for the MemDB buffer.
|
||||
type MemBuffer = unionstore.MemBuffer
|
||||
|
||||
// MemDBCheckpoint is the checkpoint of memory DB.
|
||||
type MemDBCheckpoint = unionstore.MemDBCheckpoint
|
||||
|
||||
var WithPipelinedMemDBSkipRemoteBuffer = unionstore.WithPipelinedMemDBSkipRemoteBuffer
@ -74,6 +74,8 @@ const (
|
|||
CmdCheckSecondaryLocks
|
||||
CmdFlashbackToVersion
|
||||
CmdPrepareFlashbackToVersion
|
||||
CmdFlush
|
||||
CmdBufferBatchGet
|
||||
|
||||
CmdRawGet CmdType = 256 + iota
|
||||
CmdRawBatchGet
|
||||
|
|
@ -212,6 +214,10 @@ func (t CmdType) String() string {
|
|||
return "PrepareFlashbackToVersion"
|
||||
case CmdGetTiFlashSystemTable:
|
||||
return "GetTiFlashSystemTable"
|
||||
case CmdFlush:
|
||||
return "Flush"
|
||||
case CmdBufferBatchGet:
|
||||
return "BufferBatchGet"
|
||||
}
|
||||
return "Unknown"
|
||||
}
|
||||
|
|
@ -542,11 +548,21 @@ func (req *Request) FlashbackToVersion() *kvrpcpb.FlashbackToVersionRequest {
|
|||
return req.Req.(*kvrpcpb.FlashbackToVersionRequest)
|
||||
}
|
||||
|
||||
// PrepareFlashbackToVersion returns PrepareFlashbackToVersion in request.
|
||||
// PrepareFlashbackToVersion returns PrepareFlashbackToVersionRequest in request.
|
||||
func (req *Request) PrepareFlashbackToVersion() *kvrpcpb.PrepareFlashbackToVersionRequest {
|
||||
return req.Req.(*kvrpcpb.PrepareFlashbackToVersionRequest)
|
||||
}
|
||||
|
||||
// Flush returns FlushRequest in request.
|
||||
func (req *Request) Flush() *kvrpcpb.FlushRequest {
|
||||
return req.Req.(*kvrpcpb.FlushRequest)
|
||||
}
|
||||
|
||||
// BufferBatchGet returns BufferBatchGetRequest in request.
|
||||
func (req *Request) BufferBatchGet() *kvrpcpb.BufferBatchGetRequest {
|
||||
return req.Req.(*kvrpcpb.BufferBatchGetRequest)
|
||||
}
|
||||
|
||||
// ToBatchCommandsRequest converts the request to an entry in BatchCommands request.
|
||||
func (req *Request) ToBatchCommandsRequest() *tikvpb.BatchCommandsRequest_Request {
|
||||
switch req.Type {
|
||||
|
|
@ -606,6 +622,10 @@ func (req *Request) ToBatchCommandsRequest() *tikvpb.BatchCommandsRequest_Reques
|
|||
return &tikvpb.BatchCommandsRequest_Request{Cmd: &tikvpb.BatchCommandsRequest_Request_FlashbackToVersion{FlashbackToVersion: req.FlashbackToVersion()}}
|
||||
case CmdPrepareFlashbackToVersion:
|
||||
return &tikvpb.BatchCommandsRequest_Request{Cmd: &tikvpb.BatchCommandsRequest_Request_PrepareFlashbackToVersion{PrepareFlashbackToVersion: req.PrepareFlashbackToVersion()}}
|
||||
case CmdFlush:
|
||||
return &tikvpb.BatchCommandsRequest_Request{Cmd: &tikvpb.BatchCommandsRequest_Request_Flush{Flush: req.Flush()}}
|
||||
case CmdBufferBatchGet:
|
||||
return &tikvpb.BatchCommandsRequest_Request{Cmd: &tikvpb.BatchCommandsRequest_Request_BufferBatchGet{BufferBatchGet: req.BufferBatchGet()}}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -677,6 +697,10 @@ func FromBatchCommandsResponse(res *tikvpb.BatchCommandsResponse_Response) (*Res
|
|||
return &Response{Resp: res.CheckTxnStatus}, nil
|
||||
case *tikvpb.BatchCommandsResponse_Response_CheckSecondaryLocks:
|
||||
return &Response{Resp: res.CheckSecondaryLocks}, nil
|
||||
case *tikvpb.BatchCommandsResponse_Response_Flush:
|
||||
return &Response{Resp: res.Flush}, nil
|
||||
case *tikvpb.BatchCommandsResponse_Response_BufferBatchGet:
|
||||
return &Response{Resp: res.BufferBatchGet}, nil
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
|
@ -801,6 +825,10 @@ func AttachContext(req *Request, rpcCtx kvrpcpb.Context) bool {
|
|||
req.FlashbackToVersion().Context = ctx
|
||||
case CmdPrepareFlashbackToVersion:
|
||||
req.PrepareFlashbackToVersion().Context = ctx
|
||||
case CmdFlush:
|
||||
req.Flush().Context = ctx
|
||||
case CmdBufferBatchGet:
|
||||
req.BufferBatchGet().Context = ctx
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
|
@ -971,6 +999,14 @@ func GenRegionErrorResp(req *Request, e *errorpb.Error) (*Response, error) {
|
|||
p = &kvrpcpb.PrepareFlashbackToVersionResponse{
|
||||
RegionError: e,
|
||||
}
|
||||
case CmdFlush:
|
||||
p = &kvrpcpb.FlushResponse{
|
||||
RegionError: e,
|
||||
}
|
||||
case CmdBufferBatchGet:
|
||||
p = &kvrpcpb.BufferBatchGetResponse{
|
||||
RegionError: e,
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("invalid request type %v", req.Type)
|
||||
}
|
||||
|
|
@ -1141,6 +1177,10 @@ func CallRPC(ctx context.Context, client tikvpb.TikvClient, req *Request) (*Resp
|
|||
resp.Resp, err = client.KvPrepareFlashbackToVersion(ctx, req.PrepareFlashbackToVersion())
|
||||
case CmdGetTiFlashSystemTable:
|
||||
resp.Resp, err = client.GetTiFlashSystemTable(ctx, req.GetTiFlashSystemTable())
|
||||
case CmdFlush:
|
||||
resp.Resp, err = client.KvFlush(ctx, req.Flush())
|
||||
case CmdBufferBatchGet:
|
||||
resp.Resp, err = client.KvBufferBatchGet(ctx, req.BufferBatchGet())
|
||||
default:
|
||||
return nil, errors.Errorf("invalid request type: %v", req.Type)
|
||||
}
|
||||
|
|
@ -1304,7 +1344,8 @@ func (req *Request) IsTxnWriteRequest() bool {
|
|||
req.Type == CmdTxnHeartBeat ||
|
||||
req.Type == CmdResolveLock ||
|
||||
req.Type == CmdFlashbackToVersion ||
|
||||
req.Type == CmdPrepareFlashbackToVersion {
|
||||
req.Type == CmdPrepareFlashbackToVersion ||
|
||||
req.Type == CmdFlush {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
|
@ -1357,7 +1398,11 @@ func (req *Request) GetStartTS() uint64 {
|
|||
case CmdFlashbackToVersion:
|
||||
return req.FlashbackToVersion().GetStartTs()
|
||||
case CmdPrepareFlashbackToVersion:
|
||||
req.PrepareFlashbackToVersion().GetStartTs()
|
||||
return req.PrepareFlashbackToVersion().GetStartTs()
|
||||
case CmdFlush:
|
||||
return req.Flush().GetStartTs()
|
||||
case CmdBufferBatchGet:
|
||||
return req.BufferBatchGet().GetVersion()
|
||||
case CmdCop:
|
||||
return req.Cop().GetStartTs()
|
||||
case CmdCopStream:
@ -195,6 +195,11 @@ type twoPhaseCommitter struct {
|
|||
isInternal bool
|
||||
|
||||
forUpdateTSConstraints map[string]uint64
|
||||
|
||||
pipelinedCommitInfo struct {
|
||||
primaryOp kvrpcpb.Op
|
||||
pipelinedStart, pipelinedEnd []byte
|
||||
}
|
||||
}
|
||||
|
||||
type memBufferMutations struct {
|
||||
|
|
@ -461,7 +466,7 @@ func (c *PlainMutations) AppendMutation(mutation PlainMutation) {
|
|||
|
||||
// newTwoPhaseCommitter creates a twoPhaseCommitter.
|
||||
func newTwoPhaseCommitter(txn *KVTxn, sessionID uint64) (*twoPhaseCommitter, error) {
|
||||
return &twoPhaseCommitter{
|
||||
committer := &twoPhaseCommitter{
|
||||
store: txn.store,
|
||||
txn: txn,
|
||||
startTS: txn.StartTS(),
|
||||
|
|
@ -471,7 +476,8 @@ func newTwoPhaseCommitter(txn *KVTxn, sessionID uint64) (*twoPhaseCommitter, err
|
|||
binlog: txn.binlog,
|
||||
diskFullOpt: kvrpcpb.DiskFullOpt_NotAllowedOnFull,
|
||||
resourceGroupName: txn.resourceGroupName,
|
||||
}, nil
|
||||
}
|
||||
return committer, nil
|
||||
}
|
||||
|
||||
func (c *twoPhaseCommitter) extractKeyExistsErr(err *tikverr.ErrKeyExist) error {
|
||||
|
|
@ -543,7 +549,7 @@ func (c *twoPhaseCommitter) initKeysAndMutations(ctx context.Context) error {
|
|||
var size, putCnt, delCnt, lockCnt, checkCnt int
|
||||
|
||||
txn := c.txn
|
||||
memBuf := txn.GetMemBuffer()
|
||||
memBuf := txn.GetMemBuffer().GetMemDB()
|
||||
sizeHint := txn.us.GetMemBuffer().Len()
|
||||
c.mutations = newMemBufferMutations(sizeHint, memBuf)
|
||||
c.isPessimistic = txn.IsPessimistic()
|
||||
|
|
@ -920,7 +926,9 @@ const CommitSecondaryMaxBackoff = 41000
|
|||
// doActionOnGroupMutations splits groups into batches (there is one group per region, and potentially many batches per group, but all mutations
|
||||
// in a batch will belong to the same region).
|
||||
func (c *twoPhaseCommitter) doActionOnGroupMutations(bo *retry.Backoffer, action twoPhaseCommitAction, groups []groupedMutations) error {
|
||||
action.tiKVTxnRegionsNumHistogram().Observe(float64(len(groups)))
|
||||
if histogram := action.tiKVTxnRegionsNumHistogram(); histogram != nil {
|
||||
histogram.Observe(float64(len(groups)))
|
||||
}
|
||||
|
||||
var sizeFunc = c.keySize
|
||||
|
||||
|
|
@ -1387,7 +1395,9 @@ func (c *twoPhaseCommitter) cleanup(ctx context.Context) {
|
|||
|
||||
cleanupKeysCtx := context.WithValue(c.store.Ctx(), retry.TxnStartKey, ctx.Value(retry.TxnStartKey))
|
||||
var err error
|
||||
if !c.isOnePC() {
|
||||
if c.txn.IsPipelined() {
|
||||
// TODO: cleanup pipelined txn
|
||||
} else if !c.isOnePC() {
|
||||
err = c.cleanupMutations(retry.NewBackofferWithVars(cleanupKeysCtx, cleanupMaxBackoff, c.txn.vars), c.mutations)
|
||||
} else if c.isPessimistic {
|
||||
err = c.pessimisticRollbackMutations(retry.NewBackofferWithVars(cleanupKeysCtx, cleanupMaxBackoff, c.txn.vars), c.mutations)
|
||||
|
|
@ -1460,23 +1470,25 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) {
|
|||
|
||||
commitDetail := c.getDetail()
|
||||
commitTSMayBeCalculated := false
|
||||
// Check async commit is available or not.
|
||||
if c.checkAsyncCommit() {
|
||||
commitTSMayBeCalculated = true
|
||||
c.setAsyncCommit(true)
|
||||
c.hasTriedAsyncCommit = true
|
||||
}
|
||||
// Check if 1PC is enabled.
|
||||
if c.checkOnePC() {
|
||||
commitTSMayBeCalculated = true
|
||||
c.setOnePC(true)
|
||||
c.hasTriedOnePC = true
|
||||
if !c.txn.isPipelined {
|
||||
// Check async commit is available or not.
|
||||
if c.checkAsyncCommit() {
|
||||
commitTSMayBeCalculated = true
|
||||
c.setAsyncCommit(true)
|
||||
c.hasTriedAsyncCommit = true
|
||||
}
|
||||
// Check if 1PC is enabled.
|
||||
if c.checkOnePC() {
|
||||
commitTSMayBeCalculated = true
|
||||
c.setOnePC(true)
|
||||
c.hasTriedOnePC = true
|
||||
}
|
||||
}
|
||||
|
||||
// if lazy uniqueness check is enabled in TiDB (@@constraint_check_in_place_pessimistic=0), for_update_ts might be
|
||||
// zero for a pessimistic transaction. We set it to the start_ts to force the PrewritePessimistic path in TiKV.
|
||||
// TODO: can we simply set for_update_ts = start_ts for all pessimistic transactions whose for_update_ts=0?
|
||||
if c.forUpdateTS == 0 {
|
||||
if c.forUpdateTS == 0 && !c.txn.isPipelined {
|
||||
for i := 0; i < c.mutations.Len(); i++ {
|
||||
if c.mutations.NeedConstraintCheckInPrewrite(i) {
|
||||
c.forUpdateTS = c.startTS
|
||||
|
|
@ -1528,6 +1540,20 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) {
|
|||
binlogChan = c.binlog.Prewrite(ctx, c.primary())
|
||||
}
|
||||
|
||||
if c.txn.IsPipelined() {
|
||||
if _, err = c.txn.GetMemBuffer().Flush(true); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = c.txn.GetMemBuffer().FlushWait(); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(c.pipelinedCommitInfo.pipelinedStart) == 0 || len(c.pipelinedCommitInfo.pipelinedEnd) == 0 {
|
||||
return errors.Errorf("unexpected empty pipelinedStart(%s) or pipelinedEnd(%s)",
|
||||
c.pipelinedCommitInfo.pipelinedStart, c.pipelinedCommitInfo.pipelinedEnd)
|
||||
}
|
||||
return c.commitFlushedMutations(bo)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
err = c.prewriteMutations(bo, c.mutations)
|
||||
|
|
@ -1719,7 +1745,7 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) {
|
|||
}
|
||||
|
||||
func (c *twoPhaseCommitter) commitTxn(ctx context.Context, commitDetail *util.CommitDetails) error {
|
||||
c.txn.GetMemBuffer().DiscardValues()
|
||||
c.txn.GetMemBuffer().GetMemDB().DiscardValues()
|
||||
start := time.Now()
|
||||
|
||||
// Use the VeryLongMaxBackoff to commit the primary key.
@ -72,7 +72,7 @@ func (b *BufferBatchGetter) BatchGet(ctx context.Context, keys [][]byte) (map[st
|
|||
bufferValues := make([][]byte, len(keys))
|
||||
shrinkKeys := make([][]byte, 0, len(keys))
|
||||
for i, key := range keys {
|
||||
val, err := b.buffer.Get(key)
|
||||
val, err := b.buffer.Get(ctx, key)
|
||||
if err == nil {
|
||||
bufferValues[i] = val
|
||||
continue
@ -83,7 +83,7 @@ func (s *mockBatchGetterStore) Len() int {
|
|||
return len(s.index)
|
||||
}
|
||||
|
||||
func (s *mockBatchGetterStore) Get(k []byte) ([]byte, error) {
|
||||
func (s *mockBatchGetterStore) Get(ctx context.Context, k []byte) ([]byte, error) {
|
||||
for i, key := range s.index {
|
||||
if kv.CmpKey(key, k) == 0 {
|
||||
return s.value[i], nil
|
||||
|
|
@ -95,7 +95,7 @@ func (s *mockBatchGetterStore) Get(k []byte) ([]byte, error) {
|
|||
func (s *mockBatchGetterStore) BatchGet(ctx context.Context, keys [][]byte) (map[string][]byte, error) {
|
||||
m := make(map[string][]byte)
|
||||
for _, k := range keys {
|
||||
v, err := s.Get(k)
|
||||
v, err := s.Get(ctx, k)
|
||||
if err == nil {
|
||||
m[string(k)] = v
|
||||
continue
@ -0,0 +1,403 @@
|
|||
// Copyright 2024 TiKV Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transaction
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/pingcap/errors"
|
||||
"github.com/pingcap/kvproto/pkg/kvrpcpb"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/tikv/client-go/v2/config/retry"
|
||||
tikverr "github.com/tikv/client-go/v2/error"
|
||||
"github.com/tikv/client-go/v2/internal/client"
|
||||
"github.com/tikv/client-go/v2/internal/locate"
|
||||
"github.com/tikv/client-go/v2/internal/logutil"
|
||||
"github.com/tikv/client-go/v2/kv"
|
||||
"github.com/tikv/client-go/v2/tikvrpc"
|
||||
"github.com/tikv/client-go/v2/txnkv/rangetask"
|
||||
"github.com/tikv/client-go/v2/txnkv/txnlock"
|
||||
"github.com/tikv/client-go/v2/util"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type actionPipelinedFlush struct {
|
||||
generation uint64
|
||||
}
|
||||
|
||||
var _ twoPhaseCommitAction = actionPipelinedFlush{}
|
||||
|
||||
func (action actionPipelinedFlush) String() string {
|
||||
return "pipelined_flush"
|
||||
}
|
||||
|
||||
func (action actionPipelinedFlush) tiKVTxnRegionsNumHistogram() prometheus.Observer {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *twoPhaseCommitter) buildPipelinedFlushRequest(batch batchMutations, generation uint64) *tikvrpc.Request {
|
||||
m := batch.mutations
|
||||
mutations := make([]*kvrpcpb.Mutation, m.Len())
|
||||
|
||||
for i := 0; i < m.Len(); i++ {
|
||||
assertion := kvrpcpb.Assertion_None
|
||||
if m.IsAssertExists(i) {
|
||||
assertion = kvrpcpb.Assertion_Exist
|
||||
}
|
||||
if m.IsAssertNotExist(i) {
|
||||
assertion = kvrpcpb.Assertion_NotExist
|
||||
}
|
||||
mutations[i] = &kvrpcpb.Mutation{
|
||||
Op: m.GetOp(i),
|
||||
Key: m.GetKey(i),
|
||||
Value: m.GetValue(i),
|
||||
Assertion: assertion,
|
||||
}
|
||||
}
|
||||
|
||||
minCommitTS := c.startTS + 1
|
||||
|
||||
req := &kvrpcpb.FlushRequest{
|
||||
Mutations: mutations,
|
||||
PrimaryKey: c.primary(),
|
||||
StartTs: c.startTS,
|
||||
MinCommitTs: minCommitTS,
|
||||
Generation: generation,
|
||||
LockTtl: max(defaultLockTTL, ManagedLockTTL),
|
||||
AssertionLevel: c.txn.assertionLevel,
|
||||
}
|
||||
|
||||
r := tikvrpc.NewRequest(
|
||||
tikvrpc.CmdFlush, req, kvrpcpb.Context{
|
||||
Priority: c.priority,
|
||||
SyncLog: c.syncLog,
|
||||
ResourceGroupTag: c.resourceGroupTag,
|
||||
DiskFullOpt: c.txn.diskFullOpt,
|
||||
TxnSource: c.txn.txnSource,
|
||||
MaxExecutionDurationMs: uint64(client.MaxWriteExecutionTime.Milliseconds()),
|
||||
RequestSource: c.txn.GetRequestSource(),
|
||||
ResourceControlContext: &kvrpcpb.ResourceControlContext{
|
||||
ResourceGroupName: c.resourceGroupName,
|
||||
},
|
||||
},
|
||||
)
|
||||
return r
|
||||
}
|
||||
|
||||
func (action actionPipelinedFlush) handleSingleBatch(
|
||||
c *twoPhaseCommitter, bo *retry.Backoffer, batch batchMutations,
|
||||
) (err error) {
|
||||
if len(c.primaryKey) == 0 {
|
||||
return errors.New("[pipelined dml] primary key should be set before pipelined flush")
|
||||
}
|
||||
|
||||
tBegin := time.Now()
|
||||
attempts := 0
|
||||
|
||||
req := c.buildPipelinedFlushRequest(batch, action.generation)
|
||||
sender := locate.NewRegionRequestSender(c.store.GetRegionCache(), c.store.GetTiKVClient())
|
||||
var resolvingRecordToken *int
|
||||
|
||||
for {
|
||||
attempts++
|
||||
reqBegin := time.Now()
|
||||
if reqBegin.Sub(tBegin) > slowRequestThreshold {
|
||||
logutil.BgLogger().Warn(
|
||||
"[pipelined dml] slow pipelined flush request",
|
||||
zap.Uint64("startTS", c.startTS),
|
||||
zap.Stringer("region", &batch.region),
|
||||
zap.Int("attempts", attempts),
|
||||
)
|
||||
tBegin = time.Now()
|
||||
}
|
||||
resp, _, err := sender.SendReq(bo, req, batch.region, client.ReadTimeoutShort)
|
||||
// Unexpected error occurs, return it
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
regionErr, err := resp.GetRegionError()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if regionErr != nil {
|
||||
// For other region error and the fake region error, backoff because
|
||||
// there's something wrong.
|
||||
// For the real EpochNotMatch error, don't backoff.
if regionErr.GetEpochNotMatch() == nil || locate.IsFakeRegionError(regionErr) {
err = bo.Backoff(retry.BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return err
}
}
if regionErr.GetDiskFull() != nil {
storeIds := regionErr.GetDiskFull().GetStoreId()
desc := " "
for _, i := range storeIds {
desc += strconv.FormatUint(i, 10) + " "
}

logutil.Logger(bo.GetCtx()).Error(
"Request failed cause of TiKV disk full",
zap.String("store_id", desc),
zap.String("reason", regionErr.GetDiskFull().GetReason()),
)

return errors.New(regionErr.String())
}
same, err := batch.relocate(bo, c.store.GetRegionCache())
if err != nil {
return err
}
if same {
continue
}
err = c.doActionOnMutations(bo, actionPipelinedFlush{generation: action.generation}, batch.mutations)
return err
}
if resp.Resp == nil {
return errors.WithStack(tikverr.ErrBodyMissing)
}
flushResp := resp.Resp.(*kvrpcpb.FlushResponse)
keyErrs := flushResp.GetErrors()
if len(keyErrs) == 0 {
// Clear the RPC Error since the request is evaluated successfully.
sender.SetRPCError(nil)

// Update CommitDetails
reqDuration := time.Since(reqBegin)
c.getDetail().MergeFlushReqDetails(
reqDuration,
batch.region.GetID(),
sender.GetStoreAddr(),
flushResp.ExecDetailsV2,
)

if batch.isPrimary {
// start keepalive after primary key is written.
c.run(c, nil)
}
return nil
}
locks := make([]*txnlock.Lock, 0, len(keyErrs))

for _, keyErr := range keyErrs {
// Check already exists error
if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
e := &tikverr.ErrKeyExist{AlreadyExist: alreadyExist}
return c.extractKeyExistsErr(e)
}

// Extract lock from key error
lock, err1 := txnlock.ExtractLockFromKeyErr(keyErr)
if err1 != nil {
return err1
}
logutil.BgLogger().Info(
"[pipelined dml] encounters lock",
zap.Uint64("session", c.sessionID),
zap.Uint64("txnID", c.startTS),
zap.Stringer("lock", lock),
)
// If an optimistic transaction encounters a lock with larger TS, this transaction will certainly
// fail due to a WriteConflict error. So we can construct and return an error here early.
// Pessimistic transactions don't need such an optimization. If this key needs a pessimistic lock,
// TiKV will return a PessimisticLockNotFound error directly if it encounters a different lock. Otherwise,
// TiKV returns lock.TTL = 0, and we still need to resolve the lock.
if lock.TxnID > c.startTS && !c.isPessimistic {
return tikverr.NewErrWriteConflictWithArgs(
c.startTS,
lock.TxnID,
0,
lock.Key,
kvrpcpb.WriteConflict_Optimistic,
)
}
locks = append(locks, lock)
}
if resolvingRecordToken == nil {
token := c.store.GetLockResolver().RecordResolvingLocks(locks, c.startTS)
resolvingRecordToken = &token
defer c.store.GetLockResolver().ResolveLocksDone(c.startTS, *resolvingRecordToken)
} else {
c.store.GetLockResolver().UpdateResolvingLocks(locks, c.startTS, *resolvingRecordToken)
}
resolveLockOpts := txnlock.ResolveLocksOptions{
CallerStartTS: c.startTS,
Locks: locks,
Detail: &c.getDetail().ResolveLock,
}
resolveLockRes, err := c.store.GetLockResolver().ResolveLocksWithOpts(bo, resolveLockOpts)
if err != nil {
return err
}
msBeforeExpired := resolveLockRes.TTL
if msBeforeExpired > 0 {
err = bo.BackoffWithCfgAndMaxSleep(
retry.BoTxnLock,
int(msBeforeExpired),
errors.Errorf("[pipelined dml] flush lockedKeys: %d", len(locks)),
)
if err != nil {
return err
}
}
}
}

func (c *twoPhaseCommitter) pipelinedFlushMutations(bo *retry.Backoffer, mutations CommitterMutations, generation uint64) error {
if span := opentracing.SpanFromContext(bo.GetCtx()); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("twoPhaseCommitter.pipelinedFlushMutations", opentracing.ChildOf(span.Context()))
defer span1.Finish()
bo.SetCtx(opentracing.ContextWithSpan(bo.GetCtx(), span1))
}

return c.doActionOnMutations(bo, actionPipelinedFlush{generation}, mutations)
}

func (c *twoPhaseCommitter) commitFlushedMutations(bo *retry.Backoffer) error {
logutil.BgLogger().Info("[pipelined dml] start to commit transaction",
zap.Int("keys", c.txn.GetMemBuffer().Len()),
zap.String("size", units.HumanSize(float64(c.txn.GetMemBuffer().Size()))))
commitTS, err := c.store.GetTimestampWithRetry(bo, c.txn.GetScope())
if err != nil {
logutil.Logger(bo.GetCtx()).Warn("[pipelined dml] commit transaction get commitTS failed",
zap.Error(err),
zap.Uint64("txnStartTS", c.startTS))
return err
}
atomic.StoreUint64(&c.commitTS, commitTS)

if _, err := util.EvalFailpoint("pipelinedCommitFail"); err == nil {
return errors.New("pipelined DML commit failed")
}

primaryMutation := NewPlainMutations(1)
primaryMutation.Push(c.pipelinedCommitInfo.primaryOp, c.primaryKey, nil, false, false, false, false)
if err = c.commitMutations(bo, &primaryMutation); err != nil {
return errors.Trace(err)
}
c.mu.Lock()
c.mu.committed = true
c.mu.Unlock()
logutil.BgLogger().Info("[pipelined dml] transaction is committed")

if _, err := util.EvalFailpoint("pipelinedSkipResolveLock"); err == nil {
return nil
}

// Asynchronously resolve the remaining locks.
commitBo := retry.NewBackofferWithVars(c.store.Ctx(), CommitSecondaryMaxBackoff, c.txn.vars)
go c.resolveFlushedLocks(commitBo, c.pipelinedCommitInfo.pipelinedStart, c.pipelinedCommitInfo.pipelinedEnd)
return nil
}
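
The commit path above only writes the primary key synchronously; the mutations flushed during execution already act as the secondaries. Below is a minimal, hedged sketch (not part of this commit) of how the pieces defined here are meant to be driven from inside the package; the caller and the exact backoff budget are assumptions.

// Hedged sketch only: illustrates the call order of the functions defined above.
func commitPipelinedSketch(ctx context.Context, c *twoPhaseCommitter) error {
	// During execution, buffered mutations were already flushed generation by
	// generation via pipelinedFlushMutations.
	bo := retry.NewBackoffer(ctx, CommitSecondaryMaxBackoff) // backoff budget is an assumption
	// Committing the primary key makes every flushed lock committable.
	if err := c.commitFlushedMutations(bo); err != nil {
		return err
	}
	// commitFlushedMutations itself spawns resolveFlushedLocks over
	// [pipelinedStart, pipelinedEnd], so nothing more is needed here.
	return nil
}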

// buildPipelinedResolveHandler returns a function which resolves all locks for the given region.
// If the region cache is stale, it reloads the region info and resolves the rest of the ranges.
// The function also counts the resolved regions.
func (c *twoPhaseCommitter) buildPipelinedResolveHandler(commit bool, resolved *atomic.Uint64) (rangetask.TaskHandler, error) {
commitVersion := uint64(0)
if commit {
commitVersion = atomic.LoadUint64(&c.commitTS)
if commitVersion == 0 {
return nil, errors.New("commitTS is 0")
}
}
maxBackOff := cleanupMaxBackoff
if commit {
maxBackOff = CommitSecondaryMaxBackoff
}
regionCache := c.store.GetRegionCache()
return func(ctx context.Context, r kv.KeyRange) (rangetask.TaskStat, error) {
start := r.StartKey
res := rangetask.TaskStat{}
for {
lreq := &kvrpcpb.ResolveLockRequest{
StartVersion: c.startTS,
CommitVersion: commitVersion,
}
req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, lreq, kvrpcpb.Context{
RequestSource: c.txn.GetRequestSource(),
})
bo := retry.NewBackoffer(ctx, maxBackOff)
loc, err := regionCache.LocateKey(bo, start)
if err != nil {
return res, err
}
resp, err := c.store.SendReq(bo, req, loc.Region, client.MaxWriteExecutionTime)
if err != nil {
err = bo.Backoff(retry.BoRegionMiss, err)
if err != nil {
logutil.Logger(bo.GetCtx()).Error("send resolve lock request error", zap.Error(err))
return res, err
}
continue
}
regionErr, err := resp.GetRegionError()
if err != nil {
logutil.Logger(bo.GetCtx()).Error("get region error failed", zap.Error(err))
return res, err
}
if regionErr != nil {
err = bo.Backoff(retry.BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
logutil.Logger(bo.GetCtx()).Error("send resolve lock get region error", zap.Error(err))
return res, err
}
continue
}
if resp.Resp == nil {
logutil.Logger(bo.GetCtx()).Error("send resolve lock response body missing", zap.Error(errors.WithStack(tikverr.ErrBodyMissing)))
return res, errors.WithStack(tikverr.ErrBodyMissing)
}
cmdResp := resp.Resp.(*kvrpcpb.ResolveLockResponse)
if keyErr := cmdResp.GetError(); keyErr != nil {
err = errors.Errorf("unexpected resolve err: %s", keyErr)
logutil.BgLogger().Error("resolveLock error", zap.Error(err))
return res, err
}
resolved.Add(1)
res.CompletedRegions++
if loc.EndKey == nil || bytes.Compare(loc.EndKey, r.EndKey) >= 0 {
return res, nil
}
// advance to the next region in the range
start = loc.EndKey
}
}, nil
}

func (c *twoPhaseCommitter) resolveFlushedLocks(bo *retry.Backoffer, start, end []byte) {
// TODO: implement cleanup.
const RESOLVE_CONCURRENCY = 8
var resolved atomic.Uint64
handler, err := c.buildPipelinedResolveHandler(true, &resolved)
if err != nil {
logutil.Logger(bo.GetCtx()).Error("[pipelined dml] build buildPipelinedResolveHandler error", zap.Error(err))
return
}
runner := rangetask.NewRangeTaskRunner("pipelined-dml-commit", c.store, RESOLVE_CONCURRENCY, handler)
if err = runner.RunOnRange(bo.GetCtx(), start, end); err != nil {
logutil.Logger(bo.GetCtx()).Error("[pipelined dml] commit transaction secondaries failed",
zap.Uint64("resolved regions", resolved.Load()),
zap.Error(err))
} else {
logutil.BgLogger().Info("[pipelined dml] commit transaction secondaries done",
zap.Uint64("resolved regions", resolved.Load()))
}
}

@ -50,6 +50,7 @@ import (
"time"

"github.com/dgryski/go-farm"
"github.com/docker/go-units"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"

@ -111,8 +112,9 @@ func (e *tempLockBufferEntry) trySkipLockingOnRetry(returnValue bool, checkExist
// TxnOptions indicates the option when beginning a transaction.
// TxnOptions are set by the TxnOption values passed to Begin
type TxnOptions struct {
TxnScope string
StartTS *uint64
TxnScope string
StartTS *uint64
PipelinedMemDB bool
}
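
For orientation, a hypothetical caller-side sketch of the new option; only the TxnOptions fields shown above come from this commit, while the helper name and the scope value are assumptions.

// Hypothetical illustration: ask for the pipelined memdb when beginning a transaction.
func pipelinedTxnOptionsSketch(startTS uint64) TxnOptions {
	return TxnOptions{
		TxnScope:       "global", // assumed scope value
		StartTS:        &startTS,
		PipelinedMemDB: true, // NewTiKVTxn calls InitPipelinedMemDB for such a transaction
	}
}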

// KVTxn contains methods to interact with a TiKV transaction.

@ -162,6 +164,8 @@ type KVTxn struct {
aggressiveLockingDirty atomic.Bool

forUpdateTSChecks map[string]uint64

isPipelined bool
}

// NewTiKVTxn creates a new KVTxn.

@ -169,7 +173,6 @@ func NewTiKVTxn(store kvstore, snapshot *txnsnapshot.KVSnapshot, startTS uint64,
cfg := config.GetGlobalConfig()
newTiKVTxn := &KVTxn{
snapshot: snapshot,
us: unionstore.NewUnionStore(snapshot),
store: store,
startTS: startTS,
startTime: time.Now(),

@ -181,6 +184,13 @@ func NewTiKVTxn(store kvstore, snapshot *txnsnapshot.KVSnapshot, startTS uint64,
diskFullOpt: kvrpcpb.DiskFullOpt_NotAllowedOnFull,
RequestSource: snapshot.RequestSource,
}
if !options.PipelinedMemDB {
newTiKVTxn.us = unionstore.NewUnionStore(unionstore.NewMemDBWithContext(), snapshot)
return newTiKVTxn, nil
}
if err := newTiKVTxn.InitPipelinedMemDB(); err != nil {
return nil, err
}
return newTiKVTxn, nil
}

@ -227,7 +237,7 @@ func (txn *KVTxn) BatchGet(ctx context.Context, keys [][]byte) (map[string][]byt
// v must NOT be nil or empty, otherwise it returns ErrCannotSetNilValue.
func (txn *KVTxn) Set(k []byte, v []byte) error {
txn.setCnt++
return txn.us.GetMemBuffer().Set(k, v)
return txn.GetMemBuffer().Set(k, v)
}

// String implements fmt.Stringer interface.

@ -254,7 +264,7 @@ func (txn *KVTxn) IterReverse(k, lowerBound []byte) (unionstore.Iterator, error)

// Delete removes the entry for key k from kv store.
func (txn *KVTxn) Delete(k []byte) error {
return txn.us.GetMemBuffer().Delete(k)
return txn.GetMemBuffer().Delete(k)
}

// SetSchemaLeaseChecker sets a hook to check schema version.

@ -269,6 +279,9 @@ func (txn *KVTxn) EnableForceSyncLog() {

// SetPessimistic indicates if the transaction should use pessimistic lock.
func (txn *KVTxn) SetPessimistic(b bool) {
if txn.IsPipelined() {
panic("can not set a txn with pipelined memdb to pessimistic mode")
}
txn.isPessimistic = b
}

@ -392,6 +405,147 @@ func (txn *KVTxn) IsPessimistic() bool {
return txn.isPessimistic
}

// IsPipelined returns true if it's a pipelined transaction.
func (txn *KVTxn) IsPipelined() bool {
return txn.isPipelined
}

// InitPipelinedMemDB initializes the pipelined memdb and the committer that flushes it.
func (txn *KVTxn) InitPipelinedMemDB() error {
if txn.committer != nil {
return errors.New("pipelined memdb should be set before the transaction is committed")
}
txn.isPipelined = true
txn.snapshot.SetPipelined(txn.startTS)
// TODO: set the correct sessionID
committer, err := newTwoPhaseCommitter(txn, 0)
if err != nil {
return err
}
txn.committer = committer
// disable 1pc and async commit for pipelined txn.
if txn.committer.isOnePC() || txn.committer.isAsyncCommit() {
logutil.BgLogger().Fatal("[pipelined dml] should not enable 1pc or async commit for pipelined txn",
zap.Uint64("startTS", txn.startTS),
zap.Bool("1pc", txn.committer.isOnePC()),
zap.Bool("async commit", txn.committer.isAsyncCommit()))
}
commitDetail := &util.CommitDetails{
ResolveLock: util.ResolveLockDetail{},
}
txn.committer.setDetail(commitDetail)
// The generation is increased whenever the memdb is flushed to the kv store.
// Note that the first generation is 1, which marks the locks written by pipelined DML.
flushedKeys, flushedSize := 0, 0
pipelinedMemDB := unionstore.NewPipelinedMemDB(func(ctx context.Context, keys [][]byte) (map[string][]byte, error) {
return txn.snapshot.BatchGetWithTier(ctx, keys, txnsnapshot.BatchGetBufferTier)
}, func(generation uint64, memdb *unionstore.MemDB) (err error) {
defer func() {
if err != nil {
txn.committer.ttlManager.close()
}
flushedKeys += memdb.Len()
flushedSize += memdb.Size()
}()
logutil.BgLogger().Info("[pipelined dml] flush memdb to kv store",
zap.Int("keys", memdb.Len()), zap.String("size", units.HumanSize(float64(memdb.Size()))),
zap.Int("flushed keys", flushedKeys), zap.String("flushed size", units.HumanSize(float64(flushedSize))))
// The flush function will not be called concurrently.
// TODO: set backoffer from upper context.
bo := retry.NewBackofferWithVars(context.Background(), 20000, nil)
mutations := newMemBufferMutations(memdb.Len(), memdb)
if memdb.Len() == 0 {
return nil
}
// update bounds
{
var it unionstore.Iterator
// lower bound
it = memdb.IterWithFlags(nil, nil)
if !it.Valid() {
return errors.New("invalid iterator")
}
startKey := it.Key()
if len(txn.committer.pipelinedCommitInfo.pipelinedStart) == 0 || bytes.Compare(txn.committer.pipelinedCommitInfo.pipelinedStart, startKey) > 0 {
txn.committer.pipelinedCommitInfo.pipelinedStart = make([]byte, len(startKey))
copy(txn.committer.pipelinedCommitInfo.pipelinedStart, startKey)
}
it.Close()
// upper bound
it = memdb.IterReverseWithFlags(nil)
if !it.Valid() {
return errors.New("invalid iterator")
}
endKey := it.Key()
if len(txn.committer.pipelinedCommitInfo.pipelinedEnd) == 0 || bytes.Compare(txn.committer.pipelinedCommitInfo.pipelinedEnd, endKey) < 0 {
txn.committer.pipelinedCommitInfo.pipelinedEnd = make([]byte, len(endKey))
copy(txn.committer.pipelinedCommitInfo.pipelinedEnd, endKey)
}
it.Close()
}
// TODO: reuse initKeysAndMutations
for it := memdb.IterWithFlags(nil, nil); it.Valid(); err = it.Next() {
if err != nil {
return err
}
flags := it.Flags()
var value []byte
var op kvrpcpb.Op

if !it.HasValue() {
if !flags.HasLocked() {
continue
}
op = kvrpcpb.Op_Lock
} else {
value = it.Value()
if len(value) > 0 {
op = kvrpcpb.Op_Put
if flags.HasPresumeKeyNotExists() {
op = kvrpcpb.Op_Insert
}
} else {
if flags.HasPresumeKeyNotExists() {
// Delete-your-writes keys in an optimistic txn need a not-exists check in the prewrite phase.
// Since `Op_CheckNotExists` does not write a lock, mark these keys so they are not used in the commit phase.
op = kvrpcpb.Op_CheckNotExists
} else {
if flags.HasNewlyInserted() {
// For delete-your-writes keys in pessimistic transactions, only lock the needed keys and skip
// the other deletes, for example the secondary index deletes.
// If `tidb_constraint_check_in_place` is enabled and the transaction is in optimistic mode,
// the logic is the same as in the pessimistic mode.
if flags.HasLocked() {
op = kvrpcpb.Op_Lock
} else {
continue
}
} else {
op = kvrpcpb.Op_Del
}
}
}
}

if len(txn.committer.primaryKey) == 0 && op != kvrpcpb.Op_CheckNotExists {
pk := it.Key()
txn.committer.primaryKey = make([]byte, len(pk))
// copy the primary key to avoid reference to the memory arena.
copy(txn.committer.primaryKey, pk)
txn.committer.pipelinedCommitInfo.primaryOp = op
}

mustExist, mustNotExist := flags.HasAssertExist(), flags.HasAssertNotExist()
if txn.assertionLevel == kvrpcpb.AssertionLevel_Off {
mustExist, mustNotExist = false, false
}
mutations.Push(op, false, mustExist, mustNotExist, flags.HasNeedConstraintCheckInPrewrite(), it.Handle())
}
return txn.committer.pipelinedFlushMutations(bo, mutations, generation)
})
txn.us = unionstore.NewUnionStore(pipelinedMemDB, txn.snapshot)
return nil
}

// IsCasualConsistency returns if the transaction allows linearizability
// inconsistency.
func (txn *KVTxn) IsCasualConsistency() bool {

@ -467,22 +621,26 @@ func (txn *KVTxn) Commit(ctx context.Context) error {
txn.committer = committer
}

txn.committer.SetDiskFullOpt(txn.diskFullOpt)
txn.committer.SetTxnSource(txn.txnSource)
committer.SetDiskFullOpt(txn.diskFullOpt)
committer.SetTxnSource(txn.txnSource)
txn.committer.forUpdateTSConstraints = txn.forUpdateTSChecks

defer committer.ttlManager.close()

initRegion := trace.StartRegion(ctx, "InitKeys")
err = committer.initKeysAndMutations(ctx)
initRegion.End()
if !txn.isPipelined {
initRegion := trace.StartRegion(ctx, "InitKeys")
err = committer.initKeysAndMutations(ctx)
initRegion.End()
} else if !txn.GetMemBuffer().Dirty() {
return nil
}
if err != nil {
if txn.IsPessimistic() {
txn.asyncPessimisticRollback(ctx, committer.mutations.GetKeys(), txn.committer.forUpdateTS)
}
return err
}
if committer.mutations.Len() == 0 {
if !txn.isPipelined && committer.mutations.Len() == 0 {
return nil
}

@ -583,6 +741,9 @@ func (txn *KVTxn) Rollback() error {
logutil.BgLogger().Error(err.Error())
}
}
if txn.IsPipelined() && txn.committer != nil {
txn.committer.ttlManager.close()
}
txn.close()
logutil.BgLogger().Debug("[kv] rollback txn", zap.Uint64("txnStartTS", txn.StartTS()))
if txn.isInternal() {

@ -611,7 +772,7 @@ func (txn *KVTxn) rollbackPessimisticLocks() error {

func (txn *KVTxn) collectLockedKeys() [][]byte {
keys := make([][]byte, 0, txn.lockedCnt)
buf := txn.GetMemBuffer()
buf := txn.GetMemBuffer().GetMemDB()
var err error
for it := buf.IterWithFlags(nil, nil); it.Valid(); err = it.Next() {
_ = err

@ -1410,18 +1571,13 @@ func (txn *KVTxn) Size() int {
return txn.us.GetMemBuffer().Size()
}

// Reset reset the Transaction to initial states.
func (txn *KVTxn) Reset() {
txn.us.GetMemBuffer().Reset()
}

// GetUnionStore returns the UnionStore binding to this transaction.
func (txn *KVTxn) GetUnionStore() *unionstore.KVUnionStore {
return txn.us
}

// GetMemBuffer return the MemBuffer binding to this transaction.
func (txn *KVTxn) GetMemBuffer() *unionstore.MemDB {
func (txn *KVTxn) GetMemBuffer() unionstore.MemBuffer {
return txn.us.GetMemBuffer()
}

@ -153,6 +153,7 @@ type KVSnapshot struct {
}
sampleStep uint32
*util.RequestSource
isPipelined bool
}

// NewTiKVSnapshot creates a snapshot of a TiKV store.

@ -200,10 +201,20 @@ func (s *KVSnapshot) IsInternal() bool {
// The map will not contain nonexistent keys.
// NOTE: Don't modify keys. Some code relies on the order of keys.
func (s *KVSnapshot) BatchGet(ctx context.Context, keys [][]byte) (map[string][]byte, error) {
return s.BatchGetWithTier(ctx, keys, BatchGetSnapshotTier)
}

const (
BatchGetSnapshotTier = 1 << iota
BatchGetBufferTier
)
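
A hedged usage sketch of the two read tiers (illustrative only, assuming package-internal access; not part of this commit): BatchGetSnapshotTier is the ordinary committed-snapshot read, while BatchGetBufferTier reads back mutations that a pipelined transaction has already flushed and is rejected unless the snapshot is marked pipelined.

// Illustrative only: choose the read tier depending on whether the snapshot
// belongs to a pipelined transaction.
func batchGetTierSketch(ctx context.Context, s *KVSnapshot, keys [][]byte) (map[string][]byte, error) {
	if s.isPipelined {
		// Read-your-own-writes path: fetch values the transaction has already flushed.
		return s.BatchGetWithTier(ctx, keys, BatchGetBufferTier)
	}
	// Default path: read committed data at the snapshot version.
	return s.BatchGetWithTier(ctx, keys, BatchGetSnapshotTier)
}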

// BatchGetWithTier gets the values of the given keys from the kv-server at the given read tier and returns a map containing key/value pairs.
func (s *KVSnapshot) BatchGetWithTier(ctx context.Context, keys [][]byte, readTier int) (map[string][]byte, error) {
// Check the cached value first.
m := make(map[string][]byte)
s.mu.RLock()
if s.mu.cached != nil {
if s.mu.cached != nil && readTier == BatchGetSnapshotTier {
tmp := make([][]byte, 0, len(keys))
for _, key := range keys {
if val, ok := s.mu.cached[string(key)]; ok {

@ -238,7 +249,7 @@ func (s *KVSnapshot) BatchGet(ctx context.Context, keys [][]byte) (map[string][]
s.mu.RUnlock()
// Create a map to collect key-values from region servers.
var mu sync.Mutex
err := s.batchGetKeysByRegions(bo, keys, func(k, v []byte) {
err := s.batchGetKeysByRegions(bo, keys, readTier, func(k, v []byte) {
if len(v) == 0 {
return
}

@ -257,6 +268,10 @@ func (s *KVSnapshot) BatchGet(ctx context.Context, keys [][]byte) (map[string][]
return nil, err
}

if readTier != BatchGetSnapshotTier {
return m, nil
}

// Update the cache.
s.mu.Lock()
if s.mu.cached == nil {

@ -323,7 +338,7 @@ func appendBatchKeysBySize(b []batchKeys, region locate.RegionVerID, keys [][]by
return b
}

func (s *KVSnapshot) batchGetKeysByRegions(bo *retry.Backoffer, keys [][]byte, collectF func(k, v []byte)) error {
func (s *KVSnapshot) batchGetKeysByRegions(bo *retry.Backoffer, keys [][]byte, readTier int, collectF func(k, v []byte)) error {
defer func(start time.Time) {
if s.IsInternal() {
metrics.TxnCmdHistogramWithBatchGetInternal.Observe(time.Since(start).Seconds())

@ -351,7 +366,7 @@ func (s *KVSnapshot) batchGetKeysByRegions(bo *retry.Backoffer, keys [][]byte, c
return nil
}
if len(batches) == 1 {
return s.batchGetSingleRegion(bo, batches[0], collectF)
return s.batchGetSingleRegion(bo, batches[0], readTier, collectF)
}
ch := make(chan error)
for _, batch1 := range batches {

@ -359,12 +374,12 @@ func (s *KVSnapshot) batchGetKeysByRegions(bo *retry.Backoffer, keys [][]byte, c
go func() {
backoffer, cancel := bo.Fork()
defer cancel()
ch <- s.batchGetSingleRegion(backoffer, batch, collectF)
ch <- s.batchGetSingleRegion(backoffer, batch, readTier, collectF)
}()
}
for i := 0; i < len(batches); i++ {
if e := <-ch; e != nil {
logutil.BgLogger().Debug("snapshot batchGet failed",
logutil.BgLogger().Debug("snapshot BatchGetWithTier failed",
zap.Error(e),
zap.Uint64("txnStartTS", s.version))
err = errors.WithStack(e)

@ -373,7 +388,40 @@ func (s *KVSnapshot) batchGetKeysByRegions(bo *retry.Backoffer, keys [][]byte, c
return err
}

func (s *KVSnapshot) batchGetSingleRegion(bo *retry.Backoffer, batch batchKeys, collectF func(k, v []byte)) error {
func (s *KVSnapshot) buildBatchGetRequest(keys [][]byte, busyThresholdMs int64, readTier int) (*tikvrpc.Request, error) {
ctx := kvrpcpb.Context{
Priority: s.priority.ToPB(),
NotFillCache: s.notFillCache,
TaskId: s.mu.taskID,
ResourceGroupTag: s.mu.resourceGroupTag,
IsolationLevel: s.isolationLevel.ToPB(),
ResourceControlContext: &kvrpcpb.ResourceControlContext{
ResourceGroupName: s.mu.resourceGroupName,
},
BusyThresholdMs: uint32(busyThresholdMs),
}
switch readTier {
case BatchGetSnapshotTier:
req := tikvrpc.NewReplicaReadRequest(tikvrpc.CmdBatchGet, &kvrpcpb.BatchGetRequest{
Keys: keys,
Version: s.version,
}, s.mu.replicaRead, &s.replicaReadSeed, ctx)
return req, nil
case BatchGetBufferTier:
if !s.isPipelined {
return nil, errors.New("only snapshot with pipelined dml can read from buffer")
}
req := tikvrpc.NewReplicaReadRequest(tikvrpc.CmdBufferBatchGet, &kvrpcpb.BufferBatchGetRequest{
Keys: keys,
Version: s.version,
}, s.mu.replicaRead, &s.replicaReadSeed, ctx)
return req, nil
default:
return nil, errors.Errorf("unknown read tier %d", readTier)
}
}

func (s *KVSnapshot) batchGetSingleRegion(bo *retry.Backoffer, batch batchKeys, readTier int, collectF func(k, v []byte)) error {
cli := NewClientHelper(s.store, &s.resolvedLocks, &s.committedLocks, false)
s.mu.RLock()
if s.mu.stats != nil {

@ -393,20 +441,10 @@ func (s *KVSnapshot) batchGetSingleRegion(bo *retry.Backoffer, batch batchKeys,
var readType string
for {
s.mu.RLock()
req := tikvrpc.NewReplicaReadRequest(tikvrpc.CmdBatchGet, &kvrpcpb.BatchGetRequest{
Keys: pending,
Version: s.version,
}, s.mu.replicaRead, &s.replicaReadSeed, kvrpcpb.Context{
Priority: s.priority.ToPB(),
NotFillCache: s.notFillCache,
TaskId: s.mu.taskID,
ResourceGroupTag: s.mu.resourceGroupTag,
IsolationLevel: s.isolationLevel.ToPB(),
ResourceControlContext: &kvrpcpb.ResourceControlContext{
ResourceGroupName: s.mu.resourceGroupName,
},
BusyThresholdMs: uint32(busyThresholdMs),
})
req, err := s.buildBatchGetRequest(pending, busyThresholdMs, readTier)
if err != nil {
return err
}
req.InputRequestSource = s.GetRequestSource()
if readType != "" {
req.ReadType = readType

@ -467,17 +505,32 @@ func (s *KVSnapshot) batchGetSingleRegion(bo *retry.Backoffer, batch batchKeys,
if same {
continue
}
return s.batchGetKeysByRegions(bo, pending, collectF)
return s.batchGetKeysByRegions(bo, pending, readTier, collectF)
}
if resp.Resp == nil {
return errors.WithStack(tikverr.ErrBodyMissing)
}
batchGetResp := resp.Resp.(*kvrpcpb.BatchGetResponse)
var (
lockedKeys [][]byte
locks []*txnlock.Lock

keyErr *kvrpcpb.KeyError
pairs []*kvrpcpb.KvPair
details *kvrpcpb.ExecDetailsV2
)
if keyErr := batchGetResp.GetError(); keyErr != nil {
switch v := resp.Resp.(type) {
case *kvrpcpb.BatchGetResponse:
keyErr = v.GetError()
pairs = v.Pairs
details = v.GetExecDetailsV2()
case *kvrpcpb.BufferBatchGetResponse:
keyErr = v.GetError()
pairs = v.Pairs
details = v.GetExecDetailsV2()
default:
return errors.Errorf("unknown response %T", v)
}
if keyErr != nil {
// If a response-level error happens, skip reading pairs.
lock, err := txnlock.ExtractLockFromKeyErr(keyErr)
if err != nil {

@ -486,7 +539,7 @@ func (s *KVSnapshot) batchGetSingleRegion(bo *retry.Backoffer, batch batchKeys,
lockedKeys = append(lockedKeys, lock.Key)
locks = append(locks, lock)
} else {
for _, pair := range batchGetResp.Pairs {
for _, pair := range pairs {
keyErr := pair.GetError()
if keyErr == nil {
collectF(pair.GetKey(), pair.GetValue())

@ -500,17 +553,17 @@ func (s *KVSnapshot) batchGetSingleRegion(bo *retry.Backoffer, batch batchKeys,
locks = append(locks, lock)
}
}
if batchGetResp.ExecDetailsV2 != nil {
readKeys := len(batchGetResp.Pairs)
if details != nil {
readKeys := len(pairs)
var readTime float64
if timeDetail := batchGetResp.ExecDetailsV2.GetTimeDetailV2(); timeDetail != nil {
if timeDetail := details.GetTimeDetailV2(); timeDetail != nil {
readTime = float64(timeDetail.GetKvReadWallTimeNs()) / 1000000000.
} else if timeDetail := batchGetResp.ExecDetailsV2.GetTimeDetail(); timeDetail != nil {
} else if timeDetail := details.GetTimeDetail(); timeDetail != nil {
readTime = float64(timeDetail.GetKvReadWallTimeMs()) / 1000.
}
readSize := float64(batchGetResp.ExecDetailsV2.GetScanDetailV2().GetProcessedVersionsSize())
readSize := float64(details.GetScanDetailV2().GetProcessedVersionsSize())
metrics.ObserveReadSLI(uint64(readKeys), readTime, readSize)
s.mergeExecDetail(batchGetResp.ExecDetailsV2)
s.mergeExecDetail(details)
}
if len(lockedKeys) > 0 {
if resolvingRecordToken == nil {

@ -536,14 +589,14 @@ func (s *KVSnapshot) batchGetSingleRegion(bo *retry.Backoffer, batch batchKeys,
return err
}
if msBeforeExpired > 0 {
err = bo.BackoffWithMaxSleepTxnLockFast(int(msBeforeExpired), errors.Errorf("batchGet lockedKeys: %d", len(lockedKeys)))
err = bo.BackoffWithMaxSleepTxnLockFast(int(msBeforeExpired), errors.Errorf("BatchGetWithTier lockedKeys: %d", len(lockedKeys)))
if err != nil {
return err
}
}
// Only reduce pending keys when there is no response-level error. Otherwise,
// lockedKeys may be incomplete.
if batchGetResp.GetError() == nil {
if keyErr == nil {
pending = lockedKeys
}
continue

@ -1025,6 +1078,17 @@ func (s *KVSnapshot) getResolveLockDetail() *util.ResolveLockDetail {
return s.mu.stats.resolveLockDetail
}

// SetPipelined sets the snapshot to pipelined mode.
func (s *KVSnapshot) SetPipelined(ts uint64) {
s.isPipelined = true
// In pipelined mode, some locks are flushed into the stores during execution.
// If a read request encounters these pipelined locks, it sees lock.ts == start_ts.
// To let the snapshot proceed normally, we need to skip these locks.
// Otherwise, the transaction would attempt to resolve its own lock and end up waiting on the primary key's TTL.
// Currently, we skip these locks via the resolvedLocks mechanism.
s.resolvedLocks.Put(ts)
}
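
A hedged sketch of why SetPipelined registers startTS as resolved (illustrative, package-internal code with the surrounding wiring assumed): without it, a snapshot-tier read issued by the same pipelined transaction would try to resolve its own flushed locks and wait on the primary key's TTL.

// Illustrative only: mark the snapshot as pipelined before issuing snapshot-tier reads.
func pipelinedSnapshotReadSketch(ctx context.Context, s *KVSnapshot, startTS uint64, keys [][]byte) (map[string][]byte, error) {
	s.SetPipelined(startTS) // locks with ts == startTS are now treated as resolved
	// Snapshot-tier reads skip this transaction's own flushed locks instead of resolving them.
	return s.BatchGetWithTier(ctx, keys, BatchGetSnapshotTier)
}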

// SnapshotRuntimeStats records the runtime stats of snapshot.
type SnapshotRuntimeStats struct {
rpcStats locate.RegionRequestRuntimeStats

@ -50,7 +50,7 @@ func (s SnapshotProbe) FormatStats() string {

// BatchGetSingleRegion gets a batch of keys from a region.
func (s SnapshotProbe) BatchGetSingleRegion(bo *retry.Backoffer, region locate.RegionVerID, keys [][]byte, collectF func(k, v []byte)) error {
return s.batchGetSingleRegion(bo, batchKeys{region: region, keys: keys}, collectF)
return s.batchGetSingleRegion(bo, batchKeys{region: region, keys: keys}, BatchGetSnapshotTier, collectF)
}

// NewScanner returns a scanner to iterate given key range.

@ -221,6 +221,10 @@ func (cd *CommitDetails) MergeCommitReqDetails(reqDuration time.Duration, region
}
}

// MergeFlushReqDetails merges the details of a pipelined flush request into the commit details.
func (cd *CommitDetails) MergeFlushReqDetails(reqDuration time.Duration, regionID uint64, addr string, execDetails *kvrpcpb.ExecDetailsV2) {
// leave it empty for now
}

// Clone returns a deep copy of itself.
func (cd *CommitDetails) Clone() *CommitDetails {
commit := &CommitDetails{