mirror of https://github.com/tikv/client-rust.git
Update kvproto (#323)
* update proto
Signed-off-by: andylokandy <andylokandy@hotmail.com>
* fix test
Signed-off-by: andylokandy <andylokandy@hotmail.com>
This commit is contained in:
parent b1399f369a
commit c045d1e6bd
@@ -182,17 +182,7 @@ mod test {
        |_: &dyn Any| {
            Ok(Box::new(kvrpcpb::CommitResponse {
                region_error: None,
                error: Some(kvrpcpb::KeyError {
                    locked: None,
                    retryable: String::new(),
                    abort: String::new(),
                    conflict: None,
                    already_exist: None,
                    deadlock: None,
                    commit_ts_expired: None,
                    txn_not_found: None,
                    commit_ts_too_large: None,
                }),
                error: Some(kvrpcpb::KeyError::default()),
                commit_version: 0,
            }) as Box<dyn Any>)
        },

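This hunk swaps the fully spelled-out `kvrpcpb::KeyError` fixture for `kvrpcpb::KeyError::default()`, the usual way to keep a test fixture compiling as the generated message gains fields with each kvproto update. A minimal sketch of the pattern, with a hypothetical stand-in for the generated type:

```rust
// Hypothetical stand-in for a generated message; real generated types
// derive `Default` the same way.
#[derive(Debug, Default, PartialEq)]
struct KeyError {
    retryable: String,
    abort: String,
    commit_ts_expired: Option<u64>,
}

fn main() {
    // Spelling out every field breaks whenever the proto gains a field...
    let explicit = KeyError {
        retryable: String::new(),
        abort: String::new(),
        commit_ts_expired: None,
    };
    // ...while `default()` keeps compiling and stays equivalent.
    assert_eq!(explicit, KeyError::default());
}
```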
@@ -3,7 +3,7 @@ package backup;

import "kvrpcpb.proto";
import "errorpb.proto";

import "encryptionpb.proto";
import "gogoproto/gogo.proto";
import "rustproto.proto";

@@ -19,11 +19,19 @@ message BackupMeta {
    // ID and version of the backed-up cluster.
    uint64 cluster_id = 1;
    string cluster_version = 2;
    // Save the version of BR running backup jobs.
    string br_version = 11;
    // The backupmeta scheme version.
    int32 version = 12;

    // The path field is no longer used.
    reserved 3; reserved "path";
    // A set of files that compose a backup.
    // Note: `files` is deprecated, as it bloats backupmeta. It is kept for
    // compatibility, so new BR can restore older backups.
    repeated File files = 4;
    // An index to the files that contain data files.
    MetaFile file_index = 13;

    // A pair of timestamps specifies the time range of a backup.
    // For a full backup, the start_version equals the end_version,

@@ -33,19 +41,34 @@ message BackupMeta {
    uint64 start_version = 5;
    uint64 end_version = 6;

    // Additional metadata describes database and table info.
    // Table metadata describes database and table info.
    // Note: `schemas` is deprecated, as it bloats backupmeta. It is kept for
    // compatibility, so new BR can restore older backups.
    repeated Schema schemas = 7;
    // An index to the files that contain Schemas.
    MetaFile schema_index = 14;

    // If in raw kv mode, `start_versions`, `end_versions` and `schemas` will be ignored, and the
    // backup data's range is represented by raw_ranges.
    // If in raw kv mode, `start_versions`, `end_versions` and `schemas` will be
    // ignored, and the backup data's range is represented by raw_ranges.
    bool is_raw_kv = 8;
    // Note: `raw_ranges` is deprecated, as it bloats backupmeta. It is kept for
    // compatibility, so new BR can restore older backups.
    repeated RawRange raw_ranges = 9;
    // An index to the files that contain RawRanges.
    MetaFile raw_range_index = 15;

    // In incremental backup, DDLs which are completed in (lastBackupTS, backupTS] will be stored here.
    // In incremental backup, DDLs which are completed in
    // (lastBackupTS, backupTS] will be stored here.
    // Note: `ddls` is deprecated, as it bloats backupmeta. It is kept for
    // compatibility, so new BR can restore older backups.
    bytes ddls = 10;
    // An index to the files that contain DDLs.
    MetaFile ddl_indexes = 16;
    // The backup result is written into the `backupmeta` file.
    string backup_result = 17;

    // Save the version of BR running backup jobs.
    string br_version = 11;
    // API version implies the encode of the key and value.
    kvrpcpb.APIVersion api_version = 18;
}

message File {

@@ -64,6 +87,23 @@ message File {
    string cf = 10;

    uint64 size = 11;
    // cipher_iv is used for AES cipher.
    bytes cipher_iv = 12;
}

// MetaFile describes a multi-level index of data used in backup.
message MetaFile {
    // A set of files that contains a MetaFile.
    // It is used as a multi-level index.
    repeated File meta_files = 1;
    // A set of files that contains user data.
    repeated File data_files = 2;
    // A set of files that contains Schemas.
    repeated Schema schemas = 3;
    // A set of files that contains RawRanges.
    repeated RawRange raw_ranges = 4;
    // A set of files that contains DDLs.
    repeated bytes ddls = 5;
}

message Schema {

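`MetaFile` replaces the flat `files`/`schemas`/`raw_ranges` lists with a multi-level index: each node references either further meta files or leaf data files. A hedged sketch of flattening such an index (local stand-in types; a real reader would fetch and decode each referenced `File` from storage instead of holding the tree inline):

```rust
// Local stand-ins for the generated `File` and `MetaFile` types.
#[derive(Clone)]
struct File {
    name: String,
}

#[derive(Default)]
struct MetaFile {
    // In the proto these are `File` references to stored child MetaFiles;
    // they are modeled inline here to keep the sketch self-contained.
    meta_files: Vec<MetaFile>,
    data_files: Vec<File>,
}

// Depth-first walk collecting every leaf data file of the index.
fn collect_data_files(node: &MetaFile, out: &mut Vec<File>) {
    out.extend(node.data_files.iter().cloned());
    for child in &node.meta_files {
        collect_data_files(child, out);
    }
}
```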
@@ -109,6 +149,11 @@ enum CompressionType {
    ZSTD = 3;
}

message CipherInfo {
    encryptionpb.EncryptionMethod cipher_type = 1;
    bytes cipher_key = 2;
}

message BackupRequest {
    uint64 cluster_id = 1;

@@ -135,6 +180,8 @@ message BackupRequest {
    CompressionType compression_type = 12;
    // The SST compression level; some algorithms support negative compression levels.
    int32 compression_level = 13;
    // The cipher_info is used to encrypt SST files.
    CipherInfo cipher_info = 14;
}

message StorageBackend {

@@ -144,6 +191,8 @@ message StorageBackend {
        S3 s3 = 3;
        GCS gcs = 4;
        CloudDynamic cloud_dynamic = 5;
        HDFS hdfs = 6;
        AzureBlobStorage azure_blob_storage = 7;
    }
}

@@ -188,6 +237,29 @@ message GCS {
    string credentials_blob = 6;
}

// AzureBlobStorage storage backend saves files into Azure blob storage.
message AzureBlobStorage {
    string endpoint = 1;
    // Alias: container.
    string bucket = 2;
    // Notice: the prefix starts without `/`, otherwise the first directory's name is empty.
    string prefix = 3;
    // Alias: access_tier.
    // See https://docs.microsoft.com/en-us/azure/storage/blobs/access-tiers-overview
    string storage_class = 4;

    // If empty, try to read account_name from the node's environment variable $AZURE_STORAGE_ACCOUNT.
    string account_name = 5;
    // Use the shared key to access the Azure blob.
    // If the node's environment variables ($AZURE_CLIENT_ID, $AZURE_TENANT_ID, $AZURE_CLIENT_SECRET) exist,
    // prefer to use a token to access the Azure blob.
    //
    // See https://docs.microsoft.com/en-us/azure/storage/common/identity-library-acquire-token?toc=/azure/storage/blobs/toc.json
    //
    // Otherwise, if empty, try to read the shared key from the node's environment variable $AZURE_STORAGE_KEY.
    string shared_key = 6;
}

message Bucket {
    string endpoint = 1;
    string region = 3;

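The comments above define a credential resolution order for Azure: the service-principal env triple wins for token auth; otherwise the message fields are used, falling back to `$AZURE_STORAGE_ACCOUNT`/`$AZURE_STORAGE_KEY`. A hedged sketch of that selection logic (illustrative names, not BR's actual implementation):

```rust
use std::env;

enum AzureAuth {
    Token { client_id: String, tenant_id: String, client_secret: String },
    SharedKey { account: String, key: String },
}

fn resolve_auth(account_name: &str, shared_key: &str) -> Option<AzureAuth> {
    // Prefer service-principal token auth when the env triple is present.
    if let (Ok(id), Ok(tenant), Ok(secret)) = (
        env::var("AZURE_CLIENT_ID"),
        env::var("AZURE_TENANT_ID"),
        env::var("AZURE_CLIENT_SECRET"),
    ) {
        return Some(AzureAuth::Token { client_id: id, tenant_id: tenant, client_secret: secret });
    }
    // Fall back to the message fields, then to the storage env vars.
    let account = if account_name.is_empty() {
        env::var("AZURE_STORAGE_ACCOUNT").ok()?
    } else {
        account_name.to_string()
    };
    let key = if shared_key.is_empty() {
        env::var("AZURE_STORAGE_KEY").ok()?
    } else {
        shared_key.to_string()
    };
    Some(AzureAuth::SharedKey { account, key })
}
```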
@@ -199,10 +271,16 @@ message Bucket {
// CloudDynamic allows testing new cloud providers and new fields without changing protobuf definitions.
message CloudDynamic {
    Bucket bucket = 1;
    string provider_name = 2; // s3 and gcs are supported
    string provider_name = 2; // s3, gcs and azureBlobStorage are supported
    map<string, string> attrs = 3;
}

// HDFS storage backend saves files into HDFS-compatible storages.
message HDFS {
    // A URL: hdfs:///some/path or hdfs://host:port/some/path
    string remote = 1;
}

message BackupResponse {
    Error error = 1;

@@ -29,12 +29,22 @@ message Compatibility {
    string required_version = 1;
}

// ClusterIDMismatch is an error variant that
// tells the client that the cluster ID of the request does not match the TiKV cluster ID.
message ClusterIDMismatch {
    // The current TiKV cluster ID.
    uint64 current = 1;
    // The cluster ID of the TiCDC request.
    uint64 request = 2;
}

message Error {
    errorpb.NotLeader not_leader = 1;
    errorpb.RegionNotFound region_not_found = 2;
    errorpb.EpochNotMatch epoch_not_match = 3;
    DuplicateRequest duplicate_request = 4;
    Compatibility compatibility = 5;
    ClusterIDMismatch cluster_id_mismatch = 6;
}

message TxnInfo {

@@ -36,6 +36,8 @@ message Request {
    // Any schema-ful storage to validate schema correctness if necessary.
    int64 schema_ver = 8;
    bool is_trace_enabled = 9;
    // paging_size is 0 when it's disabled, otherwise, it should be a positive number.
    uint64 paging_size = 10;
}

message Response {

@@ -70,6 +70,9 @@ service Debug {

    // Get cluster ID
    rpc GetClusterInfo(GetClusterInfoRequest) returns (GetClusterInfoResponse) {}

    // Get all region IDs in the store
    rpc GetAllRegionsInStore(GetAllRegionsInStoreRequest) returns (GetAllRegionsInStoreResponse) {}
}

enum DB {

@@ -240,6 +243,7 @@ message GetStoreInfoRequest {

message GetStoreInfoResponse {
    uint64 store_id = 1;
    kvrpcpb.APIVersion api_version = 2;
}

message GetClusterInfoRequest {

@@ -248,3 +252,10 @@ message GetClusterInfoRequest {
message GetClusterInfoResponse {
    uint64 cluster_id = 1;
}

message GetAllRegionsInStoreRequest {
}

message GetAllRegionsInStoreResponse {
    repeated uint64 regions = 1;
}

@@ -0,0 +1,14 @@
syntax = "proto3";
package disk_usage;

import "rustproto.proto";

option (rustproto.lite_runtime_all) = true;

option java_package = "org.tikv.kvproto";

enum DiskUsage {
    Normal = 0;
    AlmostFull = 1;
    AlreadyFull = 2;
}

@@ -21,6 +21,13 @@ message NotLeader {
    metapb.Peer leader = 2;
}

message DiskFull {
    // The requested store IDs.
    repeated uint64 store_id = 1;
    // The detailed info.
    string reason = 2;
}

// StoreNotMatch is the error variant that tells that the request was sent to the wrong store.
// (i.e. inconsistency between the store ID the request carries and the real store ID of this server.)
message StoreNotMatch {

@@ -37,6 +44,13 @@ message RegionNotFound {
    uint64 region_id = 1;
}

// RegionNotInitialized is the error variant that tells there isn't any initialized peer
// that matches the request region ID.
message RegionNotInitialized {
    // The request region ID.
    uint64 region_id = 1;
}

// KeyNotInRegion is the error variant that tells the key the request requires isn't present in
// this region.
message KeyNotInRegion {

@@ -132,4 +146,6 @@ message Error {
    ReadIndexNotReady read_index_not_ready = 11;
    ProposalInMergingMode proposal_in_merging_mode = 12;
    DataIsNotReady data_is_not_ready = 13;
    RegionNotInitialized region_not_initialized = 14;
    DiskFull disk_full = 15;
}

@@ -5,9 +5,10 @@ package import_sstpb;
import "metapb.proto";
import "errorpb.proto";
import "kvrpcpb.proto";
import "raft_serverpb.proto";
import "gogoproto/gogo.proto";
import "rustproto.proto";
import "backup.proto";
import "brpb.proto";

option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;

@@ -48,9 +49,13 @@ service ImportSST {

    // Open a write stream to generate sst files
    rpc Write(stream WriteRequest) returns (WriteResponse) {}
    rpc RawWrite(stream RawWriteRequest) returns (RawWriteResponse) {}

    // Ingest multiple files in one request
    rpc MultiIngest(MultiIngestRequest) returns (IngestResponse) {}

    // Collect duplicate data from TiKV.
    rpc DuplicateDetect(DuplicateDetectRequest) returns (stream DuplicateDetectResponse) {}
}

enum SwitchMode {

@@ -79,11 +84,14 @@ message SSTMeta {
    uint64 region_id = 6;
    metapb.RegionEpoch region_epoch = 7;
    bool end_key_exclusive = 8;

    // total_kvs and total_bytes are equivalent to PD's approximate_keys and approximate_size.
    // Setting these values saves TiKV from uploading key count and size to PD through the heartbeat.
    uint64 total_kvs = 9;
    uint64 total_bytes = 10;
    // API version implies the encode of the key and value.
    kvrpcpb.APIVersion api_version = 11;
    // cipher_iv is used to encrypt/decrypt the SST.
    bytes cipher_iv = 12;
}

// A rewrite rule is applied on the *encoded* keys (the internal storage

@@ -160,6 +168,8 @@ message DownloadRequest {
    backup.StorageBackend storage_backend = 14;

    bool is_raw_kv = 15;
    // cipher_info is used to decrypt the SST when downloading it.
    backup.CipherInfo cipher_info = 16;
}

// For now it is just used for distinguishing the error of the request with the error

@@ -221,3 +231,52 @@ message WriteResponse {
    Error error = 1;
    repeated SSTMeta metas = 2;
}

message RawWriteBatch {
    uint64 ttl = 1;
    repeated Pair pairs = 2;
}

message RawWriteRequest {
    oneof chunk {
        SSTMeta meta = 1;
        RawWriteBatch batch = 2;
    }
}

message RawWriteResponse {
    Error error = 1;
    repeated SSTMeta metas = 2;
}

message DuplicateDetectRequest {
    kvrpcpb.Context context = 1;
    bytes start_key = 2;
    bytes end_key = 3;
    // Return only the keys found by scanning, not their values.
    bool key_only = 4;
    // We only check the data whose timestamp is larger than `min_commit_ts`. `min_commit_ts` is excluded.
    uint64 min_commit_ts = 5;
}

message KvPair {
    bytes key = 1;
    bytes value = 2;
    uint64 commit_ts = 3;
}

message DuplicateDetectResponse {
    errorpb.Error region_error = 1;
    Error key_error = 2;
    // These keys will be in ascending order (but commit_ts is in descending order),
    // and the content is like the following:
    // [
    //   {key: "key1", value: "value11", commit_ts: 1005},
    //   {key: "key1", value: "value12", commit_ts: 1004},
    //   {key: "key1", value: "value13", commit_ts: 1001},
    //   {key: "key2", value: "value21", commit_ts: 1004},
    //   {key: "key2", value: "value22", commit_ts: 1002},
    //   ...
    // ]
    repeated KvPair pairs = 3;
}

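`RawWriteRequest` mirrors the transactional `WriteRequest` stream: the `oneof chunk` implies the first message on the stream carries the `SSTMeta` and later messages carry `RawWriteBatch` chunks. That ordering is an assumption carried over from the `Write` convention, not stated in the proto; a sketch with stand-in types:

```rust
// Stand-ins for the generated types.
struct SstMeta;
struct Pair { key: Vec<u8>, value: Vec<u8> }
struct RawWriteBatch { ttl: u64, pairs: Vec<Pair> }

// Mirrors the `oneof chunk { meta, batch }` in RawWriteRequest.
enum Chunk {
    Meta(SstMeta),
    Batch(RawWriteBatch),
}

// Assumed protocol: one Meta message first, then any number of batches.
fn build_stream(meta: SstMeta, batches: Vec<RawWriteBatch>) -> Vec<Chunk> {
    let mut msgs = vec![Chunk::Meta(meta)];
    msgs.extend(batches.into_iter().map(Chunk::Batch));
    msgs
}
```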
@@ -106,6 +106,8 @@ message PrewriteRequest {
    // The max commit ts is reserved for limiting the commit ts of 1PC or async commit, which can be used to avoid
    // inconsistency with schema change.
    uint64 max_commit_ts = 14;
    // The level of assertion to use on this prewrite request.
    AssertionLevel assertion_level = 15;
}

message PrewriteResponse {

@@ -142,7 +144,7 @@ message PessimisticLockRequest {
    int64 wait_timeout = 8;
    // If it is true, TiKV will acquire the pessimistic lock regardless of write conflict
    // and return the latest value. It's only supported for single mutation.
    bool force = 9;
    bool force = 9 [deprecated = true];
    // If it is true, TiKV will return the values of the keys if no error, so TiDB can cache the values for
    // later reads in the same transaction.
    // When 'force' is set to true, this field is ignored.

@@ -150,14 +152,19 @@ message PessimisticLockRequest {
    // If min_commit_ts > 0, this is a large-transaction protocol: the final commit_ts
    // would be inferred from min_commit_ts.
    uint64 min_commit_ts = 11;
    // If set to true, TiKV needs to check whether the key exists, and return the result in
    // the `not_founds` field in the response. This works no matter if `return_values` is set. If
    // `return_values` is set, it simply makes no difference; otherwise, the `value` field of the
    // response will be empty while the `not_founds` field still indicates the keys' existence.
    bool check_existence = 12;
}

message PessimisticLockResponse {
    errorpb.Error region_error = 1;
    repeated KeyError errors = 2;
    // It carries the latest value and its commit_ts if force in PessimisticLockRequest is true.
    uint64 commit_ts = 3;
    bytes value = 4;
    uint64 commit_ts = 3 [deprecated = true];
    bytes value = 4 [deprecated = true];
    // The values are set if 'return_values' is true in the request and there is no error.
    // If 'force' is true, this field is not used.
    repeated bytes values = 5;

@@ -459,7 +466,10 @@ message RawBatchPutRequest {
    Context context = 1;
    repeated KvPair pairs = 2;
    string cf = 3;
    uint64 ttl = 4;
    // The time-to-live for each key in seconds; if the length of `ttls`
    // is exactly one, that ttl will be applied to all keys. Otherwise, a length
    // mismatch between `ttls` and `pairs` will return an error.
    repeated uint64 ttls = 4;
    bool for_cas = 5;
}

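The `ttls` comment encodes a small broadcast rule: either one TTL per pair, or a single TTL applied to every pair, with any other length an error. A sketch of that lookup:

```rust
// Resolve the TTL for pair `i` under the RawBatchPut rule:
// one ttl per pair, or a single ttl broadcast to all pairs.
fn ttl_for(ttls: &[u64], pair_count: usize, i: usize) -> Result<u64, String> {
    match ttls.len() {
        1 => Ok(ttls[0]),
        n if n == pair_count => Ok(ttls[i]),
        n => Err(format!("ttls length {} does not match {} pairs", n, pair_count)),
    }
}
```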
@@ -594,6 +604,8 @@ message SplitRegionRequest {
    Context context = 1;
    bytes split_key = 2 [deprecated=true];
    repeated bytes split_keys = 3; // when using it to do a batch split, `split_key` should be empty.
    // Once enabled, the split keys will not be encoded.
    bool is_raw_kv = 4;
}

message SplitRegionResponse {

@@ -667,6 +679,8 @@ message Context {
    bool record_scan_stat = 11;

    bool replica_read = 12;
    // Read requests can ignore locks belonging to these transactions because either
    // these transactions are rolled back or their commit_ts > read request's start_ts.
    repeated uint64 resolved_locks = 13;
    uint64 max_execution_duration_ms = 14;

@@ -683,6 +697,68 @@ message Context {

    // Any additional serialized information about the request.
    bytes resource_group_tag = 18;

    // Used to tell TiKV whether operations are allowed or not on different disk usages.
    DiskFullOpt disk_full_opt = 19;

    // Indicates the request is a retry request and the same request may have been sent before.
    bool is_retry_request = 20;

    // API version implies the encode of the key and value.
    APIVersion api_version = 21;

    // Read requests should read through locks belonging to these transactions because these
    // transactions are committed and their commit_ts <= read request's start_ts.
    repeated uint64 committed_locks = 22;
}

// The API version the server and the client are using.
// See more details in https://github.com/tikv/rfcs/blob/master/text/0069-api-v2.md.
enum APIVersion {
    // Mainly for TxnKV; it is not safe to use RawKV along with TxnKV.
    //
    // A V1 server only accepts V1 requests, except that V1 raw requests with TTL
    // will be rejected.
    V1 = 0;
    // Only RawKV is available, and 8 bytes representing the unix timestamp in
    // seconds for the expiring time will be appended to the value of all RawKV kv pairs.
    //
    // ------------------------------------------------------------
    // | User value     | Expire Ts                                |
    // ------------------------------------------------------------
    // | 0x12 0x34 0x56 | 0x00 0x00 0x00 0x00 0x00 0x00 0xff 0xff |
    // ------------------------------------------------------------
    //
    // A V1TTL server only accepts V1 raw requests.
    // A V1 client should not use `V1TTL` in requests; a V1 client should always send `V1`.
    V1TTL = 1;
    // TxnKV keys start with `x{keyspace id}`, `m`, or `t`.
    //
    // RawKV keys must be in the `default` CF and all start with the `r{keyspace id}` prefix,
    // where the keyspace id is in varint format (little endian), whose bytes, except
    // the last one, always set the most significant bit to 1.
    //
    // The last byte in the raw value must be a meta flag. For example:
    //
    // --------------------------------------
    // | User value     | Meta flags        |
    // --------------------------------------
    // | 0x12 0x34 0x56 | 0x00 (0b00000000) |
    // --------------------------------------
    //
    // As shown in the example below, the least significant bit of the meta flag
    // indicates whether the value contains an 8-byte expire ts immediately to the
    // left of the meta flag.
    //
    // --------------------------------------------------------------------------------
    // | User value     | Expire Ts                                | Meta flags        |
    // --------------------------------------------------------------------------------
    // | 0x12 0x34 0x56 | 0x00 0x00 0x00 0x00 0x00 0x00 0xff 0xff | 0x01 (0b00000001) |
    // --------------------------------------------------------------------------------
    //
    // A V2 server accepts V2 requests and V1 txn requests that start with the TiDB key
    // prefixes (`m` and `t`).
    V2 = 2;
}

message LockInfo {

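The V1TTL and V2 comments above pin down a concrete byte layout for RawKV values: an optional 8-byte expire timestamp followed (in V2) by a 1-byte meta flag whose least significant bit records whether the timestamp is present. A minimal sketch of that V2 encoding, assuming the timestamp is stored most-significant-byte first as the tables suggest:

```rust
// Encode a RawKV value for API V2: user value, optional 8-byte expire ts,
// then a 1-byte meta flag. Byte order of the ts is an assumption.
fn encode_v2_value(user_value: &[u8], expire_ts: Option<u64>) -> Vec<u8> {
    let mut buf = user_value.to_vec();
    match expire_ts {
        Some(ts) => {
            buf.extend_from_slice(&ts.to_be_bytes());
            buf.push(0b0000_0001); // flag: expire ts present
        }
        None => buf.push(0b0000_0000), // flag: no expire ts
    }
    buf
}

// Split a V2 raw value back into (user value, optional expire ts).
fn decode_v2_value(raw: &[u8]) -> Option<(&[u8], Option<u64>)> {
    let (&flag, rest) = raw.split_last()?;
    if flag & 1 == 1 {
        let split = rest.len().checked_sub(8)?;
        let (value, ts_bytes) = rest.split_at(split);
        Some((value, Some(u64::from_be_bytes(ts_bytes.try_into().ok()?))))
    } else {
        Some((rest, None))
    }
}
```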
@@ -710,6 +786,7 @@ message KeyError {
    CommitTsExpired commit_ts_expired = 7; // Commit ts is earlier than min commit ts of a transaction.
    TxnNotFound txn_not_found = 8; // Txn not found when checking txn status.
    CommitTsTooLarge commit_ts_too_large = 9; // Calculated commit TS exceeds the limit given by the user.
    AssertionFailed assertion_failed = 10; // Assertion of a `Mutation` is evaluated as a failure.
}

message WriteConflict {

@@ -747,6 +824,14 @@ message CommitTsTooLarge {
    uint64 commit_ts = 1; // The calculated commit TS.
}

message AssertionFailed {
    uint64 start_ts = 1;
    bytes key = 2;
    Assertion assertion = 3;
    uint64 existing_start_ts = 4;
    uint64 existing_commit_ts = 5;
}

enum CommandPri {
    Normal = 0; // Normal is the default value.
    Low = 1;

@@ -758,6 +843,13 @@ enum IsolationLevel {
    RC = 1; // RC = read committed
}

// Which operations are allowed at each TiKV disk-usage threshold.
enum DiskFullOpt {
    NotAllowedOnFull = 0; // The default value: operations are not allowed when the disk is either almost full or already full.
    AllowedOnAlmostFull = 1; // Operations will be allowed when the disk is almost full.
    AllowedOnAlreadyFull = 2; // Operations will be allowed when the disk is already full.
}

message TimeDetail {
    // Off-cpu wall time elapsed on the TiKV side. Usually this includes queue waiting time and
    // other kinds of waiting in series.

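`DiskFullOpt` lets each request say how far into a disk-full condition it may still run, so space-reclaiming or administrative operations can proceed while ordinary writes are rejected. A hedged sketch of a per-operation policy (illustrative, not TiKV's actual rules):

```rust
#[derive(Clone, Copy)]
enum DiskFullOpt {
    NotAllowedOnFull,
    AllowedOnAlmostFull,
    AllowedOnAlreadyFull,
}

enum Op {
    NormalWrite,
    ConfigChange,
    GcOrDeleteRange, // frees space, so allow it even when already full
}

// Pick the disk_full_opt to set on the request Context for each op kind.
fn disk_full_opt_for(op: &Op) -> DiskFullOpt {
    match op {
        Op::NormalWrite => DiskFullOpt::NotAllowedOnFull,
        Op::ConfigChange => DiskFullOpt::AllowedOnAlmostFull,
        Op::GcOrDeleteRange => DiskFullOpt::AllowedOnAlreadyFull,
    }
}
```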
@@ -792,6 +884,10 @@ message ScanDetailV2 {
    // Selection.
    uint64 processed_versions = 1;

    // Number of bytes of user key-value pairs scanned from the storage, i.e.
    // the total size of data returned from the MVCC layer.
    uint64 processed_versions_size = 8;

    // Approximate number of MVCC keys met during scanning. It includes
    // deleted versions, but does not include RocksDB tombstone keys.
    //

@@ -860,6 +956,15 @@ enum Assertion {
    NotExist = 2;
}

enum AssertionLevel {
    // No assertion.
    Off = 0;
    // Assertion is enabled, but not enforced when it might affect performance.
    Fast = 1;
    // Assertion is enabled and enforced.
    Strict = 2;
}

message Mutation {
    Op op = 1;
    bytes key = 2;

@@ -872,6 +977,9 @@ message MvccWrite {
    uint64 start_ts = 2;
    uint64 commit_ts = 3;
    bytes short_value = 4;
    bool has_overlapped_rollback = 5;
    bool has_gc_fence = 6;
    uint64 gc_fence = 7;
}

message MvccValue {

@@ -884,6 +992,12 @@ message MvccLock {
    uint64 start_ts = 2;
    bytes primary = 3;
    bytes short_value = 4;
    uint64 ttl = 5;
    uint64 for_update_ts = 6;
    uint64 txn_size = 7;
    bool use_async_commit = 8;
    repeated bytes secondaries = 9;
    repeated uint64 rollback_ts = 10;
}

message MvccInfo {

@@ -1012,3 +1126,21 @@ message RawCoprocessorResponse {
    string error = 2;
    bytes data = 3;
}

enum ChecksumAlgorithm {
    Crc64_Xor = 0;
}

message RawChecksumRequest {
    Context context = 1;
    ChecksumAlgorithm algorithm = 2;
    repeated KeyRange ranges = 3;
}

message RawChecksumResponse {
    errorpb.Error region_error = 1;
    string error = 2;
    uint64 checksum = 3;
    uint64 total_kvs = 4;
    uint64 total_bytes = 5;
}

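`Crc64_Xor` suggests the usual mergeable checksum construction: a CRC-64 digest per key/value pair, folded together with XOR so that per-range results can be combined in any order. A hedged sketch of that construction (the exact bytes fed to the digest are an assumption; `crc64fast` is one crate providing a CRC-64 `Digest`):

```rust
// Sketch of a Crc64_Xor-style checksum: crc64 per kv pair, xor-folded.
// Returns (checksum, total_kvs, total_bytes) as in RawChecksumResponse.
fn raw_checksum(pairs: &[(Vec<u8>, Vec<u8>)]) -> (u64, u64, u64) {
    let mut checksum = 0u64;
    let mut total_kvs = 0u64;
    let mut total_bytes = 0u64;
    for (key, value) in pairs {
        let mut digest = crc64fast::Digest::new();
        digest.write(key);
        digest.write(value);
        checksum ^= digest.sum64(); // xor makes the fold order-independent
        total_kvs += 1;
        total_bytes += (key.len() + value.len()) as u64;
    }
    (checksum, total_kvs, total_bytes)
}
```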
@@ -19,6 +19,13 @@ message TaskMeta {
    string address = 4; // target address of this task.
}

message IsAliveRequest {
}

message IsAliveResponse {
    bool available = 1;
}

// Dispatch the task request to different tiflash servers.
message DispatchTaskRequest {
    TaskMeta meta = 1;

@@ -51,10 +58,12 @@ message EstablishMPPConnectionRequest {
    TaskMeta receiver_meta = 2; // node closer to the tidb mpp gather.
}

// Data packets wrap tipb.SelectResponse.
// When TiFlash sends data to TiDB, data packets wrap tipb.SelectResponse, i.e., serialize tipb.SelectResponse into data;
// when TiFlash sends data to TiFlash, data blocks are serialized into chunks, and the execution_summaries in tipb.SelectResponse are serialized into data only for the last packet.
message MPPDataPacket {
    bytes data = 1;
    Error error = 2;
    repeated bytes chunks = 3;
}

message Error {

@@ -3,6 +3,7 @@ package pdpb;

import "metapb.proto";
import "eraftpb.proto";
import "raft_serverpb.proto";
import "replication_modepb.proto";

import "gogoproto/gogo.proto";

@@ -80,6 +81,8 @@ service PD {

    rpc SplitRegions(SplitRegionsRequest) returns (SplitRegionsResponse) {}

    rpc SplitAndScatterRegions(SplitAndScatterRegionsRequest) returns (SplitAndScatterRegionsResponse) {}

    rpc GetDCLocationInfo(GetDCLocationInfoRequest) returns (GetDCLocationInfoResponse) {}
}

@@ -336,6 +339,11 @@ message RegionHeartbeatRequest {
    // Term is the term of the raft group.
    uint64 term = 14;
    replication_modepb.RegionReplicationStatus replication_status = 15;
    // QueryStats reports write query stats; read query stats are in the store heartbeat.
    QueryStats query_stats = 16;
    // cpu_usage is the CPU time usage of the leader region since the last heartbeat,
    // which is calculated by cpu_time_delta/heartbeat_reported_interval.
    uint64 cpu_usage = 17;
}

message ChangePeer {

@@ -350,6 +358,7 @@ message ChangePeerV2 {

message TransferLeader {
    metapb.Peer peer = 1;
    repeated metapb.Peer peers = 2;
}

message Merge {

@@ -474,6 +483,7 @@ message PeerStat {
    uint64 region_id = 1;
    uint64 read_keys = 2;
    uint64 read_bytes = 3;
    QueryStats query_stats = 4;
}

message StoreStats {

@@ -516,18 +526,43 @@ message StoreStats {
    repeated RecordPair op_latencies = 19;
    // Hot peer stats in the store.
    repeated PeerStat peer_stats = 20;
    // Store query stats.
    QueryStats query_stats = 21;
    // Score that represents the speed of the store, ranges in [1, 100], lower is better.
    uint64 slow_score = 22;
    // Damaged regions on the store that need to be removed by PD.
    repeated uint64 damaged_regions_id = 23;
}

message PeerReport {
    raft_serverpb.RaftLocalState raft_state = 1;
    raft_serverpb.RegionLocalState region_state = 2;
}

message StoreReport {
    repeated PeerReport peer_reports = 1;
}

message StoreHeartbeatRequest {
    RequestHeader header = 1;

    StoreStats stats = 2;
    // Detailed store report that is only filled up on PD's demand for online unsafe recovery.
    StoreReport store_report = 3;
}

message RecoveryPlan {
    repeated metapb.Region creates = 1;
    repeated metapb.Region updates = 2;
    repeated uint64 deletes = 3;
}

message StoreHeartbeatResponse {
    ResponseHeader header = 1;
    replication_modepb.ReplicationStatus replication_status = 2;
    string cluster_version = 3;
    bool require_detailed_report = 4;
    RecoveryPlan plan = 5;
}

message ScatterRegionRequest {

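Taken together, `require_detailed_report`, `StoreReport` and `RecoveryPlan` describe a heartbeat-driven handshake for online unsafe recovery: PD asks for a detailed report, the store attaches one to its next `StoreHeartbeatRequest`, and PD answers with a plan. A stand-in sketch of the store side (simplified mirrors of the messages above, not the real implementation):

```rust
// Simplified mirrors of the pdpb types involved in online unsafe recovery.
#[derive(Default)]
struct StoreReport; // would carry per-peer raft/region state
struct RecoveryPlan; // regions to create/update/delete

struct HeartbeatResponse {
    require_detailed_report: bool,
    plan: Option<RecoveryPlan>,
}

struct Store {
    // Report to attach to the next StoreHeartbeatRequest, if PD asked for one.
    pending_report: Option<StoreReport>,
}

impl Store {
    // Called after each StoreHeartbeat round trip.
    fn on_heartbeat_response(&mut self, resp: HeartbeatResponse) {
        if resp.require_detailed_report {
            // Collect raft/region state and send it with the next heartbeat.
            self.pending_report = Some(StoreReport::default());
        }
        if let Some(_plan) = resp.plan {
            // Apply the recovery plan: create/update/delete local regions.
        }
    }
}
```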
@@ -643,12 +678,14 @@ message GetOperatorResponse {
message SyncMaxTSRequest {
    RequestHeader header = 1;
    Timestamp max_ts = 2;
    // If skip_check is true, the sync will try to write the max_ts without checking whether it's bigger.
    bool skip_check = 3;
}

message SyncMaxTSResponse {
    ResponseHeader header = 1;
    Timestamp max_local_ts = 2;
    repeated string dcs = 3;
    repeated string synced_dcs = 3;
}

message SplitRegionsRequest {

@@ -663,6 +700,20 @@ message SplitRegionsResponse {
    repeated uint64 regions_id = 3;
}

message SplitAndScatterRegionsRequest {
    RequestHeader header = 1;
    repeated bytes split_keys = 2;
    string group = 3;
    uint64 retry_limit = 4;
}

message SplitAndScatterRegionsResponse {
    ResponseHeader header = 1;
    uint64 split_finished_percentage = 2;
    uint64 scatter_finished_percentage = 3;
    repeated uint64 regions_id = 4;
}

message GetDCLocationInfoRequest {
    RequestHeader header = 1;
    string dc_location = 2;

@@ -676,3 +727,32 @@ message GetDCLocationInfoResponse {
    // which is set when the number of the max suffix bits changes.
    Timestamp max_ts = 3;
}

message QueryStats {
    uint64 GC = 1;
    uint64 Get = 2;
    uint64 Scan = 3;
    uint64 Coprocessor = 4;
    uint64 Delete = 5;
    uint64 DeleteRange = 6;
    uint64 Put = 7;
    uint64 Prewrite = 8;
    uint64 AcquirePessimisticLock = 9;
    uint64 Commit = 10;
    uint64 Rollback = 11;
}

enum QueryKind {
    Others = 0;
    GC = 1;
    Get = 2;
    Scan = 3;
    Coprocessor = 4;
    Delete = 5;
    DeleteRange = 6;
    Put = 7;
    Prewrite = 8;
    AcquirePessimisticLock = 9;
    Commit = 10;
    Rollback = 11;
}

@@ -172,6 +172,7 @@ message CompactLogResponse {}

message TransferLeaderRequest {
    metapb.Peer peer = 1;
    repeated metapb.Peer peers = 2;
}

message TransferLeaderResponse {}

@@ -3,6 +3,8 @@ package raft_serverpb;

import "eraftpb.proto";
import "metapb.proto";
import "kvrpcpb.proto";
import "disk_usage.proto";
import "rustproto.proto";

option (rustproto.lite_runtime_all) = true;

@@ -24,6 +26,8 @@ message RaftMessage {
    metapb.Region merge_target = 9;
    ExtraMessage extra_msg = 10;
    bytes extra_ctx = 11;

    disk_usage.DiskUsage disk_usage = 12;
}

message RaftTruncatedState {

@@ -66,6 +70,7 @@ message RaftSnapshotData {
message StoreIdent {
    uint64 cluster_id = 1;
    uint64 store_id = 2;
    kvrpcpb.APIVersion api_version = 3;
}

message RaftLocalState {

@@ -0,0 +1,67 @@
syntax = "proto3";

package resource_usage_agent;

import "gogoproto/gogo.proto";
import "rustproto.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (rustproto.lite_runtime_all) = true;

option java_package = "org.tikv.kvproto";

// ResourceUsageAgent is the service for storing resource usage records.
service ResourceUsageAgent {
    // DEPRECATED: We now use `Report` to report not only CPU time.
    //
    // Report the CPU time records. By default, the records with the same
    // resource group tag will be batched by minute.
    rpc ReportCPUTime(stream CPUTimeRecord) returns (EmptyResponse) {}

    // Report the resource usage records. By default, the records with the same
    // resource group tag will be batched by minute.
    rpc Report(stream ResourceUsageRecord) returns (EmptyResponse) {}
}

message CPUTimeRecord {
    bytes resource_group_tag = 1;

    // The following two repeated fields, zipped together, represent a List<(UnixTimestamp, CPUTime)>.

    // UNIX timestamp in seconds.
    repeated uint64 record_list_timestamp_sec = 2;
    // The value can be greater than 1000ms if the requests are running in parallel.
    repeated uint32 record_list_cpu_time_ms = 3;
}

message ResourceUsageRecord {
    bytes resource_group_tag = 1;

    // The following repeated fields, zipped together, represent a List<(UnixTimestamp, Record)>.

    // UNIX timestamp in seconds.
    repeated uint64 record_list_timestamp_sec = 2;

    // The value can be greater than 1000ms if the requests are running in parallel.
    repeated uint32 record_list_cpu_time_ms = 3;

    // The number of reads of keys associated with resource_group_tag.
    repeated uint32 record_list_read_keys = 4;

    // The number of writes of keys associated with resource_group_tag.
    repeated uint32 record_list_write_keys = 5;
}

message EmptyResponse {}

// TiKV implements the ResourceMeteringPubSub service for clients to subscribe to resource metering records.
service ResourceMeteringPubSub {
    // Clients subscribe to resource metering records through this RPC, and TiKV periodically (e.g. per minute)
    // publishes resource metering records to clients via a gRPC stream.
    rpc Subscribe(ResourceMeteringRequest) returns (stream ResourceUsageRecord) {}
}

message ResourceMeteringRequest {}

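Both record messages flatten a list of tuples into parallel repeated fields, as the `List<(UnixTimestamp, CPUTime)>` comments note: element i of `record_list_timestamp_sec` pairs with element i of each other `record_list_*` field. A small sketch of rebuilding the tuples on the consumer side:

```rust
// Rebuild List<(timestamp, cpu_ms, read_keys, write_keys)> from the
// parallel repeated fields of a ResourceUsageRecord (stand-in slices).
fn zip_records(
    timestamps: &[u64],
    cpu_ms: &[u32],
    read_keys: &[u32],
    write_keys: &[u32],
) -> Vec<(u64, u32, u32, u32)> {
    timestamps
        .iter()
        .zip(cpu_ms)
        .zip(read_keys)
        .zip(write_keys)
        .map(|(((&ts, &cpu), &r), &w)| (ts, cpu, r, w))
        .collect()
}
```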
@@ -51,6 +51,7 @@ service Tikv {
    rpc RawGetKeyTTL(kvrpcpb.RawGetKeyTTLRequest) returns (kvrpcpb.RawGetKeyTTLResponse) {}
    // Compare whether the value in the database equals `RawCASRequest.previous_value` before putting the new value. If not, this request will have no effect and the value in the database will be returned.
    rpc RawCompareAndSwap(kvrpcpb.RawCASRequest) returns (kvrpcpb.RawCASResponse) {}
    rpc RawChecksum(kvrpcpb.RawChecksumRequest) returns (kvrpcpb.RawChecksumResponse) {}

    // Store commands (sent to each TiKV node in a cluster, rather than a certain region).
    rpc UnsafeDestroyRange(kvrpcpb.UnsafeDestroyRangeRequest) returns (kvrpcpb.UnsafeDestroyRangeResponse) {}

@@ -89,6 +90,7 @@ service Tikv {
    rpc DispatchMPPTask(mpp.DispatchTaskRequest) returns (mpp.DispatchTaskResponse) {}
    rpc CancelMPPTask(mpp.CancelTaskRequest) returns (mpp.CancelTaskResponse) {}
    rpc EstablishMPPConnection(mpp.EstablishMPPConnectionRequest) returns (stream mpp.MPPDataPacket) {}
    rpc IsAlive(mpp.IsAliveRequest) returns (mpp.IsAliveResponse) {}

    /// CheckLeader sends all information (includes region term and epoch) to other stores.
    /// Once a store receives a request, it checks term and epoch for each region, and sends the regions whose

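The `RawCompareAndSwap` comment above defines an atomic read-modify-write primitive: the put takes effect only when the stored value still equals `RawCASRequest.previous_value`, otherwise the current value is returned. That is exactly what a CAS retry loop needs; here is a hedged sketch against a hypothetical client trait (not client-rust's actual API):

```rust
// Hypothetical CAS surface; the real RPC is RawCompareAndSwap(RawCASRequest).
trait RawCas {
    /// Returns Ok(None) on success, or Ok(Some(current)) when the stored
    /// value no longer matches `previous` and nothing was written.
    fn compare_and_swap(
        &self,
        key: &[u8],
        previous: Option<Vec<u8>>,
        new: Vec<u8>,
    ) -> Result<Option<Vec<u8>>, String>;
}

// Retry loop: atomically increment a little-endian u64 counter.
fn increment(client: &dyn RawCas, key: &[u8]) -> Result<u64, String> {
    let mut current: Option<Vec<u8>> = None;
    loop {
        let n = match &current {
            Some(v) => u64::from_le_bytes(
                v.as_slice().try_into().map_err(|_| "bad value".to_string())?,
            ),
            None => 0,
        };
        let next = n + 1;
        match client.compare_and_swap(key, current.clone(), next.to_le_bytes().to_vec())? {
            None => return Ok(next),                // swap landed
            Some(actual) => current = Some(actual), // lost the race; retry with fresh value
        }
    }
}
```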
@@ -232,17 +232,7 @@ mod test {

        let mut resp: Result<_, Error> = Ok(kvrpcpb::CommitResponse {
            region_error: None,
            error: Some(kvrpcpb::KeyError {
                locked: None,
                retryable: String::new(),
                abort: String::new(),
                conflict: None,
                already_exist: None,
                deadlock: None,
                commit_ts_expired: None,
                txn_not_found: None,
                commit_ts_too_large: None,
            }),
            error: Some(kvrpcpb::KeyError::default()),
            commit_version: 0,
        });
        assert!(resp.key_errors().is_some());