mirror of https://github.com/tikv/client-rust.git
chore: Bump prost to 0.12 and tonic to 0.10 (#430)
* chore: Bump prost to 0.12 and tonic to 0.10
  Signed-off-by: Xuanwo <github@xuanwo.io>
* Fix build
  Signed-off-by: Xuanwo <github@xuanwo.io>
* Commit changes
  Signed-off-by: Xuanwo <github@xuanwo.io>
---------
Signed-off-by: Xuanwo <github@xuanwo.io>
This commit is contained in:
parent a7885be25b
commit 802b361df7
@@ -34,7 +34,7 @@ lazy_static = "1"
log = "0.4"
pin-project = "1"
prometheus = { version = "0.13", features = ["push"], default-features = false }
prost = "0.11"
prost = "0.12"
rand = "0.8"
regex = "1"
semver = "1.0"

@@ -42,7 +42,7 @@ serde = "1.0"
serde_derive = "1.0"
thiserror = "1"
tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] }
tonic = { version = "0.9", features = ["tls"] }
tonic = { version = "0.10", features = ["tls"] }

[dev-dependencies]
clap = "2"
@@ -12,6 +12,4 @@ edition = "2021"

[dependencies]
glob = "0.3"
tonic-build = "0.9"
# Suppress doctest bug (https://stackoverflow.com/questions/66074003/how-to-turn-off-cargo-doc-test-and-compile-for-a-specific-module-in-rust)
tonic-disable-doctest = "0.1.0"
tonic-build = { version = "0.10", features = ["cleanup-markdown"] }
@@ -1,10 +1,7 @@
// Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.

fn main() {
use tonic_disable_doctest::BuilderEx;

tonic_build::configure()
.disable_doctests_for_types([".google.api.HttpRule"])
.emit_rerun_if_changed(false)
.build_server(false)
.include_file("mod.rs")
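The build-script change goes hand in hand with the dependency change above: `tonic-build` 0.10's `cleanup-markdown` feature sanitizes the doc comments copied out of the `.proto` files, so the `tonic-disable-doctest` crate and the explicit `.disable_doctests_for_types(...)` call are no longer needed. Below is a minimal sketch of a build script using the new configuration; the proto file paths are placeholders, not the ones used by this repository.

```rust
// Minimal sketch of a tonic-build 0.10 build script (proto paths are hypothetical).
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        // Doc comments copied from the .proto files are cleaned up by the
        // `cleanup-markdown` feature enabled in Cargo.toml, so no
        // doctest-disabling workaround is required.
        .emit_rerun_if_changed(false)
        .build_server(false)
        .include_file("mod.rs")
        .compile(&["proto/example.proto"], &["proto"])?;
    Ok(())
}
```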
@@ -15,7 +15,7 @@ pub struct BackupMeta {
pub version: i32,
/// A set of files that compose a backup.
/// Note: `files` is deprecated, as it bloats backupmeta. It is kept for
/// compatibility, so new BR can restore older backups.
/// compatibility, so new BR can restore older backups.
#[prost(message, repeated, tag = "4")]
pub files: ::prost::alloc::vec::Vec<File>,
/// An index to files contains data files.

@@ -25,14 +25,14 @@ pub struct BackupMeta {
/// For full backup, the start_version equals to the end_version,
/// it means point in time.
/// For incremental backup, the time range is specified as
/// (start_version, end_version].
/// (start_version, end_version\].
#[prost(uint64, tag = "5")]
pub start_version: u64,
#[prost(uint64, tag = "6")]
pub end_version: u64,
/// Table metadata describes database and table info.
/// Note: `schemas` is deprecated, as it bloats backupmeta. It is kept for
/// compatibility, so new BR can restore older backups.
/// compatibility, so new BR can restore older backups.
#[prost(message, repeated, tag = "7")]
pub schemas: ::prost::alloc::vec::Vec<Schema>,
/// An index to files contains Schemas.

@@ -43,16 +43,16 @@ pub struct BackupMeta {
#[prost(bool, tag = "8")]
pub is_raw_kv: bool,
/// Note: `raw_ranges` is deprecated, as it bloats backupmeta. It is kept for
/// compatibility, so new BR can restore older backups.
/// compatibility, so new BR can restore older backups.
#[prost(message, repeated, tag = "9")]
pub raw_ranges: ::prost::alloc::vec::Vec<RawRange>,
/// An index to files contains RawRanges.
#[prost(message, optional, tag = "15")]
pub raw_range_index: ::core::option::Option<MetaFile>,
/// In incremental backup, DDLs which are completed in
/// (lastBackupTS, backupTS] will be stored here.
/// (lastBackupTS, backupTS\] will be stored here.
/// Note: `raw_ranges` is deprecated, as it bloats backupmeta. It is kept for
/// compatibility, so new BR can restore older backups.
/// compatibility, so new BR can restore older backups.
#[prost(bytes = "vec", tag = "10")]
pub ddls: ::prost::alloc::vec::Vec<u8>,
/// An index to files contains DDLs.

@@ -332,7 +332,7 @@ pub struct AzureBlobStorage {
/// If the node's environment variables($AZURE_CLIENT_ID, $AZURE_TENANT_ID, $AZURE_CLIENT_SECRET) exist,
/// prefer to use token to access the azure blob.
///
/// See <https://docs.microsoft.com/en-us/azure/storage/common/identity-library-acquire-token?toc=/azure/storage/blobs/toc.json>
/// See <https://docs.microsoft.com/en-us/azure/storage/common/identity-library-acquire-token?toc=/azure/storage/blobs/toc.json>
///
/// Otherwise, if empty, try to read shared key from the node's environment variable $AZURE_STORAGE_KEY.
#[prost(string, tag = "6")]

@@ -371,7 +371,7 @@ pub struct CloudDynamic {
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Hdfs {
/// a URL: hdfs:///some/path or hdfs://host:port/some/path
/// a URL: hdfs:///some/path or hdfs://host:port/some/path
#[prost(string, tag = "1")]
pub remote: ::prost::alloc::string::String,
}
@@ -1,4 +1,4 @@
/// [start, end)
/// \[start, end)
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KeyRange {
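Most of the remaining hunks in the generated code follow the same pattern as this one: the regenerated doc comments escape characters such as `[`, `]` and `*` so that rustdoc never reads them as Markdown link or emphasis syntax when `cargo doc` or doctests process the crate. A small self-contained illustration, not taken from the repository:

```rust
/// Covers the half-open range \[start, end).
///
/// The backslashes keep rustdoc from interpreting the brackets as Markdown
/// link syntax; the newly enabled `cleanup-markdown` code generation escapes
/// such characters wholesale in generated comments.
pub struct KeyRange {
    pub start: Vec<u8>,
    pub end: Vec<u8>,
}
```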
@@ -361,12 +361,13 @@ pub mod debug_client {
/// Debug service for TiKV.
///
/// Errors are defined as follow:
/// - OK: Okay, we are good!
/// - UNKNOWN: For unknown error.
/// - INVALID_ARGUMENT: Something goes wrong within requests.
/// - NOT_FOUND: It is key or region not found, it's based on context, detailed
/// reason can be found in grpc message.
/// Note: It bypasses raft layer.
///
/// * OK: Okay, we are good!
/// * UNKNOWN: For unknown error.
/// * INVALID_ARGUMENT: Something goes wrong within requests.
/// * NOT_FOUND: It is key or region not found, it's based on context, detailed
/// reason can be found in grpc message.
/// Note: It bypasses raft layer.
#[derive(Debug, Clone)]
pub struct DebugClient<T> {
inner: tonic::client::Grpc<T>,

@@ -539,7 +540,7 @@ pub mod debug_client {
}
/// Scan a specific range.
/// Note: DO NOT CALL IT IN PRODUCTION, it's really expensive.
/// Server uses keys directly w/o any encoding.
/// Server uses keys directly w/o any encoding.
pub async fn scan_mvcc(
&mut self,
request: impl tonic::IntoRequest<super::ScanMvccRequest>,

@@ -95,12 +95,12 @@ pub struct ServerInfoItem {
pub name: ::prost::alloc::string::String,
/// all key-value pairs for specified item, e.g:
/// ServerInfoItem {
/// tp = "network"
/// name = "eth0"
/// paris = [
/// ServerInfoPair { key = "readbytes", value = "4k"},
/// ServerInfoPair { key = "writebytes", value = "1k"},
/// ]
/// tp = "network"
/// name = "eth0"
/// paris = \[
/// ServerInfoPair { key = "readbytes", value = "4k"},
/// ServerInfoPair { key = "writebytes", value = "1k"},
/// \]
/// }
#[prost(message, repeated, tag = "3")]
pub pairs: ::prost::alloc::vec::Vec<ServerInfoPair>,
@@ -1,5 +1,5 @@
/// Defines the HTTP configuration for an API service. It contains a list of
/// \[HttpRule][google.api.HttpRule\], each specifying the mapping of an RPC method
/// \[HttpRule\]\[google.api.HttpRule\], each specifying the mapping of an RPC method
/// to one or more HTTP REST API methods.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]

@@ -26,7 +26,7 @@ pub struct Http {
/// APIs](<https://github.com/googleapis/googleapis>),
/// [Cloud Endpoints](<https://cloud.google.com/endpoints>), [gRPC
/// Gateway](<https://github.com/grpc-ecosystem/grpc-gateway>),
/// and \[Envoy\](<https://github.com/envoyproxy/envoy>) proxy support this feature
/// and [Envoy](<https://github.com/envoyproxy/envoy>) proxy support this feature
/// and use it for large scale production services.
///
/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies

@@ -43,53 +43,57 @@ pub struct Http {
///
/// Example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get: "/v1/{name=messages/*}"
/// };
/// }
/// }
/// message GetMessageRequest {
/// string name = 1; // Mapped to URL path.
/// }
/// message Message {
/// string text = 1; // The resource content.
/// }
/// ```text
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get: "/v1/{name=messages/*}"
/// };
/// }
/// }
/// message GetMessageRequest {
/// string name = 1; // Mapped to URL path.
/// }
/// message Message {
/// string text = 1; // The resource content.
/// }
/// ```
///
/// This enables an HTTP REST to gRPC mapping as below:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
/// |HTTP|gRPC|
/// |----|----|
/// |`GET /v1/messages/123456`|`GetMessage(name: "messages/123456")`|
///
/// Any fields in the request message which are not bound by the path template
/// automatically become HTTP query parameters if there is no HTTP request body.
/// For example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get:"/v1/messages/{message_id}"
/// };
/// }
/// }
/// message GetMessageRequest {
/// message SubMessage {
/// string subfield = 1;
/// }
/// string message_id = 1; // Mapped to URL path.
/// int64 revision = 2; // Mapped to URL query parameter `revision`.
/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.
/// }
/// ```text
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get:"/v1/messages/{message_id}"
/// };
/// }
/// }
/// message GetMessageRequest {
/// message SubMessage {
/// string subfield = 1;
/// }
/// string message_id = 1; // Mapped to URL path.
/// int64 revision = 2; // Mapped to URL query parameter `revision`.
/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.
/// }
/// ```
///
/// This enables a HTTP JSON to RPC mapping as below:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
/// "foo"))`
/// |HTTP|gRPC|
/// |----|----|
/// |`GET /v1/messages/123456?revision=2&sub.subfield=foo`||
/// |\`GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:||
/// |"foo"))\`||
///
/// Note that fields which are mapped to URL query parameters must have a
/// primitive type or a repeated primitive type or a non-repeated message type.
@@ -102,53 +106,56 @@ pub struct Http {
/// specifies the mapping. Consider a REST update method on the
/// message resource collection:
///
/// service Messaging {
/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// patch: "/v1/messages/{message_id}"
/// body: "message"
/// };
/// }
/// }
/// message UpdateMessageRequest {
/// string message_id = 1; // mapped to the URL
/// Message message = 2; // mapped to the body
/// }
/// ```text
/// service Messaging {
/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// patch: "/v1/messages/{message_id}"
/// body: "message"
/// };
/// }
/// }
/// message UpdateMessageRequest {
/// string message_id = 1; // mapped to the URL
/// Message message = 2; // mapped to the body
/// }
/// ```
///
/// The following HTTP JSON to RPC mapping is enabled, where the
/// representation of the JSON in the request body is determined by
/// protos JSON encoding:
///
/// HTTP | gRPC
/// -----|-----
/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
/// "123456" message { text: "Hi!" })`
/// |HTTP|gRPC|
/// |----|----|
/// |`PATCH /v1/messages/123456 { "text": "Hi!" }`|\`UpdateMessage(message_id:|
/// |"123456" message { text: "Hi!" })\`||
///
/// The special name `*` can be used in the body mapping to define that
/// every field not bound by the path template should be mapped to the
/// request body. This enables the following alternative definition of
/// the update method:
///
/// service Messaging {
/// rpc UpdateMessage(Message) returns (Message) {
/// option (google.api.http) = {
/// patch: "/v1/messages/{message_id}"
/// body: "*"
/// };
/// }
/// }
/// message Message {
/// string message_id = 1;
/// string text = 2;
/// }
///
/// ```text
/// service Messaging {
/// rpc UpdateMessage(Message) returns (Message) {
/// option (google.api.http) = {
/// patch: "/v1/messages/{message_id}"
/// body: "*"
/// };
/// }
/// }
/// message Message {
/// string message_id = 1;
/// string text = 2;
/// }
/// ```
///
/// The following HTTP JSON to RPC mapping is enabled:
///
/// HTTP | gRPC
/// -----|-----
/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
/// "123456" text: "Hi!")`
/// |HTTP|gRPC|
/// |----|----|
/// |`PATCH /v1/messages/123456 { "text": "Hi!" }`|\`UpdateMessage(message_id:|
/// |"123456" text: "Hi!")\`||
///
/// Note that when using `*` in the body mapping, it is not possible to
/// have HTTP parameters, as all fields not bound by the path end in
@@ -159,53 +166,57 @@ pub struct Http {
/// It is possible to define multiple HTTP methods for one RPC by using
/// the `additional_bindings` option. Example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get: "/v1/messages/{message_id}"
/// additional_bindings {
/// get: "/v1/users/{user_id}/messages/{message_id}"
/// }
/// };
/// ```text
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get: "/v1/messages/{message_id}"
/// additional_bindings {
/// get: "/v1/users/{user_id}/messages/{message_id}"
/// }
/// }
/// message GetMessageRequest {
/// string message_id = 1;
/// string user_id = 2;
/// }
/// };
/// }
/// }
/// message GetMessageRequest {
/// string message_id = 1;
/// string user_id = 2;
/// }
/// ```
///
/// This enables the following two alternative HTTP JSON to RPC mappings:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
/// "123456")`
/// |HTTP|gRPC|
/// |----|----|
/// |`GET /v1/messages/123456`|`GetMessage(message_id: "123456")`|
/// |`GET /v1/users/me/messages/123456`|\`GetMessage(user_id: "me" message_id:|
/// |"123456")\`||
///
/// ## Rules for HTTP mapping
///
/// 1. Leaf request fields (recursive expansion nested messages in the request
/// message) are classified into three categories:
/// - Fields referred by the path template. They are passed via the URL path.
/// - Fields referred by the \[HttpRule.body][google.api.HttpRule.body\]. They are passed via the HTTP
/// request body.
/// - All other fields are passed via the URL query parameters, and the
/// parameter name is the field path in the request message. A repeated
/// field can be represented as multiple query parameters under the same
/// name.
/// 2. If \[HttpRule.body][google.api.HttpRule.body\] is "*", there is no URL query parameter, all fields
/// are passed via URL path and HTTP request body.
/// 3. If \[HttpRule.body][google.api.HttpRule.body\] is omitted, there is no HTTP request body, all
/// fields are passed via URL path and URL query parameters.
/// message) are classified into three categories:
/// * Fields referred by the path template. They are passed via the URL path.
/// * Fields referred by the \[HttpRule.body\]\[google.api.HttpRule.body\]. They are passed via the HTTP
/// request body.
/// * All other fields are passed via the URL query parameters, and the
/// parameter name is the field path in the request message. A repeated
/// field can be represented as multiple query parameters under the same
/// name.
/// 1. If \[HttpRule.body\]\[google.api.HttpRule.body\] is "\*", there is no URL query parameter, all fields
/// are passed via URL path and HTTP request body.
/// 1. If \[HttpRule.body\]\[google.api.HttpRule.body\] is omitted, there is no HTTP request body, all
/// fields are passed via URL path and URL query parameters.
///
/// ### Path template syntax
///
/// Template = "/" Segments [ Verb ] ;
/// Segments = Segment { "/" Segment } ;
/// Segment = "*" | "**" | LITERAL | Variable ;
/// Variable = "{" FieldPath [ "=" Segments ] "}" ;
/// FieldPath = IDENT { "." IDENT } ;
/// Verb = ":" LITERAL ;
/// ```text
/// Template = "/" Segments \[ Verb \] ;
/// Segments = Segment { "/" Segment } ;
/// Segment = "*" | "**" | LITERAL | Variable ;
/// Variable = "{" FieldPath \[ "=" Segments \] "}" ;
/// FieldPath = IDENT { "." IDENT } ;
/// Verb = ":" LITERAL ;
/// ```
///
/// The syntax `*` matches a single URL path segment. The syntax `**` matches
/// zero or more URL path segments, which must be the last part of the URL path
@@ -254,11 +265,13 @@ pub struct Http {
///
/// Example:
///
/// http:
/// rules:
/// # Selects a gRPC method and applies HttpRule to it.
/// - selector: example.v1.Messaging.GetMessage
/// get: /v1/messages/{message_id}/{sub.subfield}
/// ```text
/// http:
/// rules:
/// # Selects a gRPC method and applies HttpRule to it.
/// - selector: example.v1.Messaging.GetMessage
/// get: /v1/messages/{message_id}/{sub.subfield}
/// ```
///
/// ## Special notes
///

@@ -287,18 +300,12 @@ pub struct Http {
/// If an API needs to use a JSON array for request or response body, it can map
/// the request or response body to a repeated field. However, some gRPC
/// Transcoding implementations may not support this feature.
#[cfg(not(doctest))]
#[allow(dead_code)]
pub struct __GoogleApiHttpRuleDocs;
/// HACK: for docs see [`__GoogleApiHttpRuleDocs`]
///
/// this hack allows full doctest pass without failures on examples from that doc
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HttpRule {
/// Selects a method to which this rule applies.
///
/// Refer to \[selector][google.api.DocumentationRule.selector\] for syntax details.
/// Refer to \[selector\]\[google.api.DocumentationRule.selector\] for syntax details.
#[prost(string, tag = "1")]
pub selector: ::prost::alloc::string::String,
/// The name of the request field whose value is mapped to the HTTP request

@@ -333,12 +340,6 @@ pub mod http_rule {
/// Determines the URL pattern is matched by this rules. This pattern can be
/// used with any of the {get|put|post|delete|patch} methods. A custom method
/// can be defined using the 'custom' field.
#[cfg(not(doctest))]
#[allow(dead_code)]
pub struct __GoogleApiHttpRuleDocs;
/// HACK: for docs see [`__GoogleApiHttpRuleDocs`]
///
/// this hack allows full doctest pass without failures on examples from that doc
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Pattern {

@@ -359,7 +360,7 @@ pub mod http_rule {
#[prost(string, tag = "6")]
Patch(::prost::alloc::string::String),
/// The custom pattern is used for specifying an HTTP method that is not
/// included in the `pattern` field, such as HEAD, or "*" to leave the
/// included in the `pattern` field, such as HEAD, or "\*" to leave the
/// HTTP method unspecified for this rule. The wild-card rule is useful
/// for services that provide content to Web (HTML) clients.
#[prost(message, tag = "8")]
@@ -211,21 +211,22 @@ pub mod import_kv_client {
/// ImportKV provides a service to import key-value pairs to TiKV.
///
/// In order to import key-value pairs to TiKV, the user should:
///
/// 1. Open an engine identified by an UUID.
/// 2. Open write streams to write key-value batches to the opened engine.
/// Different streams/clients can write to the same engine concurrently.
/// 3. Close the engine after all write batches have been finished. An
/// engine can only be closed when all write streams are closed. An
/// engine can only be closed once, and it can not be opened again
/// once it is closed.
/// 4. Import the data in the engine to the target cluster. Note that
/// the import process is not atomic, it requires the data to be
/// idempotent on retry. An engine can only be imported after it is
/// closed. An engine can be imported multiple times, but can not be
/// imported concurrently.
/// 5. Clean up the engine after it has been imported. Delete all data
/// in the engine. An engine can not be cleaned up when it is
/// writing or importing.
/// 1. Open write streams to write key-value batches to the opened engine.
/// Different streams/clients can write to the same engine concurrently.
/// 1. Close the engine after all write batches have been finished. An
/// engine can only be closed when all write streams are closed. An
/// engine can only be closed once, and it can not be opened again
/// once it is closed.
/// 1. Import the data in the engine to the target cluster. Note that
/// the import process is not atomic, it requires the data to be
/// idempotent on retry. An engine can only be imported after it is
/// closed. An engine can be imported multiple times, but can not be
/// imported concurrently.
/// 1. Clean up the engine after it has been imported. Delete all data
/// in the engine. An engine can not be cleaned up when it is
/// writing or importing.
#[derive(Debug, Clone)]
pub struct ImportKvClient<T> {
inner: tonic::client::Grpc<T>,

@@ -131,7 +131,7 @@ pub struct DownloadRequest {
/// Performs a key prefix rewrite after downloading the SST file.
/// All keys in the SST will be rewritten as:
///
/// new_key = new_key_prefix + old_key\[len(old_key_prefix)..\]
/// new_key = new_key_prefix + old_key\[len(old_key_prefix)..\]
///
/// When used for TiDB, rewriting the prefix changes the table ID. Please
/// note that key-rewrite is applied on the origin keys in encoded

@@ -339,15 +339,15 @@ pub struct DuplicateDetectResponse {
#[prost(message, optional, tag = "2")]
pub key_error: ::core::option::Option<Error>,
/// The these keys will be in asc order (but commit time is in desc order),
/// and the content is just like following:
/// [
/// {key: "key1", value: "value11", commit_ts: 1005},
/// {key: "key1", value: "value12", commit_ts: 1004},
/// {key: "key1", value: "value13", commit_ts: 1001},
/// {key: "key2", value: "value21", commit_ts: 1004},
/// {key: "key2", value: "value22", commit_ts: 1002},
/// ...
/// ]
/// and the content is just like following:
/// \[
/// {key: "key1", value: "value11", commit_ts: 1005},
/// {key: "key1", value: "value12", commit_ts: 1004},
/// {key: "key1", value: "value13", commit_ts: 1001},
/// {key: "key2", value: "value21", commit_ts: 1004},
/// {key: "key2", value: "value22", commit_ts: 1002},
/// ...
/// \]
#[prost(message, repeated, tag = "3")]
pub pairs: ::prost::alloc::vec::Vec<KvPair>,
}

@@ -385,9 +385,10 @@ pub mod import_sst_client {
/// ImportSST provides a service to import a generated SST file to a region in TiKV.
///
/// In order to import an SST file to a region, the user should:
///
/// 1. Retrieve the meta of the region according to the SST file's range.
/// 2. Upload the SST file to the servers where the region's peers locate in.
/// 3. Issue an ingest request to the region's leader with the SST file's metadata.
/// 1. Upload the SST file to the servers where the region's peers locate in.
/// 1. Issue an ingest request to the region's leader with the SST file's metadata.
///
/// It's the user's responsibility to make sure that the SST file is uploaded to
/// the servers where the region's peers locate in, before issue the ingest
@@ -49,8 +49,8 @@ pub struct ScanRequest {
pub key_only: bool,
#[prost(bool, tag = "6")]
pub reverse: bool,
/// For compatibility, when scanning forward, the range to scan is [start_key, end_key), where start_key < end_key;
/// and when scanning backward, it scans [end_key, start_key) in descending order, where end_key < start_key.
/// For compatibility, when scanning forward, the range to scan is \[start_key, end_key), where start_key \< end_key;
/// and when scanning backward, it scans \[end_key, start_key) in descending order, where end_key \< start_key.
#[prost(bytes = "vec", tag = "7")]
pub end_key: ::prost::alloc::vec::Vec<u8>,
/// If sample_step > 0, skips 'sample_step - 1' number of keys after each returned key.

@@ -313,9 +313,9 @@ pub struct CheckTxnStatusResponse {
#[prost(message, optional, tag = "2")]
pub error: ::core::option::Option<KeyError>,
/// Three kinds of transaction status:
/// locked: lock_ttl > 0
/// committed: commit_version > 0
/// rollbacked: lock_ttl = 0 && commit_version = 0
/// locked: lock_ttl > 0
/// committed: commit_version > 0
/// rollbacked: lock_ttl = 0 && commit_version = 0
#[prost(uint64, tag = "3")]
pub lock_ttl: u64,
#[prost(uint64, tag = "4")]

@@ -723,8 +723,8 @@ pub struct RawScanRequest {
pub cf: ::prost::alloc::string::String,
#[prost(bool, tag = "6")]
pub reverse: bool,
/// For compatibility, when scanning forward, the range to scan is [start_key, end_key), where start_key < end_key;
/// and when scanning backward, it scans [end_key, start_key) in descending order, where end_key < start_key.
/// For compatibility, when scanning forward, the range to scan is \[start_key, end_key), where start_key \< end_key;
/// and when scanning backward, it scans \[end_key, start_key) in descending order, where end_key \< start_key.
#[prost(bytes = "vec", tag = "7")]
pub end_key: ::prost::alloc::vec::Vec<u8>,
}
@@ -1021,7 +1021,7 @@ pub struct Context {
#[prost(enumeration = "ApiVersion", tag = "21")]
pub api_version: i32,
/// Read request should read through locks belonging to these transactions because these
/// transactions are committed and theirs commit_ts <= read request's start_ts.
/// transactions are committed and theirs commit_ts \<= read request's start_ts.
#[prost(uint64, repeated, tag = "22")]
pub committed_locks: ::prost::alloc::vec::Vec<u64>,
}

@@ -1394,7 +1394,7 @@ pub struct CheckLeaderResponse {
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StoreSafeTsRequest {
/// Get the minimal `safe_ts` from regions that overlap with the key range [`start_key`, `end_key`)
/// Get the minimal `safe_ts` from regions that overlap with the key range \[`start_key`, `end_key`)
/// An empty key range means all regions in the store
#[prost(message, optional, tag = "1")]
pub key_range: ::core::option::Option<KeyRange>,

@@ -1542,11 +1542,11 @@ pub enum ApiVersion {
/// Only RawKV is available, and then 8 bytes representing the unix timestamp in
/// seconds for expiring time will be append to the value of all RawKV kv pairs.
///
/// ------------------------------------------------------------
/// | User value | Expire Ts |
/// ------------------------------------------------------------
/// | 0x12 0x34 0x56 | 0x00 0x00 0x00 0x00 0x00 0x00 0xff 0xff |
/// ------------------------------------------------------------
/// ---
///
/// ## \| User value | Expire Ts |
///
/// ## \| 0x12 0x34 0x56 | 0x00 0x00 0x00 0x00 0x00 0x00 0xff 0xff |
///
/// V1TTL server only accepts V1 raw requests.
/// V1 client should not use `V1TTL` in request. V1 client should always send `V1`.

@@ -1559,21 +1559,21 @@ pub enum ApiVersion {
///
/// The last byte in the raw value must be a meta flag. For example:
///
/// --------------------------------------
/// | User value | Meta flags |
/// --------------------------------------
/// | 0x12 0x34 0x56 | 0x00 (0b00000000) |
/// --------------------------------------
/// ---
///
/// ## \| User value | Meta flags |
///
/// ## \| 0x12 0x34 0x56 | 0x00 (0b00000000) |
///
/// As shown in the example below, the least significant bit of the meta flag
/// indicates whether the value contains 8 bytes expire ts at the very left to the
/// meta flags.
///
/// --------------------------------------------------------------------------------
/// | User value | Expire Ts | Meta flags |
/// --------------------------------------------------------------------------------
/// | 0x12 0x34 0x56 | 0x00 0x00 0x00 0x00 0x00 0x00 0xff 0xff | 0x01 (0b00000001) |
/// --------------------------------------------------------------------------------
/// ---
///
/// ## \| User value | Expire Ts | Meta flags |
///
/// ## \| 0x12 0x34 0x56 | 0x00 0x00 0x00 0x00 0x00 0x00 0xff 0xff | 0x01 (0b00000001) |
///
/// V2 server accpets V2 requests and V1 txn requests that statrts with TiDB key
/// prefix (`m` and `t`).
@@ -69,7 +69,7 @@ pub struct RegionEpoch {
pub struct Region {
#[prost(uint64, tag = "1")]
pub id: u64,
/// Region key range [start_key, end_key).
/// Region key range \[start_key, end_key).
#[prost(bytes = "vec", tag = "2")]
pub start_key: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes = "vec", tag = "3")]

@@ -198,7 +198,7 @@ pub struct ScanRegionsRequest {
pub header: ::core::option::Option<RequestHeader>,
#[prost(bytes = "vec", tag = "2")]
pub start_key: ::prost::alloc::vec::Vec<u8>,
/// no limit when limit <= 0.
/// no limit when limit \<= 0.
#[prost(int32, tag = "3")]
pub limit: i32,
/// end_key is +inf when it is empty.

@@ -415,15 +415,16 @@ pub struct RegionHeartbeatResponse {
/// to pd regularly, pd will determine whether this region
/// should do ChangePeer or not.
/// E,g, max peer number is 3, region A, first only peer 1 in A.
///
/// 1. Pd region state -> Peers (1), ConfVer (1).
/// 2. Leader peer 1 reports region state to pd, pd finds the
/// peer number is < 3, so first changes its current region
/// state -> Peers (1, 2), ConfVer (1), and returns ChangePeer Adding 2.
/// 3. Leader does ChangePeer, then reports Peers (1, 2), ConfVer (2),
/// pd updates its state -> Peers (1, 2), ConfVer (2).
/// 4. Leader may report old Peers (1), ConfVer (1) to pd before ConfChange
/// finished, pd stills responses ChangePeer Adding 2, of course, we must
/// guarantee the second ChangePeer can't be applied in TiKV.
/// 1. Leader peer 1 reports region state to pd, pd finds the
/// peer number is \< 3, so first changes its current region
/// state -> Peers (1, 2), ConfVer (1), and returns ChangePeer Adding 2.
/// 1. Leader does ChangePeer, then reports Peers (1, 2), ConfVer (2),
/// pd updates its state -> Peers (1, 2), ConfVer (2).
/// 1. Leader may report old Peers (1), ConfVer (1) to pd before ConfChange
/// finished, pd stills responses ChangePeer Adding 2, of course, we must
/// guarantee the second ChangePeer can't be applied in TiKV.
#[prost(message, optional, tag = "2")]
pub change_peer: ::core::option::Option<ChangePeer>,
/// Pd can return transfer_leader to let TiKV does leader transfer itself.

@@ -444,8 +445,8 @@ pub struct RegionHeartbeatResponse {
pub split_region: ::core::option::Option<SplitRegion>,
/// Multiple change peer operations atomically.
/// Note: PD can use both ChangePeer and ChangePeerV2 at the same time
/// (not in the same RegionHeartbeatResponse).
/// Now, PD use ChangePeerV2 only for replacing peers.
/// (not in the same RegionHeartbeatResponse).
/// Now, PD use ChangePeerV2 only for replacing peers.
#[prost(message, optional, tag = "9")]
pub change_peer_v2: ::core::option::Option<ChangePeerV2>,
}

@@ -622,7 +623,7 @@ pub struct StoreStats {
/// Store query stats
#[prost(message, optional, tag = "21")]
pub query_stats: ::core::option::Option<QueryStats>,
/// Score that represents the speed of the store, ranges in [1, 100], lower is better.
/// Score that represents the speed of the store, ranges in \[1, 100\], lower is better.
#[prost(uint64, tag = "22")]
pub slow_score: u64,
/// Damaged regions on the store that need to be removed by PD.

@@ -14,7 +14,7 @@ pub struct RaftMessage {
/// true means to_peer is a tombstone peer and it should remove itself.
#[prost(bool, tag = "6")]
pub is_tombstone: bool,
/// Region key range [start_key, end_key).
/// Region key range \[start_key, end_key).
#[prost(bytes = "vec", tag = "7")]
pub start_key: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes = "vec", tag = "8")]
@@ -257,7 +257,7 @@ const ENGINE_LABEL_TIFLASH: &str = "tiflash";
const ENGINE_LABEL_TIFLASH_COMPUTE: &str = "tiflash_compute";

fn is_valid_tikv_store(store: &metapb::Store) -> bool {
if metapb::StoreState::from_i32(store.state).unwrap() == metapb::StoreState::Tombstone {
if metapb::StoreState::try_from(store.state).unwrap() == metapb::StoreState::Tombstone {
return false;
}
let is_tiflash = store.labels.iter().any(|label| {
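This hunk and the three below it are the hand-written source changes required by the prost bump: prost 0.12 steers enum decoding away from the generated `from_i32` helper (which returned an `Option`) toward a generated `TryFrom<i32>` implementation, so every call site switches to `try_from`. A self-contained sketch of the pattern, using a hand-written stand-in for a generated enum:

```rust
// Stand-in for a prost-generated enum such as metapb::StoreState; the real
// generated code provides an equivalent TryFrom<i32> implementation.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum StoreState {
    Up = 0,
    Offline = 1,
    Tombstone = 2,
}

impl TryFrom<i32> for StoreState {
    // The generated impl uses prost's own error type here.
    type Error = i32;

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(StoreState::Up),
            1 => Ok(StoreState::Offline),
            2 => Ok(StoreState::Tombstone),
            other => Err(other),
        }
    }
}

fn main() {
    let raw = 2;
    // Before this commit (prost 0.11): StoreState::from_i32(raw).unwrap()
    // After (prost 0.12), as in the hunks above and below:
    let state = StoreState::try_from(raw).unwrap();
    assert_eq!(state, StoreState::Tombstone);
}
```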
@@ -61,7 +61,7 @@ macro_rules! impl_request {
.map(|c| c.api_version)
.unwrap_or_default();
self.context = Some(context);
self.set_api_version(kvrpcpb::ApiVersion::from_i32(api_version).unwrap());
self.set_api_version(kvrpcpb::ApiVersion::try_from(api_version).unwrap());
}

fn set_api_version(&mut self, api_version: kvrpcpb::ApiVersion) {

@@ -245,7 +245,7 @@ impl Buffer {
}

pub(crate) fn mutate(&mut self, m: kvrpcpb::Mutation) {
let op = kvrpcpb::Op::from_i32(m.op).unwrap();
let op = kvrpcpb::Op::try_from(m.op).unwrap();
match op {
kvrpcpb::Op::Put => self.put(m.key.into(), m.value),
kvrpcpb::Op::Del => self.delete(m.key.into()),

@@ -706,7 +706,7 @@ pub struct TransactionStatus {
impl From<kvrpcpb::CheckTxnStatusResponse> for TransactionStatus {
fn from(mut resp: kvrpcpb::CheckTxnStatusResponse) -> TransactionStatus {
TransactionStatus {
action: Action::from_i32(resp.action).unwrap(),
action: Action::try_from(resp.action).unwrap(),
kind: (resp.commit_version, resp.lock_ttl, resp.lock_info.take()).into(),
is_expired: false,
}