mirror of https://github.com/tikv/client-rust.git
Merge pull request #252 from ekexium/batch-get-for-update
Fix batch_get_for_update
This commit is contained in:
commit 4870985dcd

@@ -10,8 +10,9 @@ use tikv_client_store::{HasError, Request};
pub use self::{
    plan::{
        Collect, CollectError, DefaultProcessor, Dispatch, ExtractError, Merge, MergeResponse,
        MultiRegion, Plan, Process, ProcessResponse, ResolveLock, RetryRegion,
        Collect, CollectAndMatchKey, CollectError, DefaultProcessor, Dispatch, ExtractError,
        HasKeys, Merge, MergeResponse, MultiRegion, Plan, PreserveKey, Process, ProcessResponse,
        ResolveLock, RetryRegion,
    },
    plan_builder::{PlanBuilder, SingleKey},
    shard::Shardable,

@@ -6,11 +6,13 @@ use crate::{
    request::{KvRequest, Shardable},
    stats::tikv_stats,
    transaction::{resolve_locks, HasLocks},
    Error, Result,
    util::iter::FlatMapOkIterExt,
    Error, Key, KvPair, Result, Value,
};
use async_trait::async_trait;
use futures::{prelude::*, stream::StreamExt};
use std::{marker::PhantomData, sync::Arc};
use tikv_client_proto::kvrpcpb;
use tikv_client_store::{HasError, HasRegionError, KvClient};

/// A plan for how to execute a request. A user builds up a plan with various

@@ -55,6 +57,12 @@ impl<Req: KvRequest> Plan for Dispatch<Req> {
    }
}

impl<Req: KvRequest + HasKeys> HasKeys for Dispatch<Req> {
    fn get_keys(&self) -> Vec<Key> {
        self.request.get_keys()
    }
}

pub struct MultiRegion<P: Plan, PdC: PdClient> {
    pub(super) inner: P,
    pub pd_client: Arc<PdC>,

@@ -123,6 +131,12 @@ impl<In: Clone + Send + Sync + 'static, P: Plan<Result = Vec<Result<In>>>, M: Me
#[derive(Clone, Copy)]
pub struct Collect;

/// A merge strategy to be used with
/// [`preserve_keys`](super::plan_builder::PlanBuilder::preserve_keys).
/// It matches the keys preserved before and the values returned in the response.
#[derive(Clone, Debug)]
pub struct CollectAndMatchKey;

/// A merge strategy which returns an error if any response is an error and
/// otherwise returns a Vec of the results.
#[derive(Clone, Copy)]

@@ -256,6 +270,17 @@ where
    }
}

impl<P: Plan + HasKeys, PdC: PdClient> HasKeys for ResolveLock<P, PdC> {
    fn get_keys(&self) -> Vec<Key> {
        self.inner.get_keys()
    }
}

/// When executed, the plan extracts errors from its inner plan, and
/// returns an `Err` wrapping the error.
///
/// The errors come from two places: `Err` from inner plans, and `Ok(response)`
/// where `response` contains unresolved errors (`error` and `region_error`).
pub struct ExtractError<P: Plan> {
    pub inner: P,
}

@@ -268,11 +293,6 @@ impl<P: Plan> Clone for ExtractError<P> {
    }
}

/// When executed, the plan extracts errors from its inner plan, and
/// returns an `Err` wrapping the error.
///
/// The errors come from two places: `Err` from inner plans, and `Ok(response)`
/// where `response` contains unresolved errors (`error` and `region_error`).
#[async_trait]
impl<P: Plan> Plan for ExtractError<P>
where

@@ -292,6 +312,98 @@ where
    }
}

/// When executed, the plan clones the keys and executes its inner plan, then
/// returns `(keys, response)`.
///
/// It's useful when the information of keys is lost in the response but needed
/// for processing.
pub struct PreserveKey<P: Plan + HasKeys> {
    pub inner: P,
}

impl<P: Plan + HasKeys> Clone for PreserveKey<P> {
    fn clone(&self) -> Self {
        PreserveKey {
            inner: self.inner.clone(),
        }
    }
}

#[async_trait]
impl<P> Plan for PreserveKey<P>
where
    P: Plan + HasKeys,
{
    type Result = ResponseAndKeys<P::Result>;

    async fn execute(&self) -> Result<Self::Result> {
        let keys = self.inner.get_keys();
        let res = self.inner.execute().await?;
        Ok(ResponseAndKeys(res, keys))
    }
}

pub trait HasKeys {
    fn get_keys(&self) -> Vec<Key>;
}

// contains a response and the corresponding keys
// currently only used for matching keys and values in pessimistic lock requests
#[derive(Debug, Clone)]
pub struct ResponseAndKeys<Resp>(Resp, Vec<Key>);

impl<Resp: HasError> HasError for ResponseAndKeys<Resp> {
    fn error(&mut self) -> Option<Error> {
        self.0.error()
    }
}

impl<Resp: HasLocks> HasLocks for ResponseAndKeys<Resp> {
    fn take_locks(&mut self) -> Vec<tikv_client_proto::kvrpcpb::LockInfo> {
        self.0.take_locks()
    }
}

impl<Resp: HasRegionError> HasRegionError for ResponseAndKeys<Resp> {
    fn region_error(&mut self) -> Option<Error> {
        self.0.region_error()
    }
}

impl Merge<ResponseAndKeys<kvrpcpb::PessimisticLockResponse>> for CollectAndMatchKey {
    type Out = Vec<KvPair>;

    fn merge(
        &self,
        input: Vec<Result<ResponseAndKeys<kvrpcpb::PessimisticLockResponse>>>,
    ) -> Result<Self::Out> {
        input
            .into_iter()
            .flat_map_ok(|ResponseAndKeys(mut resp, keys)| {
                let values = resp.take_values();
                let not_founds = resp.take_not_founds();
                let v: Vec<_> = if not_founds.is_empty() {
                    // Legacy TiKV does not distinguish a non-existent key from an existing key
                    // with an empty value. We assume that a key does not exist if its value
                    // is empty.
                    let values: Vec<Value> = values.into_iter().filter(|v| v.is_empty()).collect();
                    keys.into_iter().zip(values).map(From::from).collect()
                } else {
                    assert_eq!(values.len(), not_founds.len());
                    let values: Vec<Value> = values
                        .into_iter()
                        .zip(not_founds.into_iter())
                        .filter_map(|(v, not_found)| if not_found { None } else { Some(v) })
                        .collect();
                    keys.into_iter().zip(values).map(From::from).collect()
                };
                // FIXME sucks to collect and re-iterate, but the iterators have different types
                v.into_iter()
            })
            .collect()
    }
}

#[cfg(test)]
mod test {
    use super::*;

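The pieces above work together: `PreserveKey` captures the request's keys before execution, `ResponseAndKeys` carries them alongside the response, and `CollectAndMatchKey` zips them back with the returned values, dropping keys the server reported as not found. Below is a minimal, self-contained sketch of that idea using toy types only; it illustrates the pattern, not the crate's actual `Plan` machinery, and every name and value in it is made up for illustration.

```rust
// Toy stand-ins for the real HasKeys / PreserveKey / CollectAndMatchKey types above.
trait HasKeys {
    fn get_keys(&self) -> Vec<Vec<u8>>;
}

struct LockRequest {
    keys: Vec<Vec<u8>>,
}

impl HasKeys for LockRequest {
    fn get_keys(&self) -> Vec<Vec<u8>> {
        self.keys.clone()
    }
}

// The response only carries values plus a per-key "not found" flag, so the keys
// have to be preserved on the side and re-attached afterwards.
struct LockResponse {
    values: Vec<Vec<u8>>,
    not_founds: Vec<bool>,
}

fn execute(req: &LockRequest) -> LockResponse {
    // Pretend the server found the first and third key but not the second.
    let _ = req;
    LockResponse {
        values: vec![b"v1".to_vec(), Vec::new(), b"v3".to_vec()],
        not_founds: vec![false, true, false],
    }
}

fn main() {
    let req = LockRequest {
        keys: vec![b"k1".to_vec(), b"k2".to_vec(), b"k3".to_vec()],
    };

    // PreserveKey: remember the keys before executing, then re-attach them to
    // the response, dropping entries the server reported as not found.
    let keys = req.get_keys();
    let resp = execute(&req);

    let pairs: Vec<(Vec<u8>, Vec<u8>)> = keys
        .into_iter()
        .zip(resp.values.into_iter().zip(resp.not_founds))
        .filter_map(|(key, (value, not_found))| if not_found { None } else { Some((key, value)) })
        .collect();

    assert_eq!(pairs.len(), 2);
    assert_eq!(pairs[0], (b"k1".to_vec(), b"v1".to_vec()));
    assert_eq!(pairs[1], (b"k3".to_vec(), b"v3".to_vec()));
    println!("matched {} existing pairs", pairs.len());
}
```
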
@@ -1,11 +1,12 @@
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.

use super::PreserveKey;
use crate::{
    backoff::Backoff,
    pd::PdClient,
    request::{
        DefaultProcessor, Dispatch, ExtractError, KvRequest, Merge, MergeResponse, MultiRegion,
        Plan, Process, ProcessResponse, ResolveLock, RetryRegion, Shardable,
        DefaultProcessor, Dispatch, ExtractError, HasKeys, KvRequest, Merge, MergeResponse,
        MultiRegion, Plan, Process, ProcessResponse, ResolveLock, RetryRegion, Shardable,
    },
    store::Store,
    transaction::HasLocks,

@@ -161,6 +162,19 @@ impl<PdC: PdClient, R: KvRequest> PlanBuilder<PdC, Dispatch<R>, NoTarget> {
    }
}

impl<PdC: PdClient, P: Plan + HasKeys> PlanBuilder<PdC, P, NoTarget>
where
    P::Result: HasError,
{
    pub fn preserve_keys(self) -> PlanBuilder<PdC, PreserveKey<P>, NoTarget> {
        PlanBuilder {
            pd_client: self.pd_client.clone(),
            plan: PreserveKey { inner: self.plan },
            phantom: PhantomData,
        }
    }
}

impl<PdC: PdClient, P: Plan> PlanBuilder<PdC, P, Targetted>
where
    P::Result: HasError,

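`preserve_keys` has the same shape as the other builder steps: it consumes the builder and re-wraps the current plan in a new layer. A tiny self-contained illustration of that wrapping, with toy types standing in for the real `PlanBuilder` (no PD client, no typestate marker):

```rust
// Toy wrapper and builder; the real PlanBuilder also carries a pd_client and a
// typestate marker, which are omitted here.
struct PreserveKey<P> {
    inner: P,
}

struct PlanBuilder<P> {
    plan: P,
}

impl<P> PlanBuilder<P> {
    fn preserve_keys(self) -> PlanBuilder<PreserveKey<P>> {
        PlanBuilder {
            plan: PreserveKey { inner: self.plan },
        }
    }
}

fn main() {
    let builder = PlanBuilder { plan: "dispatch" };
    let wrapped = builder.preserve_keys();
    // The wrapping is visible in the type: PlanBuilder<PreserveKey<&str>>.
    let PreserveKey { inner } = wrapped.plan;
    assert_eq!(inner, "dispatch");
}
```

In the real builder, this wrapping is what later lets `CollectAndMatchKey` see the keys that were sent.
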
@@ -2,13 +2,30 @@
use crate::{
    pd::PdClient,
    request::{Dispatch, KvRequest, Plan, ResolveLock, RetryRegion},
    request::{Dispatch, HasKeys, KvRequest, Plan, PreserveKey, ResolveLock, RetryRegion},
    store::Store,
    Result,
};
use futures::stream::BoxStream;
use std::sync::Arc;

macro_rules! impl_inner_shardable {
    () => {
        type Shard = P::Shard;

        fn shards(
            &self,
            pd_client: &Arc<impl PdClient>,
        ) -> BoxStream<'static, Result<(Self::Shard, Store)>> {
            self.inner.shards(pd_client)
        }

        fn apply_shard(&mut self, shard: Self::Shard, store: &Store) -> Result<()> {
            self.inner.apply_shard(shard, store)
        }
    };
}

pub trait Shardable {
    type Shard: Send;

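`impl_inner_shardable!` only exists to avoid repeating the delegating `type Shard`, `shards`, and `apply_shard` items in every wrapper impl below. Here is the same forwarding trick in isolation, assuming a simplified toy trait rather than the crate's `Shardable`:

```rust
// Toy trait standing in for Shardable; the point is only the macro that
// forwards every item to `self.inner`.
trait Describe {
    type Shard;
    fn describe(&self) -> String;
}

macro_rules! impl_inner_describe {
    () => {
        type Shard = P::Shard;

        fn describe(&self) -> String {
            self.inner.describe()
        }
    };
}

struct Leaf;

impl Describe for Leaf {
    type Shard = ();
    fn describe(&self) -> String {
        "leaf".to_owned()
    }
}

// A wrapper that adds behaviour elsewhere but delegates exactly like its inner plan.
struct Wrapper<P> {
    inner: P,
}

impl<P: Describe> Describe for Wrapper<P> {
    impl_inner_describe!();
}

fn main() {
    let wrapped = Wrapper { inner: Wrapper { inner: Leaf } };
    assert_eq!(wrapped.describe(), "leaf");
}
```
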
@@ -37,33 +54,15 @@ impl<Req: KvRequest + Shardable> Shardable for Dispatch<Req> {
    }
}

impl<P: Plan + Shardable, PdC: PdClient> Shardable for ResolveLock<P, PdC> {
    type Shard = P::Shard;
    impl_inner_shardable!();
}

    fn shards(
        &self,
        pd_client: &Arc<impl PdClient>,
    ) -> BoxStream<'static, Result<(Self::Shard, Store)>> {
        self.inner.shards(pd_client)
    }

    fn apply_shard(&mut self, shard: Self::Shard, store: &Store) -> Result<()> {
        self.inner.apply_shard(shard, store)
    }

impl<P: Plan + HasKeys + Shardable> Shardable for PreserveKey<P> {
    impl_inner_shardable!();
}

impl<P: Plan + Shardable, PdC: PdClient> Shardable for RetryRegion<P, PdC> {
    type Shard = P::Shard;

    fn shards(
        &self,
        pd_client: &Arc<impl PdClient>,
    ) -> BoxStream<'static, Result<(Self::Shard, Store)>> {
        self.inner.shards(pd_client)
    }

    fn apply_shard(&mut self, shard: Self::Shard, store: &Store) -> Result<()> {
        self.inner.apply_shard(shard, store)
    }

    impl_inner_shardable!();
}

#[macro_export]

@@ -2,7 +2,9 @@
use crate::{
    pd::PdClient,
    request::{Collect, DefaultProcessor, KvRequest, Merge, Process, Shardable, SingleKey},
    request::{
        Collect, DefaultProcessor, HasKeys, KvRequest, Merge, Process, Shardable, SingleKey,
    },
    store::{store_stream_for_keys, store_stream_for_range_by_start_key, Store},
    timestamp::TimestampExt,
    transaction::HasLocks,

@@ -359,6 +361,15 @@ impl Shardable for kvrpcpb::PessimisticLockRequest {
    }
}

impl HasKeys for kvrpcpb::PessimisticLockRequest {
    fn get_keys(&self) -> Vec<Key> {
        self.mutations
            .iter()
            .map(|m| m.key.clone().into())
            .collect()
    }
}

impl Merge<kvrpcpb::PessimisticLockResponse> for Collect {
    // FIXME: PessimisticLockResponse only contains values.
    // We need to pair keys and values returned somewhere.

@@ -3,7 +3,7 @@
use crate::{
    backoff::Backoff,
    pd::{PdClient, PdRpcClient},
    request::{Collect, CollectError, Plan, PlanBuilder, RetryOptions},
    request::{Collect, CollectAndMatchKey, CollectError, Plan, PlanBuilder, RetryOptions},
    timestamp::TimestampExt,
    transaction::{buffer::Buffer, lowering::*},
    BoundRange, Error, Key, KvPair, Result, Value,

@@ -27,7 +27,7 @@ use tokio::{sync::RwLock, time::Duration};
/// and its mutations are readable for transactions with `start_ts` >= its `commit_ts`.
///
/// Mutations, or write operations made in a transaction are buffered locally and sent at the time of commit,
/// except for pessimisitc locking.
/// except for pessimistic locking.
/// In pessimistic mode, all write operations or `xxx_for_update` operations will first acquire pessimistic locks in TiKV.
/// A lock exists until the transaction is committed (in the first phase of 2PC) or rolled back, or it exceeds its Time To Live (TTL).
///

@@ -126,8 +126,6 @@ impl<PdC: PdClient> Transaction<PdC> {
    /// and the value is not cached in the local buffer.
    /// So normal `get`-like commands after `get_for_update` will not be influenced, they still read values at `start_ts`.
    ///
    /// Different from `get`, this request does not distinguish between empty values and non-existent keys
    /// , i.e. querying non-existent keys will result in empty values.
    ///
    /// It can only be used in pessimistic mode.
    ///

@@ -150,10 +148,12 @@ impl<PdC: PdClient> Transaction<PdC> {
        if !self.is_pessimistic() {
            Err(Error::InvalidTransactionType)
        } else {
            let key = key.into();
            let mut values = self.pessimistic_lock(iter::once(key.clone()), true).await?;
            assert!(values.len() == 1);
            Ok(values.pop().unwrap())
            let mut pairs = self.pessimistic_lock(iter::once(key.into()), true).await?;
            debug_assert!(pairs.len() <= 1);
            match pairs.pop() {
                Some(pair) => Ok(Some(pair.1)),
                None => Ok(None),
            }
        }
    }

@@ -236,13 +236,12 @@ impl<PdC: PdClient> Transaction<PdC> {
    /// and the value is not cached in the local buffer.
    /// So normal `get`-like commands after `batch_get_for_update` will not be influenced, they still read values at `start_ts`.
    ///
    /// Different from `batch_get`, this request does not distinguish between empty values and non-existent keys
    /// , i.e. querying non-existent keys will result in empty values.
    /// Non-existent entries will not appear in the result. The order of the keys is not retained in the result.
    ///
    /// It can only be used in pessimistic mode.
    ///
    /// # Examples
    /// ```rust,no_run,compile_fail
    /// ```rust,no_run
    /// # use tikv_client::{Key, Value, Config, TransactionClient};
    /// # use futures::prelude::*;
    /// # use std::collections::HashMap;

@@ -261,21 +260,16 @@ impl<PdC: PdClient> Transaction<PdC> {
    /// txn.commit().await.unwrap();
    /// # });
    /// ```
    // This is temporarily disabled because we cannot correctly match the keys and values.
    // See `impl KvRequest for kvrpcpb::PessimisticLockRequest` for details.
    #[allow(dead_code)]
    async fn batch_get_for_update(
    pub async fn batch_get_for_update(
        &mut self,
        keys: impl IntoIterator<Item = impl Into<Key>>,
    ) -> Result<impl Iterator<Item = KvPair>> {
        self.check_allow_operation().await?;
        if !self.is_pessimistic() {
            Err(Error::InvalidTransactionType)
        } else {
            let keys: Vec<Key> = keys.into_iter().map(|it| it.into()).collect();
            self.pessimistic_lock(keys.clone(), false).await?;
            self.batch_get(keys).await
            return Err(Error::InvalidTransactionType);
        }
        let keys: Vec<Key> = keys.into_iter().map(|it| it.into()).collect();
        Ok(self.pessimistic_lock(keys, true).await?.into_iter())
    }

    /// Create a new 'scan' request.

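With the `#[allow(dead_code)]` attribute removed and the method made `pub`, `batch_get_for_update` becomes part of the public transaction API. A usage sketch along the lines of the doc example and the integration test added later in this commit; the PD endpoint, the key names, and the `#[tokio::main]` runtime attribute are assumptions for illustration, not part of the diff:

```rust
use tikv_client::{KvPair, TransactionClient};

#[tokio::main]
async fn main() -> Result<(), tikv_client::Error> {
    // Assumed local PD endpoint; replace with your cluster's address.
    let client = TransactionClient::new(vec!["127.0.0.1:2379".to_owned()]).await?;

    let mut txn = client.begin_pessimistic().await?;

    // Locks `k1` and `k2`; only the pairs that currently exist come back.
    let pairs: Vec<KvPair> = txn
        .batch_get_for_update(vec!["k1".to_owned(), "k2".to_owned()])
        .await?
        .collect();
    println!("locked and read {} existing pairs", pairs.len());

    // `get_for_update` likewise locks the key and yields `None` when it is absent.
    let missing = txn.get_for_update("no-such-key".to_owned()).await?;
    assert!(missing.is_none());

    txn.commit().await?;
    Ok(())
}
```
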
@@ -625,46 +619,50 @@ impl<PdC: PdClient> Transaction<PdC> {
            .await
    }

    /// Pessimistically lock the keys.
    /// Pessimistically lock the keys, and optionally retrieve corresponding values.
    /// If a key does not exist, the corresponding pair will not appear in the result.
    ///
    /// Once resolved it acquires a lock on the key in TiKV.
    /// The lock prevents other transactions from mutating the entry until it is released.
    /// Once resolved it acquires locks on the keys in TiKV.
    /// A lock prevents other transactions from mutating the entry until it is released.
    ///
    /// # Panics
    ///
    /// Only valid for pessimistic transactions, panics if called on an optimistic transaction.
    async fn pessimistic_lock(
        &mut self,
        keys: impl IntoIterator<Item = Key>,
        need_value: bool,
    ) -> Result<Vec<Option<Value>>> {
    ) -> Result<Vec<KvPair>> {
        assert!(
            matches!(self.options.kind, TransactionKind::Pessimistic(_)),
            "`pessimistic_lock` is only valid to use with pessimistic transactions"
        );

        let keys: Vec<Key> = keys.into_iter().collect();
        if keys.is_empty() {
            return Ok(vec![]);
        }

        let first_key = keys[0].clone();
        let primary_lock = self.buffer.get_primary_key_or(&first_key).await;
        let lock_ttl = DEFAULT_LOCK_TTL;
        let for_update_ts = self.rpc.clone().get_timestamp().await?;
        self.options.push_for_update_ts(for_update_ts.clone());
        let request = new_pessimistic_lock_request(
            keys.clone().into_iter(),
            primary_lock,
            self.timestamp.clone(),
            lock_ttl,
            DEFAULT_LOCK_TTL,
            for_update_ts,
            need_value,
        );
        let plan = PlanBuilder::new(self.rpc.clone(), request)
            .resolve_lock(self.options.retry_options.lock_backoff.clone())
            .preserve_keys()
            .multi_region()
            .retry_region(self.options.retry_options.region_backoff.clone())
            .merge(Collect)
            .merge(CollectAndMatchKey)
            .plan();
        let values = plan
            .execute()
            .await
            .map(|r| r.into_iter().map(Into::into).collect());
        let pairs = plan.execute().await;

        self.start_auto_heartbeat().await;

@@ -672,7 +670,7 @@ impl<PdC: PdClient> Transaction<PdC> {
            self.buffer.lock(key).await;
        }

        values
        pairs
    }

    /// Checks if the transaction can perform arbitrary operations.

@@ -213,12 +213,15 @@ async fn raw_bank_transfer() -> Result<()> {
/// Tests transactional API when there are multiple regions.
/// Write large volumes of data to enforce region splitting.
/// In order to test `scan`, data is uniformly inserted.
// FIXME: this test is stupid. We should use pd-ctl or config files to make
// multiple regions, instead of bulk writing.
#[tokio::test]
#[serial]
async fn txn_write_million() -> Result<()> {
    const NUM_BITS_TXN: u32 = 7;
    const NUM_BITS_KEY_PER_TXN: u32 = 3;
    const NUM_BITS_TXN: u32 = 13;
    const NUM_BITS_KEY_PER_TXN: u32 = 4;
    let interval = 2u32.pow(32 - NUM_BITS_TXN - NUM_BITS_KEY_PER_TXN);
    let value = "large_value".repeat(10);

    clear_tikv().await;
    let client = TransactionClient::new(pd_addrs()).await?;

@@ -232,9 +235,9 @@ async fn txn_write_million() -> Result<()> {
        })
        .map(|u| u.to_be_bytes().to_vec())
        .take(2usize.pow(NUM_BITS_KEY_PER_TXN))
        .collect::<Vec<_>>(); // each txn puts 2 ^ 12 keys. 12 = 25 - 13
        .collect::<Vec<_>>();
        let mut txn = client.begin_optimistic().await?;
        for (k, v) in keys.iter().zip(iter::repeat(1u32.to_be_bytes().to_vec())) {
        for (k, v) in keys.iter().zip(iter::repeat(value.clone())) {
            txn.put(k.clone(), v).await?;
        }
        txn.commit().await?;

@@ -244,48 +247,71 @@ async fn txn_write_million() -> Result<()> {
        assert_eq!(res.count(), 2usize.pow(NUM_BITS_KEY_PER_TXN));
        txn.commit().await?;
    }
    /* FIXME: scan all keys will make the message size exceed its limit
    // test scan
    let limit = 2u32.pow(NUM_BITS_KEY_PER_TXN + NUM_BITS_TXN + 2); // large enough
    let snapshot = client.snapshot(
        client.current_timestamp().await?,
        TransactionOptions::default(),
    );
    let res = snapshot.scan(vec![].., limit).await?;
    assert_eq!(res.count(), 2usize.pow(NUM_BITS_KEY_PER_TXN + NUM_BITS_TXN));

    // test scan
    let limit = 2u32.pow(NUM_BITS_KEY_PER_TXN + NUM_BITS_TXN + 2); // large enough
    let snapshot = client.snapshot(
        client.current_timestamp().await?,
        TransactionOptions::default(),
    );
    let res = snapshot.scan(vec![].., limit).await?;
    assert_eq!(res.count(), 2usize.pow(NUM_BITS_KEY_PER_TXN + NUM_BITS_TXN));
    // scan by small range and combine them
    let mut rng = thread_rng();
    let mut keys = gen_u32_keys(200, &mut rng)
        .iter()
        .cloned()
        .collect::<Vec<_>>();
    keys.sort();

    // scan by small range and combine them
    let mut rng = thread_rng();
    let mut keys = gen_u32_keys(10, &mut rng)
        .iter()
        .cloned()
    let mut sum = 0;

    // empty key to key[0]
    let snapshot = client.snapshot(
        client.current_timestamp().await?,
        TransactionOptions::default(),
    );
    let res = snapshot.scan(vec![]..keys[0].clone(), limit).await?;
    sum += res.count();

    // key[i] .. key[i+1]
    for i in 0..keys.len() - 1 {
        let res = snapshot
            .scan(keys[i].clone()..keys[i + 1].clone(), limit)
            .await?;
        sum += res.count();
    }

    // keys[last] to unbounded
    let res = snapshot.scan(keys[keys.len() - 1].clone().., limit).await?;
    sum += res.count();

    assert_eq!(sum, 2usize.pow(NUM_BITS_KEY_PER_TXN + NUM_BITS_TXN));
    */
    // test batch_get and batch_get_for_update
    const SKIP_BITS: u32 = 12; // do not retrieve all because there's a limit of message size
    let mut cur = 0u32;
    let keys = iter::repeat_with(|| {
        let v = cur;
        cur = cur.overflowing_add(interval * 2u32.pow(SKIP_BITS)).0;
        v
    })
    .map(|u| u.to_be_bytes().to_vec())
    .take(2usize.pow(NUM_BITS_KEY_PER_TXN + NUM_BITS_TXN - SKIP_BITS))
    .collect::<Vec<_>>();

    let mut txn = client.begin_pessimistic().await?;
    let res = txn.batch_get(keys.clone()).await?.collect::<Vec<_>>();
    assert_eq!(res.len(), keys.len());

    let res = txn
        .batch_get_for_update(keys.clone())
        .await?
        .collect::<Vec<_>>();
    keys.sort();

    let mut sum = 0;

    // empty key to key[0]
    let snapshot = client.snapshot(
        client.current_timestamp().await?,
        TransactionOptions::default(),
    );
    let res = snapshot.scan(vec![]..keys[0].clone(), limit).await?;
    sum += res.count();

    // key[i] .. key[i+1]
    for i in 0..keys.len() - 1 {
        let res = snapshot
            .scan(keys[i].clone()..keys[i + 1].clone(), limit)
            .await?;
        sum += res.count();
    }

    // keys[last] to unbounded
    let res = snapshot.scan(keys[keys.len() - 1].clone().., limit).await?;
    sum += res.count();

    assert_eq!(sum, 2usize.pow(NUM_BITS_KEY_PER_TXN + NUM_BITS_TXN));
    assert_eq!(res.len(), keys.len());

    txn.commit().await?;
    Ok(())
}

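For context on the constants and the `overflowing_add` stepping used in this test: keys are spread uniformly over the `u32` space so that writes land in every region. A standalone sketch of that key generation, using the values from this hunk; nothing here talks to TiKV:

```rust
use std::iter;

fn main() {
    const NUM_BITS_TXN: u32 = 13;
    const NUM_BITS_KEY_PER_TXN: u32 = 4;
    // Step size that spaces 2^(13 + 4) keys evenly over the whole u32 range.
    let interval = 2u32.pow(32 - NUM_BITS_TXN - NUM_BITS_KEY_PER_TXN);

    let mut cur = 0u32;
    let keys: Vec<Vec<u8>> = iter::repeat_with(|| {
        let v = cur;
        // Wraps back to 0 after the last key, exactly as in the test.
        cur = cur.overflowing_add(interval).0;
        v
    })
    .map(|u| u.to_be_bytes().to_vec())
    .take(2usize.pow(NUM_BITS_TXN + NUM_BITS_KEY_PER_TXN))
    .collect();

    assert_eq!(keys.len(), 2usize.pow(NUM_BITS_TXN + NUM_BITS_KEY_PER_TXN));
    assert_eq!(keys[0], 0u32.to_be_bytes().to_vec());
    assert_eq!(keys[1], interval.to_be_bytes().to_vec());
}
```
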
@@ -298,7 +324,7 @@ async fn txn_bank_transfer() -> Result<()> {

    let people = gen_u32_keys(NUM_PEOPLE, &mut rng);
    let mut txn = client
        .begin_with_options(TransactionOptions::new_optimistic().try_one_pc())
        .begin_with_options(TransactionOptions::new_optimistic())
        .await?;
    let mut sum: u32 = 0;
    for person in &people {

@@ -543,8 +569,7 @@ async fn raw_write_million() -> Result<()> {
#[serial]
async fn pessimistic_rollback() -> Result<()> {
    clear_tikv().await;
    let client =
        TransactionClient::new_with_config(vec!["127.0.0.1:2379"], Default::default()).await?;
    let client = TransactionClient::new_with_config(pd_addrs(), Default::default()).await?;
    let mut preload_txn = client.begin_optimistic().await?;
    let key1 = vec![1];
    let value = key1.clone();

@@ -575,8 +600,7 @@ async fn pessimistic_rollback() -> Result<()> {
#[serial]
async fn lock_keys() -> Result<()> {
    clear_tikv().await;
    let client =
        TransactionClient::new_with_config(vec!["127.0.0.1:2379"], Default::default()).await?;
    let client = TransactionClient::new_with_config(pd_addrs(), Default::default()).await?;

    let k1 = b"key1".to_vec();
    let k2 = b"key2".to_vec();

@@ -606,6 +630,42 @@ async fn lock_keys() -> Result<()> {
    Ok(())
}

#[tokio::test]
#[serial]
async fn get_for_update() -> Result<()> {
    clear_tikv().await;
    let client = TransactionClient::new_with_config(pd_addrs(), Default::default()).await?;
    let key1 = "key".to_owned();
    let key2 = "another key".to_owned();
    let value1 = b"some value".to_owned();
    let value2 = b"another value".to_owned();
    let keys = vec![key1.clone(), key2.clone()];

    let mut t1 = client.begin_pessimistic().await?;
    let mut t2 = client.begin_pessimistic().await?;

    let mut t0 = client.begin_pessimistic().await?;
    t0.put(key1.clone(), value1).await?;
    t0.put(key2.clone(), value2).await?;
    t0.commit().await?;

    assert!(t1.get(key1.clone()).await?.is_none());
    assert!(t1.get_for_update(key1.clone()).await?.unwrap() == value1);
    t1.commit().await?;

    assert!(t2.batch_get(keys.clone()).await?.collect::<Vec<_>>().len() == 0);
    let res: HashMap<_, _> = t2
        .batch_get_for_update(keys.clone())
        .await?
        .map(From::from)
        .collect();
    t2.commit().await?;
    assert!(res.get(&key1.into()).unwrap() == &value1);
    assert!(res.get(&key2.into()).unwrap() == &value2);

    Ok(())
}

#[tokio::test]
#[serial]
async fn pessimistic_heartbeat() -> Result<()> {

@@ -99,8 +99,8 @@ impl Connection {
        let tso = TimestampOracle::new(id, &client)?;
        let cluster = Cluster {
            id,
            members,
            client,
            members,
            tso,
        };
        Ok(cluster)