txn: Error handling for pessimistic locks #332

Merged 7 commits on Feb 22, 2022.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -107,4 +107,4 @@ jobs:
       - name: start tiup playground
         run: /home/runner/.tiup/bin/tiup playground nightly --mode tikv-slim --kv 3 --without-monitor --kv.config /home/runner/work/client-rust/client-rust/config/tikv.toml --pd.config /home/runner/work/client-rust/client-rust/config/pd.toml &
       - name: integration test
-        run: make integration-test
+        run: MULTI_REGION=1 make integration-test
12 changes: 10 additions & 2 deletions src/raw/client.rs
@@ -13,7 +13,7 @@ use crate::{
     pd::{PdClient, PdRpcClient},
     raw::lowering::*,
     request::{Collect, CollectSingle, Plan},
-    BoundRange, ColumnFamily, Key, KvPair, Result, Value,
+    Backoff, BoundRange, ColumnFamily, Key, KvPair, Result, Value,
 };

 const MAX_RAW_KV_SCAN_LIMIT: u32 = 10240;
@@ -359,11 +359,19 @@ impl<PdC: PdClient> Client<PdC> {
     /// # });
     /// ```
     pub async fn delete_range(&self, range: impl Into<BoundRange>) -> Result<()> {
+        self.delete_range_opt(range, DEFAULT_REGION_BACKOFF).await
+    }
+
+    pub async fn delete_range_opt(
+        &self,
+        range: impl Into<BoundRange>,
+        backoff: Backoff,
+    ) -> Result<()> {
         debug!(self.logger, "invoking raw delete_range request");
         self.assert_non_atomic()?;
         let request = new_raw_delete_range_request(range.into(), self.cf.clone());
         let plan = crate::request::PlanBuilder::new(self.rpc.clone(), request)
-            .retry_multi_region(DEFAULT_REGION_BACKOFF)
+            .retry_multi_region(backoff)
             .extract_error()
             .plan();
         plan.execute().await?;
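With `delete_range_opt`, callers can now override the default region backoff. A minimal usage sketch (not part of the PR), assuming a non-atomic `RawClient` and the crate's `Backoff::no_jitter_backoff` constructor; the function name and key range are illustrative:

```rust
use tikv_client::{Backoff, RawClient, Result};

// Hypothetical helper: delete a key range with a custom retry policy.
async fn clear_staging(client: &RawClient) -> Result<()> {
    // Retry region errors up to 8 times, delays growing from 2 ms toward a 500 ms cap.
    let backoff = Backoff::no_jitter_backoff(2, 500, 8);
    client
        .delete_range_opt("staging_a".to_owned().."staging_z".to_owned(), backoff)
        .await
}
```

The plain `delete_range` keeps its old behavior by delegating with `DEFAULT_REGION_BACKOFF`, and both variants still refuse to run on an atomic client via `assert_non_atomic`.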
47 changes: 39 additions & 8 deletions src/request/plan.rs
@@ -16,6 +16,7 @@ use crate::{
     stats::tikv_stats,
     store::RegionStore,
     transaction::{resolve_locks, HasLocks},
+    util::iter::FlatMapOkIterExt,
     Error, Result,
 };

@@ -63,6 +64,11 @@ pub struct RetryableMultiRegion<P: Plan, PdC: PdClient> {
     pub(super) inner: P,
     pub pd_client: Arc<PdC>,
     pub backoff: Backoff,
+
+    /// Preserve each region's result so that downstream plans can handle them.
+    /// If true, return Ok with every region's result preserved, even if some of them are Err.
+    /// Otherwise, return the first Err if there is any.
+    pub preserve_region_results: bool,
 }

 impl<P: Plan + Shardable, PdC: PdClient> RetryableMultiRegion<P, PdC>
@@ -76,6 +82,7 @@ where
         current_plan: P,
         backoff: Backoff,
         permits: Arc<Semaphore>,
+        preserve_region_results: bool,
     ) -> Result<<Self as Plan>::Result> {
         let shards = current_plan.shards(&pd_client).collect::<Vec<_>>().await;
         let mut handles = Vec::new();
@@ -89,16 +96,29 @@
                 region_store,
                 backoff.clone(),
                 permits.clone(),
+                preserve_region_results,
             ));
             handles.push(handle);
         }
-        Ok(try_join_all(handles)
-            .await?
-            .into_iter()
-            .collect::<Result<Vec<_>>>()?
-            .into_iter()
-            .flatten()
-            .collect())
+
+        let results = try_join_all(handles).await?;
+        if preserve_region_results {
+            Ok(results
+                .into_iter()
+                .flat_map_ok(|x| x)
+                .map(|x| match x {
+                    Ok(r) => r,
+                    Err(e) => Err(e),
+                })
+                .collect())
+        } else {
+            Ok(results
+                .into_iter()
+                .collect::<Result<Vec<_>>>()?
+                .into_iter()
+                .flatten()
+                .collect())
+        }
     }

     #[async_recursion]
@@ -108,6 +128,7 @@ where
         region_store: RegionStore,
         mut backoff: Backoff,
         permits: Arc<Semaphore>,
+        preserve_region_results: bool,
     ) -> Result<<Self as Plan>::Result> {
         // limit concurrent requests
         let permit = permits.acquire().await.unwrap();
@@ -125,7 +146,14 @@
                     if !region_error_resolved {
                         futures_timer::Delay::new(duration).await;
                     }
-                    Self::single_plan_handler(pd_client, plan, backoff, permits).await
+                    Self::single_plan_handler(
+                        pd_client,
+                        plan,
+                        backoff,
+                        permits,
+                        preserve_region_results,
+                    )
+                    .await
                 }
                 None => Err(Error::RegionError(e)),
             }
@@ -242,6 +270,7 @@ impl<P: Plan, PdC: PdClient> Clone for RetryableMultiRegion<P, PdC> {
             inner: self.inner.clone(),
             pd_client: self.pd_client.clone(),
             backoff: self.backoff.clone(),
+            preserve_region_results: self.preserve_region_results,
         }
     }
 }
@@ -263,6 +292,7 @@ where
             self.inner.clone(),
             self.backoff.clone(),
             concurrency_permits.clone(),
+            self.preserve_region_results,
         )
         .await
     }
@@ -556,6 +586,7 @@ mod test {
             },
             pd_client: Arc::new(MockPdClient::default()),
             backoff: Backoff::no_backoff(),
+            preserve_region_results: false,
         };
         assert!(plan.execute().await.is_err())
     }
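The two branches above differ only in how per-region errors surface: the fail-fast path collapses everything into the first `Err`, while preserve mode returns one `Result` per shard so a downstream merge can act on partial success. A standalone sketch (not the crate's code) of the same folding logic, using plain `Vec`s and `String` errors in place of the crate's types:

```rust
// Each inner Vec stands for one region's batch of per-key results.
fn collapse(results: Vec<Result<Vec<u32>, String>>) -> Result<Vec<u32>, String> {
    // Fail-fast: the first region-level Err aborts the whole call.
    Ok(results
        .into_iter()
        .collect::<Result<Vec<Vec<u32>>, String>>()?
        .into_iter()
        .flatten()
        .collect())
}

fn preserve(results: Vec<Result<Vec<u32>, String>>) -> Vec<Result<u32, String>> {
    // Keep every region's outcome so a later merge step can inspect partial failures.
    results
        .into_iter()
        .flat_map(|shard| match shard {
            Ok(values) => values.into_iter().map(Ok).collect::<Vec<_>>(),
            Err(e) => vec![Err(e)],
        })
        .collect()
}

fn main() {
    let data = vec![Ok(vec![1, 2]), Err("region 2 failed".to_string()), Ok(vec![3])];
    assert!(collapse(data.clone()).is_err()); // fail-fast discards the successful regions
    assert_eq!(preserve(data).len(), 4); // 3 values + 1 error, all still visible
}
```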
18 changes: 18 additions & 0 deletions src/request/plan_builder.rs
@@ -113,13 +113,31 @@ where
     pub fn retry_multi_region(
         self,
         backoff: Backoff,
     ) -> PlanBuilder<PdC, RetryableMultiRegion<P, PdC>, Targetted> {
+        self.make_retry_multi_region(backoff, false)
+    }
+
+    /// Preserve all results, even if some of them are Err, so that every response
+    /// reaches the merge step and partially successful results are handled correctly.
+    pub fn retry_multi_region_preserve_results(
+        self,
+        backoff: Backoff,
+    ) -> PlanBuilder<PdC, RetryableMultiRegion<P, PdC>, Targetted> {
+        self.make_retry_multi_region(backoff, true)
+    }
+
+    fn make_retry_multi_region(
+        self,
+        backoff: Backoff,
+        preserve_region_results: bool,
+    ) -> PlanBuilder<PdC, RetryableMultiRegion<P, PdC>, Targetted> {
         PlanBuilder {
             pd_client: self.pd_client.clone(),
             plan: RetryableMultiRegion {
                 inner: self.plan,
                 pd_client: self.pd_client,
                 backoff,
+                preserve_region_results,
             },
             phantom: PhantomData,
         }
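A sketch of how the new builder method is meant to be combined, as crate-internal pseudocode. `request` stands for some shardable KV request and `CollectPerShard` is an illustrative name for a merge step that inspects per-shard errors; neither is necessarily the PR's exact call site:

```rust
// Hypothetical call site inside the crate (names are illustrative).
let plan = crate::request::PlanBuilder::new(self.rpc.clone(), request)
    .retry_multi_region_preserve_results(DEFAULT_REGION_BACKOFF)
    .merge(CollectPerShard)
    .plan();
// Per-region errors now reach the merge step instead of aborting the plan,
// so a partially failed pessimistic-lock batch can be rolled back precisely.
let results = plan.execute().await?;
```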
41 changes: 41 additions & 0 deletions src/transaction/buffer.rs
@@ -174,6 +174,19 @@ impl Buffer {
         }
     }

+    /// Unlock the given key if locked.
+    pub fn unlock(&mut self, key: &Key) {
+        if let Some(value) = self.entry_map.get_mut(key) {
+            if let BufferEntry::Locked(v) = value {
+                if let Some(v) = v {
+                    *value = BufferEntry::Cached(v.take());
+                } else {
+                    self.entry_map.remove(key);
+                }
+            }
+        }
+    }
+
     /// Put a value into the buffer (does not write through).
     pub fn put(&mut self, key: Key, value: Value) {
         let mut entry = self.entry_map.entry(key.clone());
@@ -485,6 +498,12 @@ mod tests {
         };
     }

+    macro_rules! assert_entry_none {
+        ($key: ident) => {
+            assert!(matches!(buffer.entry_map.get(&$key), None,))
+        };
+    }
+
     // Insert + Delete = CheckNotExists
     let key: Key = b"key1".to_vec().into();
     buffer.insert(key.clone(), b"value1".to_vec());
@@ -510,5 +529,27 @@
         buffer.delete(key.clone());
         buffer.insert(key.clone(), b"value1".to_vec());
         assert_entry!(key, BufferEntry::Put(_));
+
+        // Lock + Unlock = None
+        let key: Key = b"key4".to_vec().into();
+        buffer.lock(key.clone());
+        buffer.unlock(&key);
+        assert_entry_none!(key);
+
+        // Cached + Lock + Unlock = Cached
+        let key: Key = b"key5".to_vec().into();
+        let val: Value = b"value5".to_vec();
+        let val_ = val.clone();
+        let r = block_on(buffer.get_or_else(key.clone(), move |_| ready(Ok(Some(val_)))));
+        assert_eq!(r.unwrap().unwrap(), val);
+        buffer.lock(key.clone());
+        buffer.unlock(&key);
+        assert_entry!(key, BufferEntry::Cached(Some(_)));
+        assert_eq!(
+            block_on(buffer.get_or_else(key, move |_| ready(Err(internal_err!("")))))
+                .unwrap()
+                .unwrap(),
+            val
+        );
     }
 }
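`unlock` is the rollback counterpart of `lock`: when a pessimistic lock request fails for some keys, the transaction can revert those keys' buffer entries rather than leave them marked as locked, as the two new tests above check. A self-contained model of just that state transition (the real `BufferEntry` has more variants; `Entry` here is a stand-in with only the two states involved):

```rust
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Entry {
    // A value known from a previous read; None means "known to be absent".
    Cached(Option<Vec<u8>>),
    // Locked, optionally remembering the value that was cached beforehand.
    Locked(Option<Option<Vec<u8>>>),
}

fn unlock(map: &mut HashMap<String, Entry>, key: &str) {
    if let Some(value) = map.get_mut(key) {
        if let Entry::Locked(v) = value {
            if let Some(v) = v {
                // The lock shadowed a cached value: restore it.
                *value = Entry::Cached(v.take());
            } else {
                // The lock created the entry: remove it entirely.
                map.remove(key);
            }
        }
    }
}

fn main() {
    let mut map = HashMap::new();

    // Lock + Unlock on a fresh key leaves no entry behind.
    map.insert("k1".to_string(), Entry::Locked(None));
    unlock(&mut map, "k1");
    assert_eq!(map.get("k1"), None);

    // Unlocking a key that had a cached value restores the cache.
    map.insert("k2".to_string(), Entry::Locked(Some(Some(b"v".to_vec()))));
    unlock(&mut map, "k2");
    assert_eq!(map.get("k2"), Some(&Entry::Cached(Some(b"v".to_vec()))));
}
```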