Rollup merge of rust-lang#123803 - Sp00ph:shrink_to_fix, r=Mark-Simulacrum

Fix `VecDeque::shrink_to` UB when `handle_alloc_error` unwinds.

Fixes rust-lang#123369

For `VecDeque`, it's relatively simple to restore the buffer to a consistent state, so this PR does just that.

Note that with its current implementation, `shrink_to` may change the internal arrangement of elements in the buffer, so e.g. `[D, <uninit>, A, B, C]` will become `[<uninit>, A, B, C, D]` and `[<uninit>, <uninit>, A, B, C]` may become `[B, C, <uninit>, <uninit>, A]` if `shrink_to` unwinds. This shouldn't be an issue, though, as we don't make any guarantees about the stability of the internal buffer arrangement (and this case is impossible to hit on stable anyway).
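The fix wraps the call to `RawVec::shrink_to_fit` in a drop guard. Below is a minimal, self-contained sketch of that pattern; the `Buffer` type and its `shrink` method are hypothetical stand-ins for illustration, not the actual `VecDeque`/`RawVec` code:

```rust
use std::mem;

// Hypothetical stand-in for a ring buffer; not the real `VecDeque` internals.
struct Buffer {
    head: usize,
    data: Vec<u32>,
}

impl Buffer {
    fn shrink(&mut self) {
        // Guard that rolls the buffer back to a consistent state if the
        // fallible step below unwinds.
        struct Guard<'a> {
            buf: &'a mut Buffer,
            old_head: usize,
        }

        impl Drop for Guard<'_> {
            fn drop(&mut self) {
                // Only reached on unwind: restore the recorded head index.
                self.buf.head = self.old_head;
            }
        }

        let old_head = self.head;
        let guard = Guard { buf: self, old_head };

        // Stand-in for the allocator call that may invoke
        // `handle_alloc_error` and unwind.
        guard.buf.data.shrink_to_fit();

        // Success path: defuse the guard so its `Drop` impl never runs.
        mem::forget(guard);
    }
}

fn main() {
    let mut b = Buffer { head: 1, data: vec![1, 2, 3] };
    b.shrink();
    assert_eq!(b.head, 1);
}
```

In the real patch the guard stores `old_head` and `target_cap` and its `Drop` impl calls `abort_shrink` rather than a plain field reset, but the forget-on-success structure is the same.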

This PR also includes a test with code adapted from rust-lang#123369 which fails without the new `shrink_to` code. Does this suffice or do we maybe need more exhaustive tests like in rust-lang#108475?

cc `@Amanieu`

`@rustbot` label +T-libs
matthiaskrgr committed Apr 14, 2024
2 parents b2b4a2a + fb00ac6 commit 7f88868
Showing 3 changed files with 120 additions and 2 deletions.
66 changes: 65 additions & 1 deletion library/alloc/src/collections/vec_deque/mod.rs
@@ -978,6 +978,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
// overflow.
let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
// Used in the drop guard below.
let old_head = self.head;

if self.len == 0 {
self.head = 0;
@@ -1030,12 +1032,74 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
self.head = new_head;
}
self.buf.shrink_to_fit(target_cap);

struct Guard<'a, T, A: Allocator> {
deque: &'a mut VecDeque<T, A>,
old_head: usize,
target_cap: usize,
}

impl<T, A: Allocator> Drop for Guard<'_, T, A> {
#[cold]
fn drop(&mut self) {
unsafe {
// SAFETY: This is only called if `buf.shrink_to_fit` unwinds,
// which is the only time it's safe to call `abort_shrink`.
self.deque.abort_shrink(self.old_head, self.target_cap)
}
}
}

let guard = Guard { deque: self, old_head, target_cap };

guard.deque.buf.shrink_to_fit(target_cap);

// Don't drop the guard if we didn't unwind.
mem::forget(guard);

debug_assert!(self.head < self.capacity() || self.capacity() == 0);
debug_assert!(self.len <= self.capacity());
}

/// Reverts the deque back into a consistent state in case `shrink_to` failed.
/// This is necessary to prevent UB if the backing allocator returns an error
/// from `shrink` and `handle_alloc_error` subsequently unwinds (see #123369).
///
/// `old_head` refers to the head index before `shrink_to` was called. `target_cap`
/// is the capacity that it was trying to shrink to.
unsafe fn abort_shrink(&mut self, old_head: usize, target_cap: usize) {
// Moral equivalent of self.head + self.len <= target_cap. Won't overflow
// because `self.len <= target_cap`.
if self.head <= target_cap - self.len {
// The deque's buffer is contiguous, so no need to copy anything around.
return;
}

// `shrink_to` already copied the head to fit into the new capacity, so this won't overflow.
let head_len = target_cap - self.head;
// `self.head > target_cap - self.len` => `self.len > target_cap - self.head =: head_len` so this must be positive.
let tail_len = self.len - head_len;

if tail_len <= cmp::min(head_len, self.capacity() - target_cap) {
// There's enough spare capacity to copy the tail to the back (because `tail_len < self.capacity() - target_cap`),
// and copying the tail should be cheaper than copying the head (because `tail_len <= head_len`).

unsafe {
// The old tail and the new tail can't overlap because the head slice lies between them. The
// head slice ends at `target_cap`, so that's where we copy to.
self.copy_nonoverlapping(0, target_cap, tail_len);
}
} else {
// Either there's not enough spare capacity to make the deque contiguous, or the head is shorter than the tail
// (and therefore hopefully cheaper to copy).
unsafe {
// The old and the new head slice can overlap, so we can't use `copy_nonoverlapping` here.
self.copy(self.head, old_head, head_len);
self.head = old_head;
}
}
}

/// Shortens the deque, keeping the first `len` elements and dropping
/// the rest.
///
55 changes: 54 additions & 1 deletion library/alloc/src/collections/vec_deque/tests.rs
@@ -1,4 +1,11 @@
use core::iter::TrustedLen;
#![feature(alloc_error_hook)]

use crate::alloc::{AllocError, Layout};
use core::{iter::TrustedLen, ptr::NonNull};
use std::{
alloc::{set_alloc_error_hook, take_alloc_error_hook, System},
panic::{catch_unwind, AssertUnwindSafe},
};

use super::*;

@@ -790,6 +797,52 @@ fn test_shrink_to() {
}
}

#[test]
fn test_shrink_to_unwind() {
// This tests that `shrink_to` leaves the deque in a consistent state when
// the call to `RawVec::shrink_to_fit` unwinds. The code is adapted from #123369
// but changed to hopefully not have any UB even if the test fails.

struct BadAlloc;

unsafe impl Allocator for BadAlloc {
fn allocate(&self, l: Layout) -> Result<NonNull<[u8]>, AllocError> {
// We allocate zeroed here so that the whole buffer of the deque
// is always initialized. That way, even if the deque is left in
// an inconsistent state, no uninitialized memory should be accessed.
System.allocate_zeroed(l)
}

unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
unsafe { System.deallocate(ptr, layout) }
}

unsafe fn shrink(
&self,
_ptr: NonNull<u8>,
_old_layout: Layout,
_new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
Err(AllocError)
}
}

// preserve the old error hook just in case.
let old_error_hook = take_alloc_error_hook();
set_alloc_error_hook(|_| panic!("alloc error"));

let mut v = VecDeque::with_capacity_in(15, BadAlloc);
v.push_back(1);
v.push_front(2);
// This should unwind because it calls `BadAlloc::shrink` and then `handle_alloc_error` which unwinds.
assert!(catch_unwind(AssertUnwindSafe(|| v.shrink_to_fit())).is_err());
// This should only pass if the deque is left in a consistent state.
assert_eq!(v, [2, 1]);

// restore the old error hook.
set_alloc_error_hook(old_error_hook);
}

#[test]
fn test_shrink_to_fit() {
// This test checks that every single combination of head and tail position,
1 change: 1 addition & 0 deletions library/alloc/src/lib.rs
@@ -92,6 +92,7 @@
// tidy-alphabetical-start
#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
#![cfg_attr(test, feature(alloc_error_hook))]
#![cfg_attr(test, feature(is_sorted))]
#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
