Rollup merge of #93365 - nnethercote:more-arena-cleanups, r=oli-obk

More arena cleanups

A sequel to #90990.

r? `@oli-obk`
Matthias Krüger 2022-01-27 22:32:30 +01:00 committed by GitHub
commit 0eb6753fc5

compiler/rustc_arena/src/lib.rs

@@ -45,24 +45,24 @@ pub struct TypedArena<T> {
     end: Cell<*mut T>,
 
     /// A vector of arena chunks.
-    chunks: RefCell<Vec<TypedArenaChunk<T>>>,
+    chunks: RefCell<Vec<ArenaChunk<T>>>,
 
     /// Marker indicating that dropping the arena causes its owned
     /// instances of `T` to be dropped.
     _own: PhantomData<T>,
 }
 
-struct TypedArenaChunk<T> {
+struct ArenaChunk<T = u8> {
     /// The raw storage for the arena chunk.
     storage: Box<[MaybeUninit<T>]>,
     /// The number of valid entries in the chunk.
     entries: usize,
 }
 
-impl<T> TypedArenaChunk<T> {
+impl<T> ArenaChunk<T> {
     #[inline]
-    unsafe fn new(capacity: usize) -> TypedArenaChunk<T> {
-        TypedArenaChunk { storage: Box::new_uninit_slice(capacity), entries: 0 }
+    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
+        ArenaChunk { storage: Box::new_uninit_slice(capacity), entries: 0 }
     }
 
     /// Destroys this arena chunk.
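
The rename also adds a default type parameter, `<T = u8>`, which is what lets the byte-oriented `DroplessArena` below refer to the type as plain `ArenaChunk`. A minimal standalone sketch of the pattern (hypothetical names, not the rustc code):

```rust
// A default type parameter lets byte-oriented users omit the parameter.
struct Chunk<T = u8> {
    storage: Vec<T>,
}

fn main() {
    // Explicit parameter, as `TypedArena<T>` uses it: Chunk<u32>.
    let typed: Chunk<u32> = Chunk { storage: vec![1, 2, 3] };
    // Defaulted parameter, as `DroplessArena` uses it: `Chunk` means `Chunk<u8>`.
    let bytes: Chunk = Chunk { storage: vec![0xde, 0xad] };
    assert_eq!(typed.storage.len(), 3);
    assert_eq!(bytes.storage.len(), 2);
}
```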
@@ -125,6 +125,11 @@ impl<I, T> IterExt<T> for I
 where
     I: IntoIterator<Item = T>,
 {
+    // This default collects into a `SmallVec` and then allocates by copying
+    // from it. The specializations below for types like `Vec` are more
+    // efficient, copying directly without the intermediate collecting step.
+    // This default could be made more efficient, like
+    // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
     #[inline]
     default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
         let vec: SmallVec<[_; 8]> = self.into_iter().collect();
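
The `default fn` here is the slow path in a specialization hierarchy: the impls further down for `std::array::IntoIter`, `Vec`, and `SmallVec` override it. A reduced sketch of that structure, with hypothetical names (nightly-only, since `min_specialization` is unstable):

```rust
#![feature(min_specialization)]

trait AllocFromIter<T> {
    fn strategy(self) -> &'static str;
}

impl<I, T> AllocFromIter<T> for I
where
    I: IntoIterator<Item = T>,
{
    // The blanket default: collect into a temporary buffer, then copy.
    default fn strategy(self) -> &'static str {
        "collect into a buffer, then copy into the arena"
    }
}

impl<T> AllocFromIter<T> for Vec<T> {
    // The override: the elements are already contiguous, so copy directly.
    fn strategy(self) -> &'static str {
        "copy straight from the Vec's buffer"
    }
}

fn main() {
    assert_eq!((0..3).strategy(), "collect into a buffer, then copy into the arena");
    assert_eq!(vec![1, 2, 3].strategy(), "copy straight from the Vec's buffer");
}
```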
@@ -139,7 +144,7 @@ impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
         if len == 0 {
             return &mut [];
         }
-        // Move the content to the arena by copying and then forgetting it
+        // Move the content to the arena by copying and then forgetting it.
         unsafe {
             let start_ptr = arena.alloc_raw_slice(len);
             self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
@@ -156,7 +161,7 @@ impl<T> IterExt<T> for Vec<T> {
         if len == 0 {
             return &mut [];
         }
-        // Move the content to the arena by copying and then forgetting it
+        // Move the content to the arena by copying and then forgetting it.
         unsafe {
             let start_ptr = arena.alloc_raw_slice(len);
             self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
@@ -173,7 +178,7 @@ impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
         if len == 0 {
             return &mut [];
         }
-        // Move the content to the arena by copying and then forgetting it
+        // Move the content to the arena by copying and then forgetting it.
         unsafe {
             let start_ptr = arena.alloc_raw_slice(len);
             self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
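
All three specializations share the same two-step move: bitwise-copy the elements into arena memory, then make the source forget it ever owned them so nothing is dropped twice. A self-contained sketch of the idiom, with a plain `Vec` standing in for the arena (hypothetical helper, not the rustc code):

```rust
use std::mem::MaybeUninit;

// Copy `src`'s elements into `dst`, then shrink `src` to length 0 so the
// originals are never dropped; ownership travels with the bytes.
fn move_all<T>(mut src: Vec<T>, dst: &mut Vec<MaybeUninit<T>>) {
    let len = src.len();
    dst.reserve(len);
    unsafe {
        let start = dst.as_mut_ptr().add(dst.len()).cast::<T>();
        src.as_ptr().copy_to_nonoverlapping(start, len);
        src.set_len(0); // "forget" the elements, but still free src's buffer
        dst.set_len(dst.len() + len);
    }
}

fn main() {
    let mut arena: Vec<MaybeUninit<String>> = Vec::new();
    move_all(vec!["a".into(), "b".into()], &mut arena);
    // Each String was moved exactly once: no double free, no element drop.
    assert_eq!(unsafe { arena[0].assume_init_ref() }, "a");
}
```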
@@ -272,7 +277,7 @@ impl<T> TypedArena<T> {
             // Also ensure that this chunk can fit `additional`.
             new_cap = cmp::max(additional, new_cap);
 
-            let mut chunk = TypedArenaChunk::<T>::new(new_cap);
+            let mut chunk = ArenaChunk::<T>::new(new_cap);
             self.ptr.set(chunk.start());
             self.end.set(chunk.end());
             chunks.push(chunk);
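
The `cmp::max` guard matters when a single allocation request is larger than the next planned chunk: growth alone could under-allocate. A toy illustration (the doubling policy here is an assumption for the sketch, not necessarily the exact rustc policy):

```rust
use std::cmp;

fn next_chunk_capacity(prev_cap: usize, additional: usize) -> usize {
    let mut new_cap = prev_cap * 2; // assumed growth policy for the sketch
    // Also ensure that this chunk can fit `additional`.
    new_cap = cmp::max(additional, new_cap);
    new_cap
}

fn main() {
    assert_eq!(next_chunk_capacity(8, 4), 16); // doubling wins
    assert_eq!(next_chunk_capacity(8, 100), 100); // an oversized request wins
}
```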
@@ -281,7 +286,7 @@ impl<T> TypedArena<T> {
 
     // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
     // chunks.
-    fn clear_last_chunk(&self, last_chunk: &mut TypedArenaChunk<T>) {
+    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
         // Determine how much was filled.
         let start = last_chunk.start() as usize;
         // We obtain the value of the pointer to the first uninitialized element.
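
The "how much was filled" bookkeeping is pointer arithmetic: initialized entries = (bump pointer - chunk start) / element size. A sketch of that calculation (the real code additionally special-cases zero-sized types):

```rust
use std::mem;

// Entries initialized so far in a bump-allocated chunk (non-ZST only).
fn filled_entries<T>(chunk_start: *const T, bump_ptr: *const T) -> usize {
    assert!(mem::size_of::<T>() > 0, "ZSTs are counted separately");
    (bump_ptr as usize - chunk_start as usize) / mem::size_of::<T>()
}

fn main() {
    let buf = [0u32; 8];
    let start = buf.as_ptr();
    // Pretend three entries have been bump-allocated.
    let bump = unsafe { start.add(3) };
    assert_eq!(filled_entries(start, bump), 3);
}
```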
@@ -340,7 +345,7 @@ pub struct DroplessArena {
     end: Cell<*mut u8>,
 
     /// A vector of arena chunks.
-    chunks: RefCell<Vec<TypedArenaChunk<u8>>>,
+    chunks: RefCell<Vec<ArenaChunk>>,
 }
 
 unsafe impl Send for DroplessArena {}
@@ -378,7 +383,7 @@ impl DroplessArena {
             // Also ensure that this chunk can fit `additional`.
             new_cap = cmp::max(additional, new_cap);
 
-            let mut chunk = TypedArenaChunk::<u8>::new(new_cap);
+            let mut chunk = ArenaChunk::new(new_cap);
             self.start.set(chunk.start());
             self.end.set(chunk.end());
             chunks.push(chunk);
@@ -520,10 +525,19 @@ impl DroplessArena {
     }
 }
 
-// Declare an `Arena` containing one dropless arena and many typed arenas (the
-// types of the typed arenas are specified by the arguments). The dropless
-// arena will be used for any types that impl `Copy`, and also for any of the
-// specified types that satisfy `!mem::needs_drop`.
+/// Declare an `Arena` containing one dropless arena and many typed arenas (the
+/// types of the typed arenas are specified by the arguments).
+///
+/// There are three cases of interest.
+/// - Types that are `Copy`: these need not be specified in the arguments. They
+///   will use the `DroplessArena`.
+/// - Types that are `!Copy` and `!Drop`: these must be specified in the
+///   arguments. An empty `TypedArena` will be created for each one, but the
+///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
+///   This is odd but harmless, because an empty arena allocates no memory.
+/// - Types that are `!Copy` and `Drop`: these must be specified in the
+///   arguments. The `TypedArena` will be used for them.
+///
 #[rustc_macro_transparency = "semitransparent"]
 pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
     #[derive(Default)]
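
The three cases in the new doc comment hinge on `mem::needs_drop`, which is also what `allocate_on` checks further down. A quick standalone illustration with hypothetical types:

```rust
use std::mem::needs_drop;

#[allow(dead_code)]
#[derive(Clone, Copy)]
struct Interned(u32); // `Copy`: always fine in the DroplessArena.
#[allow(dead_code)]
struct Flags(u32); // `!Copy` but `!Drop`: the DroplessArena still suffices.
#[allow(dead_code)]
struct Owned(Vec<u32>); // `!Copy` and `Drop`: needs its TypedArena.

fn main() {
    assert!(!needs_drop::<Interned>());
    assert!(!needs_drop::<Flags>());
    assert!(needs_drop::<Owned>());
}
```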
@@ -532,7 +546,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
         $($name: $crate::TypedArena<$ty>,)*
     }
 
-    pub trait ArenaAllocatable<'tcx, T = Self>: Sized {
+    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
         fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self;
         fn allocate_from_iter<'a>(
             arena: &'a Arena<'tcx>,
@@ -541,7 +555,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
     }
 
     // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
-    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, ()> for T {
+    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
         #[inline]
         fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
             arena.dropless.alloc(self)
@@ -555,7 +569,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
         }
     }
     $(
-        impl<'tcx> ArenaAllocatable<'tcx, $ty> for $ty {
+        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
             #[inline]
             fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
                 if !::std::mem::needs_drop::<Self>() {
@@ -581,7 +595,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
 
     impl<'tcx> Arena<'tcx> {
         #[inline]
-        pub fn alloc<T: ArenaAllocatable<'tcx, U>, U>(&self, value: T) -> &mut T {
+        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&self, value: T) -> &mut T {
             value.allocate_on(self)
         }
 
@@ -594,7 +608,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
             self.dropless.alloc_slice(value)
         }
 
-        pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, U>, U>(
+        pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, C>, C>(
             &'a self,
             iter: impl ::std::iter::IntoIterator<Item = T>,
         ) -> &'a mut [T] {
@@ -603,5 +617,10 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
     }
 }
 
+// Marker types that let us give different behaviour for arenas allocating
+// `Copy` types vs `!Copy` types.
+pub struct IsCopy;
+pub struct IsNotCopy;
+
 #[cfg(test)]
 mod tests;
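
The switch from `()`/`$ty` to `IsCopy`/`IsNotCopy` gives the two `ArenaAllocatable` impls distinct trait parameters, so they cannot overlap and the caller's type inference silently picks the right one. A reduced, stable-Rust sketch of the trick (rustc-specific parts stripped, names hypothetical):

```rust
pub struct IsCopy;
pub struct IsNotCopy;

// The extra parameter `C` turns one trait into a family of traits, so the
// blanket impl for `Copy` types and the per-type impls cannot collide.
trait Allocatable<C = IsNotCopy>: Sized {
    fn arena_kind() -> &'static str;
}

impl<T: Copy> Allocatable<IsCopy> for T {
    fn arena_kind() -> &'static str {
        "dropless arena"
    }
}

#[allow(dead_code)]
struct Diagnostic(String); // !Copy: gets its own impl, as `$ty` does above.
impl Allocatable<IsNotCopy> for Diagnostic {
    fn arena_kind() -> &'static str {
        "typed arena"
    }
}

// Mirrors `Arena::alloc`: the caller never names `C`; inference finds the
// unique impl that applies to `T` and deduces the marker from it.
fn kind_of<T: Allocatable<C>, C>() -> &'static str {
    T::arena_kind()
}

fn main() {
    assert_eq!(kind_of::<u32, _>(), "dropless arena");
    assert_eq!(kind_of::<Diagnostic, _>(), "typed arena");
}
```

This is why `Arena::alloc` in the diff can take `<T: ArenaAllocatable<'tcx, C>, C>` without call sites ever spelling out the marker.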