Skip to content

Commit 816383c

Browse files
committed
Remove the TypedArena::alloc_from_iter specialization.
It was added in #78569. It's complicated and doesn't actually help performance. Also, add a comment explaining why the two `alloc_from_iter` functions are so different.
1 parent e0d7ed1 commit 816383c

File tree

1 file changed

+45
-90
lines changed
  • compiler/rustc_arena/src

1 file changed

+45
-90
lines changed

compiler/rustc_arena/src/lib.rs

+45-90
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515
#![feature(dropck_eyepatch)]
1616
#![feature(new_uninit)]
1717
#![feature(maybe_uninit_slice)]
18-
#![feature(min_specialization)]
1918
#![feature(decl_macro)]
2019
#![feature(pointer_byte_offsets)]
2120
#![feature(rustc_attrs)]
@@ -44,23 +43,6 @@ fn outline<F: FnOnce() -> R, R>(f: F) -> R {
4443
f()
4544
}
4645

47-
/// An arena that can hold objects of only one type.
48-
pub struct TypedArena<T> {
49-
/// A pointer to the next object to be allocated.
50-
ptr: Cell<*mut T>,
51-
52-
/// A pointer to the end of the allocated area. When this pointer is
53-
/// reached, a new chunk is allocated.
54-
end: Cell<*mut T>,
55-
56-
/// A vector of arena chunks.
57-
chunks: RefCell<Vec<ArenaChunk<T>>>,
58-
59-
/// Marker indicating that dropping the arena causes its owned
60-
/// instances of `T` to be dropped.
61-
_own: PhantomData<T>,
62-
}
63-
6446
struct ArenaChunk<T = u8> {
6547
/// The raw storage for the arena chunk.
6648
storage: NonNull<[MaybeUninit<T>]>,
@@ -130,6 +112,23 @@ impl<T> ArenaChunk<T> {
130112
const PAGE: usize = 4096;
131113
const HUGE_PAGE: usize = 2 * 1024 * 1024;
132114

115+
/// An arena that can hold objects of only one type.
116+
pub struct TypedArena<T> {
117+
/// A pointer to the next object to be allocated.
118+
ptr: Cell<*mut T>,
119+
120+
/// A pointer to the end of the allocated area. When this pointer is
121+
/// reached, a new chunk is allocated.
122+
end: Cell<*mut T>,
123+
124+
/// A vector of arena chunks.
125+
chunks: RefCell<Vec<ArenaChunk<T>>>,
126+
127+
/// Marker indicating that dropping the arena causes its owned
128+
/// instances of `T` to be dropped.
129+
_own: PhantomData<T>,
130+
}
131+
133132
impl<T> Default for TypedArena<T> {
134133
/// Creates a new `TypedArena`.
135134
fn default() -> TypedArena<T> {
@@ -144,77 +143,6 @@ impl<T> Default for TypedArena<T> {
144143
}
145144
}
146145

147-
trait IterExt<T> {
148-
fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
149-
}
150-
151-
impl<I, T> IterExt<T> for I
152-
where
153-
I: IntoIterator<Item = T>,
154-
{
155-
// This default collects into a `SmallVec` and then allocates by copying
156-
// from it. The specializations below for types like `Vec` are more
157-
// efficient, copying directly without the intermediate collecting step.
158-
// This default could be made more efficient, like
159-
// `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
160-
#[inline]
161-
default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
162-
let vec: SmallVec<[_; 8]> = self.into_iter().collect();
163-
vec.alloc_from_iter(arena)
164-
}
165-
}
166-
167-
impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
168-
#[inline]
169-
fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
170-
let len = self.len();
171-
if len == 0 {
172-
return &mut [];
173-
}
174-
// Move the content to the arena by copying and then forgetting it.
175-
let start_ptr = arena.alloc_raw_slice(len);
176-
unsafe {
177-
self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
178-
mem::forget(self);
179-
slice::from_raw_parts_mut(start_ptr, len)
180-
}
181-
}
182-
}
183-
184-
impl<T> IterExt<T> for Vec<T> {
185-
#[inline]
186-
fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
187-
let len = self.len();
188-
if len == 0 {
189-
return &mut [];
190-
}
191-
// Move the content to the arena by copying and then forgetting it.
192-
let start_ptr = arena.alloc_raw_slice(len);
193-
unsafe {
194-
self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
195-
self.set_len(0);
196-
slice::from_raw_parts_mut(start_ptr, len)
197-
}
198-
}
199-
}
200-
201-
impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
202-
#[inline]
203-
fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
204-
let len = self.len();
205-
if len == 0 {
206-
return &mut [];
207-
}
208-
// Move the content to the arena by copying and then forgetting it.
209-
let start_ptr = arena.alloc_raw_slice(len);
210-
unsafe {
211-
self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
212-
self.set_len(0);
213-
slice::from_raw_parts_mut(start_ptr, len)
214-
}
215-
}
216-
}
217-
218146
impl<T> TypedArena<T> {
219147
/// Allocates an object in the `TypedArena`, returning a reference to it.
220148
#[inline]
@@ -270,8 +198,35 @@ impl<T> TypedArena<T> {
270198

271199
#[inline]
272200
pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
201+
// This implementation is entirely separate from
202+
// `DroplessArena::alloc_from_iter`, even though conceptually they
203+
// are the same.
204+
//
205+
// `DroplessArena` (in the fast case) writes elements from the
206+
// iterator one at a time into the allocated memory. That's easy
207+
// because the elements don't implement `Drop`. But for `TypedArena`
208+
// they do implement `Drop`, which means that if the iterator panics we
209+
// could end up with some allocated-but-uninitialized elements, which
210+
// will then cause UB in `TypedArena::drop`.
211+
//
212+
// Instead we use an approach where any iterator panic will occur
213+
// before the memory is allocated. This function is much less hot than
214+
// `DroplessArena::alloc_from_iter`, so it doesn't need to be
215+
// hyper-optimized.
273216
assert!(mem::size_of::<T>() != 0);
274-
iter.alloc_from_iter(self)
217+
218+
let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
219+
if vec.is_empty() {
220+
return &mut [];
221+
}
222+
// Move the content to the arena by copying and then forgetting it.
223+
let len = vec.len();
224+
let start_ptr = self.alloc_raw_slice(len);
225+
unsafe {
226+
vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
227+
vec.set_len(0);
228+
slice::from_raw_parts_mut(start_ptr, len)
229+
}
275230
}
276231

277232
/// Grows the arena.

0 commit comments

Comments
 (0)