bumpalo/lib.rs
1#![doc = include_str!("../README.md")]
2#![deny(missing_debug_implementations)]
3#![deny(missing_docs)]
4#![cfg_attr(not(feature = "std"), no_std)]
5#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
6
7#[doc(hidden)]
8pub extern crate alloc as core_alloc;
9
10#[cfg(feature = "boxed")]
11pub mod boxed;
12#[cfg(feature = "collections")]
13pub mod collections;
14
15mod alloc;
16
17use core::cell::Cell;
18use core::cmp::Ordering;
19use core::fmt::Display;
20use core::iter;
21use core::marker::PhantomData;
22use core::mem;
23use core::ptr::{self, NonNull};
24use core::slice;
25use core::str;
26use core_alloc::alloc::{alloc, dealloc, Layout};
27
28#[cfg(feature = "allocator_api")]
29use core_alloc::alloc::{AllocError, Allocator};
30
31#[cfg(all(feature = "allocator-api2", not(feature = "allocator_api")))]
32use allocator_api2::alloc::{AllocError, Allocator};
33
34pub use alloc::AllocErr;
35
36/// An error returned from [`Bump::try_alloc_try_with`].
37#[derive(Clone, PartialEq, Eq, Debug)]
38pub enum AllocOrInitError<E> {
39 /// Indicates that the initial allocation failed.
40 Alloc(AllocErr),
41 /// Indicates that the initializer failed with the contained error after
42 /// allocation.
43 ///
44 /// It is possible but not guaranteed that the allocated memory has been
45 /// released back to the allocator at this point.
46 Init(E),
47}
48impl<E> From<AllocErr> for AllocOrInitError<E> {
49 fn from(e: AllocErr) -> Self {
50 Self::Alloc(e)
51 }
52}
53impl<E: Display> Display for AllocOrInitError<E> {
54 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
55 match self {
56 AllocOrInitError::Alloc(err) => err.fmt(f),
57 AllocOrInitError::Init(err) => write!(f, "initialization failed: {}", err),
58 }
59 }
60}
61
62/// An arena to bump allocate into.
63///
64/// ## No `Drop`s
65///
66/// Objects that are bump-allocated will never have their [`Drop`] implementation
67/// called — unless you do it manually yourself. This makes it relatively
68/// easy to leak memory or other resources.
69///
70/// If you have a type which internally manages
71///
72/// * an allocation from the global heap (e.g. [`Vec<T>`]),
73/// * open file descriptors (e.g. [`std::fs::File`]), or
74/// * any other resource that must be cleaned up (e.g. an `mmap`)
75///
76/// and relies on its `Drop` implementation to clean up the internal resource,
77/// then if you allocate that type with a `Bump`, you need to find a new way to
78/// clean up after it yourself.
79///
80/// Potential solutions are:
81///
82 /// * Using [`bumpalo::boxed::Box::new_in`] instead of [`Bump::alloc`], which
83/// will drop wrapped values similarly to [`std::boxed::Box`]. Note that this
84/// requires enabling the `"boxed"` Cargo feature for this crate. **This is
85/// often the easiest solution.**
86///
87/// * Calling [`drop_in_place`][drop_in_place] or using
88/// [`std::mem::ManuallyDrop`][manuallydrop] to manually drop these types.
89///
90/// * Using [`bumpalo::collections::Vec`] instead of [`std::vec::Vec`].
91///
92/// * Avoiding allocating these problematic types within a `Bump`.
93///
94/// Note that not calling `Drop` is memory safe! Destructors are never
95 /// guaranteed to run in Rust, so you can't rely on them for enforcing memory
96/// safety.
97///
98/// [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
99/// [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
100/// [`std::fs::File`]: https://doc.rust-lang.org/std/fs/struct.File.html
101/// [drop_in_place]: https://doc.rust-lang.org/std/ptr/fn.drop_in_place.html
102/// [manuallydrop]: https://doc.rust-lang.org/std/mem/struct.ManuallyDrop.html
103/// [`bumpalo::collections::Vec`]: collections/vec/struct.Vec.html
104/// [`std::vec::Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
105/// [`bumpalo::boxed::Box::new_in`]: boxed/struct.Box.html#method.new_in
106/// [`std::boxed::Box`]: https://doc.rust-lang.org/std/boxed/struct.Box.html
107///
108/// ## Example
109///
110/// ```
111/// use bumpalo::Bump;
112///
113/// // Create a new bump arena.
114/// let bump = Bump::new();
115///
116/// // Allocate values into the arena.
117/// let forty_two = bump.alloc(42);
118/// assert_eq!(*forty_two, 42);
119///
120/// // Mutable references are returned from allocation.
121/// let mut s = bump.alloc("bumpalo");
122/// *s = "the bump allocator; and also is a buffalo";
123/// ```
124///
125/// ## Allocation Methods Come in Many Flavors
126///
127/// There are various allocation methods on `Bump`, the simplest being
128/// [`alloc`][Bump::alloc]. The others exist to satisfy some combination of
129/// fallible allocation and initialization. The allocation methods are
130/// summarized in the following table:
131///
132/// <table>
133/// <thead>
134/// <tr>
135/// <th></th>
136/// <th>Infallible Allocation</th>
137/// <th>Fallible Allocation</th>
138/// </tr>
139/// </thead>
140/// <tr>
141/// <th>By Value</th>
142/// <td><a href="#method.alloc"><code>alloc</code></a></td>
143/// <td><a href="#method.try_alloc"><code>try_alloc</code></a></td>
144/// </tr>
145/// <tr>
146/// <th>Infallible Initializer Function</th>
147/// <td><a href="#method.alloc_with"><code>alloc_with</code></a></td>
148/// <td><a href="#method.try_alloc_with"><code>try_alloc_with</code></a></td>
149/// </tr>
150/// <tr>
151/// <th>Fallible Initializer Function</th>
152/// <td><a href="#method.alloc_try_with"><code>alloc_try_with</code></a></td>
153/// <td><a href="#method.try_alloc_try_with"><code>try_alloc_try_with</code></a></td>
154/// </tr>
157/// </table>
158///
159/// ### Fallible Allocation: The `try_alloc_` Method Prefix
160///
161/// These allocation methods let you recover from out-of-memory (OOM)
162/// scenarios, rather than raising a panic on OOM.
163///
164/// ```
165/// use bumpalo::Bump;
166///
167/// let bump = Bump::new();
168///
169/// match bump.try_alloc(MyStruct {
170/// // ...
171/// }) {
172/// Ok(my_struct) => {
173/// // Allocation succeeded.
174/// }
175/// Err(e) => {
176/// // Out of memory.
177/// }
178/// }
179///
180/// struct MyStruct {
181/// // ...
182/// }
183/// ```
184///
185/// ### Initializer Functions: The `_with` Method Suffix
186///
187/// Calling one of the generic `…alloc(x)` methods is essentially equivalent to
188/// the matching [`…alloc_with(|| x)`](?search=alloc_with). However if you use
189/// `…alloc_with`, then the closure will not be invoked until after allocating
190/// space for storing `x` on the heap.
191///
192/// This can be useful in certain edge-cases related to compiler optimizations.
193/// When evaluating for example `bump.alloc(x)`, semantically `x` is first put
194/// on the stack and then moved onto the heap. In some cases, the compiler is
195/// able to optimize this into constructing `x` directly on the heap, however
196/// in many cases it does not.
197///
198/// The `…alloc_with` functions try to help the compiler be smarter. In most
199 /// cases, doing for example `bump.try_alloc_with(|| x)` in release mode will be
200/// enough to help the compiler realize that this optimization is valid and
201/// to construct `x` directly onto the heap.
202///
203/// #### Warning
204///
205/// These functions critically depend on compiler optimizations to achieve their
206 /// desired effect. This means that they are not an effective tool when compiling
207 /// with optimizations disabled.
208///
209/// Even when optimizations are on, these functions do not **guarantee** that
210/// the value is constructed on the heap. To the best of our knowledge no such
211/// guarantee can be made in stable Rust as of 1.54.
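///
/// For example, a sketch of constructing a large value in place (whether the
/// temporary stack copy is actually elided is up to the optimizer):
///
/// ```
/// use bumpalo::Bump;
///
/// let bump = Bump::new();
///
/// // With optimizations enabled, the closure's return value can be written
/// // straight into the arena instead of being built on the stack first.
/// let big = bump.alloc_with(|| [0u8; 4096]);
/// assert_eq!(big.len(), 4096);
/// ```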
212///
213/// ### Fallible Initialization: The `_try_with` Method Suffix
214///
215/// The generic [`…alloc_try_with(|| x)`](?search=_try_with) methods behave
216/// like the purely `_with` suffixed methods explained above. However, they
217/// allow for fallible initialization by accepting a closure that returns a
218/// [`Result`] and will attempt to undo the initial allocation if this closure
219/// returns [`Err`].
220///
221/// #### Warning
222///
223/// If the inner closure returns [`Ok`], space for the entire [`Result`] remains
224/// allocated inside `self`. This can be a problem especially if the [`Err`]
225/// variant is larger, but even otherwise there may be overhead for the
226/// [`Result`]'s discriminant.
227///
228/// <p><details><summary>Undoing the allocation in the <code>Err</code> case
229/// always fails if <code>f</code> successfully made any additional allocations
230/// in <code>self</code>.</summary>
231///
232 /// For example, the following will also always leak the space for the [`Result`]
233/// into this `Bump`, even though the inner reference isn't kept and the [`Err`]
234/// payload is returned semantically by value:
235///
236/// ```rust
237/// let bump = bumpalo::Bump::new();
238///
239/// let r: Result<&mut [u8; 1000], ()> = bump.alloc_try_with(|| {
240/// let _ = bump.alloc(0_u8);
241/// Err(())
242/// });
243///
244/// assert!(r.is_err());
245/// ```
246///
247///</details></p>
248///
249/// Since [`Err`] payloads are first placed on the heap and then moved to the
250/// stack, `bump.…alloc_try_with(|| x)?` is likely to execute more slowly than
251/// the matching `bump.…alloc(x?)` in case of initialization failure. If this
252/// happens frequently, using the plain un-suffixed method may perform better.
253///
254/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
255/// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
256/// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
257///
258/// ### `Bump` Allocation Limits
259///
260/// `bumpalo` supports setting a limit on the maximum bytes of memory that can
261/// be allocated for use in a particular `Bump` arena. This limit can be set and removed with
262/// [`set_allocation_limit`][Bump::set_allocation_limit].
263/// The allocation limit is only enforced when allocating new backing chunks for
264/// a `Bump`. Updating the allocation limit will not affect existing allocations
265/// or any future allocations within the `Bump`'s current chunk.
266///
267/// #### Example
268///
269/// ```
270/// let bump = bumpalo::Bump::new();
271///
272/// assert_eq!(bump.allocation_limit(), None);
273/// bump.set_allocation_limit(Some(0));
274///
275/// assert!(bump.try_alloc(5).is_err());
276///
277/// bump.set_allocation_limit(Some(6));
278///
279/// assert_eq!(bump.allocation_limit(), Some(6));
280///
281/// bump.set_allocation_limit(None);
282///
283/// assert_eq!(bump.allocation_limit(), None);
284/// ```
285///
286/// #### Warning
287///
288 /// For backwards-compatibility reasons, allocations that fail because the
289 /// allocation limit was reached are not reported any differently than
290 /// allocations that fail due to resource exhaustion.
291#[derive(Debug)]
292pub struct Bump<const MIN_ALIGN: usize = 1> {
293 // The current chunk we are bump allocating within.
294 current_chunk_footer: Cell<NonNull<ChunkFooter>>,
295 allocation_limit: Cell<Option<usize>>,
296}
297
298#[repr(C)]
299#[derive(Debug)]
300struct ChunkFooter {
301 // Pointer to the start of this chunk allocation. This footer is always at
302 // the end of the chunk.
303 data: NonNull<u8>,
304
305 // The layout of this chunk's allocation.
306 layout: Layout,
307
308 // Link to the previous chunk.
309 //
310 // Note that the last node in the `prev` linked list is the canonical empty
311 // chunk, whose `prev` link points to itself.
312 prev: Cell<NonNull<ChunkFooter>>,
313
314 // Bump allocation finger that is always in the range `self.data..=self`.
315 ptr: Cell<NonNull<u8>>,
316
317 // The bytes allocated in all chunks so far. The canonical empty chunk has
318 // a size of 0; for all other chunks, `allocated_bytes` is the usable size
319 // of the current chunk plus the `allocated_bytes` of the `prev` chunk.
321 allocated_bytes: usize,
322}
323
324/// A wrapper type for the canonical, statically allocated empty chunk.
325///
326/// For the canonical empty chunk to be `static`, its type must be `Sync`, which
327/// is the purpose of this wrapper type. This is safe because the empty chunk is
328/// immutable and never actually modified.
329#[repr(transparent)]
330struct EmptyChunkFooter(ChunkFooter);
331
332unsafe impl Sync for EmptyChunkFooter {}
333
334static EMPTY_CHUNK: EmptyChunkFooter = EmptyChunkFooter(ChunkFooter {
335 // This chunk is empty (except the footer itself).
336 layout: Layout::new::<ChunkFooter>(),
337
338 // The start of the (empty) allocatable region for this chunk is itself.
339 data: unsafe { NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8) },
340
341 // The end of the (empty) allocatable region for this chunk is also itself.
342 ptr: Cell::new(unsafe {
343 NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8)
344 }),
345
346 // Invariant: the last chunk footer in all `ChunkFooter::prev` linked lists
347 // is the empty chunk footer, whose `prev` points to itself.
348 prev: Cell::new(unsafe {
349 NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut ChunkFooter)
350 }),
351
352 // Empty chunks count as 0 allocated bytes in an arena.
353 allocated_bytes: 0,
354});
355
356impl EmptyChunkFooter {
357 fn get(&'static self) -> NonNull<ChunkFooter> {
358 NonNull::from(&self.0)
359 }
360}
361
362impl ChunkFooter {
363 // Returns the start and length of the currently allocated region of this
364 // chunk.
365 fn as_raw_parts(&self) -> (*const u8, usize) {
366 let data = self.data.as_ptr() as *const u8;
367 let ptr = self.ptr.get().as_ptr() as *const u8;
368 debug_assert!(data <= ptr);
369 debug_assert!(ptr <= self as *const ChunkFooter as *const u8);
370 let len = unsafe { (self as *const ChunkFooter as *const u8).offset_from(ptr) as usize };
371 (ptr, len)
372 }
373
374 /// Is this chunk the last empty chunk?
375 fn is_empty(&self) -> bool {
376 ptr::eq(self, EMPTY_CHUNK.get().as_ptr())
377 }
378}
379
380impl<const MIN_ALIGN: usize> Default for Bump<MIN_ALIGN> {
381 fn default() -> Self {
382 Self::with_min_align()
383 }
384}
385
386impl<const MIN_ALIGN: usize> Drop for Bump<MIN_ALIGN> {
387 fn drop(&mut self) {
388 unsafe {
389 dealloc_chunk_list(self.current_chunk_footer.get());
390 }
391 }
392}
393
394#[inline]
395unsafe fn dealloc_chunk_list(mut footer: NonNull<ChunkFooter>) {
396 while !footer.as_ref().is_empty() {
397 let f = footer;
398 footer = f.as_ref().prev.get();
399 dealloc(f.as_ref().data.as_ptr(), f.as_ref().layout);
400 }
401}
402
403// `Bump`s are safe to send between threads because nothing aliases its owned
404// chunks until you start allocating from it. But by the time you allocate from
405// it, the returned references to allocations borrow the `Bump` and therefore
406// prevent sending the `Bump` across threads until the borrows end.
407unsafe impl<const MIN_ALIGN: usize> Send for Bump<MIN_ALIGN> {}
408
409#[inline]
410fn is_pointer_aligned_to<T>(pointer: *mut T, align: usize) -> bool {
411 debug_assert!(align.is_power_of_two());
412
413 let pointer = pointer as usize;
414 let pointer_aligned = round_down_to(pointer, align);
415 pointer == pointer_aligned
416}
417
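// For example, `round_up_to(13, 8)` is `Some(16)`: `13 + 7 == 20` (0b10100),
// and masking off the low bits with `!(8 - 1)` (i.e. `!0b111`) yields 16
// (0b10000).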
418#[inline]
419pub(crate) const fn round_up_to(n: usize, divisor: usize) -> Option<usize> {
420 debug_assert!(divisor > 0);
421 debug_assert!(divisor.is_power_of_two());
422 match n.checked_add(divisor - 1) {
423 Some(x) => Some(x & !(divisor - 1)),
424 None => None,
425 }
426}
427
428/// Like `round_up_to` but turns overflow into undefined behavior rather than
429/// returning `None`.
430#[inline]
431pub(crate) unsafe fn round_up_to_unchecked(n: usize, divisor: usize) -> usize {
432 match round_up_to(n, divisor) {
433 Some(x) => x,
434 None => {
435 debug_assert!(false, "round_up_to_unchecked failed");
436 core::hint::unreachable_unchecked()
437 }
438 }
439}
440
441#[inline]
442pub(crate) fn round_down_to(n: usize, divisor: usize) -> usize {
443 debug_assert!(divisor > 0);
444 debug_assert!(divisor.is_power_of_two());
445 n & !(divisor - 1)
446}
447
448/// Same as `round_down_to` but preserves pointer provenance.
449#[inline]
450pub(crate) fn round_mut_ptr_down_to(ptr: *mut u8, divisor: usize) -> *mut u8 {
451 debug_assert!(divisor > 0);
452 debug_assert!(divisor.is_power_of_two());
453 ptr.wrapping_sub(ptr as usize & (divisor - 1))
454}
455
456#[inline]
457pub(crate) unsafe fn round_mut_ptr_up_to_unchecked(ptr: *mut u8, divisor: usize) -> *mut u8 {
458 debug_assert!(divisor > 0);
459 debug_assert!(divisor.is_power_of_two());
460 let aligned = round_up_to_unchecked(ptr as usize, divisor);
461 let delta = aligned - (ptr as usize);
462 ptr.add(delta)
463}
464
465// The typical page size these days.
466//
467// Note that we don't need to exactly match page size for correctness, and it is
468// okay if this is smaller than the real page size in practice. It isn't worth
469// the portability concerns and lack of const propagation that dynamically
470// looking up the actual page size implies.
471const TYPICAL_PAGE_SIZE: usize = 0x1000;
472
473// We only support alignments of up to 16 bytes for iter_allocated_chunks.
474const SUPPORTED_ITER_ALIGNMENT: usize = 16;
475const CHUNK_ALIGN: usize = SUPPORTED_ITER_ALIGNMENT;
476const FOOTER_SIZE: usize = mem::size_of::<ChunkFooter>();
477
478 // Assert that `ChunkFooter`'s alignment is at most the supported chunk
479 // alignment. This will give a compile-time error if that is not the case.
480const _FOOTER_ALIGN_ASSERTION: () = {
481 assert!(mem::align_of::<ChunkFooter>() <= CHUNK_ALIGN);
482};
483
484// Maximum typical overhead per allocation imposed by allocators.
485const MALLOC_OVERHEAD: usize = 16;
486
487// This is the overhead from malloc, footer and alignment. For instance, if
488// we want to request a chunk of memory that has at least X bytes usable for
489 // allocations (where X is aligned to CHUNK_ALIGN), then we expect that,
490 // after adding a footer, malloc overhead and alignment, the chunk of memory
491// the allocator actually sets aside for us is X+OVERHEAD rounded up to the
492// nearest suitable size boundary.
493const OVERHEAD: usize = match round_up_to(MALLOC_OVERHEAD + FOOTER_SIZE, CHUNK_ALIGN) {
494 Some(x) => x,
495 None => panic!(),
496};
497
498// The target size of our first allocation, including our overhead. The
499// available bump capacity will be smaller.
500const FIRST_ALLOCATION_GOAL: usize = 1 << 9;
501
502// The actual size of the first allocation is going to be a bit smaller than the
503 // goal. We need to make room for the footer, and we also need to take the
504// alignment into account. We're trying to avoid this kind of situation:
505// https://blog.mozilla.org/nnethercote/2011/08/05/clownshoes-available-in-sizes-2101-and-up/
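//
// As a rough worked example (assuming a typical 64-bit target where
// `ChunkFooter` is 48 bytes): `FOOTER_SIZE` is then 48, `OVERHEAD` is
// `round_up(16 + 48, 16) = 64`, and the usable size of the first chunk is
// `512 - 64 = 448` bytes.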
506const DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER: usize = FIRST_ALLOCATION_GOAL - OVERHEAD;
507
508/// The memory size and alignment details for a potential new chunk
509/// allocation.
510#[derive(Debug, Clone, Copy)]
511struct NewChunkMemoryDetails {
512 new_size_without_footer: usize,
513 align: usize,
514 size: usize,
515}
516
517 /// Wrapper around `Layout::from_size_align` that converts errors into `AllocErr`.
518#[inline]
519fn layout_from_size_align(size: usize, align: usize) -> Result<Layout, AllocErr> {
520 Layout::from_size_align(size, align).map_err(|_| AllocErr)
521}
522
523#[cold]
524#[inline(never)]
525fn allocation_size_overflow<T>() -> T {
526 panic!("requested allocation size overflowed")
527}
528
529// NB: We don't have constructors as methods on `impl<N> Bump<N>` that return
530// `Self` because then `rustc` can't infer the `N` if it isn't explicitly
531// provided, even though it has a default value. There doesn't seem to be a good
532// workaround, other than putting constructors on the `Bump<DEFAULT>`; even
533// `std` does this same thing with `HashMap`, for example.
534impl Bump<1> {
535 /// Construct a new arena to bump allocate into.
536 ///
537 /// ## Example
538 ///
539 /// ```
540 /// let bump = bumpalo::Bump::new();
541 /// # let _ = bump;
542 /// ```
543 pub fn new() -> Self {
544 Self::with_capacity(0)
545 }
546
547 /// Attempt to construct a new arena to bump allocate into.
548 ///
549 /// ## Example
550 ///
551 /// ```
552 /// let bump = bumpalo::Bump::try_new();
553 /// # let _ = bump.unwrap();
554 /// ```
555 pub fn try_new() -> Result<Self, AllocErr> {
556 Bump::try_with_capacity(0)
557 }
558
559 /// Construct a new arena with the specified byte capacity to bump allocate
560 /// into.
561 ///
562 /// ## Example
563 ///
564 /// ```
565 /// let bump = bumpalo::Bump::with_capacity(100);
566 /// # let _ = bump;
567 /// ```
568 ///
569 /// ## Panics
570 ///
571 /// Panics if allocating the initial capacity fails.
572 pub fn with_capacity(capacity: usize) -> Self {
573 Self::try_with_capacity(capacity).unwrap_or_else(|_| oom())
574 }
575
576 /// Attempt to construct a new arena with the specified byte capacity to
577 /// bump allocate into.
578 ///
579 /// Propagates errors when allocating the initial capacity.
580 ///
581 /// ## Example
582 ///
583 /// ```
584 /// # fn _foo() -> Result<(), bumpalo::AllocErr> {
585 /// let bump = bumpalo::Bump::try_with_capacity(100)?;
586 /// # let _ = bump;
587 /// # Ok(())
588 /// # }
589 /// ```
590 pub fn try_with_capacity(capacity: usize) -> Result<Self, AllocErr> {
591 Self::try_with_min_align_and_capacity(capacity)
592 }
593}
594
595impl<const MIN_ALIGN: usize> Bump<MIN_ALIGN> {
596 /// Create a new `Bump` that enforces a minimum alignment.
597 ///
598 /// The minimum alignment must be a power of two and no larger than `16`.
599 ///
600 /// Enforcing a minimum alignment can speed up allocation of objects with
601 /// alignment less than or equal to the minimum alignment. This comes at the
602 /// cost of introducing otherwise-unnecessary padding between allocations of
603 /// objects with alignment less than the minimum.
604 ///
605 /// # Example
606 ///
607 /// ```
608 /// type BumpAlign8 = bumpalo::Bump<8>;
609 /// let bump = BumpAlign8::with_min_align();
610 /// for x in 0..u8::MAX {
611 /// let x = bump.alloc(x);
612 /// assert_eq!((x as *mut _ as usize) % 8, 0, "x is aligned to 8");
613 /// }
614 /// ```
615 ///
616 /// # Panics
617 ///
618 /// Panics on invalid minimum alignments.
619 //
620 // Because of `rustc`'s poor type inference for default type/const
621 // parameters (see the comment above the `impl Bump` block with no const
622 // `MIN_ALIGN` parameter) and because we don't want to force everyone to
623 // specify a minimum alignment with `Bump::new()` et al, we have a separate
624 // constructor for specifying the minimum alignment.
625 pub fn with_min_align() -> Self {
626 assert!(
627 MIN_ALIGN.is_power_of_two(),
628 "MIN_ALIGN must be a power of two; found {MIN_ALIGN}"
629 );
630 assert!(
631 MIN_ALIGN <= CHUNK_ALIGN,
632 "MIN_ALIGN may not be larger than {CHUNK_ALIGN}; found {MIN_ALIGN}"
633 );
634
635 Bump {
636 current_chunk_footer: Cell::new(EMPTY_CHUNK.get()),
637 allocation_limit: Cell::new(None),
638 }
639 }
640
641 /// Create a new `Bump` that enforces a minimum alignment and starts with
642 /// room for at least `capacity` bytes.
643 ///
644 /// The minimum alignment must be a power of two and no larger than `16`.
645 ///
646 /// Enforcing a minimum alignment can speed up allocation of objects with
647 /// alignment less than or equal to the minimum alignment. This comes at the
648 /// cost of introducing otherwise-unnecessary padding between allocations of
649 /// objects with alignment less than the minimum.
650 ///
651 /// # Example
652 ///
653 /// ```
654 /// type BumpAlign8 = bumpalo::Bump<8>;
655 /// let mut bump = BumpAlign8::with_min_align_and_capacity(8 * 100);
656 /// for x in 0..100_u64 {
657 /// let x = bump.alloc(x);
658 /// assert_eq!((x as *mut _ as usize) % 8, 0, "x is aligned to 8");
659 /// }
660 /// assert_eq!(
661 /// bump.iter_allocated_chunks().count(), 1,
662 /// "initial chunk had capacity for all allocations",
663 /// );
664 /// ```
665 ///
666 /// # Panics
667 ///
668 /// Panics on invalid minimum alignments.
669 ///
670 /// Panics if allocating the initial capacity fails.
671 pub fn with_min_align_and_capacity(capacity: usize) -> Self {
672 Self::try_with_min_align_and_capacity(capacity).unwrap_or_else(|_| oom())
673 }
674
675 /// Create a new `Bump` that enforces a minimum alignment and starts with
676 /// room for at least `capacity` bytes.
677 ///
678 /// The minimum alignment must be a power of two and no larger than `16`.
679 ///
680 /// Enforcing a minimum alignment can speed up allocation of objects with
681 /// alignment less than or equal to the minimum alignment. This comes at the
682 /// cost of introducing otherwise-unnecessary padding between allocations of
683 /// objects with alignment less than the minimum.
684 ///
685 /// # Example
686 ///
687 /// ```
688 /// # fn _foo() -> Result<(), bumpalo::AllocErr> {
689 /// type BumpAlign8 = bumpalo::Bump<8>;
690 /// let mut bump = BumpAlign8::try_with_min_align_and_capacity(8 * 100)?;
691 /// for x in 0..100_u64 {
692 /// let x = bump.alloc(x);
693 /// assert_eq!((x as *mut _ as usize) % 8, 0, "x is aligned to 8");
694 /// }
695 /// assert_eq!(
696 /// bump.iter_allocated_chunks().count(), 1,
697 /// "initial chunk had capacity for all allocations",
698 /// );
699 /// # Ok(())
700 /// # }
701 /// ```
702 ///
703 /// # Panics
704 ///
705 /// Panics on invalid minimum alignments.
706 ///
707 /// Panics if allocating the initial capacity fails.
708 pub fn try_with_min_align_and_capacity(capacity: usize) -> Result<Self, AllocErr> {
709 assert!(
710 MIN_ALIGN.is_power_of_two(),
711 "MIN_ALIGN must be a power of two; found {MIN_ALIGN}"
712 );
713 assert!(
714 MIN_ALIGN <= CHUNK_ALIGN,
715 "MIN_ALIGN may not be larger than {CHUNK_ALIGN}; found {MIN_ALIGN}"
716 );
717
718 if capacity == 0 {
719 return Ok(Bump {
720 current_chunk_footer: Cell::new(EMPTY_CHUNK.get()),
721 allocation_limit: Cell::new(None),
722 });
723 }
724
725 let layout = layout_from_size_align(capacity, MIN_ALIGN)?;
726
727 let chunk_footer = unsafe {
728 Self::new_chunk(
729 Self::new_chunk_memory_details(None, layout).ok_or(AllocErr)?,
730 layout,
731 EMPTY_CHUNK.get(),
732 )
733 .ok_or(AllocErr)?
734 };
735
736 Ok(Bump {
737 current_chunk_footer: Cell::new(chunk_footer),
738 allocation_limit: Cell::new(None),
739 })
740 }
741
742 /// Get this bump arena's minimum alignment.
743 ///
744 /// All objects allocated in this arena get aligned to this value.
745 ///
746 /// ## Example
747 ///
748 /// ```
749 /// let bump2 = bumpalo::Bump::<2>::with_min_align();
750 /// assert_eq!(bump2.min_align(), 2);
751 ///
752 /// let bump4 = bumpalo::Bump::<4>::with_min_align();
753 /// assert_eq!(bump4.min_align(), 4);
754 /// ```
755 #[inline]
756 pub fn min_align(&self) -> usize {
757 MIN_ALIGN
758 }
759
760 /// The allocation limit for this arena in bytes.
761 ///
762 /// ## Example
763 ///
764 /// ```
765 /// let bump = bumpalo::Bump::with_capacity(0);
766 ///
767 /// assert_eq!(bump.allocation_limit(), None);
768 ///
769 /// bump.set_allocation_limit(Some(6));
770 ///
771 /// assert_eq!(bump.allocation_limit(), Some(6));
772 ///
773 /// bump.set_allocation_limit(None);
774 ///
775 /// assert_eq!(bump.allocation_limit(), None);
776 /// ```
777 pub fn allocation_limit(&self) -> Option<usize> {
778 self.allocation_limit.get()
779 }
780
781 /// Set the allocation limit in bytes for this arena.
782 ///
783 /// The allocation limit is only enforced when allocating new backing chunks for
784 /// a `Bump`. Updating the allocation limit will not affect existing allocations
785 /// or any future allocations within the `Bump`'s current chunk.
786 ///
787 /// ## Example
788 ///
789 /// ```
790 /// let bump = bumpalo::Bump::with_capacity(0);
791 ///
792 /// bump.set_allocation_limit(Some(0));
793 ///
794 /// assert!(bump.try_alloc(5).is_err());
795 /// ```
796 pub fn set_allocation_limit(&self, limit: Option<usize>) {
797 self.allocation_limit.set(limit);
798 }
799
800 /// How much headroom an arena has before it hits its allocation
801 /// limit.
802 fn allocation_limit_remaining(&self) -> Option<usize> {
803 self.allocation_limit.get().and_then(|allocation_limit| {
804 let allocated_bytes = self.allocated_bytes();
805 if allocated_bytes > allocation_limit {
806 None
807 } else {
808 Some(usize::abs_diff(allocation_limit, allocated_bytes))
809 }
810 })
811 }
812
813 /// Whether a request to allocate a new chunk with a given size for a given
814 /// requested layout will fit under the allocation limit set on a `Bump`.
815 fn chunk_fits_under_limit(
816 allocation_limit_remaining: Option<usize>,
817 new_chunk_memory_details: NewChunkMemoryDetails,
818 ) -> bool {
819 allocation_limit_remaining
820 .map(|allocation_limit_left| {
821 allocation_limit_left >= new_chunk_memory_details.new_size_without_footer
822 })
823 .unwrap_or(true)
824 }
825
826 /// Determine the memory details including final size, alignment and final
827 /// size without footer for a new chunk that would be allocated to fulfill
828 /// an allocation request.
829 fn new_chunk_memory_details(
830 new_size_without_footer: Option<usize>,
831 requested_layout: Layout,
832 ) -> Option<NewChunkMemoryDetails> {
833 // We must have `CHUNK_ALIGN` or better alignment...
834 let align = CHUNK_ALIGN
835 // and we have to have at least our configured minimum alignment...
836 .max(MIN_ALIGN)
837 // and make sure we satisfy the requested allocation's alignment.
838 .max(requested_layout.align());
839
840 let mut new_size_without_footer =
841 new_size_without_footer.unwrap_or(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
842
843 let requested_size =
844 round_up_to(requested_layout.size(), align).unwrap_or_else(allocation_size_overflow);
845 new_size_without_footer = new_size_without_footer.max(requested_size);
846
847 // We want our allocations to play nice with the memory allocator, and
848 // waste as little memory as possible. For small allocations, this means
849 // that the entire allocation including the chunk footer and malloc's
850 // internal overhead is as close to a power of two as we can go without
851 // going over. For larger allocations, we only need to get close to a
852 // page boundary without going over.
853 if new_size_without_footer < TYPICAL_PAGE_SIZE {
854 new_size_without_footer =
855 (new_size_without_footer + OVERHEAD).next_power_of_two() - OVERHEAD;
856 } else {
857 new_size_without_footer =
858 round_up_to(new_size_without_footer + OVERHEAD, TYPICAL_PAGE_SIZE)? - OVERHEAD;
859 }
860
861 debug_assert_eq!(align % CHUNK_ALIGN, 0);
862 debug_assert_eq!(new_size_without_footer % CHUNK_ALIGN, 0);
863 let size = new_size_without_footer
864 .checked_add(FOOTER_SIZE)
865 .unwrap_or_else(allocation_size_overflow);
866
867 Some(NewChunkMemoryDetails {
868 new_size_without_footer,
869 size,
870 align,
871 })
872 }
873
874 /// Allocate a new chunk and return its initialized footer.
875 ///
876 /// If given, `layouts` is a tuple of the current chunk size and the
877 /// layout of the allocation request that triggered us to fall back to
878 /// allocating a new chunk of memory.
879 unsafe fn new_chunk(
880 new_chunk_memory_details: NewChunkMemoryDetails,
881 requested_layout: Layout,
882 prev: NonNull<ChunkFooter>,
883 ) -> Option<NonNull<ChunkFooter>> {
884 let NewChunkMemoryDetails {
885 new_size_without_footer,
886 align,
887 size,
888 } = new_chunk_memory_details;
889
890 let layout = layout_from_size_align(size, align).ok()?;
891
892 debug_assert!(size >= requested_layout.size());
893
894 let data = alloc(layout);
895 let data = NonNull::new(data)?;
896
897 // The `ChunkFooter` is at the end of the chunk.
898 let footer_ptr = data.as_ptr().add(new_size_without_footer);
899 debug_assert_eq!((data.as_ptr() as usize) % align, 0);
900 debug_assert_eq!(footer_ptr as usize % CHUNK_ALIGN, 0);
901 let footer_ptr = footer_ptr as *mut ChunkFooter;
902
903 // The bump pointer is initialized to the end of the range we will bump
904 // out of, rounded down to the minimum alignment. It is the
905 // `NewChunkMemoryDetails` constructor's responsibility to ensure that
906 // even after this rounding we have enough non-zero capacity in the
907 // chunk.
908 let ptr = round_mut_ptr_down_to(footer_ptr.cast::<u8>(), MIN_ALIGN);
909 debug_assert_eq!(ptr as usize % MIN_ALIGN, 0);
910 debug_assert!(
911 data.as_ptr() < ptr,
912 "bump pointer {ptr:#p} should still be greater than or equal to the \
913 start of the bump chunk {data:#p}"
914 );
915 debug_assert_eq!(
916 (ptr as usize) - (data.as_ptr() as usize),
917 new_size_without_footer
918 );
919
920 let ptr = Cell::new(NonNull::new_unchecked(ptr));
921
922 // The `allocated_bytes` of a new chunk counts the total size
923 // of the chunks, not how much of the chunks are used.
924 let allocated_bytes = prev.as_ref().allocated_bytes + new_size_without_footer;
925
926 ptr::write(
927 footer_ptr,
928 ChunkFooter {
929 data,
930 layout,
931 prev: Cell::new(prev),
932 ptr,
933 allocated_bytes,
934 },
935 );
936
937 Some(NonNull::new_unchecked(footer_ptr))
938 }
939
940 /// Reset this bump allocator.
941 ///
942 /// Performs mass deallocation on everything allocated in this arena by
943 /// resetting the pointer into the underlying chunk of memory to the start
944 /// of the chunk. Does not run any `Drop` implementations on deallocated
945 /// objects; see [the top-level documentation](struct.Bump.html) for details.
946 ///
947 /// If this arena has allocated multiple chunks to bump allocate into, then
948 /// the excess chunks are returned to the global allocator.
949 ///
950 /// ## Example
951 ///
952 /// ```
953 /// let mut bump = bumpalo::Bump::new();
954 ///
955 /// // Allocate a bunch of things.
956 /// {
957 /// for i in 0..100 {
958 /// bump.alloc(i);
959 /// }
960 /// }
961 ///
962 /// // Reset the arena.
963 /// bump.reset();
964 ///
965 /// // Allocate some new things in the space previously occupied by the
966 /// // original things.
967 /// for j in 200..400 {
968 /// bump.alloc(j);
969 /// }
970 ///```
971 pub fn reset(&mut self) {
972 // Takes `&mut self` so `self` must be unique and there can't be any
973 // borrows active that would get invalidated by resetting.
974 unsafe {
975 if self.current_chunk_footer.get().as_ref().is_empty() {
976 return;
977 }
978
979 let mut cur_chunk = self.current_chunk_footer.get();
980
981 // Deallocate all chunks except the current one
982 let prev_chunk = cur_chunk.as_ref().prev.replace(EMPTY_CHUNK.get());
983 dealloc_chunk_list(prev_chunk);
984
985 // Reset the bump finger to the end of the chunk.
986 debug_assert!(
987 is_pointer_aligned_to(cur_chunk.as_ptr(), MIN_ALIGN),
988 "bump pointer {cur_chunk:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}"
989 );
990 cur_chunk.as_ref().ptr.set(cur_chunk.cast());
991
992 // Reset the allocated size of the chunk.
993 cur_chunk.as_mut().allocated_bytes = cur_chunk.as_ref().layout.size() - FOOTER_SIZE;
994
995 debug_assert!(
996 self.current_chunk_footer
997 .get()
998 .as_ref()
999 .prev
1000 .get()
1001 .as_ref()
1002 .is_empty(),
1003 "We should only have a single chunk"
1004 );
1005 debug_assert_eq!(
1006 self.current_chunk_footer.get().as_ref().ptr.get(),
1007 self.current_chunk_footer.get().cast(),
1008 "Our chunk's bump finger should be reset to the start of its allocation"
1009 );
1010 }
1011 }
1012
1013 /// Allocate an object in this `Bump` and return an exclusive reference to
1014 /// it.
1015 ///
1016 /// ## Panics
1017 ///
1018 /// Panics if reserving space for `T` fails.
1019 ///
1020 /// ## Example
1021 ///
1022 /// ```
1023 /// let bump = bumpalo::Bump::new();
1024 /// let x = bump.alloc("hello");
1025 /// assert_eq!(*x, "hello");
1026 /// ```
1027 #[inline(always)]
1028 pub fn alloc<T>(&self, val: T) -> &mut T {
1029 self.alloc_with(|| val)
1030 }
1031
1032 /// Try to allocate an object in this `Bump` and return an exclusive
1033 /// reference to it.
1034 ///
1035 /// ## Errors
1036 ///
1037 /// Errors if reserving space for `T` fails.
1038 ///
1039 /// ## Example
1040 ///
1041 /// ```
1042 /// let bump = bumpalo::Bump::new();
1043 /// let x = bump.try_alloc("hello");
1044 /// assert_eq!(x, Ok(&mut "hello"));
1045 /// ```
1046 #[inline(always)]
1047 pub fn try_alloc<T>(&self, val: T) -> Result<&mut T, AllocErr> {
1048 self.try_alloc_with(|| val)
1049 }
1050
1051 /// Pre-allocates space for an object in this `Bump`, initializes it using
1052 /// the closure, then returns an exclusive reference to it.
1053 ///
1054 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
1055 /// discussion on the differences between the `_with` suffixed methods and
1056 /// those methods without it, their performance characteristics, and when
1057 /// you might or might not choose a `_with` suffixed method.
1058 ///
1059 /// ## Panics
1060 ///
1061 /// Panics if reserving space for `T` fails.
1062 ///
1063 /// ## Example
1064 ///
1065 /// ```
1066 /// let bump = bumpalo::Bump::new();
1067 /// let x = bump.alloc_with(|| "hello");
1068 /// assert_eq!(*x, "hello");
1069 /// ```
1070 #[inline(always)]
1071 pub fn alloc_with<F, T>(&self, f: F) -> &mut T
1072 where
1073 F: FnOnce() -> T,
1074 {
1075 #[inline(always)]
1076 unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
1077 where
1078 F: FnOnce() -> T,
1079 {
1080 // This function is translated as:
1081 // - allocate space for a T on the stack
1082 // - call f() with the return value being put onto this stack space
1083 // - memcpy from the stack to the heap
1084 //
1085 // Ideally we want LLVM to always realize that doing a stack
1086 // allocation is unnecessary and optimize the code so it writes
1087 // directly into the heap instead. It seems we get it to realize
1088 // this most consistently if we put this critical line into its
1089 // own function instead of inlining it into the surrounding code.
1090 ptr::write(ptr, f());
1091 }
1092
1093 let layout = Layout::new::<T>();
1094
1095 unsafe {
1096 let p = self.alloc_layout(layout);
1097 let p = p.as_ptr() as *mut T;
1098 inner_writer(p, f);
1099 &mut *p
1100 }
1101 }
1102
1103 /// Tries to pre-allocate space for an object in this `Bump`, initializes
1104 /// it using the closure, then returns an exclusive reference to it.
1105 ///
1106 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
1107 /// discussion on the differences between the `_with` suffixed methods and
1108 /// those methods without it, their performance characteristics, and when
1109 /// you might or might not choose a `_with` suffixed method.
1110 ///
1111 /// ## Errors
1112 ///
1113 /// Errors if reserving space for `T` fails.
1114 ///
1115 /// ## Example
1116 ///
1117 /// ```
1118 /// let bump = bumpalo::Bump::new();
1119 /// let x = bump.try_alloc_with(|| "hello");
1120 /// assert_eq!(x, Ok(&mut "hello"));
1121 /// ```
1122 #[inline(always)]
1123 pub fn try_alloc_with<F, T>(&self, f: F) -> Result<&mut T, AllocErr>
1124 where
1125 F: FnOnce() -> T,
1126 {
1127 #[inline(always)]
1128 unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
1129 where
1130 F: FnOnce() -> T,
1131 {
1132 // This function is translated as:
1133 // - allocate space for a T on the stack
1134 // - call f() with the return value being put onto this stack space
1135 // - memcpy from the stack to the heap
1136 //
1137 // Ideally we want LLVM to always realize that doing a stack
1138 // allocation is unnecessary and optimize the code so it writes
1139 // directly into the heap instead. It seems we get it to realize
1140 // this most consistently if we put this critical line into its
1141 // own function instead of inlining it into the surrounding code.
1142 ptr::write(ptr, f());
1143 }
1144
1145 //SAFETY: Self-contained:
1146 // `p` is allocated for `T` and then a `T` is written.
1147 let layout = Layout::new::<T>();
1148 let p = self.try_alloc_layout(layout)?;
1149 let p = p.as_ptr() as *mut T;
1150
1151 unsafe {
1152 inner_writer(p, f);
1153 Ok(&mut *p)
1154 }
1155 }
1156
1157 /// Pre-allocates space for a [`Result`] in this `Bump`, initializes it using
1158 /// the closure, then returns an exclusive reference to its `T` if [`Ok`].
1159 ///
1160 /// Iff the allocation fails, the closure is not run.
1161 ///
1162 /// Iff [`Err`], an allocator rewind is *attempted* and the `E` instance is
1163 /// moved out of the allocator to be consumed or dropped as normal.
1164 ///
1165 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
1166 /// discussion on the differences between the `_with` suffixed methods and
1167 /// those methods without it, their performance characteristics, and when
1168 /// you might or might not choose a `_with` suffixed method.
1169 ///
1170 /// For caveats specific to fallible initialization, see
1171 /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix).
1172 ///
1173 /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
1174 /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
1175 /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
1176 ///
1177 /// ## Errors
1178 ///
1179 /// Iff the allocation succeeds but `f` fails, that error is forwarded by value.
1180 ///
1181 /// ## Panics
1182 ///
1183 /// Panics if reserving space for `Result<T, E>` fails.
1184 ///
1185 /// ## Example
1186 ///
1187 /// ```
1188 /// let bump = bumpalo::Bump::new();
1189 /// let x = bump.alloc_try_with(|| Ok("hello"))?;
1190 /// assert_eq!(*x, "hello");
1191 /// # Result::<_, ()>::Ok(())
1192 /// ```
1193 #[inline(always)]
1194 pub fn alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, E>
1195 where
1196 F: FnOnce() -> Result<T, E>,
1197 {
1198 let rewind_footer = self.current_chunk_footer.get();
1199 let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
1200 let mut inner_result_ptr = NonNull::from(self.alloc_with(f));
1201 match unsafe { inner_result_ptr.as_mut() } {
1202 Ok(t) => Ok(unsafe {
1203 //SAFETY:
1204 // The `&mut Result<T, E>` returned by `alloc_with` may be
1205 // lifetime-limited by `E`, but the derived `&mut T` still has
1206 // the same validity as in `alloc_with` since the error variant
1207 // is already ruled out here.
1208
1209 // We could conditionally truncate the allocation here, but
1210 // since it grows backwards, it seems unlikely that we'd get
1211 // any more than the `Result`'s discriminant this way, if
1212 // anything at all.
1213 &mut *(t as *mut _)
1214 }),
1215 Err(e) => unsafe {
1216 // If this result was the last allocation in this arena, we can
1217 // reclaim its space. In fact, sometimes we can do even better
1218 // than simply calling `dealloc` on the result pointer: we can
1219 // reclaim any alignment padding we might have added (which
1220 // `dealloc` cannot do) if we didn't allocate a new chunk for
1221 // this result.
1222 if self.is_last_allocation(inner_result_ptr.cast()) {
1223 let current_footer_p = self.current_chunk_footer.get();
1224 let current_ptr = &current_footer_p.as_ref().ptr;
1225 if current_footer_p == rewind_footer {
1226 // It's still the same chunk, so reset the bump pointer
1227 // to its original value upon entry to this method
1228 // (reclaiming any alignment padding we may have
1229 // added).
1230 current_ptr.set(rewind_ptr);
1231 } else {
1232 // We allocated a new chunk for this result.
1233 //
1234 // We know the result is the only allocation in this
1235 // chunk: Any additional allocations since the start of
1236 // this method could only have happened when running
1237 // the initializer function, which is called *after*
1238 // reserving space for this result. Therefore, since we
1239 // already determined via the check above that this
1240 // result was the last allocation, there must not have
1241 // been any other allocations, and this result is the
1242 // only allocation in this chunk.
1243 //
1244 // Because this is the only allocation in this chunk,
1245 // we can reset the chunk's bump finger to the start of
1246 // the chunk.
1247 current_ptr.set(current_footer_p.as_ref().data);
1248 }
1249 }
1250 //SAFETY:
1251 // As we received `E` semantically by value from `f`, we can
1252 // just copy that value here as long as we avoid a double-drop
1253 // (which can't happen as any specific references to the `E`'s
1254 // data in `self` are destroyed when this function returns).
1255 //
1256 // The order between this and the deallocation doesn't matter
1257 // because `Self: !Sync`.
1258 Err(ptr::read(e as *const _))
1259 },
1260 }
1261 }
1262
1263 /// Tries to pre-allocate space for a [`Result`] in this `Bump`,
1264 /// initializes it using the closure, then returns an exclusive reference
1265 /// to its `T` if all [`Ok`].
1266 ///
1267 /// Iff the allocation fails, the closure is not run.
1268 ///
1269 /// Iff the closure returns [`Err`], an allocator rewind is *attempted* and
1270 /// the `E` instance is moved out of the allocator to be consumed or dropped
1271 /// as normal.
1272 ///
1273 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
1274 /// discussion on the differences between the `_with` suffixed methods and
1275 /// those methods without it, their performance characteristics, and when
1276 /// you might or might not choose a `_with` suffixed method.
1277 ///
1278 /// For caveats specific to fallible initialization, see
1279 /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix).
1280 ///
1281 /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
1282 /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
1283 /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
1284 ///
1285 /// ## Errors
1286 ///
1287 /// Errors with the [`Alloc`](`AllocOrInitError::Alloc`) variant iff
1288 /// reserving space for `Result<T, E>` fails.
1289 ///
1290 /// Iff the allocation succeeds but `f` fails, that error is forwarded by
1291 /// value inside the [`Init`](`AllocOrInitError::Init`) variant.
1292 ///
1293 /// ## Example
1294 ///
1295 /// ```
1296 /// let bump = bumpalo::Bump::new();
1297 /// let x = bump.try_alloc_try_with(|| Ok("hello"))?;
1298 /// assert_eq!(*x, "hello");
1299 /// # Result::<_, bumpalo::AllocOrInitError<()>>::Ok(())
1300 /// ```
1301 #[inline(always)]
1302 pub fn try_alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, AllocOrInitError<E>>
1303 where
1304 F: FnOnce() -> Result<T, E>,
1305 {
1306 let rewind_footer = self.current_chunk_footer.get();
1307 let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
1308 let mut inner_result_ptr = NonNull::from(self.try_alloc_with(f)?);
1309 match unsafe { inner_result_ptr.as_mut() } {
1310 Ok(t) => Ok(unsafe {
1311 //SAFETY:
1312 // The `&mut Result<T, E>` returned by `alloc_with` may be
1313 // lifetime-limited by `E`, but the derived `&mut T` still has
1314 // the same validity as in `alloc_with` since the error variant
1315 // is already ruled out here.
1316
1317 // We could conditionally truncate the allocation here, but
1318 // since it grows backwards, it seems unlikely that we'd get
1319 // any more than the `Result`'s discriminant this way, if
1320 // anything at all.
1321 &mut *(t as *mut _)
1322 }),
1323 Err(e) => unsafe {
1324 // If this result was the last allocation in this arena, we can
1325 // reclaim its space. In fact, sometimes we can do even better
1326 // than simply calling `dealloc` on the result pointer: we can
1327 // reclaim any alignment padding we might have added (which
1328 // `dealloc` cannot do) if we didn't allocate a new chunk for
1329 // this result.
1330 if self.is_last_allocation(inner_result_ptr.cast()) {
1331 let current_footer_p = self.current_chunk_footer.get();
1332 let current_ptr = &current_footer_p.as_ref().ptr;
1333 if current_footer_p == rewind_footer {
1334 // It's still the same chunk, so reset the bump pointer
1335 // to its original value upon entry to this method
1336 // (reclaiming any alignment padding we may have
1337 // added).
1338 current_ptr.set(rewind_ptr);
1339 } else {
1340 // We allocated a new chunk for this result.
1341 //
1342 // We know the result is the only allocation in this
1343 // chunk: Any additional allocations since the start of
1344 // this method could only have happened when running
1345 // the initializer function, which is called *after*
1346 // reserving space for this result. Therefore, since we
1347 // already determined via the check above that this
1348 // result was the last allocation, there must not have
1349 // been any other allocations, and this result is the
1350 // only allocation in this chunk.
1351 //
1352 // Because this is the only allocation in this chunk,
1353 // we can reset the chunk's bump finger to the start of
1354 // the chunk.
1355 current_ptr.set(current_footer_p.as_ref().data);
1356 }
1357 }
1358 //SAFETY:
1359 // As we received `E` semantically by value from `f`, we can
1360 // just copy that value here as long as we avoid a double-drop
1361 // (which can't happen as any specific references to the `E`'s
1362 // data in `self` are destroyed when this function returns).
1363 //
1364 // The order between this and the deallocation doesn't matter
1365 // because `Self: !Sync`.
1366 Err(AllocOrInitError::Init(ptr::read(e as *const _)))
1367 },
1368 }
1369 }
1370
1371 /// `Copy` a slice into this `Bump` and return an exclusive reference to
1372 /// the copy.
1373 ///
1374 /// ## Panics
1375 ///
1376 /// Panics if reserving space for the slice fails.
1377 ///
1378 /// ## Example
1379 ///
1380 /// ```
1381 /// let bump = bumpalo::Bump::new();
1382 /// let x = bump.alloc_slice_copy(&[1, 2, 3]);
1383 /// assert_eq!(x, &[1, 2, 3]);
1384 /// ```
1385 #[inline(always)]
1386 pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T]
1387 where
1388 T: Copy,
1389 {
1390 let layout = Layout::for_value(src);
1391 let dst = self.alloc_layout(layout).cast::<T>();
1392
1393 unsafe {
1394 ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
1395 slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1396 }
1397 }
1398
1399 /// Like `alloc_slice_copy`, but does not panic in case of allocation failure.
1400 ///
1401 /// ## Example
1402 ///
1403 /// ```
1404 /// let bump = bumpalo::Bump::new();
1405 /// let x = bump.try_alloc_slice_copy(&[1, 2, 3]);
1406 /// assert_eq!(x, Ok(&mut[1, 2, 3] as &mut [_]));
1407 ///
1408 ///
1409 /// let bump = bumpalo::Bump::new();
1410 /// bump.set_allocation_limit(Some(4));
1411 /// let x = bump.try_alloc_slice_copy(&[1, 2, 3, 4, 5, 6]);
1412 /// assert_eq!(x, Err(bumpalo::AllocErr)); // too big
1413 /// ```
1414 #[inline(always)]
1415 pub fn try_alloc_slice_copy<T>(&self, src: &[T]) -> Result<&mut [T], AllocErr>
1416 where
1417 T: Copy,
1418 {
1419 let layout = Layout::for_value(src);
1420 let dst = self.try_alloc_layout(layout)?.cast::<T>();
1421 let result = unsafe {
1422 core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
1423 slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1424 };
1425 Ok(result)
1426 }
1427
1428 /// `Clone` a slice into this `Bump` and return an exclusive reference to
1429 /// the clone. Prefer [`alloc_slice_copy`](#method.alloc_slice_copy) if `T` is `Copy`.
1430 ///
1431 /// ## Panics
1432 ///
1433 /// Panics if reserving space for the slice fails.
1434 ///
1435 /// ## Example
1436 ///
1437 /// ```
1438 /// #[derive(Clone, Debug, Eq, PartialEq)]
1439 /// struct Sheep {
1440 /// name: String,
1441 /// }
1442 ///
1443 /// let originals = [
1444 /// Sheep { name: "Alice".into() },
1445 /// Sheep { name: "Bob".into() },
1446 /// Sheep { name: "Cathy".into() },
1447 /// ];
1448 ///
1449 /// let bump = bumpalo::Bump::new();
1450 /// let clones = bump.alloc_slice_clone(&originals);
1451 /// assert_eq!(originals, clones);
1452 /// ```
1453 #[inline(always)]
1454 pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T]
1455 where
1456 T: Clone,
1457 {
1458 let layout = Layout::for_value(src);
1459 let dst = self.alloc_layout(layout).cast::<T>();
1460
1461 unsafe {
1462 for (i, val) in src.iter().cloned().enumerate() {
1463 ptr::write(dst.as_ptr().add(i), val);
1464 }
1465
1466 slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1467 }
1468 }
1469
1470 /// Like `alloc_slice_clone` but does not panic on failure.
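///
/// A small usage sketch:
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let originals = vec![String::from("alpha"), String::from("beta")];
/// let clones = bump.try_alloc_slice_clone(&originals).unwrap();
/// assert_eq!(&clones[..], &originals[..]);
/// ```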
1471 #[inline(always)]
1472 pub fn try_alloc_slice_clone<T>(&self, src: &[T]) -> Result<&mut [T], AllocErr>
1473 where
1474 T: Clone,
1475 {
1476 let layout = Layout::for_value(src);
1477 let dst = self.try_alloc_layout(layout)?.cast::<T>();
1478
1479 unsafe {
1480 for (i, val) in src.iter().cloned().enumerate() {
1481 ptr::write(dst.as_ptr().add(i), val);
1482 }
1483
1484 Ok(slice::from_raw_parts_mut(dst.as_ptr(), src.len()))
1485 }
1486 }
1487
1488 /// `Copy` a string slice into this `Bump` and return an exclusive reference to it.
1489 ///
1490 /// ## Panics
1491 ///
1492 /// Panics if reserving space for the string fails.
1493 ///
1494 /// ## Example
1495 ///
1496 /// ```
1497 /// let bump = bumpalo::Bump::new();
1498 /// let hello = bump.alloc_str("hello world");
1499 /// assert_eq!("hello world", hello);
1500 /// ```
1501 #[inline(always)]
1502 pub fn alloc_str(&self, src: &str) -> &mut str {
1503 let buffer = self.alloc_slice_copy(src.as_bytes());
1504 unsafe {
1505 // This is OK, because it already came in as str, so it is guaranteed to be utf8
1506 str::from_utf8_unchecked_mut(buffer)
1507 }
1508 }
1509
1510 /// Same as `alloc_str` but does not panic on failure.
1511 ///
1512 /// ## Example
1513 ///
1514 /// ```
1515 /// let bump = bumpalo::Bump::new();
1516 /// let hello = bump.try_alloc_str("hello world").unwrap();
1517 /// assert_eq!("hello world", hello);
1518 ///
1519 ///
1520 /// let bump = bumpalo::Bump::new();
1521 /// bump.set_allocation_limit(Some(5));
1522 /// let hello = bump.try_alloc_str("hello world");
1523 /// assert_eq!(Err(bumpalo::AllocErr), hello);
1524 /// ```
1525 #[inline(always)]
1526 pub fn try_alloc_str(&self, src: &str) -> Result<&mut str, AllocErr> {
1527 let buffer = self.try_alloc_slice_copy(src.as_bytes())?;
1528 unsafe {
1529 // This is OK, because it already came in as str, so it is guaranteed to be utf8
1530 Ok(str::from_utf8_unchecked_mut(buffer))
1531 }
1532 }
1533
1534 /// Allocates a new slice of size `len` into this `Bump` and returns an
1535 /// exclusive reference to the copy.
1536 ///
1537 /// The elements of the slice are initialized using the supplied closure.
1538 /// The closure argument is the position in the slice.
1539 ///
1540 /// ## Panics
1541 ///
1542 /// Panics if reserving space for the slice fails.
1543 ///
1544 /// ## Example
1545 ///
1546 /// ```
1547 /// let bump = bumpalo::Bump::new();
1548 /// let x = bump.alloc_slice_fill_with(5, |i| 5 * (i + 1));
1549 /// assert_eq!(x, &[5, 10, 15, 20, 25]);
1550 /// ```
1551 #[inline(always)]
1552 pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T]
1553 where
1554 F: FnMut(usize) -> T,
1555 {
1556 let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
1557 let dst = self.alloc_layout(layout).cast::<T>();
1558
1559 unsafe {
1560 for i in 0..len {
1561 ptr::write(dst.as_ptr().add(i), f(i));
1562 }
1563
1564 let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
1565 debug_assert_eq!(Layout::for_value(result), layout);
1566 result
1567 }
1568 }
1569
1570 /// Allocates a new slice of size `len` into this `Bump` and returns an
1571 /// exclusive reference to the copy, failing if the closure returns an `Err`.
1572 ///
1573 /// The elements of the slice are initialized using the supplied closure.
1574 /// The closure argument is the position in the slice.
1575 ///
1576 /// ## Panics
1577 ///
1578 /// Panics if reserving space for the slice fails.
1579 ///
1580 /// ## Example
1581 ///
1582 /// ```
1583 /// let bump = bumpalo::Bump::new();
1584 /// let x: Result<&mut [usize], ()> = bump.alloc_slice_try_fill_with(5, |i| Ok(5 * i));
1585 /// assert_eq!(x, Ok(bump.alloc_slice_copy(&[0, 5, 10, 15, 20])));
1586 /// ```
1587 ///
1588 /// ```
1589 /// let bump = bumpalo::Bump::new();
1590 /// let x: Result<&mut [usize], ()> = bump.alloc_slice_try_fill_with(
1591 /// 5,
1592 /// |n| if n == 2 { Err(()) } else { Ok(n) }
1593 /// );
1594 /// assert_eq!(x, Err(()));
1595 /// ```
1596 #[inline(always)]
1597 pub fn alloc_slice_try_fill_with<T, F, E>(&self, len: usize, mut f: F) -> Result<&mut [T], E>
1598 where
1599 F: FnMut(usize) -> Result<T, E>,
1600 {
1601 let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
1602 let base_ptr = self.alloc_layout(layout);
1603 let dst = base_ptr.cast::<T>();
1604
1605 unsafe {
1606 for i in 0..len {
1607 match f(i) {
1608 Ok(el) => ptr::write(dst.as_ptr().add(i), el),
1609 Err(e) => {
1610 self.dealloc(base_ptr, layout);
1611 return Err(e);
1612 }
1613 }
1614 }
1615
1616 let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
1617 debug_assert_eq!(Layout::for_value(result), layout);
1618 Ok(result)
1619 }
1620 }
1621
1622 /// Allocates a new slice of size `len` into this `Bump` and returns an
1623 /// exclusive reference to the new slice, or `Err(AllocErr)` if reserving space fails.
1624 ///
1625 /// The elements of the slice are initialized using the supplied closure.
1626 /// The closure argument is the position in the slice.
1627 ///
1628 /// ## Example
1629 ///
1630 /// ```
1631 /// let bump = bumpalo::Bump::new();
1632 /// let x = bump.try_alloc_slice_fill_with(5, |i| 5 * (i + 1));
1633 /// assert_eq!(x, Ok(&mut[5usize, 10, 15, 20, 25] as &mut [_]));
1634 ///
1635 ///
1636 /// let bump = bumpalo::Bump::new();
1637 /// bump.set_allocation_limit(Some(4));
1638 /// let x = bump.try_alloc_slice_fill_with(10, |i| 5 * (i + 1));
1639 /// assert_eq!(x, Err(bumpalo::AllocErr));
1640 /// ```
1641 #[inline(always)]
1642 pub fn try_alloc_slice_fill_with<T, F>(
1643 &self,
1644 len: usize,
1645 mut f: F,
1646 ) -> Result<&mut [T], AllocErr>
1647 where
1648 F: FnMut(usize) -> T,
1649 {
1650 let layout = Layout::array::<T>(len).map_err(|_| AllocErr)?;
1651 let dst = self.try_alloc_layout(layout)?.cast::<T>();
1652
1653 unsafe {
1654 for i in 0..len {
1655 ptr::write(dst.as_ptr().add(i), f(i));
1656 }
1657
1658 let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
1659 debug_assert_eq!(Layout::for_value(result), layout);
1660 Ok(result)
1661 }
1662 }
1663
1664 /// Allocates a new slice of size `len` into this `Bump` and returns an
1665 /// exclusive reference to the new slice.
1666 ///
1667 /// All elements of the slice are initialized to `value`.
1668 ///
1669 /// ## Panics
1670 ///
1671 /// Panics if reserving space for the slice fails.
1672 ///
1673 /// ## Example
1674 ///
1675 /// ```
1676 /// let bump = bumpalo::Bump::new();
1677 /// let x = bump.alloc_slice_fill_copy(5, 42);
1678 /// assert_eq!(x, &[42, 42, 42, 42, 42]);
1679 /// ```
1680 #[inline(always)]
1681 pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
1682 self.alloc_slice_fill_with(len, |_| value)
1683 }
1684
1685 /// Same as `alloc_slice_fill_copy` but does not panic on failure.
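///
/// ## Example
///
/// A minimal sketch of the fallible path, mirroring the
/// `alloc_slice_fill_copy` example above:
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let x = bump.try_alloc_slice_fill_copy(5, 42).unwrap();
/// assert_eq!(x, &[42, 42, 42, 42, 42]);
///
/// let bump = bumpalo::Bump::new();
/// bump.set_allocation_limit(Some(4));
/// let y = bump.try_alloc_slice_fill_copy(10, 42u32);
/// assert_eq!(y, Err(bumpalo::AllocErr));
/// ```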
1686 #[inline(always)]
1687 pub fn try_alloc_slice_fill_copy<T: Copy>(
1688 &self,
1689 len: usize,
1690 value: T,
1691 ) -> Result<&mut [T], AllocErr> {
1692 self.try_alloc_slice_fill_with(len, |_| value)
1693 }
1694
1695 /// Allocates a new slice of size `len` into this `Bump` and returns an
1696 /// exclusive reference to the new slice.
1697 ///
1698 /// All elements of the slice are initialized to `value.clone()`.
1699 ///
1700 /// ## Panics
1701 ///
1702 /// Panics if reserving space for the slice fails.
1703 ///
1704 /// ## Example
1705 ///
1706 /// ```
1707 /// let bump = bumpalo::Bump::new();
1708 /// let s: String = "Hello Bump!".to_string();
1709 /// let x: &[String] = bump.alloc_slice_fill_clone(2, &s);
1710 /// assert_eq!(x.len(), 2);
1711 /// assert_eq!(&x[0], &s);
1712 /// assert_eq!(&x[1], &s);
1713 /// ```
1714 #[inline(always)]
1715 pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] {
1716 self.alloc_slice_fill_with(len, |_| value.clone())
1717 }
1718
1719 /// Like `alloc_slice_fill_clone` but does not panic on failure.
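///
/// ## Example
///
/// A minimal sketch, analogous to the `alloc_slice_fill_clone` example above:
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let s: String = "Hello Bump!".to_string();
/// let x: &[String] = bump.try_alloc_slice_fill_clone(2, &s).unwrap();
/// assert_eq!(x.len(), 2);
/// assert_eq!(&x[0], &s);
/// ```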
1720 #[inline(always)]
1721 pub fn try_alloc_slice_fill_clone<T: Clone>(
1722 &self,
1723 len: usize,
1724 value: &T,
1725 ) -> Result<&mut [T], AllocErr> {
1726 self.try_alloc_slice_fill_with(len, |_| value.clone())
1727 }
1728
1729 /// Allocates a new slice of size `iter.len()` into this `Bump` and returns an
1730 /// exclusive reference to the new slice.
1731 ///
1732 /// The elements are initialized using the supplied iterator.
1733 ///
1734 /// ## Panics
1735 ///
1736 /// Panics if reserving space for the slice fails, or if the supplied
1737 /// iterator returns fewer elements than it promised.
1738 ///
1739 /// ## Example
1740 ///
1741 /// ```
1742 /// let bump = bumpalo::Bump::new();
1743 /// let x: &[i32] = bump.alloc_slice_fill_iter([2, 3, 5].iter().cloned().map(|i| i * i));
1744 /// assert_eq!(x, [4, 9, 25]);
1745 /// ```
1746 #[inline(always)]
1747 pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T]
1748 where
1749 I: IntoIterator<Item = T>,
1750 I::IntoIter: ExactSizeIterator,
1751 {
1752 let mut iter = iter.into_iter();
1753 self.alloc_slice_fill_with(iter.len(), |_| {
1754 iter.next().expect("Iterator supplied too few elements")
1755 })
1756 }
1757
1758 /// Allocates a new slice of size `iter.len()` into this `Bump` and returns an
1759 /// exclusive reference to the new slice, failing if the iterator yields an `Err`.
1760 ///
1761 /// The elements are initialized using the supplied iterator.
1762 ///
1763 /// ## Panics
1764 ///
1765 /// Panics if reserving space for the slice fails, or if the supplied
1766 /// iterator returns fewer elements than it promised.
1767 ///
1768 /// ## Examples
1769 ///
1770 /// ```
1771 /// let bump = bumpalo::Bump::new();
1772 /// let x: Result<&mut [i32], ()> = bump.alloc_slice_try_fill_iter(
1773 /// [2, 3, 5].iter().cloned().map(|i| Ok(i * i))
1774 /// );
1775 /// assert_eq!(x, Ok(bump.alloc_slice_copy(&[4, 9, 25])));
1776 /// ```
1777 ///
1778 /// ```
1779 /// let bump = bumpalo::Bump::new();
1780 /// let x: Result<&mut [i32], ()> = bump.alloc_slice_try_fill_iter(
1781 /// [Ok(2), Err(()), Ok(5)].iter().cloned()
1782 /// );
1783 /// assert_eq!(x, Err(()));
1784 /// ```
1785 #[inline(always)]
1786 pub fn alloc_slice_try_fill_iter<T, I, E>(&self, iter: I) -> Result<&mut [T], E>
1787 where
1788 I: IntoIterator<Item = Result<T, E>>,
1789 I::IntoIter: ExactSizeIterator,
1790 {
1791 let mut iter = iter.into_iter();
1792 self.alloc_slice_try_fill_with(iter.len(), |_| {
1793 iter.next().expect("Iterator supplied too few elements")
1794 })
1795 }
1796
1797 /// Allocates a new slice of size `iter.len()` into this `Bump` and returns an
1798 /// exclusive reference to the new slice. Does not panic on allocation failure.
1799 ///
1800 /// The elements are initialized using the supplied iterator.
1801 ///
1802 /// ## Example
1803 ///
1804 /// ```
1805 /// let bump = bumpalo::Bump::new();
1806 /// let x: &[i32] = bump.try_alloc_slice_fill_iter([2, 3, 5]
1807 /// .iter().cloned().map(|i| i * i)).unwrap();
1808 /// assert_eq!(x, [4, 9, 25]);
1809 /// ```
1810 #[inline(always)]
1811 pub fn try_alloc_slice_fill_iter<T, I>(&self, iter: I) -> Result<&mut [T], AllocErr>
1812 where
1813 I: IntoIterator<Item = T>,
1814 I::IntoIter: ExactSizeIterator,
1815 {
1816 let mut iter = iter.into_iter();
1817 self.try_alloc_slice_fill_with(iter.len(), |_| {
1818 iter.next().expect("Iterator supplied too few elements")
1819 })
1820 }
1821
1822 /// Allocates a new slice of size `len` into this `Bump` and returns an
1823 /// exclusive reference to the new slice.
1824 ///
1825 /// All elements of the slice are initialized to [`T::default()`].
1826 ///
1827 /// [`T::default()`]: https://doc.rust-lang.org/std/default/trait.Default.html#tymethod.default
1828 ///
1829 /// ## Panics
1830 ///
1831 /// Panics if reserving space for the slice fails.
1832 ///
1833 /// ## Example
1834 ///
1835 /// ```
1836 /// let bump = bumpalo::Bump::new();
1837 /// let x = bump.alloc_slice_fill_default::<u32>(5);
1838 /// assert_eq!(x, &[0, 0, 0, 0, 0]);
1839 /// ```
1840 #[inline(always)]
1841 pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] {
1842 self.alloc_slice_fill_with(len, |_| T::default())
1843 }
1844
1845 /// Like `alloc_slice_fill_default` but does not panic on failure.
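///
/// ## Example
///
/// A minimal sketch, analogous to the `alloc_slice_fill_default` example above:
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let x = bump.try_alloc_slice_fill_default::<u32>(5).unwrap();
/// assert_eq!(x, &[0, 0, 0, 0, 0]);
/// ```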
1846 #[inline(always)]
1847 pub fn try_alloc_slice_fill_default<T: Default>(
1848 &self,
1849 len: usize,
1850 ) -> Result<&mut [T], AllocErr> {
1851 self.try_alloc_slice_fill_with(len, |_| T::default())
1852 }
1853
1854 /// Allocate space for an object with the given `Layout`.
1855 ///
1856 /// The returned pointer points at uninitialized memory, and should be
1857 /// initialized with
1858 /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html).
1859 ///
1860 /// # Panics
1861 ///
1862 /// Panics if reserving space matching `layout` fails.
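///
/// # Example
///
/// An illustrative sketch of initializing the returned memory by hand; the
/// cast and the `ptr::write` are the caller's responsibility:
///
/// ```
/// use core::alloc::Layout;
///
/// let bump = bumpalo::Bump::new();
/// let p = bump.alloc_layout(Layout::new::<u64>());
/// unsafe {
///     let p = p.cast::<u64>();
///     core::ptr::write(p.as_ptr(), 42u64);
///     assert_eq!(*p.as_ptr(), 42);
/// }
/// ```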
1863 #[inline(always)]
1864 pub fn alloc_layout(&self, layout: Layout) -> NonNull<u8> {
1865 self.try_alloc_layout(layout).unwrap_or_else(|_| oom())
1866 }
1867
1868 /// Attempts to allocate space for an object with the given `Layout` or else returns
1869 /// an `Err`.
1870 ///
1871 /// The returned pointer points at uninitialized memory, and should be
1872 /// initialized with
1873 /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html).
1874 ///
1875 /// # Errors
1876 ///
1877 /// Errors if reserving space matching `layout` fails.
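///
/// # Example
///
/// A minimal sketch of the fallible path, using an allocation limit to force
/// an error:
///
/// ```
/// use core::alloc::Layout;
///
/// let bump = bumpalo::Bump::new();
/// assert!(bump.try_alloc_layout(Layout::new::<u64>()).is_ok());
///
/// let bump = bumpalo::Bump::new();
/// bump.set_allocation_limit(Some(5));
/// let too_big = Layout::from_size_align(1024, 1).unwrap();
/// assert!(bump.try_alloc_layout(too_big).is_err());
/// ```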
1878 #[inline(always)]
1879 pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
1880 if let Some(p) = self.try_alloc_layout_fast(layout) {
1881 Ok(p)
1882 } else {
1883 self.alloc_layout_slow(layout).ok_or(AllocErr)
1884 }
1885 }
1886
1887 #[inline(always)]
1888 fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> {
1889 // We don't need to check for ZSTs here since they will automatically
1890 // be handled properly: the pointer will be bumped by zero bytes,
1891 // modulo alignment. This keeps the fast path optimized for non-ZSTs,
1892 // which are much more common.
1893 unsafe {
1894 let footer_ptr = self.current_chunk_footer.get();
1895 let footer = footer_ptr.as_ref();
1896
1897 let ptr = footer.ptr.get().as_ptr();
1898 let start = footer.data.as_ptr();
1899 debug_assert!(
1900 start <= ptr,
1901 "start pointer {start:#p} should be less than or equal to bump pointer {ptr:#p}"
1902 );
1903 debug_assert!(
1904 ptr <= footer_ptr.cast::<u8>().as_ptr(),
1905 "bump pointer {ptr:#p} should be less than or equal to footer pointer {footer_ptr:#p}"
1906 );
1907 debug_assert!(
1908 is_pointer_aligned_to(ptr, MIN_ALIGN),
1909 "bump pointer {ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}"
1910 );
1911
1912 // This `match` should be boiled away by LLVM: `MIN_ALIGN` is a
1913 // constant and the layout's alignment is also constant in practice
1914 // after inlining.
1915 let aligned_ptr = match layout.align().cmp(&MIN_ALIGN) {
1916 Ordering::Less => {
1917 // We need to round the size up to a multiple of `MIN_ALIGN`
1918 // to preserve the minimum alignment. This might overflow
1919 // since we cannot rely on `Layout`'s guarantees.
1920 let aligned_size = round_up_to(layout.size(), MIN_ALIGN)?;
1921
1922 let capacity = (ptr as usize) - (start as usize);
1923 if aligned_size > capacity {
1924 return None;
1925 }
1926
1927 ptr.wrapping_sub(aligned_size)
1928 }
1929 Ordering::Equal => {
1930 // `Layout` guarantees that rounding the size up to its
1931 // align cannot overflow (but does not guarantee that the
1932 // size is initially a multiple of the alignment, which is
1933 // why we need to do this rounding).
1934 let aligned_size = round_up_to_unchecked(layout.size(), layout.align());
1935
1936 let capacity = (ptr as usize) - (start as usize);
1937 if aligned_size > capacity {
1938 return None;
1939 }
1940
1941 ptr.wrapping_sub(aligned_size)
1942 }
1943 Ordering::Greater => {
1944 // `Layout` guarantees that rounding the size up to its
1945 // align cannot overflow (but does not guarantee that the
1946 // size is initially a multiple of the alignment, which is
1947 // why we need to do this rounding).
1948 let aligned_size = round_up_to_unchecked(layout.size(), layout.align());
1949
1950 let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align());
1951 let capacity = (aligned_ptr as usize).wrapping_sub(start as usize);
1952 if aligned_ptr < start || aligned_size > capacity {
1953 return None;
1954 }
1955
1956 aligned_ptr.wrapping_sub(aligned_size)
1957 }
1958 };
1959
1960 debug_assert!(
1961 is_pointer_aligned_to(aligned_ptr, layout.align()),
1962 "pointer {aligned_ptr:#p} should be aligned to layout alignment of {:#}",
1963 layout.align()
1964 );
1965 debug_assert!(
1966 is_pointer_aligned_to(aligned_ptr, MIN_ALIGN),
1967 "pointer {aligned_ptr:#p} should be aligned to minimum alignment of {:#}",
1968 MIN_ALIGN
1969 );
1970 debug_assert!(
1971 start <= aligned_ptr && aligned_ptr <= ptr,
1972 "pointer {aligned_ptr:#p} should be in range {start:#p}..{ptr:#p}"
1973 );
1974
1975 debug_assert!(!aligned_ptr.is_null());
1976 let aligned_ptr = NonNull::new_unchecked(aligned_ptr);
1977
1978 footer.ptr.set(aligned_ptr);
1979 Some(aligned_ptr)
1980 }
1981 }
1982
1983 /// Gets the remaining capacity in the current chunk (in bytes).
1984 ///
1985 /// ## Example
1986 ///
1987 /// ```
1988 /// use bumpalo::Bump;
1989 ///
1990 /// let bump = Bump::with_capacity(100);
1991 ///
1992 /// let capacity = bump.chunk_capacity();
1993 /// assert!(capacity >= 100);
1994 /// ```
1995 pub fn chunk_capacity(&self) -> usize {
1996 let current_footer = self.current_chunk_footer.get();
1997 let current_footer = unsafe { current_footer.as_ref() };
1998
1999 current_footer.ptr.get().as_ptr() as usize - current_footer.data.as_ptr() as usize
2000 }
2001
2002 /// Slow path allocation for when we need to allocate a new chunk from the
2003 /// parent bump set because there isn't enough room in our current chunk.
2004 #[inline(never)]
2005 #[cold]
2006 fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> {
2007 unsafe {
2008 let allocation_limit_remaining = self.allocation_limit_remaining();
2009
2010 // Get a new chunk from the global allocator.
2011 let current_footer = self.current_chunk_footer.get();
2012 let current_layout = current_footer.as_ref().layout;
2013
2014 // By default, we want our new chunk to be about twice as big
2015 // as the previous chunk. If the global allocator refuses it,
2016 // we halve the requested size until the allocation succeeds or the
2017 // size drops below the minimum new chunk size.
2018 let min_new_chunk_size = layout.size().max(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
2019 let mut base_size = (current_layout.size() - FOOTER_SIZE)
2020 .checked_mul(2)?
2021 .max(min_new_chunk_size);
2022 let chunk_memory_details = iter::from_fn(|| {
2023 let bypass_min_chunk_size_for_small_limits = matches!(self.allocation_limit(), Some(limit) if layout.size() < limit
2024 && base_size >= layout.size()
2025 && limit < DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER
2026 && self.allocated_bytes() == 0);
2027
2028 if base_size >= min_new_chunk_size || bypass_min_chunk_size_for_small_limits {
2029 let size = base_size;
2030 base_size /= 2;
2031 Self::new_chunk_memory_details(Some(size), layout)
2032 } else {
2033 None
2034 }
2035 });
2036
2037 let new_footer = chunk_memory_details
2038 .filter_map(|chunk_memory_details| {
2039 if Self::chunk_fits_under_limit(
2040 allocation_limit_remaining,
2041 chunk_memory_details,
2042 ) {
2043 Self::new_chunk(chunk_memory_details, layout, current_footer)
2044 } else {
2045 None
2046 }
2047 })
2048 .next()?;
2049
2050 debug_assert_eq!(
2051 new_footer.as_ref().data.as_ptr() as usize % layout.align(),
2052 0
2053 );
2054
2055 // Set the new chunk as our new current chunk.
2056 self.current_chunk_footer.set(new_footer);
2057
2058 // And then we can rely on `try_alloc_layout_fast` to allocate
2059 // space within this chunk.
2060 let ptr = self.try_alloc_layout_fast(layout);
2061 debug_assert!(ptr.is_some());
2062 ptr
2063 }
2064 }
2065
2066 /// Returns an iterator over each chunk of allocated memory that
2067 /// this arena has bump allocated into.
2068 ///
2069 /// The chunks are returned ordered by allocation time, with the most
2070 /// recently allocated chunk being returned first, and the least recently
2071 /// allocated chunk being returned last.
2072 ///
2073 /// The values inside each chunk are also ordered by allocation time, with
2074 /// the most recent allocation being earlier in the slice, and the least
2075 /// recent allocation being towards the end of the slice.
2076 ///
2077 /// ## Safety
2078 ///
2079 /// Because this method takes `&mut self`, we know that the bump arena
2080 /// reference is unique and therefore there aren't any active references to
2081 /// any of the objects we've allocated in it either. This potential aliasing
2082 /// of exclusive references is one common footgun for unsafe code that we
2083 /// don't need to worry about here.
2084 ///
2085 /// However, there could be regions of uninitialized memory used as padding
2086 /// between allocations, which is why this iterator has items of type
2087 /// `[MaybeUninit<u8>]`, instead of simply `[u8]`.
2088 ///
2089 /// The only way to guarantee that there is no padding between allocations
2090 /// or within allocated objects is if all of these properties hold:
2091 ///
2092 /// 1. Every object allocated in this arena has the same alignment,
2093 /// and that alignment is at most 16.
2094 /// 2. Every object's size is a multiple of its alignment.
2095 /// 3. None of the objects allocated in this arena contain any internal
2096 /// padding.
2097 ///
2098 /// If you want to use this `iter_allocated_chunks` method, it is *your*
2099 /// responsibility to ensure that these properties hold before calling
2100 /// `MaybeUninit::assume_init` or otherwise reading the returned values.
2101 ///
2102 /// Finally, you must also ensure that any values allocated into the bump
2103 /// arena have not had their `Drop` implementations called on them,
2104 /// e.g. after dropping a [`bumpalo::boxed::Box<T>`][crate::boxed::Box].
2105 ///
2106 /// ## Example
2107 ///
2108 /// ```
2109 /// let mut bump = bumpalo::Bump::new();
2110 ///
2111 /// // Allocate a bunch of `i32`s in this bump arena, potentially causing
2112 /// // additional memory chunks to be reserved.
2113 /// for i in 0..10000 {
2114 /// bump.alloc(i);
2115 /// }
2116 ///
2117 /// // Iterate over each chunk we've bump allocated into. This is safe
2118 /// // because we have only allocated `i32`s in this arena, which fulfills
2119 /// // the above requirements.
2120 /// for ch in bump.iter_allocated_chunks() {
2121 /// println!("Used a chunk that is {} bytes long", ch.len());
2122 /// println!("The first byte is {:?}", unsafe {
2123 /// ch[0].assume_init()
2124 /// });
2125 /// }
2126 ///
2127 /// // Within a chunk, allocations are ordered from most recent to least
2128 /// // recent. If we allocated 'a', then 'b', then 'c', when we iterate
2129 /// // through the chunk's data, we get them in the order 'c', then 'b',
2130 /// // then 'a'.
2131 ///
2132 /// bump.reset();
2133 /// bump.alloc(b'a');
2134 /// bump.alloc(b'b');
2135 /// bump.alloc(b'c');
2136 ///
2137 /// assert_eq!(bump.iter_allocated_chunks().count(), 1);
2138 /// let chunk = bump.iter_allocated_chunks().nth(0).unwrap();
2139 /// assert_eq!(chunk.len(), 3);
2140 ///
2141 /// // Safe because we've only allocated `u8`s in this arena, which
2142 /// // fulfills the above requirements.
2143 /// unsafe {
2144 /// assert_eq!(chunk[0].assume_init(), b'c');
2145 /// assert_eq!(chunk[1].assume_init(), b'b');
2146 /// assert_eq!(chunk[2].assume_init(), b'a');
2147 /// }
2148 /// ```
2149 pub fn iter_allocated_chunks(&mut self) -> ChunkIter<'_, MIN_ALIGN> {
2150 // Safety: Ensured by mutable borrow of `self`.
2151 let raw = unsafe { self.iter_allocated_chunks_raw() };
2152 ChunkIter {
2153 raw,
2154 bump: PhantomData,
2155 }
2156 }
2157
2158 /// Returns an iterator over raw pointers to chunks of allocated memory that
2159 /// this arena has bump allocated into.
2160 ///
2161 /// This is an unsafe version of [`iter_allocated_chunks()`](Bump::iter_allocated_chunks),
2162 /// with the caller responsible for safe usage of the returned pointers as
2163 /// well as ensuring that the iterator is not invalidated by new
2164 /// allocations.
2165 ///
2166 /// ## Safety
2167 ///
2168 /// Allocations from this arena must not be performed while the returned
2169 /// iterator is alive. If reading the chunk data (or casting to a reference),
2170 /// the caller must ensure that there exist no mutable references to
2171 /// previously allocated data.
2172 ///
2173 /// In addition, all of the caveats when reading the chunk data from
2174 /// [`iter_allocated_chunks()`](Bump::iter_allocated_chunks) still apply.
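///
/// ## Example
///
/// An illustrative sketch that only inspects chunk lengths and never reads
/// the (possibly uninitialized) chunk contents:
///
/// ```
/// let bump = bumpalo::Bump::new();
/// bump.alloc(1u32);
///
/// // Safety: no allocations are made while the iterator is alive, and the
/// // chunk contents are never read.
/// let total: usize = unsafe {
///     bump.iter_allocated_chunks_raw().map(|(_ptr, len)| len).sum()
/// };
/// assert!(total >= core::mem::size_of::<u32>());
/// ```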
2175 pub unsafe fn iter_allocated_chunks_raw(&self) -> ChunkRawIter<'_, MIN_ALIGN> {
2176 ChunkRawIter {
2177 footer: self.current_chunk_footer.get(),
2178 bump: PhantomData,
2179 }
2180 }
2181
2182 /// Calculates the number of bytes currently allocated across all chunks in
2183 /// this bump arena.
2184 ///
2185 /// If you allocate types of different alignments or types with
2186 /// larger-than-typical alignment in the same arena, some padding
2187 /// bytes might get allocated in the bump arena. Note that those padding
2188 /// bytes will add to this method's resulting sum, so you cannot rely
2189 /// on it only counting the sum of the sizes of the things
2190 /// you've allocated in the arena.
2191 ///
2192 /// The allocated bytes do not include the size of bumpalo's metadata,
2193 /// so the amount of memory requested from the Rust allocator is higher
2194 /// than the returned value.
2195 ///
2196 /// ## Example
2197 ///
2198 /// ```
2199 /// let bump = bumpalo::Bump::new();
2200 /// let _x = bump.alloc_slice_fill_default::<u32>(5);
2201 /// let bytes = bump.allocated_bytes();
2202 /// assert!(bytes >= core::mem::size_of::<u32>() * 5);
2203 /// ```
2204 pub fn allocated_bytes(&self) -> usize {
2205 let footer = self.current_chunk_footer.get();
2206
2207 unsafe { footer.as_ref().allocated_bytes }
2208 }
2209
2210 /// Calculates the number of bytes requested from the Rust allocator for this `Bump`.
2211 ///
2212 /// This number is equal to [`allocated_bytes()`](Self::allocated_bytes) plus
2213 /// the size of bumpalo's chunk metadata (one footer per allocated chunk).
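///
/// ## Example
///
/// A small sketch of the relationship between the two methods:
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let _x = bump.alloc_slice_fill_default::<u32>(5);
/// assert!(bump.allocated_bytes_including_metadata() > bump.allocated_bytes());
/// ```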
2214 pub fn allocated_bytes_including_metadata(&self) -> usize {
2215 let metadata_size =
2216 unsafe { self.iter_allocated_chunks_raw().count() * mem::size_of::<ChunkFooter>() };
2217 self.allocated_bytes() + metadata_size
2218 }
2219
2220 #[inline]
2221 unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool {
2222 let footer = self.current_chunk_footer.get();
2223 let footer = footer.as_ref();
2224 footer.ptr.get() == ptr
2225 }
2226
2227 #[inline]
2228 unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
2229 // If the pointer is the last allocation we made, we can reuse the bytes,
2230 // otherwise they are simply leaked -- at least until somebody calls reset().
2231 if self.is_last_allocation(ptr) {
2232 let ptr = self.current_chunk_footer.get().as_ref().ptr.get();
2233 let ptr = ptr.as_ptr().add(layout.size());
2234
2235 let ptr = round_mut_ptr_up_to_unchecked(ptr, MIN_ALIGN);
2236 debug_assert!(
2237 is_pointer_aligned_to(ptr, MIN_ALIGN),
2238 "bump pointer {ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}"
2239 );
2240 let ptr = NonNull::new_unchecked(ptr);
2241 self.current_chunk_footer.get().as_ref().ptr.set(ptr);
2242 }
2243 }
2244
2245 #[inline]
2246 unsafe fn shrink(
2247 &self,
2248 ptr: NonNull<u8>,
2249 old_layout: Layout,
2250 new_layout: Layout,
2251 ) -> Result<NonNull<u8>, AllocErr> {
2252 // If the new layout demands greater alignment than the old layout has,
2253 // then either
2254 //
2255 // 1. the pointer happens to satisfy the new layout's alignment, so we
2256 // got lucky and can return the pointer as-is, or
2257 //
2258 // 2. the pointer is not aligned to the new layout's demanded alignment,
2259 // and we are unlucky.
2260 //
2261 // In the case of (2), to successfully "shrink" the allocation, we have
2262 // to allocate a whole new region for the new layout.
2263 if old_layout.align() < new_layout.align() {
2264 return if is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()) {
2265 Ok(ptr)
2266 } else {
2267 let new_ptr = self.try_alloc_layout(new_layout)?;
2268
2269 // We know that these regions are nonoverlapping because
2270 // `new_ptr` is a fresh allocation.
2271 ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_layout.size());
2272
2273 Ok(new_ptr)
2274 };
2275 }
2276
2277 debug_assert!(is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()));
2278
2279 let old_size = old_layout.size();
2280 let new_size = new_layout.size();
2281
2282 // This is how much space we would *actually* reclaim while satisfying
2283 // the requested alignment.
2284 let delta = round_down_to(old_size - new_size, new_layout.align().max(MIN_ALIGN));
2285
2286 if self.is_last_allocation(ptr)
2287 // Only reclaim the excess space (which requires a copy) if it
2288 // is worth it: we are actually going to recover "enough" space
2289 // and we can do a non-overlapping copy.
2290 //
2291 // We do `(old_size + 1) / 2` so division rounds up rather than
2292 // down. Consider when:
2293 //
2294 // old_size = 5
2295 // new_size = 3
2296 //
2297 // If we do not take care to round up, this will result in:
2298 //
2299 // delta = 2
2300 // (old_size / 2) = (5 / 2) = 2
2301 //
2302 // And then the check would succeed even though we have
2303 // overlapping ranges:
2304 //
2305 // |--------old-allocation-------|
2306 // |------from-------|
2307 // |-------to--------|
2308 // +-----+-----+-----+-----+-----+
2309 // | a | b | c | . | . |
2310 // +-----+-----+-----+-----+-----+
2311 //
2312 // But we MUST NOT have overlapping ranges because we use
2313 // `copy_nonoverlapping` below! Therefore, we round the division
2314 // up to avoid this issue.
2315 && delta >= (old_size + 1) / 2
2316 {
2317 let footer = self.current_chunk_footer.get();
2318 let footer = footer.as_ref();
2319
2320 // NB: new_ptr is aligned, because ptr *has to* be aligned, and we
2321 // made sure delta is aligned.
2322 let new_ptr = NonNull::new_unchecked(footer.ptr.get().as_ptr().add(delta));
2323 debug_assert!(
2324 is_pointer_aligned_to(new_ptr.as_ptr(), MIN_ALIGN),
2325 "bump pointer {new_ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}"
2326 );
2327 footer.ptr.set(new_ptr);
2328
2329 // NB: we know it is non-overlapping because of the size check
2330 // in the `if` condition.
2331 ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size);
2332
2333 return Ok(new_ptr);
2334 }
2335
2336 // If this wasn't the last allocation, or shrinking wasn't worth it,
2337 // simply return the old pointer as-is.
2338 Ok(ptr)
2339 }
2340
2341 #[inline]
2342 unsafe fn grow(
2343 &self,
2344 ptr: NonNull<u8>,
2345 old_layout: Layout,
2346 new_layout: Layout,
2347 ) -> Result<NonNull<u8>, AllocErr> {
2348 let old_size = old_layout.size();
2349
2350 let new_size = new_layout.size();
2351 let new_size = round_up_to(new_size, MIN_ALIGN).ok_or(AllocErr)?;
2352
2353 let align_is_compatible = old_layout.align() >= new_layout.align();
2354
2355 if align_is_compatible && self.is_last_allocation(ptr) {
2356 // Try to allocate the delta size within this same block so we can
2357 // reuse the currently allocated space.
2358 let delta = new_size - old_size;
2359 if let Some(p) =
2360 self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align())?)
2361 {
2362 ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size);
2363 return Ok(p);
2364 }
2365 }
2366
2367 // Fallback: do a fresh allocation and copy the existing data into it.
2368 let new_ptr = self.try_alloc_layout(new_layout)?;
2369 ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size);
2370 Ok(new_ptr)
2371 }
2372}
2373
2374/// An iterator over each chunk of allocated memory that
2375/// an arena has bump allocated into.
2376///
2377/// The chunks are returned ordered by allocation time, with the most recently
2378/// allocated chunk being returned first.
2379///
2380/// The values inside each chunk are also ordered by allocation time, with the most
2381/// recent allocation being earlier in the slice.
2382///
2383/// This struct is created by the [`iter_allocated_chunks`] method on
2384/// [`Bump`]. See that function for a safety description regarding reading from the returned items.
2385///
2386/// [`Bump`]: struct.Bump.html
2387/// [`iter_allocated_chunks`]: struct.Bump.html#method.iter_allocated_chunks
2388#[derive(Debug)]
2389pub struct ChunkIter<'a, const MIN_ALIGN: usize = 1> {
2390 raw: ChunkRawIter<'a, MIN_ALIGN>,
2391 bump: PhantomData<&'a mut Bump>,
2392}
2393
2394impl<'a, const MIN_ALIGN: usize> Iterator for ChunkIter<'a, MIN_ALIGN> {
2395 type Item = &'a [mem::MaybeUninit<u8>];
2396
2397 fn next(&mut self) -> Option<Self::Item> {
2398 unsafe {
2399 let (ptr, len) = self.raw.next()?;
2400 let slice = slice::from_raw_parts(ptr as *const mem::MaybeUninit<u8>, len);
2401 Some(slice)
2402 }
2403 }
2404}
2405
2406impl<'a, const MIN_ALIGN: usize> iter::FusedIterator for ChunkIter<'a, MIN_ALIGN> {}
2407
2408/// An iterator over raw pointers to chunks of allocated memory that this
2409/// arena has bump allocated into.
2410///
2411/// See [`ChunkIter`] for details regarding the returned chunks.
2412///
2413/// This struct is created by the [`iter_allocated_chunks_raw`] method on
2414/// [`Bump`]. See that function for a safety description regarding reading from
2415/// the returned items.
2416///
2417/// [`Bump`]: struct.Bump.html
2418/// [`iter_allocated_chunks_raw`]: struct.Bump.html#method.iter_allocated_chunks_raw
2419#[derive(Debug)]
2420pub struct ChunkRawIter<'a, const MIN_ALIGN: usize = 1> {
2421 footer: NonNull<ChunkFooter>,
2422 bump: PhantomData<&'a Bump<MIN_ALIGN>>,
2423}
2424
2425impl<const MIN_ALIGN: usize> Iterator for ChunkRawIter<'_, MIN_ALIGN> {
2426 type Item = (*mut u8, usize);
2427 fn next(&mut self) -> Option<(*mut u8, usize)> {
2428 unsafe {
2429 let foot = self.footer.as_ref();
2430 if foot.is_empty() {
2431 return None;
2432 }
2433 let (ptr, len) = foot.as_raw_parts();
2434 self.footer = foot.prev.get();
2435 Some((ptr as *mut u8, len))
2436 }
2437 }
2438}
2439
2440impl<const MIN_ALIGN: usize> iter::FusedIterator for ChunkRawIter<'_, MIN_ALIGN> {}
2441
2442#[inline(never)]
2443#[cold]
2444fn oom() -> ! {
2445 panic!("out of memory")
2446}
2447
2448unsafe impl<'a, const MIN_ALIGN: usize> alloc::Alloc for &'a Bump<MIN_ALIGN> {
2449 #[inline(always)]
2450 unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
2451 self.try_alloc_layout(layout)
2452 }
2453
2454 #[inline]
2455 unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
2456 Bump::<MIN_ALIGN>::dealloc(self, ptr, layout);
2457 }
2458
2459 #[inline]
2460 unsafe fn realloc(
2461 &mut self,
2462 ptr: NonNull<u8>,
2463 layout: Layout,
2464 new_size: usize,
2465 ) -> Result<NonNull<u8>, AllocErr> {
2466 let old_size = layout.size();
2467
2468 if old_size == 0 {
2469 return self.try_alloc_layout(layout);
2470 }
2471
2472 let new_layout = layout_from_size_align(new_size, layout.align())?;
2473 if new_size <= old_size {
2474 self.shrink(ptr, layout, new_layout)
2475 } else {
2476 self.grow(ptr, layout, new_layout)
2477 }
2478 }
2479}
2480
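// With the nightly `allocator_api` feature (or the `allocator-api2` crate
// feature), `&Bump` can be used as an `Allocator` for standard collections.
// An illustrative sketch, assuming a nightly toolchain with this crate's
// `allocator_api` feature enabled:
//
//     #![feature(allocator_api)]
//     let bump = bumpalo::Bump::new();
//     let mut v: Vec<u32, &bumpalo::Bump> = Vec::new_in(&bump);
//     v.push(1);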
2481#[cfg(any(feature = "allocator_api", feature = "allocator-api2"))]
2482unsafe impl<'a, const MIN_ALIGN: usize> Allocator for &'a Bump<MIN_ALIGN> {
2483 #[inline]
2484 fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
2485 self.try_alloc_layout(layout)
2486 .map(|p| unsafe {
2487 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), layout.size()))
2488 })
2489 .map_err(|_| AllocError)
2490 }
2491
2492 #[inline]
2493 unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
2494 Bump::<MIN_ALIGN>::dealloc(self, ptr, layout)
2495 }
2496
2497 #[inline]
2498 unsafe fn shrink(
2499 &self,
2500 ptr: NonNull<u8>,
2501 old_layout: Layout,
2502 new_layout: Layout,
2503 ) -> Result<NonNull<[u8]>, AllocError> {
2504 Bump::<MIN_ALIGN>::shrink(self, ptr, old_layout, new_layout)
2505 .map(|p| unsafe {
2506 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
2507 })
2508 .map_err(|_| AllocError)
2509 }
2510
2511 #[inline]
2512 unsafe fn grow(
2513 &self,
2514 ptr: NonNull<u8>,
2515 old_layout: Layout,
2516 new_layout: Layout,
2517 ) -> Result<NonNull<[u8]>, AllocError> {
2518 Bump::<MIN_ALIGN>::grow(self, ptr, old_layout, new_layout)
2519 .map(|p| unsafe {
2520 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
2521 })
2522 .map_err(|_| AllocError)
2523 }
2524
2525 #[inline]
2526 unsafe fn grow_zeroed(
2527 &self,
2528 ptr: NonNull<u8>,
2529 old_layout: Layout,
2530 new_layout: Layout,
2531 ) -> Result<NonNull<[u8]>, AllocError> {
2532 let mut ptr = self.grow(ptr, old_layout, new_layout)?;
2533 ptr.as_mut()[old_layout.size()..].fill(0);
2534 Ok(ptr)
2535 }
2536}
2537
2538// NB: Only tests which require private types, fields, or methods should be in
2539// here. Anything that can just be tested via public API surface should be in
2540// `bumpalo/tests/all/*`.
2541#[cfg(test)]
2542mod tests {
2543 use super::*;
2544
2545 // Uses private type `ChunkFooter`.
2546 #[test]
2547 fn chunk_footer_is_six_words() {
2548 assert_eq!(mem::size_of::<ChunkFooter>(), mem::size_of::<usize>() * 6);
2549 }
2550
2551 // Uses private `DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER` and `FOOTER_SIZE`.
2552 #[test]
2553 fn allocated_bytes() {
2554 let mut b = Bump::with_capacity(1);
2555
2556 assert_eq!(b.allocated_bytes(), DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
2557 assert_eq!(
2558 b.allocated_bytes_including_metadata(),
2559 DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER + FOOTER_SIZE
2560 );
2561
2562 b.reset();
2563
2564 assert_eq!(b.allocated_bytes(), DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
2565 assert_eq!(
2566 b.allocated_bytes_including_metadata(),
2567 DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER + FOOTER_SIZE
2568 );
2569 }
2570
2571 // Uses private `alloc` module.
2572 #[test]
2573 fn test_realloc() {
2574 use crate::alloc::Alloc;
2575
2576 unsafe {
2577 const CAPACITY: usize = DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER;
2578 let mut b = Bump::<1>::with_min_align_and_capacity(CAPACITY);
2579
2580 // `realloc` doesn't shrink allocations that aren't "worth it".
2581 let layout = Layout::from_size_align(100, 1).unwrap();
2582 let p = b.alloc_layout(layout);
2583 let q = (&b).realloc(p, layout, 51).unwrap();
2584 assert_eq!(p, q);
2585 b.reset();
2586
2587 // `realloc` will shrink allocations that are "worth it".
2588 let layout = Layout::from_size_align(100, 1).unwrap();
2589 let p = b.alloc_layout(layout);
2590 let q = (&b).realloc(p, layout, 50).unwrap();
2591 assert!(p != q);
2592 b.reset();
2593
2594 // `realloc` will reuse the last allocation when growing.
2595 let layout = Layout::from_size_align(10, 1).unwrap();
2596 let p = b.alloc_layout(layout);
2597 let q = (&b).realloc(p, layout, 11).unwrap();
2598 assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - 1);
2599 b.reset();
2600
2601 // `realloc` will allocate a new chunk when growing the last
2602 // allocation, if need be.
2603 let layout = Layout::from_size_align(1, 1).unwrap();
2604 let p = b.alloc_layout(layout);
2605 let q = (&b).realloc(p, layout, CAPACITY + 1).unwrap();
2606 assert_ne!(q.as_ptr() as usize, p.as_ptr() as usize - CAPACITY);
2607 b.reset();
2608
2609 // `realloc` will allocate and copy when reallocating anything that
2610 // wasn't the last allocation.
2611 let layout = Layout::from_size_align(1, 1).unwrap();
2612 let p = b.alloc_layout(layout);
2613 let _ = b.alloc_layout(layout);
2614 let q = (&b).realloc(p, layout, 2).unwrap();
2615 assert!(q.as_ptr() as usize != p.as_ptr() as usize - 1);
2616 b.reset();
2617 }
2618 }
2619
2620 // Uses our private `alloc` module.
2621 #[test]
2622 fn invalid_read() {
2623 use alloc::Alloc;
2624
2625 let mut b = &Bump::new();
2626
2627 unsafe {
2628 let l1 = Layout::from_size_align(12000, 4).unwrap();
2629 let p1 = Alloc::alloc(&mut b, l1).unwrap();
2630
2631 let l2 = Layout::from_size_align(1000, 4).unwrap();
2632 Alloc::alloc(&mut b, l2).unwrap();
2633
2634 let p1 = b.realloc(p1, l1, 24000).unwrap();
2635 let l3 = Layout::from_size_align(24000, 4).unwrap();
2636 b.realloc(p1, l3, 48000).unwrap();
2637 }
2638 }
2639}