bumpalo/
lib.rs

1#![doc = include_str!("../README.md")]
2#![deny(missing_debug_implementations)]
3#![deny(missing_docs)]
4#![cfg_attr(not(feature = "std"), no_std)]
5#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
6
7#[doc(hidden)]
8pub extern crate alloc as core_alloc;
9
10#[cfg(feature = "boxed")]
11pub mod boxed;
12#[cfg(feature = "collections")]
13pub mod collections;
14
15mod alloc;
16
17use core::cell::Cell;
18use core::fmt::Display;
19use core::iter;
20use core::marker::PhantomData;
21use core::mem;
22use core::ptr::{self, NonNull};
23use core::slice;
24use core::str;
25use core_alloc::alloc::{alloc, dealloc, Layout};
26
27#[cfg(feature = "allocator_api")]
28use core_alloc::alloc::{AllocError, Allocator};
29
30#[cfg(all(feature = "allocator-api2", not(feature = "allocator_api")))]
31use allocator_api2::alloc::{AllocError, Allocator};
32
33pub use alloc::AllocErr;
34
35/// An error returned from [`Bump::try_alloc_try_with`].
36#[derive(Clone, PartialEq, Eq, Debug)]
37pub enum AllocOrInitError<E> {
38    /// Indicates that the initial allocation failed.
39    Alloc(AllocErr),
40    /// Indicates that the initializer failed with the contained error after
41    /// allocation.
42    ///
43    /// It is possible but not guaranteed that the allocated memory has been
44    /// released back to the allocator at this point.
45    Init(E),
46}
47impl<E> From<AllocErr> for AllocOrInitError<E> {
48    fn from(e: AllocErr) -> Self {
49        Self::Alloc(e)
50    }
51}
52impl<E: Display> Display for AllocOrInitError<E> {
53    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
54        match self {
55            AllocOrInitError::Alloc(err) => err.fmt(f),
56            AllocOrInitError::Init(err) => write!(f, "initialization failed: {}", err),
57        }
58    }
59}
60
61/// An arena to bump allocate into.
62///
63/// ## No `Drop`s
64///
65/// Objects that are bump-allocated will never have their [`Drop`] implementation
66/// called &mdash; unless you do it manually yourself. This makes it relatively
67/// easy to leak memory or other resources.
68///
69/// If you have a type which internally manages
70///
71/// * an allocation from the global heap (e.g. [`Vec<T>`]),
72/// * open file descriptors (e.g. [`std::fs::File`]), or
73/// * any other resource that must be cleaned up (e.g. an `mmap`)
74///
75/// and relies on its `Drop` implementation to clean up the internal resource,
76/// then if you allocate that type with a `Bump`, you need to find a new way to
77/// clean up after it yourself.
78///
79/// Potential solutions are:
80///
/// * Using [`bumpalo::boxed::Box::new_in`] instead of [`Bump::alloc`], which
///   will drop wrapped values similarly to [`std::boxed::Box`]. Note that this
83///   requires enabling the `"boxed"` Cargo feature for this crate. **This is
84///   often the easiest solution.**
85///
/// * Calling [`drop_in_place`][drop_in_place] or using
///   [`std::mem::ManuallyDrop`][manuallydrop] to manually drop these types
///   (see the sketch below).
88///
89/// * Using [`bumpalo::collections::Vec`] instead of [`std::vec::Vec`].
90///
91/// * Avoiding allocating these problematic types within a `Bump`.
92///
/// Note that not calling `Drop` is memory safe! Destructors are never
/// guaranteed to run in Rust, so you can't rely on them for enforcing memory
/// safety.
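///
/// For instance, here is a small sketch of manually dropping a bump-allocated
/// value that owns a heap allocation (the arena memory itself is only
/// reclaimed when the `Bump` is dropped or reset):
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let s = bump.alloc(String::from("hello"));
/// unsafe {
///     // Run `String`'s destructor so its heap buffer is freed; the arena
///     // slot it occupied is simply left behind.
///     core::ptr::drop_in_place(s);
/// }
/// ```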
96///
97/// [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
98/// [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
99/// [`std::fs::File`]: https://doc.rust-lang.org/std/fs/struct.File.html
100/// [drop_in_place]: https://doc.rust-lang.org/std/ptr/fn.drop_in_place.html
101/// [manuallydrop]: https://doc.rust-lang.org/std/mem/struct.ManuallyDrop.html
102/// [`bumpalo::collections::Vec`]: collections/vec/struct.Vec.html
103/// [`std::vec::Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
104/// [`bumpalo::boxed::Box::new_in`]: boxed/struct.Box.html#method.new_in
105/// [`std::boxed::Box`]: https://doc.rust-lang.org/std/boxed/struct.Box.html
106///
107/// ## Example
108///
109/// ```
110/// use bumpalo::Bump;
111///
112/// // Create a new bump arena.
113/// let bump = Bump::new();
114///
115/// // Allocate values into the arena.
116/// let forty_two = bump.alloc(42);
117/// assert_eq!(*forty_two, 42);
118///
119/// // Mutable references are returned from allocation.
120/// let mut s = bump.alloc("bumpalo");
121/// *s = "the bump allocator; and also is a buffalo";
122/// ```
123///
124/// ## Allocation Methods Come in Many Flavors
125///
126/// There are various allocation methods on `Bump`, the simplest being
127/// [`alloc`][Bump::alloc]. The others exist to satisfy some combination of
128/// fallible allocation and initialization. The allocation methods are
129/// summarized in the following table:
130///
131/// <table>
132///   <thead>
133///     <tr>
134///       <th></th>
135///       <th>Infallible Allocation</th>
136///       <th>Fallible Allocation</th>
137///     </tr>
///   </thead>
///   <tbody>
139///     <tr>
140///       <th>By Value</th>
141///       <td><a href="#method.alloc"><code>alloc</code></a></td>
142///       <td><a href="#method.try_alloc"><code>try_alloc</code></a></td>
143///     </tr>
144///     <tr>
145///       <th>Infallible Initializer Function</th>
146///       <td><a href="#method.alloc_with"><code>alloc_with</code></a></td>
147///       <td><a href="#method.try_alloc_with"><code>try_alloc_with</code></a></td>
148///     </tr>
149///     <tr>
150///       <th>Fallible Initializer Function</th>
151///       <td><a href="#method.alloc_try_with"><code>alloc_try_with</code></a></td>
152///       <td><a href="#method.try_alloc_try_with"><code>try_alloc_try_with</code></a></td>
153///     </tr>
///   </tbody>
156/// </table>
157///
158/// ### Fallible Allocation: The `try_alloc_` Method Prefix
159///
160/// These allocation methods let you recover from out-of-memory (OOM)
161/// scenarios, rather than raising a panic on OOM.
162///
163/// ```
164/// use bumpalo::Bump;
165///
166/// let bump = Bump::new();
167///
168/// match bump.try_alloc(MyStruct {
169///     // ...
170/// }) {
171///     Ok(my_struct) => {
172///         // Allocation succeeded.
173///     }
174///     Err(e) => {
175///         // Out of memory.
176///     }
177/// }
178///
179/// struct MyStruct {
180///     // ...
181/// }
182/// ```
183///
184/// ### Initializer Functions: The `_with` Method Suffix
185///
186/// Calling one of the generic `…alloc(x)` methods is essentially equivalent to
/// the matching [`…alloc_with(|| x)`](?search=alloc_with). However, if you use
/// `…alloc_with`, then the closure will not be invoked until after allocating
/// space for storing `x` on the heap.
///
/// This can be useful in certain edge cases related to compiler optimizations.
/// When evaluating, for example, `bump.alloc(x)`, `x` is semantically first put
/// on the stack and then moved onto the heap. In some cases, the compiler is
/// able to optimize this into constructing `x` directly on the heap, but in
/// many cases it does not.
196///
/// The `…alloc_with` functions try to help the compiler be smarter. In most
/// cases, calling, for example, `bump.try_alloc_with(|| x)` in release mode
/// will be enough for the compiler to realize that this optimization is valid
/// and to construct `x` directly on the heap.
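///
/// For illustration, a small sketch: both calls below allocate the same array
/// in the arena, but the `_with` flavor gives the optimizer a better chance to
/// construct the value directly in its final place rather than on the stack
/// first.
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let a = bump.alloc([0u8; 1024]);
/// let b = bump.alloc_with(|| [0u8; 1024]);
/// assert_eq!(a.len(), b.len());
/// ```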
201///
202/// #### Warning
203///
204/// These functions critically depend on compiler optimizations to achieve their
/// desired effect. This means that they are not effective tools when compiling
/// without optimizations.
207///
208/// Even when optimizations are on, these functions do not **guarantee** that
209/// the value is constructed on the heap. To the best of our knowledge no such
210/// guarantee can be made in stable Rust as of 1.54.
211///
212/// ### Fallible Initialization: The `_try_with` Method Suffix
213///
214/// The generic [`…alloc_try_with(|| x)`](?search=_try_with) methods behave
215/// like the purely `_with` suffixed methods explained above. However, they
216/// allow for fallible initialization by accepting a closure that returns a
217/// [`Result`] and will attempt to undo the initial allocation if this closure
218/// returns [`Err`].
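///
/// For example, a small sketch of the success path:
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let five: Result<&mut i32, ()> = bump.alloc_try_with(|| Ok(5));
/// assert_eq!(*five.unwrap(), 5);
/// ```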
219///
220/// #### Warning
221///
222/// If the inner closure returns [`Ok`], space for the entire [`Result`] remains
223/// allocated inside `self`. This can be a problem especially if the [`Err`]
/// variant is larger than the [`Ok`] variant, but even otherwise there may be
/// overhead for the [`Result`]'s discriminant.
226///
227/// <p><details><summary>Undoing the allocation in the <code>Err</code> case
228/// always fails if <code>f</code> successfully made any additional allocations
229/// in <code>self</code>.</summary>
230///
/// For example, the following will always also leak space for the [`Result`]
232/// into this `Bump`, even though the inner reference isn't kept and the [`Err`]
233/// payload is returned semantically by value:
234///
235/// ```rust
236/// let bump = bumpalo::Bump::new();
237///
238/// let r: Result<&mut [u8; 1000], ()> = bump.alloc_try_with(|| {
239///     let _ = bump.alloc(0_u8);
240///     Err(())
241/// });
242///
243/// assert!(r.is_err());
244/// ```
245///
246///</details></p>
247///
248/// Since [`Err`] payloads are first placed on the heap and then moved to the
249/// stack, `bump.…alloc_try_with(|| x)?` is likely to execute more slowly than
250/// the matching `bump.…alloc(x?)` in case of initialization failure. If this
251/// happens frequently, using the plain un-suffixed method may perform better.
252///
253/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
254/// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
255/// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
256///
257/// ### `Bump` Allocation Limits
258///
259/// `bumpalo` supports setting a limit on the maximum bytes of memory that can
260/// be allocated for use in a particular `Bump` arena. This limit can be set and removed with
261/// [`set_allocation_limit`][Bump::set_allocation_limit].
262/// The allocation limit is only enforced when allocating new backing chunks for
263/// a `Bump`. Updating the allocation limit will not affect existing allocations
264/// or any future allocations within the `Bump`'s current chunk.
265///
266/// #### Example
267///
268/// ```
269/// let bump = bumpalo::Bump::new();
270///
271/// assert_eq!(bump.allocation_limit(), None);
272/// bump.set_allocation_limit(Some(0));
273///
274/// assert!(bump.try_alloc(5).is_err());
275///
276/// bump.set_allocation_limit(Some(6));
277///
278/// assert_eq!(bump.allocation_limit(), Some(6));
279///
280/// bump.set_allocation_limit(None);
281///
282/// assert_eq!(bump.allocation_limit(), None);
283/// ```
284///
285/// #### Warning
286///
/// For backwards-compatibility reasons, allocations that fail due to the
/// allocation limit are not reported any differently from allocations that
/// fail due to resource exhaustion.
291#[derive(Debug)]
292pub struct Bump {
293    // The current chunk we are bump allocating within.
294    current_chunk_footer: Cell<NonNull<ChunkFooter>>,
295    allocation_limit: Cell<Option<usize>>,
296}
297
298#[repr(C)]
299#[derive(Debug)]
300struct ChunkFooter {
301    // Pointer to the start of this chunk allocation. This footer is always at
302    // the end of the chunk.
303    data: NonNull<u8>,
304
305    // The layout of this chunk's allocation.
306    layout: Layout,
307
308    // Link to the previous chunk.
309    //
310    // Note that the last node in the `prev` linked list is the canonical empty
311    // chunk, whose `prev` link points to itself.
312    prev: Cell<NonNull<ChunkFooter>>,
313
314    // Bump allocation finger that is always in the range `self.data..=self`.
315    ptr: Cell<NonNull<u8>>,
316
    // The bytes allocated in all chunks so far. The canonical empty chunk has
    // a size of 0; for all other chunks, `allocated_bytes` is the size of the
    // current chunk plus the `allocated_bytes` of the `prev` chunk.
321    allocated_bytes: usize,
322}
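
// A sketch of a single chunk's layout in memory: the footer sits at the very
// end of the chunk, and the bump finger `ptr` moves downward from the footer
// toward `data` as allocations are made.
//
//   data                     ptr                       footer (`self`)
//    |<----- free space ----->|<----- allocations ----->|<- ChunkFooter ->|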
323
324/// A wrapper type for the canonical, statically allocated empty chunk.
325///
326/// For the canonical empty chunk to be `static`, its type must be `Sync`, which
327/// is the purpose of this wrapper type. This is safe because the empty chunk is
328/// immutable and never actually modified.
329#[repr(transparent)]
330struct EmptyChunkFooter(ChunkFooter);
331
332unsafe impl Sync for EmptyChunkFooter {}
333
334static EMPTY_CHUNK: EmptyChunkFooter = EmptyChunkFooter(ChunkFooter {
    // This chunk is empty (except the footer itself).
336    layout: Layout::new::<ChunkFooter>(),
337
338    // The start of the (empty) allocatable region for this chunk is itself.
339    data: unsafe { NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8) },
340
341    // The end of the (empty) allocatable region for this chunk is also itself.
342    ptr: Cell::new(unsafe {
343        NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8)
344    }),
345
346    // Invariant: the last chunk footer in all `ChunkFooter::prev` linked lists
347    // is the empty chunk footer, whose `prev` points to itself.
348    prev: Cell::new(unsafe {
349        NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut ChunkFooter)
350    }),
351
352    // Empty chunks count as 0 allocated bytes in an arena.
353    allocated_bytes: 0,
354});
355
356impl EmptyChunkFooter {
357    fn get(&'static self) -> NonNull<ChunkFooter> {
358        NonNull::from(&self.0)
359    }
360}
361
362impl ChunkFooter {
363    // Returns the start and length of the currently allocated region of this
364    // chunk.
365    fn as_raw_parts(&self) -> (*const u8, usize) {
366        let data = self.data.as_ptr() as *const u8;
367        let ptr = self.ptr.get().as_ptr() as *const u8;
368        debug_assert!(data <= ptr);
369        debug_assert!(ptr <= self as *const ChunkFooter as *const u8);
370        let len = unsafe { (self as *const ChunkFooter as *const u8).offset_from(ptr) as usize };
371        (ptr, len)
372    }
373
374    /// Is this chunk the last empty chunk?
375    fn is_empty(&self) -> bool {
376        ptr::eq(self, EMPTY_CHUNK.get().as_ptr())
377    }
378}
379
380impl Default for Bump {
381    fn default() -> Bump {
382        Bump::new()
383    }
384}
385
386impl Drop for Bump {
387    fn drop(&mut self) {
388        unsafe {
389            dealloc_chunk_list(self.current_chunk_footer.get());
390        }
391    }
392}
393
394#[inline]
395unsafe fn dealloc_chunk_list(mut footer: NonNull<ChunkFooter>) {
396    while !footer.as_ref().is_empty() {
397        let f = footer;
398        footer = f.as_ref().prev.get();
399        dealloc(f.as_ref().data.as_ptr(), f.as_ref().layout);
400    }
401}
402
403// `Bump`s are safe to send between threads because nothing aliases its owned
404// chunks until you start allocating from it. But by the time you allocate from
405// it, the returned references to allocations borrow the `Bump` and therefore
406// prevent sending the `Bump` across threads until the borrows end.
407unsafe impl Send for Bump {}
408
409#[inline]
410fn is_pointer_aligned_to<T>(pointer: *mut T, align: usize) -> bool {
411    debug_assert!(align.is_power_of_two());
412
413    let pointer = pointer as usize;
414    let pointer_aligned = round_down_to(pointer, align);
415    pointer == pointer_aligned
416}
417
418#[inline]
419pub(crate) fn round_up_to(n: usize, divisor: usize) -> Option<usize> {
420    debug_assert!(divisor > 0);
421    debug_assert!(divisor.is_power_of_two());
422    Some(n.checked_add(divisor - 1)? & !(divisor - 1))
423}
424
425#[inline]
426pub(crate) fn round_down_to(n: usize, divisor: usize) -> usize {
427    debug_assert!(divisor > 0);
428    debug_assert!(divisor.is_power_of_two());
429    n & !(divisor - 1)
430}
431
432/// Same as `round_down_to` but preserves pointer provenance.
433#[inline]
434pub(crate) fn round_mut_ptr_down_to(ptr: *mut u8, divisor: usize) -> *mut u8 {
435    debug_assert!(divisor > 0);
436    debug_assert!(divisor.is_power_of_two());
437    ptr.wrapping_sub(ptr as usize & (divisor - 1))
438}
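
// As an illustration of the arithmetic (assuming an 8-byte alignment):
// `round_up_to(13, 8)` is `Some(16)`, `round_down_to(13, 8)` is `8`, and
// `round_mut_ptr_down_to` clears the same low bits of a pointer's address
// while keeping its provenance.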
439
440// After this point, we try to hit page boundaries instead of powers of 2
441const PAGE_STRATEGY_CUTOFF: usize = 0x1000;
442
443// We only support alignments of up to 16 bytes for iter_allocated_chunks.
444const SUPPORTED_ITER_ALIGNMENT: usize = 16;
445const CHUNK_ALIGN: usize = SUPPORTED_ITER_ALIGNMENT;
446const FOOTER_SIZE: usize = mem::size_of::<ChunkFooter>();
447
// Assert that the alignment of `ChunkFooter` is at most the supported chunk
// alignment. This gives a compile-time error if it is not the case.
449const _FOOTER_ALIGN_ASSERTION: bool = mem::align_of::<ChunkFooter>() <= CHUNK_ALIGN;
450const _: [(); _FOOTER_ALIGN_ASSERTION as usize] = [()];
451
452// Maximum typical overhead per allocation imposed by allocators.
453const MALLOC_OVERHEAD: usize = 16;
454
// This is the overhead from malloc, the footer, and alignment. For instance,
// if we want to request a chunk of memory that has at least X bytes usable
// for allocations (where X is aligned to CHUNK_ALIGN), then we expect that,
// after adding the footer, malloc overhead, and alignment, the chunk of
// memory the allocator actually sets aside for us is X+OVERHEAD rounded up
// to the nearest suitable size boundary.
461const OVERHEAD: usize = (MALLOC_OVERHEAD + FOOTER_SIZE + (CHUNK_ALIGN - 1)) & !(CHUNK_ALIGN - 1);
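// (As a worked example: on a typical 64-bit target where the footer is 48
// bytes, OVERHEAD comes out to (16 + 48 + 15) & !15 = 64.)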
462
463// Choose a relatively small default initial chunk size, since we double chunk
464// sizes as we grow bump arenas to amortize costs of hitting the global
465// allocator.
466const FIRST_ALLOCATION_GOAL: usize = 1 << 9;
467
// The actual size of the first allocation is going to be a bit smaller
// than the goal. We need to make room for the footer, and we also need to
// take the alignment into account.
471const DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER: usize = FIRST_ALLOCATION_GOAL - OVERHEAD;
472
473/// The memory size and alignment details for a potential new chunk
474/// allocation.
475#[derive(Debug, Clone, Copy)]
476struct NewChunkMemoryDetails {
477    new_size_without_footer: usize,
478    align: usize,
479    size: usize,
480}
481
/// Wrapper around `Layout::from_size_align` that maps layout errors to `AllocErr`.
483#[inline]
484fn layout_from_size_align(size: usize, align: usize) -> Result<Layout, AllocErr> {
485    Layout::from_size_align(size, align).map_err(|_| AllocErr)
486}
487
488#[inline(never)]
489fn allocation_size_overflow<T>() -> T {
490    panic!("requested allocation size overflowed")
491}
492
493impl Bump {
494    /// Construct a new arena to bump allocate into.
495    ///
496    /// ## Example
497    ///
498    /// ```
499    /// let bump = bumpalo::Bump::new();
500    /// # let _ = bump;
501    /// ```
502    pub fn new() -> Bump {
503        Self::with_capacity(0)
504    }
505
506    /// Attempt to construct a new arena to bump allocate into.
507    ///
508    /// ## Example
509    ///
510    /// ```
511    /// let bump = bumpalo::Bump::try_new();
512    /// # let _ = bump.unwrap();
513    /// ```
514    pub fn try_new() -> Result<Bump, AllocErr> {
515        Bump::try_with_capacity(0)
516    }
517
518    /// Construct a new arena with the specified byte capacity to bump allocate into.
519    ///
520    /// ## Example
521    ///
522    /// ```
523    /// let bump = bumpalo::Bump::with_capacity(100);
524    /// # let _ = bump;
525    /// ```
526    pub fn with_capacity(capacity: usize) -> Bump {
527        Bump::try_with_capacity(capacity).unwrap_or_else(|_| oom())
528    }
529
530    /// Attempt to construct a new arena with the specified byte capacity to bump allocate into.
531    ///
532    /// ## Example
533    ///
534    /// ```
535    /// let bump = bumpalo::Bump::try_with_capacity(100);
536    /// # let _ = bump.unwrap();
537    /// ```
538    pub fn try_with_capacity(capacity: usize) -> Result<Self, AllocErr> {
539        if capacity == 0 {
540            return Ok(Bump {
541                current_chunk_footer: Cell::new(EMPTY_CHUNK.get()),
542                allocation_limit: Cell::new(None),
543            });
544        }
545
546        let layout = layout_from_size_align(capacity, 1)?;
547
548        let chunk_footer = unsafe {
549            Self::new_chunk(
550                Bump::new_chunk_memory_details(None, layout).ok_or(AllocErr)?,
551                layout,
552                EMPTY_CHUNK.get(),
553            )
554            .ok_or(AllocErr)?
555        };
556
557        Ok(Bump {
558            current_chunk_footer: Cell::new(chunk_footer),
559            allocation_limit: Cell::new(None),
560        })
561    }
562
563    /// The allocation limit for this arena in bytes.
564    ///
565    /// ## Example
566    ///
567    /// ```
568    /// let bump = bumpalo::Bump::with_capacity(0);
569    ///
570    /// assert_eq!(bump.allocation_limit(), None);
571    ///
572    /// bump.set_allocation_limit(Some(6));
573    ///
574    /// assert_eq!(bump.allocation_limit(), Some(6));
575    ///
576    /// bump.set_allocation_limit(None);
577    ///
578    /// assert_eq!(bump.allocation_limit(), None);
579    /// ```
580    pub fn allocation_limit(&self) -> Option<usize> {
581        self.allocation_limit.get()
582    }
583
584    /// Set the allocation limit in bytes for this arena.
585    ///
586    /// The allocation limit is only enforced when allocating new backing chunks for
587    /// a `Bump`. Updating the allocation limit will not affect existing allocations
588    /// or any future allocations within the `Bump`'s current chunk.
589    ///
590    /// ## Example
591    ///
592    /// ```
593    /// let bump = bumpalo::Bump::with_capacity(0);
594    ///
595    /// bump.set_allocation_limit(Some(0));
596    ///
597    /// assert!(bump.try_alloc(5).is_err());
598    /// ```
599    pub fn set_allocation_limit(&self, limit: Option<usize>) {
600        self.allocation_limit.set(limit);
601    }
602
603    /// How much headroom an arena has before it hits its allocation
604    /// limit.
605    fn allocation_limit_remaining(&self) -> Option<usize> {
606        self.allocation_limit.get().and_then(|allocation_limit| {
607            let allocated_bytes = self.allocated_bytes();
608            if allocated_bytes > allocation_limit {
609                None
610            } else {
611                Some(usize::abs_diff(allocation_limit, allocated_bytes))
612            }
613        })
614    }
615
616    /// Whether a request to allocate a new chunk with a given size for a given
617    /// requested layout will fit under the allocation limit set on a `Bump`.
618    fn chunk_fits_under_limit(
619        allocation_limit_remaining: Option<usize>,
620        new_chunk_memory_details: NewChunkMemoryDetails,
621    ) -> bool {
622        allocation_limit_remaining
623            .map(|allocation_limit_left| {
624                allocation_limit_left >= new_chunk_memory_details.new_size_without_footer
625            })
626            .unwrap_or(true)
627    }
628
    /// Determine the memory details, including the final size, alignment, and
    /// size without the footer, for a new chunk that would be allocated to
    /// fulfill an allocation request.
632    fn new_chunk_memory_details(
633        new_size_without_footer: Option<usize>,
634        requested_layout: Layout,
635    ) -> Option<NewChunkMemoryDetails> {
636        let mut new_size_without_footer =
637            new_size_without_footer.unwrap_or(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
638
639        // We want to have CHUNK_ALIGN or better alignment
640        let mut align = CHUNK_ALIGN;
641
642        // If we already know we need to fulfill some request,
643        // make sure we allocate at least enough to satisfy it
644        align = align.max(requested_layout.align());
645        let requested_size =
646            round_up_to(requested_layout.size(), align).unwrap_or_else(allocation_size_overflow);
647        new_size_without_footer = new_size_without_footer.max(requested_size);
648
649        // We want our allocations to play nice with the memory allocator,
650        // and waste as little memory as possible.
651        // For small allocations, this means that the entire allocation
        // including the chunk footer and malloc's internal overhead is
653        // as close to a power of two as we can go without going over.
654        // For larger allocations, we only need to get close to a page
655        // boundary without going over.
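        //
        // As a worked example (assuming a typical 64-bit target where OVERHEAD
        // is 64): a request needing 600 usable bytes is first rounded up to
        // 608, and then (608 + 64).next_power_of_two() - 64 = 960 usable
        // bytes, so the chunk plus malloc's overhead comes to exactly 1024
        // bytes.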
656        if new_size_without_footer < PAGE_STRATEGY_CUTOFF {
657            new_size_without_footer =
658                (new_size_without_footer + OVERHEAD).next_power_of_two() - OVERHEAD;
659        } else {
660            new_size_without_footer =
661                round_up_to(new_size_without_footer + OVERHEAD, 0x1000)? - OVERHEAD;
662        }
663
664        debug_assert_eq!(align % CHUNK_ALIGN, 0);
665        debug_assert_eq!(new_size_without_footer % CHUNK_ALIGN, 0);
666        let size = new_size_without_footer
667            .checked_add(FOOTER_SIZE)
668            .unwrap_or_else(allocation_size_overflow);
669
670        Some(NewChunkMemoryDetails {
671            new_size_without_footer,
672            size,
673            align,
674        })
675    }
676
677    /// Allocate a new chunk and return its initialized footer.
678    ///
    /// `requested_layout` is the layout of the allocation request that
    /// triggered us to fall back to allocating a new chunk of memory;
    /// `new_chunk_memory_details` describes the size and alignment of that
    /// new chunk.
682    unsafe fn new_chunk(
683        new_chunk_memory_details: NewChunkMemoryDetails,
684        requested_layout: Layout,
685        prev: NonNull<ChunkFooter>,
686    ) -> Option<NonNull<ChunkFooter>> {
687        let NewChunkMemoryDetails {
688            new_size_without_footer,
689            align,
690            size,
691        } = new_chunk_memory_details;
692
693        let layout = layout_from_size_align(size, align).ok()?;
694
695        debug_assert!(size >= requested_layout.size());
696
697        let data = alloc(layout);
698        let data = NonNull::new(data)?;
699
700        // The `ChunkFooter` is at the end of the chunk.
701        let footer_ptr = data.as_ptr().add(new_size_without_footer);
702        debug_assert_eq!((data.as_ptr() as usize) % align, 0);
703        debug_assert_eq!(footer_ptr as usize % CHUNK_ALIGN, 0);
704        let footer_ptr = footer_ptr as *mut ChunkFooter;
705
706        // The bump pointer is initialized to the end of the range we will
707        // bump out of.
708        let ptr = Cell::new(NonNull::new_unchecked(footer_ptr as *mut u8));
709
        // The `allocated_bytes` of a new chunk counts the total usable size of
        // all chunks in the list, not how much of that space is in use.
712        let allocated_bytes = prev.as_ref().allocated_bytes + new_size_without_footer;
713
714        ptr::write(
715            footer_ptr,
716            ChunkFooter {
717                data,
718                layout,
719                prev: Cell::new(prev),
720                ptr,
721                allocated_bytes,
722            },
723        );
724
725        Some(NonNull::new_unchecked(footer_ptr))
726    }
727
728    /// Reset this bump allocator.
729    ///
730    /// Performs mass deallocation on everything allocated in this arena by
731    /// resetting the pointer into the underlying chunk of memory to the start
732    /// of the chunk. Does not run any `Drop` implementations on deallocated
733    /// objects; see [the top-level documentation](struct.Bump.html) for details.
734    ///
735    /// If this arena has allocated multiple chunks to bump allocate into, then
736    /// the excess chunks are returned to the global allocator.
737    ///
738    /// ## Example
739    ///
740    /// ```
741    /// let mut bump = bumpalo::Bump::new();
742    ///
743    /// // Allocate a bunch of things.
744    /// {
745    ///     for i in 0..100 {
746    ///         bump.alloc(i);
747    ///     }
748    /// }
749    ///
750    /// // Reset the arena.
751    /// bump.reset();
752    ///
753    /// // Allocate some new things in the space previously occupied by the
754    /// // original things.
755    /// for j in 200..400 {
756    ///     bump.alloc(j);
757    /// }
758    ///```
759    pub fn reset(&mut self) {
760        // Takes `&mut self` so `self` must be unique and there can't be any
761        // borrows active that would get invalidated by resetting.
762        unsafe {
763            if self.current_chunk_footer.get().as_ref().is_empty() {
764                return;
765            }
766
767            let mut cur_chunk = self.current_chunk_footer.get();
768
769            // Deallocate all chunks except the current one
770            let prev_chunk = cur_chunk.as_ref().prev.replace(EMPTY_CHUNK.get());
771            dealloc_chunk_list(prev_chunk);
772
773            // Reset the bump finger to the end of the chunk.
774            cur_chunk.as_ref().ptr.set(cur_chunk.cast());
775
776            // Reset the allocated size of the chunk.
777            cur_chunk.as_mut().allocated_bytes = cur_chunk.as_ref().layout.size();
778
779            debug_assert!(
780                self.current_chunk_footer
781                    .get()
782                    .as_ref()
783                    .prev
784                    .get()
785                    .as_ref()
786                    .is_empty(),
787                "We should only have a single chunk"
788            );
789            debug_assert_eq!(
790                self.current_chunk_footer.get().as_ref().ptr.get(),
791                self.current_chunk_footer.get().cast(),
792                "Our chunk's bump finger should be reset to the start of its allocation"
793            );
794        }
795    }
796
797    /// Allocate an object in this `Bump` and return an exclusive reference to
798    /// it.
799    ///
800    /// ## Panics
801    ///
802    /// Panics if reserving space for `T` fails.
803    ///
804    /// ## Example
805    ///
806    /// ```
807    /// let bump = bumpalo::Bump::new();
808    /// let x = bump.alloc("hello");
809    /// assert_eq!(*x, "hello");
810    /// ```
811    #[inline(always)]
812    pub fn alloc<T>(&self, val: T) -> &mut T {
813        self.alloc_with(|| val)
814    }
815
816    /// Try to allocate an object in this `Bump` and return an exclusive
817    /// reference to it.
818    ///
819    /// ## Errors
820    ///
821    /// Errors if reserving space for `T` fails.
822    ///
823    /// ## Example
824    ///
825    /// ```
826    /// let bump = bumpalo::Bump::new();
827    /// let x = bump.try_alloc("hello");
828    /// assert_eq!(x, Ok(&mut "hello"));
829    /// ```
830    #[inline(always)]
831    pub fn try_alloc<T>(&self, val: T) -> Result<&mut T, AllocErr> {
832        self.try_alloc_with(|| val)
833    }
834
    /// Pre-allocates space for an object in this `Bump`, initializes it using
836    /// the closure, then returns an exclusive reference to it.
837    ///
838    /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
839    /// discussion on the differences between the `_with` suffixed methods and
840    /// those methods without it, their performance characteristics, and when
841    /// you might or might not choose a `_with` suffixed method.
842    ///
843    /// ## Panics
844    ///
845    /// Panics if reserving space for `T` fails.
846    ///
847    /// ## Example
848    ///
849    /// ```
850    /// let bump = bumpalo::Bump::new();
851    /// let x = bump.alloc_with(|| "hello");
852    /// assert_eq!(*x, "hello");
853    /// ```
854    #[inline(always)]
855    pub fn alloc_with<F, T>(&self, f: F) -> &mut T
856    where
857        F: FnOnce() -> T,
858    {
859        #[inline(always)]
860        unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
861        where
862            F: FnOnce() -> T,
863        {
864            // This function is translated as:
865            // - allocate space for a T on the stack
866            // - call f() with the return value being put onto this stack space
867            // - memcpy from the stack to the heap
868            //
869            // Ideally we want LLVM to always realize that doing a stack
870            // allocation is unnecessary and optimize the code so it writes
871            // directly into the heap instead. It seems we get it to realize
            // this most consistently if we put this critical line into its
873            // own function instead of inlining it into the surrounding code.
874            ptr::write(ptr, f());
875        }
876
877        let layout = Layout::new::<T>();
878
879        unsafe {
880            let p = self.alloc_layout(layout);
881            let p = p.as_ptr() as *mut T;
882            inner_writer(p, f);
883            &mut *p
884        }
885    }
886
887    /// Tries to pre-allocate space for an object in this `Bump`, initializes
888    /// it using the closure, then returns an exclusive reference to it.
889    ///
890    /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
891    /// discussion on the differences between the `_with` suffixed methods and
892    /// those methods without it, their performance characteristics, and when
893    /// you might or might not choose a `_with` suffixed method.
894    ///
895    /// ## Errors
896    ///
897    /// Errors if reserving space for `T` fails.
898    ///
899    /// ## Example
900    ///
901    /// ```
902    /// let bump = bumpalo::Bump::new();
903    /// let x = bump.try_alloc_with(|| "hello");
904    /// assert_eq!(x, Ok(&mut "hello"));
905    /// ```
906    #[inline(always)]
907    pub fn try_alloc_with<F, T>(&self, f: F) -> Result<&mut T, AllocErr>
908    where
909        F: FnOnce() -> T,
910    {
911        #[inline(always)]
912        unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
913        where
914            F: FnOnce() -> T,
915        {
916            // This function is translated as:
917            // - allocate space for a T on the stack
918            // - call f() with the return value being put onto this stack space
919            // - memcpy from the stack to the heap
920            //
921            // Ideally we want LLVM to always realize that doing a stack
922            // allocation is unnecessary and optimize the code so it writes
923            // directly into the heap instead. It seems we get it to realize
            // this most consistently if we put this critical line into its
925            // own function instead of inlining it into the surrounding code.
926            ptr::write(ptr, f());
927        }
928
929        //SAFETY: Self-contained:
930        // `p` is allocated for `T` and then a `T` is written.
931        let layout = Layout::new::<T>();
932        let p = self.try_alloc_layout(layout)?;
933        let p = p.as_ptr() as *mut T;
934
935        unsafe {
936            inner_writer(p, f);
937            Ok(&mut *p)
938        }
939    }
940
941    /// Pre-allocates space for a [`Result`] in this `Bump`, initializes it using
942    /// the closure, then returns an exclusive reference to its `T` if [`Ok`].
943    ///
944    /// Iff the allocation fails, the closure is not run.
945    ///
946    /// Iff [`Err`], an allocator rewind is *attempted* and the `E` instance is
947    /// moved out of the allocator to be consumed or dropped as normal.
948    ///
949    /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
950    /// discussion on the differences between the `_with` suffixed methods and
951    /// those methods without it, their performance characteristics, and when
952    /// you might or might not choose a `_with` suffixed method.
953    ///
954    /// For caveats specific to fallible initialization, see
955    /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix).
956    ///
957    /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
958    /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
959    /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
960    ///
961    /// ## Errors
962    ///
963    /// Iff the allocation succeeds but `f` fails, that error is forwarded by value.
964    ///
965    /// ## Panics
966    ///
967    /// Panics if reserving space for `Result<T, E>` fails.
968    ///
969    /// ## Example
970    ///
971    /// ```
972    /// let bump = bumpalo::Bump::new();
973    /// let x = bump.alloc_try_with(|| Ok("hello"))?;
974    /// assert_eq!(*x, "hello");
975    /// # Result::<_, ()>::Ok(())
976    /// ```
977    #[inline(always)]
978    pub fn alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, E>
979    where
980        F: FnOnce() -> Result<T, E>,
981    {
982        let rewind_footer = self.current_chunk_footer.get();
983        let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
984        let mut inner_result_ptr = NonNull::from(self.alloc_with(f));
985        match unsafe { inner_result_ptr.as_mut() } {
986            Ok(t) => Ok(unsafe {
987                //SAFETY:
988                // The `&mut Result<T, E>` returned by `alloc_with` may be
989                // lifetime-limited by `E`, but the derived `&mut T` still has
990                // the same validity as in `alloc_with` since the error variant
991                // is already ruled out here.
992
993                // We could conditionally truncate the allocation here, but
994                // since it grows backwards, it seems unlikely that we'd get
995                // any more than the `Result`'s discriminant this way, if
996                // anything at all.
997                &mut *(t as *mut _)
998            }),
999            Err(e) => unsafe {
1000                // If this result was the last allocation in this arena, we can
1001                // reclaim its space. In fact, sometimes we can do even better
1002                // than simply calling `dealloc` on the result pointer: we can
1003                // reclaim any alignment padding we might have added (which
1004                // `dealloc` cannot do) if we didn't allocate a new chunk for
1005                // this result.
1006                if self.is_last_allocation(inner_result_ptr.cast()) {
1007                    let current_footer_p = self.current_chunk_footer.get();
1008                    let current_ptr = &current_footer_p.as_ref().ptr;
1009                    if current_footer_p == rewind_footer {
1010                        // It's still the same chunk, so reset the bump pointer
1011                        // to its original value upon entry to this method
1012                        // (reclaiming any alignment padding we may have
1013                        // added).
1014                        current_ptr.set(rewind_ptr);
1015                    } else {
1016                        // We allocated a new chunk for this result.
1017                        //
1018                        // We know the result is the only allocation in this
1019                        // chunk: Any additional allocations since the start of
1020                        // this method could only have happened when running
1021                        // the initializer function, which is called *after*
1022                        // reserving space for this result. Therefore, since we
1023                        // already determined via the check above that this
1024                        // result was the last allocation, there must not have
1025                        // been any other allocations, and this result is the
1026                        // only allocation in this chunk.
1027                        //
1028                        // Because this is the only allocation in this chunk,
1029                        // we can reset the chunk's bump finger to the start of
1030                        // the chunk.
1031                        current_ptr.set(current_footer_p.as_ref().data);
1032                    }
1033                }
1034                //SAFETY:
1035                // As we received `E` semantically by value from `f`, we can
1036                // just copy that value here as long as we avoid a double-drop
1037                // (which can't happen as any specific references to the `E`'s
1038                // data in `self` are destroyed when this function returns).
1039                //
1040                // The order between this and the deallocation doesn't matter
1041                // because `Self: !Sync`.
1042                Err(ptr::read(e as *const _))
1043            },
1044        }
1045    }
1046
    /// Tries to pre-allocate space for a [`Result`] in this `Bump`,
1048    /// initializes it using the closure, then returns an exclusive reference
1049    /// to its `T` if all [`Ok`].
1050    ///
1051    /// Iff the allocation fails, the closure is not run.
1052    ///
1053    /// Iff the closure returns [`Err`], an allocator rewind is *attempted* and
1054    /// the `E` instance is moved out of the allocator to be consumed or dropped
1055    /// as normal.
1056    ///
1057    /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
1058    /// discussion on the differences between the `_with` suffixed methods and
1059    /// those methods without it, their performance characteristics, and when
1060    /// you might or might not choose a `_with` suffixed method.
1061    ///
1062    /// For caveats specific to fallible initialization, see
1063    /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix).
1064    ///
1065    /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
1066    /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
1067    /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
1068    ///
1069    /// ## Errors
1070    ///
1071    /// Errors with the [`Alloc`](`AllocOrInitError::Alloc`) variant iff
1072    /// reserving space for `Result<T, E>` fails.
1073    ///
1074    /// Iff the allocation succeeds but `f` fails, that error is forwarded by
1075    /// value inside the [`Init`](`AllocOrInitError::Init`) variant.
1076    ///
1077    /// ## Example
1078    ///
1079    /// ```
1080    /// let bump = bumpalo::Bump::new();
1081    /// let x = bump.try_alloc_try_with(|| Ok("hello"))?;
1082    /// assert_eq!(*x, "hello");
1083    /// # Result::<_, bumpalo::AllocOrInitError<()>>::Ok(())
1084    /// ```
1085    #[inline(always)]
1086    pub fn try_alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, AllocOrInitError<E>>
1087    where
1088        F: FnOnce() -> Result<T, E>,
1089    {
1090        let rewind_footer = self.current_chunk_footer.get();
1091        let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
1092        let mut inner_result_ptr = NonNull::from(self.try_alloc_with(f)?);
1093        match unsafe { inner_result_ptr.as_mut() } {
1094            Ok(t) => Ok(unsafe {
1095                //SAFETY:
                // The `&mut Result<T, E>` returned by `try_alloc_with` may be
                // lifetime-limited by `E`, but the derived `&mut T` still has
                // the same validity as in `try_alloc_with` since the error variant
1099                // is already ruled out here.
1100
1101                // We could conditionally truncate the allocation here, but
1102                // since it grows backwards, it seems unlikely that we'd get
1103                // any more than the `Result`'s discriminant this way, if
1104                // anything at all.
1105                &mut *(t as *mut _)
1106            }),
1107            Err(e) => unsafe {
1108                // If this result was the last allocation in this arena, we can
1109                // reclaim its space. In fact, sometimes we can do even better
1110                // than simply calling `dealloc` on the result pointer: we can
1111                // reclaim any alignment padding we might have added (which
1112                // `dealloc` cannot do) if we didn't allocate a new chunk for
1113                // this result.
1114                if self.is_last_allocation(inner_result_ptr.cast()) {
1115                    let current_footer_p = self.current_chunk_footer.get();
1116                    let current_ptr = &current_footer_p.as_ref().ptr;
1117                    if current_footer_p == rewind_footer {
1118                        // It's still the same chunk, so reset the bump pointer
1119                        // to its original value upon entry to this method
1120                        // (reclaiming any alignment padding we may have
1121                        // added).
1122                        current_ptr.set(rewind_ptr);
1123                    } else {
1124                        // We allocated a new chunk for this result.
1125                        //
1126                        // We know the result is the only allocation in this
1127                        // chunk: Any additional allocations since the start of
1128                        // this method could only have happened when running
1129                        // the initializer function, which is called *after*
1130                        // reserving space for this result. Therefore, since we
1131                        // already determined via the check above that this
1132                        // result was the last allocation, there must not have
1133                        // been any other allocations, and this result is the
1134                        // only allocation in this chunk.
1135                        //
1136                        // Because this is the only allocation in this chunk,
1137                        // we can reset the chunk's bump finger to the start of
1138                        // the chunk.
1139                        current_ptr.set(current_footer_p.as_ref().data);
1140                    }
1141                }
1142                //SAFETY:
1143                // As we received `E` semantically by value from `f`, we can
1144                // just copy that value here as long as we avoid a double-drop
1145                // (which can't happen as any specific references to the `E`'s
1146                // data in `self` are destroyed when this function returns).
1147                //
1148                // The order between this and the deallocation doesn't matter
1149                // because `Self: !Sync`.
1150                Err(AllocOrInitError::Init(ptr::read(e as *const _)))
1151            },
1152        }
1153    }
1154
1155    /// `Copy` a slice into this `Bump` and return an exclusive reference to
1156    /// the copy.
1157    ///
1158    /// ## Panics
1159    ///
1160    /// Panics if reserving space for the slice fails.
1161    ///
1162    /// ## Example
1163    ///
1164    /// ```
1165    /// let bump = bumpalo::Bump::new();
1166    /// let x = bump.alloc_slice_copy(&[1, 2, 3]);
1167    /// assert_eq!(x, &[1, 2, 3]);
1168    /// ```
1169    #[inline(always)]
1170    pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T]
1171    where
1172        T: Copy,
1173    {
1174        let layout = Layout::for_value(src);
1175        let dst = self.alloc_layout(layout).cast::<T>();
1176
1177        unsafe {
1178            ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
1179            slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1180        }
1181    }
1182
1183    /// Like `alloc_slice_copy`, but does not panic in case of allocation failure.
1184    ///
1185    /// ## Example
1186    ///
1187    /// ```
1188    /// let bump = bumpalo::Bump::new();
1189    /// let x = bump.try_alloc_slice_copy(&[1, 2, 3]);
1190    /// assert_eq!(x, Ok(&mut[1, 2, 3] as &mut [_]));
1191    ///
1192    ///
1193    /// let bump = bumpalo::Bump::new();
1194    /// bump.set_allocation_limit(Some(4));
1195    /// let x = bump.try_alloc_slice_copy(&[1, 2, 3, 4, 5, 6]);
1196    /// assert_eq!(x, Err(bumpalo::AllocErr)); // too big
1197    /// ```
1198    #[inline(always)]
1199    pub fn try_alloc_slice_copy<T>(&self, src: &[T]) -> Result<&mut [T], AllocErr>
1200    where
1201        T: Copy,
1202    {
1203        let layout = Layout::for_value(src);
1204        let dst = self.try_alloc_layout(layout)?.cast::<T>();
1205        let result = unsafe {
1206            core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
1207            slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1208        };
1209        Ok(result)
1210    }
1211
1212    /// `Clone` a slice into this `Bump` and return an exclusive reference to
1213    /// the clone. Prefer [`alloc_slice_copy`](#method.alloc_slice_copy) if `T` is `Copy`.
1214    ///
1215    /// ## Panics
1216    ///
1217    /// Panics if reserving space for the slice fails.
1218    ///
1219    /// ## Example
1220    ///
1221    /// ```
1222    /// #[derive(Clone, Debug, Eq, PartialEq)]
1223    /// struct Sheep {
1224    ///     name: String,
1225    /// }
1226    ///
1227    /// let originals = [
1228    ///     Sheep { name: "Alice".into() },
1229    ///     Sheep { name: "Bob".into() },
1230    ///     Sheep { name: "Cathy".into() },
1231    /// ];
1232    ///
1233    /// let bump = bumpalo::Bump::new();
1234    /// let clones = bump.alloc_slice_clone(&originals);
1235    /// assert_eq!(originals, clones);
1236    /// ```
1237    #[inline(always)]
1238    pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T]
1239    where
1240        T: Clone,
1241    {
1242        let layout = Layout::for_value(src);
1243        let dst = self.alloc_layout(layout).cast::<T>();
1244
1245        unsafe {
1246            for (i, val) in src.iter().cloned().enumerate() {
1247                ptr::write(dst.as_ptr().add(i), val);
1248            }
1249
1250            slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1251        }
1252    }
1253
1254    /// Like `alloc_slice_clone` but does not panic on failure.
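    ///
    /// ## Example
    ///
    /// A short example, mirroring [`alloc_slice_clone`](#method.alloc_slice_clone):
    ///
    /// ```
    /// let bump = bumpalo::Bump::new();
    /// let originals = [String::from("alice"), String::from("bob")];
    /// let clones = bump.try_alloc_slice_clone(&originals).unwrap();
    /// assert_eq!(originals, clones);
    /// ```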
1255    #[inline(always)]
1256    pub fn try_alloc_slice_clone<T>(&self, src: &[T]) -> Result<&mut [T], AllocErr>
1257    where
1258        T: Clone,
1259    {
1260        let layout = Layout::for_value(src);
1261        let dst = self.try_alloc_layout(layout)?.cast::<T>();
1262
1263        unsafe {
1264            for (i, val) in src.iter().cloned().enumerate() {
1265                ptr::write(dst.as_ptr().add(i), val);
1266            }
1267
1268            Ok(slice::from_raw_parts_mut(dst.as_ptr(), src.len()))
1269        }
1270    }
1271
1272    /// `Copy` a string slice into this `Bump` and return an exclusive reference to it.
1273    ///
1274    /// ## Panics
1275    ///
1276    /// Panics if reserving space for the string fails.
1277    ///
1278    /// ## Example
1279    ///
1280    /// ```
1281    /// let bump = bumpalo::Bump::new();
1282    /// let hello = bump.alloc_str("hello world");
1283    /// assert_eq!("hello world", hello);
1284    /// ```
1285    #[inline(always)]
1286    pub fn alloc_str(&self, src: &str) -> &mut str {
1287        let buffer = self.alloc_slice_copy(src.as_bytes());
1288        unsafe {
1289            // This is OK, because it already came in as str, so it is guaranteed to be utf8
1290            str::from_utf8_unchecked_mut(buffer)
1291        }
1292    }
1293
1294    /// Same as `alloc_str` but does not panic on failure.
1295    ///
1296    /// ## Example
1297    ///
1298    /// ```
1299    /// let bump = bumpalo::Bump::new();
1300    /// let hello = bump.try_alloc_str("hello world").unwrap();
1301    /// assert_eq!("hello world", hello);
1302    ///
1303    ///
1304    /// let bump = bumpalo::Bump::new();
1305    /// bump.set_allocation_limit(Some(5));
1306    /// let hello = bump.try_alloc_str("hello world");
1307    /// assert_eq!(Err(bumpalo::AllocErr), hello);
1308    /// ```
1309    #[inline(always)]
1310    pub fn try_alloc_str(&self, src: &str) -> Result<&mut str, AllocErr> {
1311        let buffer = self.try_alloc_slice_copy(src.as_bytes())?;
1312        unsafe {
1313            // This is OK, because it already came in as str, so it is guaranteed to be utf8
1314            Ok(str::from_utf8_unchecked_mut(buffer))
1315        }
1316    }
1317
1318    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
1320    ///
1321    /// The elements of the slice are initialized using the supplied closure.
1322    /// The closure argument is the position in the slice.
1323    ///
1324    /// ## Panics
1325    ///
1326    /// Panics if reserving space for the slice fails.
1327    ///
1328    /// ## Example
1329    ///
1330    /// ```
1331    /// let bump = bumpalo::Bump::new();
1332    /// let x = bump.alloc_slice_fill_with(5, |i| 5 * (i + 1));
1333    /// assert_eq!(x, &[5, 10, 15, 20, 25]);
1334    /// ```
1335    #[inline(always)]
1336    pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T]
1337    where
1338        F: FnMut(usize) -> T,
1339    {
1340        let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
1341        let dst = self.alloc_layout(layout).cast::<T>();
1342
1343        unsafe {
1344            for i in 0..len {
1345                ptr::write(dst.as_ptr().add(i), f(i));
1346            }
1347
1348            let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
1349            debug_assert_eq!(Layout::for_value(result), layout);
1350            result
1351        }
1352    }
1353
1354    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
1356    ///
1357    /// The elements of the slice are initialized using the supplied closure.
1358    /// The closure argument is the position in the slice.
1359    ///
1360    /// ## Example
1361    ///
1362    /// ```
1363    /// let bump = bumpalo::Bump::new();
1364    /// let x = bump.try_alloc_slice_fill_with(5, |i| 5 * (i + 1));
1365    /// assert_eq!(x, Ok(&mut[5usize, 10, 15, 20, 25] as &mut [_]));
1366    ///
1367    ///
1368    /// let bump = bumpalo::Bump::new();
1369    /// bump.set_allocation_limit(Some(4));
1370    /// let x = bump.try_alloc_slice_fill_with(10, |i| 5 * (i + 1));
1371    /// assert_eq!(x, Err(bumpalo::AllocErr));
1372    /// ```
1373    #[inline(always)]
1374    pub fn try_alloc_slice_fill_with<T, F>(
1375        &self,
1376        len: usize,
1377        mut f: F,
1378    ) -> Result<&mut [T], AllocErr>
1379    where
1380        F: FnMut(usize) -> T,
1381    {
1382        let layout = Layout::array::<T>(len).map_err(|_| AllocErr)?;
1383        let dst = self.try_alloc_layout(layout)?.cast::<T>();
1384
1385        unsafe {
1386            for i in 0..len {
1387                ptr::write(dst.as_ptr().add(i), f(i));
1388            }
1389
1390            let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
1391            debug_assert_eq!(Layout::for_value(result), layout);
1392            Ok(result)
1393        }
1394    }
1395
1396    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
1398    ///
1399    /// All elements of the slice are initialized to `value`.
1400    ///
1401    /// ## Panics
1402    ///
1403    /// Panics if reserving space for the slice fails.
1404    ///
1405    /// ## Example
1406    ///
1407    /// ```
1408    /// let bump = bumpalo::Bump::new();
1409    /// let x = bump.alloc_slice_fill_copy(5, 42);
1410    /// assert_eq!(x, &[42, 42, 42, 42, 42]);
1411    /// ```
1412    #[inline(always)]
1413    pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
1414        self.alloc_slice_fill_with(len, |_| value)
1415    }
1416
1417    /// Same as `alloc_slice_fill_copy` but does not panic on failure.
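    ///
    /// ## Example
    ///
    /// A short example, mirroring [`alloc_slice_fill_copy`](#method.alloc_slice_fill_copy):
    ///
    /// ```
    /// let bump = bumpalo::Bump::new();
    /// let x = bump.try_alloc_slice_fill_copy(5, 42);
    /// assert_eq!(x, Ok(&mut [42, 42, 42, 42, 42] as &mut [_]));
    /// ```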
1418    #[inline(always)]
1419    pub fn try_alloc_slice_fill_copy<T: Copy>(
1420        &self,
1421        len: usize,
1422        value: T,
1423    ) -> Result<&mut [T], AllocErr> {
1424        self.try_alloc_slice_fill_with(len, |_| value)
1425    }
1426
    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
1429    ///
1430    /// All elements of the slice are initialized to `value.clone()`.
1431    ///
1432    /// ## Panics
1433    ///
1434    /// Panics if reserving space for the slice fails.
1435    ///
1436    /// ## Example
1437    ///
1438    /// ```
1439    /// let bump = bumpalo::Bump::new();
1440    /// let s: String = "Hello Bump!".to_string();
1441    /// let x: &[String] = bump.alloc_slice_fill_clone(2, &s);
1442    /// assert_eq!(x.len(), 2);
1443    /// assert_eq!(&x[0], &s);
1444    /// assert_eq!(&x[1], &s);
1445    /// ```
1446    #[inline(always)]
1447    pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] {
1448        self.alloc_slice_fill_with(len, |_| value.clone())
1449    }
1450
1451    /// Like `alloc_slice_fill_clone` but does not panic on failure.
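    ///
    /// ## Example
    ///
    /// A minimal sketch, mirroring the `alloc_slice_fill_clone` example but
    /// going through the fallible API:
    ///
    /// ```
    /// let bump = bumpalo::Bump::new();
    /// let s: String = "Hello Bump!".to_string();
    /// let x: &[String] = bump.try_alloc_slice_fill_clone(2, &s).unwrap();
    /// assert_eq!(x.len(), 2);
    /// assert_eq!(&x[0], &s);
    /// assert_eq!(&x[1], &s);
    /// ```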
1452    #[inline(always)]
1453    pub fn try_alloc_slice_fill_clone<T: Clone>(
1454        &self,
1455        len: usize,
1456        value: &T,
1457    ) -> Result<&mut [T], AllocErr> {
1458        self.try_alloc_slice_fill_with(len, |_| value.clone())
1459    }
1460
1461    /// Allocates a new slice of size `iter.len()` into this `Bump` and returns an
1462    /// exclusive reference to it.
1463    ///
1464    /// The elements are initialized using the supplied iterator.
1465    ///
1466    /// ## Panics
1467    ///
1468    /// Panics if reserving space for the slice fails, or if the supplied
1469    /// iterator returns fewer elements than it promised.
1470    ///
1471    /// ## Example
1472    ///
1473    /// ```
1474    /// let bump = bumpalo::Bump::new();
1475    /// let x: &[i32] = bump.alloc_slice_fill_iter([2, 3, 5].iter().cloned().map(|i| i * i));
1476    /// assert_eq!(x, [4, 9, 25]);
1477    /// ```
1478    #[inline(always)]
1479    pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T]
1480    where
1481        I: IntoIterator<Item = T>,
1482        I::IntoIter: ExactSizeIterator,
1483    {
1484        let mut iter = iter.into_iter();
1485        self.alloc_slice_fill_with(iter.len(), |_| {
1486            iter.next().expect("Iterator supplied too few elements")
1487        })
1488    }
1489
1490    /// Allocates a new slice of size `iter.len()` into this `Bump` and returns an
1491    /// exclusive reference to it. Does not panic on allocation failure.
1492    ///
1493    /// The elements are initialized using the supplied iterator.
1494    ///
1495    /// ## Example
1496    ///
1497    /// ```
1498    /// let bump = bumpalo::Bump::new();
1499    /// let x: &[i32] = bump.try_alloc_slice_fill_iter([2, 3, 5]
1500    ///     .iter().cloned().map(|i| i * i)).unwrap();
1501    /// assert_eq!(x, [4, 9, 25]);
1502    /// ```
1503    #[inline(always)]
1504    pub fn try_alloc_slice_fill_iter<T, I>(&self, iter: I) -> Result<&mut [T], AllocErr>
1505    where
1506        I: IntoIterator<Item = T>,
1507        I::IntoIter: ExactSizeIterator,
1508    {
1509        let mut iter = iter.into_iter();
1510        self.try_alloc_slice_fill_with(iter.len(), |_| {
1511            iter.next().expect("Iterator supplied too few elements")
1512        })
1513    }
1514
1515    /// Allocates a new slice of size `len` into this `Bump` and returns an
1516    /// exclusive reference to it.
1517    ///
1518    /// All elements of the slice are initialized to [`T::default()`].
1519    ///
1520    /// [`T::default()`]: https://doc.rust-lang.org/std/default/trait.Default.html#tymethod.default
1521    ///
1522    /// ## Panics
1523    ///
1524    /// Panics if reserving space for the slice fails.
1525    ///
1526    /// ## Example
1527    ///
1528    /// ```
1529    /// let bump = bumpalo::Bump::new();
1530    /// let x = bump.alloc_slice_fill_default::<u32>(5);
1531    /// assert_eq!(x, &[0, 0, 0, 0, 0]);
1532    /// ```
1533    #[inline(always)]
1534    pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] {
1535        self.alloc_slice_fill_with(len, |_| T::default())
1536    }
1537
1538    /// Like `alloc_slice_fill_default` but does not panic on failure.
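    ///
    /// ## Example
    ///
    /// A minimal sketch, mirroring the `alloc_slice_fill_default` example but
    /// going through the fallible API:
    ///
    /// ```
    /// let bump = bumpalo::Bump::new();
    /// let x = bump.try_alloc_slice_fill_default::<u32>(5);
    /// assert_eq!(x, Ok(&mut [0u32, 0, 0, 0, 0] as &mut [_]));
    /// ```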
1539    #[inline(always)]
1540    pub fn try_alloc_slice_fill_default<T: Default>(
1541        &self,
1542        len: usize,
1543    ) -> Result<&mut [T], AllocErr> {
1544        self.try_alloc_slice_fill_with(len, |_| T::default())
1545    }
1546
1547    /// Allocate space for an object with the given `Layout`.
1548    ///
1549    /// The returned pointer points at uninitialized memory, and should be
1550    /// initialized with
1551    /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html).
1552    ///
1553    /// # Panics
1554    ///
1555    /// Panics if reserving space matching `layout` fails.
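    ///
    /// # Example
    ///
    /// A minimal sketch of pairing `alloc_layout` with `ptr::write` to
    /// initialize the returned memory:
    ///
    /// ```
    /// use core::alloc::Layout;
    ///
    /// let bump = bumpalo::Bump::new();
    /// let p = bump.alloc_layout(Layout::new::<u64>());
    ///
    /// unsafe {
    ///     // The pointer is aligned for `u64` but uninitialized, so write
    ///     // before reading.
    ///     let p = p.cast::<u64>();
    ///     p.as_ptr().write(42);
    ///     assert_eq!(*p.as_ptr(), 42);
    /// }
    /// ```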
1556    #[inline(always)]
1557    pub fn alloc_layout(&self, layout: Layout) -> NonNull<u8> {
1558        self.try_alloc_layout(layout).unwrap_or_else(|_| oom())
1559    }
1560
1561    /// Attempts to allocate space for an object with the given `Layout` or else returns
1562    /// an `Err`.
1563    ///
1564    /// The returned pointer points at uninitialized memory, and should be
1565    /// initialized with
1566    /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html).
1567    ///
1568    /// # Errors
1569    ///
1570    /// Errors if reserving space matching `layout` fails.
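    ///
    /// # Example
    ///
    /// A minimal sketch; the same flow as `alloc_layout`, but surfacing
    /// failure as an `Err` instead of panicking:
    ///
    /// ```
    /// use core::alloc::Layout;
    ///
    /// let bump = bumpalo::Bump::new();
    ///
    /// match bump.try_alloc_layout(Layout::new::<u64>()) {
    ///     Ok(p) => unsafe {
    ///         // Initialize the uninitialized memory before use.
    ///         p.cast::<u64>().as_ptr().write(42);
    ///     },
    ///     Err(bumpalo::AllocErr) => { /* fall back or propagate the failure */ }
    /// }
    /// ```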
1571    #[inline(always)]
1572    pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
1573        if let Some(p) = self.try_alloc_layout_fast(layout) {
1574            Ok(p)
1575        } else {
1576            self.alloc_layout_slow(layout).ok_or(AllocErr)
1577        }
1578    }
1579
1580    #[inline(always)]
1581    fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> {
1582        // We don't need to check for ZSTs here since they will automatically
1583        // be handled properly: the pointer will be bumped by zero bytes,
1584        // modulo alignment. This keeps the fast path optimized for non-ZSTs,
1585        // which are much more common.
1586        unsafe {
1587            let footer = self.current_chunk_footer.get();
1588            let footer = footer.as_ref();
1589            let ptr = footer.ptr.get().as_ptr();
1590            let start = footer.data.as_ptr();
1591            debug_assert!(start <= ptr);
1592            debug_assert!(ptr as *const u8 <= footer as *const _ as *const u8);
1593
1594            if (ptr as usize) < layout.size() {
1595                return None;
1596            }
1597
1598            let ptr = ptr.wrapping_sub(layout.size());
1599            let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align());
1600
1601            if aligned_ptr >= start {
1602                let aligned_ptr = NonNull::new_unchecked(aligned_ptr);
1603                footer.ptr.set(aligned_ptr);
1604                Some(aligned_ptr)
1605            } else {
1606                None
1607            }
1608        }
1609    }
1610
1611    /// Gets the remaining capacity in the current chunk (in bytes).
1612    ///
1613    /// ## Example
1614    ///
1615    /// ```
1616    /// use bumpalo::Bump;
1617    ///
1618    /// let bump = Bump::with_capacity(100);
1619    ///
1620    /// let capacity = bump.chunk_capacity();
1621    /// assert!(capacity >= 100);
1622    /// ```
1623    pub fn chunk_capacity(&self) -> usize {
1624        let current_footer = self.current_chunk_footer.get();
1625        let current_footer = unsafe { current_footer.as_ref() };
1626
1627        current_footer.ptr.get().as_ptr() as usize - current_footer.data.as_ptr() as usize
1628    }
1629
1630    /// Slow path allocation for when we need to allocate a new chunk from the
1631    /// global allocator because there isn't enough room in our current chunk.
1632    #[inline(never)]
1633    #[cold]
1634    fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> {
1635        unsafe {
1636            let size = layout.size();
1637            let allocation_limit_remaining = self.allocation_limit_remaining();
1638
1639            // Get a new chunk from the global allocator.
1640            let current_footer = self.current_chunk_footer.get();
1641            let current_layout = current_footer.as_ref().layout;
1642
1643            // By default, we want our new chunk to be about twice as big
1644            // as the previous chunk. If the global allocator refuses it,
1645            // we halve the requested size until an allocation succeeds or
1646            // the size drops below the minimum new chunk size.
1647            let min_new_chunk_size = layout.size().max(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
1648            let mut base_size = (current_layout.size() - FOOTER_SIZE)
1649                .checked_mul(2)?
1650                .max(min_new_chunk_size);
1651            let chunk_memory_details = iter::from_fn(|| {
1652                let bypass_min_chunk_size_for_small_limits = matches!(self.allocation_limit(), Some(limit) if layout.size() < limit
1653                            && base_size >= layout.size()
1654                            && limit < DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER
1655                            && self.allocated_bytes() == 0);
1656
1657                if base_size >= min_new_chunk_size || bypass_min_chunk_size_for_small_limits {
1658                    let size = base_size;
1659                    base_size /= 2;
1660                    Bump::new_chunk_memory_details(Some(size), layout)
1661                } else {
1662                    None
1663                }
1664            });
1665
1666            let new_footer = chunk_memory_details
1667                .filter_map(|chunk_memory_details| {
1668                    if Bump::chunk_fits_under_limit(
1669                        allocation_limit_remaining,
1670                        chunk_memory_details,
1671                    ) {
1672                        Bump::new_chunk(chunk_memory_details, layout, current_footer)
1673                    } else {
1674                        None
1675                    }
1676                })
1677                .next()?;
1678
1679            debug_assert_eq!(
1680                new_footer.as_ref().data.as_ptr() as usize % layout.align(),
1681                0
1682            );
1683
1684            // Set the new chunk as our new current chunk.
1685            self.current_chunk_footer.set(new_footer);
1686
1687            let new_footer = new_footer.as_ref();
1688
1689            // Move the bump pointer down to make room for the requested layout.
1690            // We know this can't go out of bounds because we successfully
1691            // allocated a chunk of at least the requested size.
1692            let mut ptr = new_footer.ptr.get().as_ptr().sub(size);
1693            // Round the pointer down to the requested alignment.
1694            ptr = round_mut_ptr_down_to(ptr, layout.align());
1695            debug_assert!(
1696                ptr as *const _ <= new_footer,
1697                "{:p} <= {:p}",
1698                ptr,
1699                new_footer
1700            );
1701            let ptr = NonNull::new_unchecked(ptr);
1702            new_footer.ptr.set(ptr);
1703
1704            // Return a pointer to the freshly allocated region in this chunk.
1705            Some(ptr)
1706        }
1707    }
1708
1709    /// Returns an iterator over each chunk of allocated memory that
1710    /// this arena has bump allocated into.
1711    ///
1712    /// The chunks are returned ordered by allocation time, with the most
1713    /// recently allocated chunk being returned first, and the least recently
1714    /// allocated chunk being returned last.
1715    ///
1716    /// The values inside each chunk are also ordered by allocation time, with
1717    /// the most recent allocation being earlier in the slice, and the least
1718    /// recent allocation being towards the end of the slice.
1719    ///
1720    /// ## Safety
1721    ///
1722    /// Because this method takes `&mut self`, we know that the bump arena
1723    /// reference is unique and therefore there aren't any active references to
1724    /// any of the objects we've allocated in it either. This potential aliasing
1725    /// of exclusive references is one common footgun for unsafe code that we
1726    /// don't need to worry about here.
1727    ///
1728    /// However, there could be regions of uninitialized memory used as padding
1729    /// between allocations, which is why this iterator has items of type
1730    /// `[MaybeUninit<u8>]`, instead of simply `[u8]`.
1731    ///
1732    /// The only way to guarantee that there is no padding between allocations
1733    /// or within allocated objects is if all of these properties hold:
1734    ///
1735    /// 1. Every object allocated in this arena has the same alignment,
1736    ///    and that alignment is at most 16.
1737    /// 2. Every object's size is a multiple of its alignment.
1738    /// 3. None of the objects allocated in this arena contain any internal
1739    ///    padding.
1740    ///
1741    /// If you want to use this `iter_allocated_chunks` method, it is *your*
1742    /// responsibility to ensure that these properties hold before calling
1743    /// `MaybeUninit::assume_init` or otherwise reading the returned values.
1744    ///
1745    /// Finally, you must also ensure that any values allocated into the bump
1746    /// arena have not had their `Drop` implementations called on them,
1747    /// e.g. after dropping a [`bumpalo::boxed::Box<T>`][crate::boxed::Box].
1748    ///
1749    /// ## Example
1750    ///
1751    /// ```
1752    /// let mut bump = bumpalo::Bump::new();
1753    ///
1754    /// // Allocate a bunch of `i32`s in this bump arena, potentially causing
1755    /// // additional memory chunks to be reserved.
1756    /// for i in 0..10000 {
1757    ///     bump.alloc(i);
1758    /// }
1759    ///
1760    /// // Iterate over each chunk we've bump allocated into. This is safe
1761    /// // because we have only allocated `i32`s in this arena, which fulfills
1762    /// // the above requirements.
1763    /// for ch in bump.iter_allocated_chunks() {
1764    ///     println!("Used a chunk that is {} bytes long", ch.len());
1765    ///     println!("The first byte is {:?}", unsafe {
1766    ///         ch[0].assume_init()
1767    ///     });
1768    /// }
1769    ///
1770    /// // Within a chunk, allocations are ordered from most recent to least
1771    /// // recent. If we allocated 'a', then 'b', then 'c', when we iterate
1772    /// // through the chunk's data, we get them in the order 'c', then 'b',
1773    /// // then 'a'.
1774    ///
1775    /// bump.reset();
1776    /// bump.alloc(b'a');
1777    /// bump.alloc(b'b');
1778    /// bump.alloc(b'c');
1779    ///
1780    /// assert_eq!(bump.iter_allocated_chunks().count(), 1);
1781    /// let chunk = bump.iter_allocated_chunks().nth(0).unwrap();
1782    /// assert_eq!(chunk.len(), 3);
1783    ///
1784    /// // Safe because we've only allocated `u8`s in this arena, which
1785    /// // fulfills the above requirements.
1786    /// unsafe {
1787    ///     assert_eq!(chunk[0].assume_init(), b'c');
1788    ///     assert_eq!(chunk[1].assume_init(), b'b');
1789    ///     assert_eq!(chunk[2].assume_init(), b'a');
1790    /// }
1791    /// ```
1792    pub fn iter_allocated_chunks(&mut self) -> ChunkIter<'_> {
1793        // SAFE: Ensured by mutable borrow of `self`.
1794        let raw = unsafe { self.iter_allocated_chunks_raw() };
1795        ChunkIter {
1796            raw,
1797            bump: PhantomData,
1798        }
1799    }
1800
1801    /// Returns an iterator over raw pointers to chunks of allocated memory that
1802    /// this arena has bump allocated into.
1803    ///
1804    /// This is an unsafe version of [`iter_allocated_chunks()`](Bump::iter_allocated_chunks),
1805    /// with the caller responsible for safe usage of the returned pointers as
1806    /// well as ensuring that the iterator is not invalidated by new
1807    /// allocations.
1808    ///
1809    /// ## Safety
1810    ///
1811    /// Allocations from this arena must not be performed while the returned
1812    /// iterator is alive. If reading the chunk data (or casting to a reference)
1813    /// the caller must ensure that there exist no mutable references to
1814    /// previously allocated data.
1815    ///
1816    /// In addition, all of the caveats when reading the chunk data from
1817    /// [`iter_allocated_chunks()`](Bump::iter_allocated_chunks) still apply.
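    ///
    /// ## Example
    ///
    /// A minimal sketch that only sums the chunk lengths and never reads the
    /// chunk contents, so the data-reading caveats above do not come into play:
    ///
    /// ```
    /// let bump = bumpalo::Bump::new();
    /// bump.alloc(1u32);
    /// bump.alloc(2u32);
    ///
    /// // Safety: no allocations are made while the iterator is alive, and the
    /// // chunk data itself is never dereferenced.
    /// let total: usize = unsafe {
    ///     bump.iter_allocated_chunks_raw().map(|(_ptr, len)| len).sum()
    /// };
    /// assert!(total >= 2 * core::mem::size_of::<u32>());
    /// ```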
1818    pub unsafe fn iter_allocated_chunks_raw(&self) -> ChunkRawIter<'_> {
1819        ChunkRawIter {
1820            footer: self.current_chunk_footer.get(),
1821            bump: PhantomData,
1822        }
1823    }
1824
1825    /// Calculates the number of bytes currently allocated across all chunks in
1826    /// this bump arena.
1827    ///
1828    /// If you allocate types of different alignments or types with
1829    /// larger-than-typical alignment in the same arena, some padding
1830    /// bytes might get allocated in the bump arena. Note that those padding
1831    /// bytes will add to this method's resulting sum, so you cannot rely
1832    /// on it only counting the sum of the sizes of the things
1833    /// you've allocated in the arena.
1834    ///
1835    /// The allocated bytes do not include the size of bumpalo's metadata,
1836    /// so the amount of memory requested from the Rust allocator is higher
1837    /// than the returned value.
1838    ///
1839    /// ## Example
1840    ///
1841    /// ```
1842    /// let bump = bumpalo::Bump::new();
1843    /// let _x = bump.alloc_slice_fill_default::<u32>(5);
1844    /// let bytes = bump.allocated_bytes();
1845    /// assert!(bytes >= core::mem::size_of::<u32>() * 5);
1846    /// ```
1847    pub fn allocated_bytes(&self) -> usize {
1848        let footer = self.current_chunk_footer.get();
1849
1850        unsafe { footer.as_ref().allocated_bytes }
1851    }
1852
1853    /// Calculates the number of bytes requested from the Rust allocator for this `Bump`.
1854    ///
1855    /// This number is equal to the [`allocated_bytes()`](Self::allocated_bytes) plus
1856    /// the size of the bump metadata.
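    ///
    /// ## Example
    ///
    /// A minimal sketch relating this method to
    /// [`allocated_bytes()`](Self::allocated_bytes):
    ///
    /// ```
    /// let bump = bumpalo::Bump::new();
    /// bump.alloc(0u64);
    ///
    /// // At least one chunk footer's worth of metadata is now accounted for.
    /// assert!(bump.allocated_bytes_including_metadata() > bump.allocated_bytes());
    /// ```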
1857    pub fn allocated_bytes_including_metadata(&self) -> usize {
1858        let metadata_size =
1859            unsafe { self.iter_allocated_chunks_raw().count() * mem::size_of::<ChunkFooter>() };
1860        self.allocated_bytes() + metadata_size
1861    }
1862
1863    #[inline]
1864    unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool {
1865        let footer = self.current_chunk_footer.get();
1866        let footer = footer.as_ref();
1867        footer.ptr.get() == ptr
1868    }
1869
1870    #[inline]
1871    unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
1872        // If the pointer is the last allocation we made, we can reuse the bytes,
1873        // otherwise they are simply leaked -- at least until somebody calls reset().
1874        if self.is_last_allocation(ptr) {
1875            let ptr = self.current_chunk_footer.get().as_ref().ptr.get();
1876            let ptr = NonNull::new_unchecked(ptr.as_ptr().add(layout.size()));
1877            self.current_chunk_footer.get().as_ref().ptr.set(ptr);
1878        }
1879    }
1880
1881    #[inline]
1882    unsafe fn shrink(
1883        &self,
1884        ptr: NonNull<u8>,
1885        old_layout: Layout,
1886        new_layout: Layout,
1887    ) -> Result<NonNull<u8>, AllocErr> {
1888        // If the new layout demands greater alignment than the old layout has,
1889        // then either
1890        //
1891        // 1. the pointer happens to satisfy the new layout's alignment, so we
1892        //    got lucky and can return the pointer as-is, or
1893        //
1894        // 2. the pointer is not aligned to the new layout's demanded alignment,
1895        //    and we are unlucky.
1896        //
1897        // In the case of (2), to successfully "shrink" the allocation, we would
1898        // have to allocate a whole new region for the new layout, without being
1899        // able to free the old region. That is unacceptable, so simply return
1900        // an allocation failure error instead.
1901        if old_layout.align() < new_layout.align() {
1902            if is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()) {
1903                return Ok(ptr);
1904            } else {
1905                return Err(AllocErr);
1906            }
1907        }
1908
1909        debug_assert!(is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()));
1910
1911        let old_size = old_layout.size();
1912        let new_size = new_layout.size();
1913
1914        // This is how much space we would *actually* reclaim while satisfying
1915        // the requested alignment.
1916        let delta = round_down_to(old_size - new_size, new_layout.align());
1917
1918        if self.is_last_allocation(ptr)
1919                // Only reclaim the excess space (which requires a copy) if it
1920                // is worth it: we are actually going to recover "enough" space
1921                // and we can do a non-overlapping copy.
1922                //
1923                // We do `(old_size + 1) / 2` so division rounds up rather than
1924                // down. Consider when:
1925                //
1926                //     old_size = 5
1927                //     new_size = 3
1928                //
1929                // If we do not take care to round up, this will result in:
1930                //
1931                //     delta = 2
1932                //     (old_size / 2) = (5 / 2) = 2
1933                //
1934                // And then the check would succeed even though we have
1935                // overlapping ranges:
1936                //
1937                //     |--------old-allocation-------|
1938                //     |------from-------|
1939                //                 |-------to--------|
1940                //     +-----+-----+-----+-----+-----+
1941                //     |  a  |  b  |  c  |  .  |  .  |
1942                //     +-----+-----+-----+-----+-----+
1943                //
1944                // But we MUST NOT have overlapping ranges because we use
1945                // `copy_nonoverlapping` below! Therefore, we round the division
1946                // up to avoid this issue.
1947                && delta >= (old_size + 1) / 2
1948        {
1949            let footer = self.current_chunk_footer.get();
1950            let footer = footer.as_ref();
1951
1952            // NB: new_ptr is aligned, because ptr *has to* be aligned, and we
1953            // made sure delta is aligned.
1954            let new_ptr = NonNull::new_unchecked(footer.ptr.get().as_ptr().add(delta));
1955            footer.ptr.set(new_ptr);
1956
1957            // NB: we know it is non-overlapping because of the size check
1958            // in the `if` condition.
1959            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size);
1960
1961            return Ok(new_ptr);
1962        }
1963
1964        // If this wasn't the last allocation, or shrinking wasn't worth it,
1965        // simply return the old pointer as-is.
1966        Ok(ptr)
1967    }
1968
1969    #[inline]
1970    unsafe fn grow(
1971        &self,
1972        ptr: NonNull<u8>,
1973        old_layout: Layout,
1974        new_layout: Layout,
1975    ) -> Result<NonNull<u8>, AllocErr> {
1976        let old_size = old_layout.size();
1977        let new_size = new_layout.size();
1978        let align_is_compatible = old_layout.align() >= new_layout.align();
1979
1980        if align_is_compatible && self.is_last_allocation(ptr) {
1981            // Try to allocate the delta size within this same block so we can
1982            // reuse the currently allocated space.
1983            let delta = new_size - old_size;
1984            if let Some(p) =
1985                self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align())?)
1986            {
1987                ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size);
1988                return Ok(p);
1989            }
1990        }
1991
1992        // Fallback: do a fresh allocation and copy the existing data into it.
1993        let new_ptr = self.try_alloc_layout(new_layout)?;
1994        ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size);
1995        Ok(new_ptr)
1996    }
1997}
1998
1999/// An iterator over each chunk of allocated memory that
2000/// an arena has bump allocated into.
2001///
2002/// The chunks are returned ordered by allocation time, with the most recently
2003/// allocated chunk being returned first.
2004///
2005/// The values inside each chunk are also ordered by allocation time, with the most
2006/// recent allocation being earlier in the slice.
2007///
2008/// This struct is created by the [`iter_allocated_chunks`] method on
2009/// [`Bump`]. See that function for a safety description regarding reading from the returned items.
2010///
2011/// [`Bump`]: struct.Bump.html
2012/// [`iter_allocated_chunks`]: struct.Bump.html#method.iter_allocated_chunks
2013#[derive(Debug)]
2014pub struct ChunkIter<'a> {
2015    raw: ChunkRawIter<'a>,
2016    bump: PhantomData<&'a mut Bump>,
2017}
2018
2019impl<'a> Iterator for ChunkIter<'a> {
2020    type Item = &'a [mem::MaybeUninit<u8>];
2021    fn next(&mut self) -> Option<&'a [mem::MaybeUninit<u8>]> {
2022        unsafe {
2023            let (ptr, len) = self.raw.next()?;
2024            let slice = slice::from_raw_parts(ptr as *const mem::MaybeUninit<u8>, len);
2025            Some(slice)
2026        }
2027    }
2028}
2029
2030impl<'a> iter::FusedIterator for ChunkIter<'a> {}
2031
2032/// An iterator over raw pointers to chunks of allocated memory that this
2033/// arena has bump allocated into.
2034///
2035/// See [`ChunkIter`] for details regarding the returned chunks.
2036///
2037/// This struct is created by the [`iter_allocated_chunks_raw`] method on
2038/// [`Bump`]. See that function for a safety description regarding reading from
2039/// the returned items.
2040///
2041/// [`Bump`]: struct.Bump.html
2042/// [`iter_allocated_chunks_raw`]: struct.Bump.html#method.iter_allocated_chunks_raw
2043#[derive(Debug)]
2044pub struct ChunkRawIter<'a> {
2045    footer: NonNull<ChunkFooter>,
2046    bump: PhantomData<&'a Bump>,
2047}
2048
2049impl Iterator for ChunkRawIter<'_> {
2050    type Item = (*mut u8, usize);
2051    fn next(&mut self) -> Option<(*mut u8, usize)> {
2052        unsafe {
2053            let foot = self.footer.as_ref();
2054            if foot.is_empty() {
2055                return None;
2056            }
2057            let (ptr, len) = foot.as_raw_parts();
2058            self.footer = foot.prev.get();
2059            Some((ptr as *mut u8, len))
2060        }
2061    }
2062}
2063
2064impl iter::FusedIterator for ChunkRawIter<'_> {}
2065
2066#[inline(never)]
2067#[cold]
2068fn oom() -> ! {
2069    panic!("out of memory")
2070}
2071
2072unsafe impl<'a> alloc::Alloc for &'a Bump {
2073    #[inline(always)]
2074    unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
2075        self.try_alloc_layout(layout)
2076    }
2077
2078    #[inline]
2079    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
2080        Bump::dealloc(self, ptr, layout);
2081    }
2082
2083    #[inline]
2084    unsafe fn realloc(
2085        &mut self,
2086        ptr: NonNull<u8>,
2087        layout: Layout,
2088        new_size: usize,
2089    ) -> Result<NonNull<u8>, AllocErr> {
2090        let old_size = layout.size();
2091
2092        if old_size == 0 {
2093            return self.try_alloc_layout(layout_from_size_align(new_size, layout.align())?);
2094        }
2095
2096        let new_layout = layout_from_size_align(new_size, layout.align())?;
2097        if new_size <= old_size {
2098            self.shrink(ptr, layout, new_layout)
2099        } else {
2100            self.grow(ptr, layout, new_layout)
2101        }
2102    }
2103}
2104
2105#[cfg(any(feature = "allocator_api", feature = "allocator-api2"))]
2106unsafe impl<'a> Allocator for &'a Bump {
2107    #[inline]
2108    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
2109        self.try_alloc_layout(layout)
2110            .map(|p| unsafe {
2111                NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), layout.size()))
2112            })
2113            .map_err(|_| AllocError)
2114    }
2115
2116    #[inline]
2117    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
2118        Bump::dealloc(self, ptr, layout)
2119    }
2120
2121    #[inline]
2122    unsafe fn shrink(
2123        &self,
2124        ptr: NonNull<u8>,
2125        old_layout: Layout,
2126        new_layout: Layout,
2127    ) -> Result<NonNull<[u8]>, AllocError> {
2128        Bump::shrink(self, ptr, old_layout, new_layout)
2129            .map(|p| unsafe {
2130                NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
2131            })
2132            .map_err(|_| AllocError)
2133    }
2134
2135    #[inline]
2136    unsafe fn grow(
2137        &self,
2138        ptr: NonNull<u8>,
2139        old_layout: Layout,
2140        new_layout: Layout,
2141    ) -> Result<NonNull<[u8]>, AllocError> {
2142        Bump::grow(self, ptr, old_layout, new_layout)
2143            .map(|p| unsafe {
2144                NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
2145            })
2146            .map_err(|_| AllocError)
2147    }
2148
2149    #[inline]
2150    unsafe fn grow_zeroed(
2151        &self,
2152        ptr: NonNull<u8>,
2153        old_layout: Layout,
2154        new_layout: Layout,
2155    ) -> Result<NonNull<[u8]>, AllocError> {
2156        let mut ptr = self.grow(ptr, old_layout, new_layout)?;
2157        ptr.as_mut()[old_layout.size()..].fill(0);
2158        Ok(ptr)
2159    }
2160}
2161
2162// NB: Only tests which require private types, fields, or methods should be in
2163// here. Anything that can just be tested via public API surface should be in
2164// `bumpalo/tests/all/*`.
2165#[cfg(test)]
2166mod tests {
2167    use super::*;
2168
2169    // Uses private type `ChunkFooter`.
2170    #[test]
2171    fn chunk_footer_is_six_words() {
2172        assert_eq!(mem::size_of::<ChunkFooter>(), mem::size_of::<usize>() * 6);
2173    }
2174
2175    // Uses private `alloc` module.
2176    #[test]
2177    fn test_realloc() {
2178        use crate::alloc::Alloc;
2179
2180        unsafe {
2181            const CAPACITY: usize = 1024 - OVERHEAD;
2182            let mut b = Bump::with_capacity(CAPACITY);
2183
2184            // `realloc` doesn't shrink allocations that aren't "worth it".
2185            let layout = Layout::from_size_align(100, 1).unwrap();
2186            let p = b.alloc_layout(layout);
2187            let q = (&b).realloc(p, layout, 51).unwrap();
2188            assert_eq!(p, q);
2189            b.reset();
2190
2191            // `realloc` will shrink allocations that are "worth it".
2192            let layout = Layout::from_size_align(100, 1).unwrap();
2193            let p = b.alloc_layout(layout);
2194            let q = (&b).realloc(p, layout, 50).unwrap();
2195            assert!(p != q);
2196            b.reset();
2197
2198            // `realloc` will reuse the last allocation when growing.
2199            let layout = Layout::from_size_align(10, 1).unwrap();
2200            let p = b.alloc_layout(layout);
2201            let q = (&b).realloc(p, layout, 11).unwrap();
2202            assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - 1);
2203            b.reset();
2204
2205            // `realloc` will allocate a new chunk when growing the last
2206            // allocation, if need be.
2207            let layout = Layout::from_size_align(1, 1).unwrap();
2208            let p = b.alloc_layout(layout);
2209            let q = (&b).realloc(p, layout, CAPACITY + 1).unwrap();
2210            assert!(q.as_ptr() as usize != p.as_ptr() as usize - CAPACITY);
2211            b = Bump::with_capacity(CAPACITY);
2212
2213            // `realloc` will allocate and copy when reallocating anything that
2214            // wasn't the last allocation.
2215            let layout = Layout::from_size_align(1, 1).unwrap();
2216            let p = b.alloc_layout(layout);
2217            let _ = b.alloc_layout(layout);
2218            let q = (&b).realloc(p, layout, 2).unwrap();
2219            assert!(q.as_ptr() as usize != p.as_ptr() as usize - 1);
2220            b.reset();
2221        }
2222    }
2223
2224    // Uses our private `alloc` module.
2225    #[test]
2226    fn invalid_read() {
2227        use alloc::Alloc;
2228
2229        let mut b = &Bump::new();
2230
2231        unsafe {
2232            let l1 = Layout::from_size_align(12000, 4).unwrap();
2233            let p1 = Alloc::alloc(&mut b, l1).unwrap();
2234
2235            let l2 = Layout::from_size_align(1000, 4).unwrap();
2236            Alloc::alloc(&mut b, l2).unwrap();
2237
2238            let p1 = b.realloc(p1, l1, 24000).unwrap();
2239            let l3 = Layout::from_size_align(24000, 4).unwrap();
2240            b.realloc(p1, l3, 48000).unwrap();
2241        }
2242    }
2243}