indexmap/map/core.rs

//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any `Hash` properties of `K`.
//!
//! It's cleaner to separate them out, so the compiler checks that we are not
//! using `Hash` at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.

mod entry;

pub mod raw_entry_v1;

use hashbrown::hash_table;

use crate::vec::{self, Vec};
use crate::TryReserveError;
use core::mem;
use core::ops::RangeBounds;

use crate::util::simplify_range;
use crate::{Bucket, Equivalent, HashValue};

type Indices = hash_table::HashTable<usize>;
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};

/// Core of the map that does not depend on the hasher `S`
#[derive(Debug)]
pub(crate) struct IndexMapCore<K, V> {
    /// Indices mapping from an entry's hash to its position in `entries`.
    indices: Indices,
    /// A dense vec of entries, maintaining insertion order.
    entries: Entries<K, V>,
}

/// Mutable references to the parts of an `IndexMapCore`.
///
/// `HashTable::find_entry` takes hold of `&mut indices`, so we have to borrow our
/// `&mut entries` separately, and there's no way to get back to a `&mut IndexMapCore`. So this type
/// is used to implement methods on the split references, and `IndexMapCore` can also call those to
/// avoid duplication.
struct RefMut<'a, K, V> {
    indices: &'a mut Indices,
    entries: &'a mut Entries<K, V>,
}

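/// Return a hasher closure for the indices table that looks up an entry's
/// cached hash by its index in `entries`.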
#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

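/// Return an equality closure for the indices table that compares the lookup
/// `key` against the key stored at each candidate index.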
#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

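/// Remove the table slot holding `index` for the given hash.
///
/// Panics in debug builds if the index is not found.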
#[inline]
fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
        entry.remove();
    } else if cfg!(debug_assertions) {
        panic!("index not found");
    }
}

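/// Replace the table slot holding `old` with `new` for the given hash.
///
/// ***Panics*** if the old index is not found.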
#[inline]
fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
    let index = table
        .find_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

/// Inserts many entries into the indices table without reallocating,
/// and without regard for duplication.
///
/// ***Panics*** if there is not sufficient capacity already.
fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
    }
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        self.indices.clone_from(&other.indices);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.borrow_mut().reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> crate::Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }
}

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

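    /// Borrow the indices and entries as split mutable references (`RefMut`).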
    #[inline]
    fn borrow_mut(&mut self) -> RefMut<'_, K, V> {
        RefMut::new(&mut self.indices, &mut self.entries)
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

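    /// Shorten the map, keeping the first `len` entries and dropping the rest.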
    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

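    /// Remove the entries in `range`, returning a draining iterator over them.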
    #[track_caller]
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

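    /// Split the map into two at the given index, returning a new map
    /// containing the entries in `at..`.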
    #[track_caller]
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        let len = self.entries.len();
        assert!(
            at <= len,
            "index out of bounds: the len is {len} but the index is {at}. Expected index <= len"
        );

        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

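    /// Split off the entries after `range` into a new map, and return the
    /// entries within `range` as an owning iterator.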
    #[track_caller]
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        other.indices.clear();
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.borrow_mut().reserve_entries(additional);
        }
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

    /// Append a key-value pair to `entries`,
    /// *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.borrow_mut().reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

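    /// Insert a key-value pair, returning the entry's index along with the
    /// previous value if the key was already present.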
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Same as `insert_full`, except it also replaces the key
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().shift_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    #[inline]
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().shift_remove_index(index)
    }

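    /// Move the entry at `from` to `to`, shifting all entries in between.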
    #[inline]
    #[track_caller]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        self.borrow_mut().move_index(from, to);
    }

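    /// Swap the entries (and their indices) at positions `a` and `b`.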
    #[inline]
    #[track_caller]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        self.borrow_mut().swap_indices(a, b);
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().swap_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by swapping it with the last
    #[inline]
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().swap_remove_index(index)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

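    /// Keep only the entries for which `keep` returns `true`, preserving the
    /// relative order of the remaining entries.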
    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

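    /// Clear the indices table and reinsert an index for every entry.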
    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

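    /// Reverse the order of the entries, updating their indices in place.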
    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices; we can easily calculate what they
        // should be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }
}

/// Reserve entries capacity, rounded up to match the indices (via `try_capacity`).
fn reserve_entries<K, V>(entries: &mut Entries<K, V>, additional: usize, try_capacity: usize) {
    // Use a soft-limit on the maximum capacity, but if the caller explicitly
    // requested more, do it and let them have the resulting panic.
    let try_capacity = try_capacity.min(IndexMapCore::<K, V>::MAX_ENTRIES_CAPACITY);
    let try_add = try_capacity - entries.len();
    if try_add > additional && entries.try_reserve_exact(try_add).is_ok() {
        return;
    }
    entries.reserve_exact(additional);
}

impl<'a, K, V> RefMut<'a, K, V> {
    #[inline]
    fn new(indices: &'a mut Indices, entries: &'a mut Entries<K, V>) -> Self {
        Self { indices, entries }
    }

    /// Reserve entries capacity, rounded up to match the indices
    #[inline]
    fn reserve_entries(&mut self, additional: usize) {
        reserve_entries(self.entries, additional, self.indices.capacity());
    }

    /// Insert a key-value pair in `entries`,
    /// *without* checking whether it already exists.
    fn insert_unique(self, hash: HashValue, key: K, value: V) -> OccupiedEntry<'a, K, V> {
        let i = self.indices.len();
        debug_assert_eq!(i, self.entries.len());
        let entry = self
            .indices
            .insert_unique(hash.get(), i, get_hash(self.entries));
        if self.entries.len() == self.entries.capacity() {
            // We can't call `indices.capacity()` while this `entry` has borrowed it, so we'll have
            // to amortize growth on our own. It's still an improvement over the basic `Vec::push`
            // doubling though, since we also consider `MAX_ENTRIES_CAPACITY`.
            reserve_entries(self.entries, 1, 2 * self.entries.capacity());
        }
        self.entries.push(Bucket { hash, key, value });
        OccupiedEntry::new(self.entries, entry)
    }

    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Remove an entry by shifting all entries that follow it
    fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Remove an entry by swapping it with the last
    fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // Use `swap_remove`, but then we need to update the index that points
        // to the other entry that had to move.
        let entry = self.entries.swap_remove(index);

        // Correct the index that points to the entry that had to swap places.
        if let Some(entry) = self.entries.get(index) {
            // The removed entry was not the last element: the former last entry
            // now lives at `index`, so find it in `indices` and update it.
            let last = self.entries.len();
            update_index(self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(self.indices, entry.hash, i, i + 1);
            }
        }
    }

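    /// Move the entry at `from` to `to`, rotating the entries in between and
    /// updating their indices accordingly.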
    #[track_caller]
    fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        let _ = self.entries[to]; // explicit bounds check
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(self.indices, from_hash, usize::MAX, to);
        }
    }

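    /// Swap the entries at positions `a` and `b`, along with their slots in
    /// the indices table.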
    #[track_caller]
    fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // We'll get a "nice" bounds-check from indexing `entries`,
        // and then we expect to find it in the table as well.
        match self.indices.get_many_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
    assert_send_sync::<raw_entry_v1::RawEntryMut<'_, i32, i32, ()>>();
}