// iddqd/id_ord_map/iter.rs
use super::{IdOrdItem, RefMut, tables::IdOrdMapTables};
use crate::support::{
    alloc::Global,
    borrow::DormantMutRef,
    btree_table,
    item_set::{ConsumingItemSet, ItemSet, ItemSlot},
};
use core::{hash::Hash, iter::FusedIterator, marker::PhantomData};

10/// An iterator over the elements of an [`IdOrdMap`] by shared reference.
11///
12/// Created by [`IdOrdMap::iter`], and ordered by keys.
13///
14/// [`IdOrdMap`]: crate::IdOrdMap
15/// [`IdOrdMap::iter`]: crate::IdOrdMap::iter
16#[derive(Clone, Debug)]
17pub struct Iter<'a, T: IdOrdItem> {
18 items: &'a ItemSet<T, Global>,
19 iter: btree_table::Iter<'a>,
20}
21
22impl<'a, T: IdOrdItem> Iter<'a, T> {
23 pub(super) fn new(
24 items: &'a ItemSet<T, Global>,
25 tables: &'a IdOrdMapTables,
26 ) -> Self {
27 Self { items, iter: tables.key_to_item.iter() }
28 }
29}
30
31impl<'a, T: IdOrdItem> Iterator for Iter<'a, T> {
32 type Item = &'a T;
33
34 #[inline]
35 fn next(&mut self) -> Option<Self::Item> {
36 let index = self.iter.next()?;
37 Some(&self.items[index])
38 }
39}
40
41impl<T: IdOrdItem> ExactSizeIterator for Iter<'_, T> {
42 #[inline]
43 fn len(&self) -> usize {
44 self.iter.len()
45 }
46}
47
48// btree_set::Iter is a FusedIterator, so Iter is as well.
49impl<T: IdOrdItem> FusedIterator for Iter<'_, T> {}
50
51/// A raw pointer into the start of an `ItemSet`'s slot buffer, with the same
52/// thread-safety properties as an `&'a mut ItemSet<T, Global>`.
53///
54/// We use a raw pointer rather than lifetime extension as done by the hash map
55/// iterators to avoid reborrow invalidation under Stacked Borrows. Due to the
56/// way `Vec::index_mut` works, each iteration reborrowing `&mut self.items`
57/// would invalidate previously yielded `&mut T` children.
58struct ItemSetPtr<'a, T: IdOrdItem> {
59 // The pointer to the start of the slot buffer.
60 start_ptr: *mut ItemSlot<T>,
61 // Number of slots in the backing buffer at construction time.
62 slot_count: usize,
63 // Borrow the ItemSet for `'a` so the raw pointer stays live, and so that
64 // variance and drop-check work the same as `&'a mut ItemSet<T, Global>`.
65 _marker: PhantomData<&'a mut ItemSet<T, Global>>,
66}
67
68impl<T: IdOrdItem> core::fmt::Debug for ItemSetPtr<'_, T> {
69 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
70 f.debug_struct("ItemSetPtr")
71 .field("start_ptr", &self.start_ptr)
72 .field("slot_count", &self.slot_count)
73 .finish()
74 }
75}
76
77// SAFETY: `ItemSetPtr<'a, T>` has the same thread-safety semantics as `&'a mut
78// ItemSet<T, Global>`, which is `Send`/`Sync` iff `ItemSet<T, Global>` is
79// either of those, respectively. This reduces to `T: Send` / `T: Sync`, since
80// the global allocator `Global` is always `Send` + `Sync`.
81unsafe impl<'a, T: IdOrdItem + Send> Send for ItemSetPtr<'a, T> {}
82// SAFETY: see the `Send` impl above.
83unsafe impl<'a, T: IdOrdItem + Sync> Sync for ItemSetPtr<'a, T> {}
84
85/// An iterator over the elements of a [`IdOrdMap`] by mutable reference.
86///
87/// This iterator returns [`RefMut`] instances.
88///
89/// Created by [`IdOrdMap::iter_mut`], and ordered by keys.
90///
91/// [`IdOrdMap`]: crate::IdOrdMap
92/// [`IdOrdMap::iter_mut`]: crate::IdOrdMap::iter_mut
93#[derive(Debug)]
94pub struct IterMut<'a, T: IdOrdItem>
95where
96 T::Key<'a>: Hash,
97{
98 items: ItemSetPtr<'a, T>,
99 tables: &'a IdOrdMapTables,
100 iter: btree_table::Iter<'a>,
101}
102
103impl<'a, T: IdOrdItem> IterMut<'a, T>
104where
105 T::Key<'a>: Hash,
106{
107 pub(super) fn new(
108 items: &'a mut ItemSet<T, Global>,
109 tables: &'a IdOrdMapTables,
110 ) -> Self {
111 let slot_count = items.slot_count();
112 let start_ptr = items.start_ptr();
113 Self {
114 items: ItemSetPtr { start_ptr, slot_count, _marker: PhantomData },
115 tables,
116 iter: tables.key_to_item.iter(),
117 }
118 }
119}
120
121impl<'a, T: IdOrdItem + 'a> Iterator for IterMut<'a, T>
122where
123 T::Key<'a>: Hash,
124{
125 type Item = RefMut<'a, T>;
126
127 #[inline]
128 fn next(&mut self) -> Option<Self::Item> {
129 let index = self.iter.next()?;
130 let raw_index = index.as_u32() as usize;
131
132 // This is a belt-and-suspenders bounds check. As of 2026-04-28, we've
133 // carefully analyzed all the code paths (including for panic safety) to
134 // ensure that indexes stored in the B-tree are always in bounds. But a
135 // future change might inadvertently break things. Handle this kind of
136 // programmer error as a panic rather than UB.
137 assert!(
138 raw_index < self.items.slot_count,
139 "btree index {raw_index} out of bounds for slot count {}",
140 self.items.slot_count,
141 );
142
143 // SAFETY: We need to show:
144 //
145 // * `self.items.ptr.add(raw_index)` points at valid memory.
146 // * There are no overlapping mutable borrows of the same memory.
147 //
148 // This is shown by the following observations:
149 //
150 // * We construct `ItemSetPtr` by mutably borrowing the item set,
151 // which means that while this iterator is alive, no other code
152 // can access the item set.
153 // * The bounds check above shows that `raw_index` is in bounds.
154 // * The B-tree only stores indexes that currently point at `Some`
155 // slots in the backing `ItemSet`, so the slot is initialized.
156 // (Again, as of 2026-04-28 we've verified this invariant, but
157 // a future change might break things, so we use `expect` and not
158 // `unwrap_unchecked`.)
159 // * The B-tree is a set, so each call to `self.iter.next()` yields a
160 // distinct `index`. This means that the handed-out `&mut T`s
161 // never point to the same memory.
162 let item: &'a mut T = unsafe {
163 (*self.items.start_ptr.add(raw_index))
164 .as_mut()
165 .expect("btree index points at an Occupied slot in ItemSet")
166 };
167
168 let (hash, dormant) = {
169 let (item, dormant) = DormantMutRef::new(item);
170 let hash = self.tables.make_hash(item);
171 (hash, dormant)
172 };
173
174 // SAFETY: item is dropped above, and self is no longer used
175 // after this point.
176 let item = unsafe { dormant.awaken() };
177
178 Some(RefMut::new(self.tables.state().clone(), hash, item))
179 }
180}
181
182impl<'a, T: IdOrdItem + 'a> ExactSizeIterator for IterMut<'a, T>
183where
184 T::Key<'a>: Hash,
185{
186 #[inline]
187 fn len(&self) -> usize {
188 self.iter.len()
189 }
190}
191
192impl<'a, T: IdOrdItem + 'a> FusedIterator for IterMut<'a, T> where
193 T::Key<'a>: Hash
194{
195}
196
197/// An iterator over the elements of a [`IdOrdMap`] by ownership.
198///
199/// Created by [`IdOrdMap::into_iter`], and ordered by keys.
200///
201/// [`IdOrdMap`]: crate::IdOrdMap
202/// [`IdOrdMap::into_iter`]: crate::IdOrdMap::into_iter
203#[derive(Debug)]
204pub struct IntoIter<T: IdOrdItem> {
205 items: ConsumingItemSet<T, Global>,
206 iter: btree_table::IntoIter,
207}
208
209impl<T: IdOrdItem> IntoIter<T> {
210 pub(super) fn new(
211 items: ItemSet<T, Global>,
212 tables: IdOrdMapTables,
213 ) -> Self {
214 Self {
215 items: items.into_consuming(),
216 iter: tables.key_to_item.into_iter(),
217 }
218 }
219}
220
221impl<T: IdOrdItem> Iterator for IntoIter<T> {
222 type Item = T;
223
224 #[inline]
225 fn next(&mut self) -> Option<Self::Item> {
226 let index = self.iter.next()?;
227 // We own `self.items` and the B-tree's indexes are never revisited, so
228 // we can take directly from the consuming view.
229 let next = self
230 .items
231 .take(index)
232 .unwrap_or_else(|| panic!("index {index} not found in items"));
233 Some(next)
234 }
235}