Commit 9ab2ee0 ("Fix tests")
Author: Markus Westerlind
1 parent: 2b7a513

2 files changed: +12 -9 lines

src/lib.rs: 5 additions & 0 deletions
@@ -48,6 +48,11 @@ pub mod raw {
     pub use inner::*;
 
     #[cfg(feature = "rayon")]
+    /// [rayon]-based parallel iterator types for hash maps.
+    /// You will rarely need to interact with it directly unless you have need
+    /// to name one of the iterator types.
+    ///
+    /// [rayon]: https://docs.rs/rayon/1.0/rayon
     pub mod rayon {
        pub use crate::external_trait_impls::rayon::raw::*;
    }
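
For context, a minimal sketch (not part of this commit) of what the "rayon" feature enables one level up, at the map API. It assumes hashbrown is built with features = ["rayon"] and that rayon's prelude is in scope; the parallel iterator types driving it are the kind the new module documentation refers to.

use hashbrown::HashMap;
use rayon::prelude::*;

fn main() {
    let map: HashMap<&str, u64> = [("a", 1), ("b", 2), ("c", 3)].iter().copied().collect();
    // Under the "rayon" feature, hashbrown's maps implement rayon's
    // parallel-iterator traits, so `par_iter` is available directly.
    let total: u64 = map.par_iter().map(|(_, v)| *v).sum();
    assert_eq!(total, 6);
}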

src/raw/mod.rs: 7 additions & 9 deletions
@@ -730,7 +730,7 @@ impl<T> RawTable<T> {
             // - there are no DELETED entries.
             // - we know there is enough space in the table.
             // - all elements are unique.
-            let index = new_table.prepare_insert_slot(hash);
+            let (index, _) = new_table.prepare_insert_slot(hash);
             new_table.bucket(index).copy_from_nonoverlapping(&item);
         }
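
In this resize path the destination table is freshly allocated, so the control byte the call now also returns is always EMPTY and is simply discarded with `_`. For reference, a small standalone sketch of the kind of bitwise, Clone-free copy that `copy_from_nonoverlapping` performs on the bucket's payload (assuming it wraps the std primitive of the same name; the buffers and values here are made up):

use std::ptr;

fn main() {
    let src = [10u32, 20, 30];
    let mut dst = [0u32; 3];
    // A bitwise move between non-overlapping buffers: no Clone, no Drop,
    // the same shape as moving a bucket's payload into the new table.
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    }
    assert_eq!(dst, [10, 20, 30]);
}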

@@ -787,12 +787,11 @@
     #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
     pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
         unsafe {
-            let index = self.table.prepare_insert_slot(hash);
+            let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
             let bucket = self.table.bucket(index);
 
             // If we are replacing a DELETED entry then we don't need to update
             // the load counter.
-            let old_ctrl = *self.table.ctrl(index);
             self.table.growth_left -= special_is_empty(old_ctrl) as usize;
 
             bucket.write(value);
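
This call site is the likely reason for the commit message: previously `old_ctrl` was read from `self.table.ctrl(index)` only after `prepare_insert_slot` had already stamped the slot with the new hash tag, so the read observed the new byte rather than the slot's prior state, and `growth_left` was debited incorrectly. A minimal standalone model of the corrected accounting; `Table`, `main`, and the sentinel values are illustrative stand-ins, not hashbrown's API:

const EMPTY: u8 = 0b1111_1111;   // models hashbrown's EMPTY sentinel
const DELETED: u8 = 0b1000_0000; // models hashbrown's DELETED sentinel

struct Table {
    ctrl: Vec<u8>,
    growth_left: usize,
}

impl Table {
    // Mirrors the new signature: the index plus the byte the slot held
    // *before* it was stamped with the new hash tag.
    fn prepare_insert_slot(&mut self, index: usize, h2: u8) -> (usize, u8) {
        let old_ctrl = self.ctrl[index];
        self.ctrl[index] = h2;
        (index, old_ctrl)
    }

    fn insert_no_grow(&mut self, index: usize, h2: u8) {
        let (_index, old_ctrl) = self.prepare_insert_slot(index, h2);
        // Filling a previously EMPTY slot consumes growth budget; reusing
        // a DELETED slot does not, since it already counted against load.
        self.growth_left -= (old_ctrl == EMPTY) as usize;
    }
}

fn main() {
    let mut t = Table { ctrl: vec![EMPTY, DELETED], growth_left: 2 };
    t.insert_no_grow(0, 0x2A); // EMPTY slot: budget shrinks
    t.insert_no_grow(1, 0x2A); // DELETED slot: budget unchanged
    assert_eq!(t.growth_left, 1);
}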

@@ -1041,10 +1040,11 @@ impl RawTableInner {
     ///
     /// There must be at least 1 empty bucket in the table.
     #[inline]
-    unsafe fn prepare_insert_slot(&self, hash: u64) -> usize {
+    unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
         let index = self.find_insert_slot(hash);
+        let old_ctrl = *self.ctrl(index);
         self.set_ctrl_h2(index, hash);
-        index
+        (index, old_ctrl)
     }
 
     /// Searches for an empty or deleted bucket which is suitable for inserting
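
The fix itself: the previous control byte is captured before `set_ctrl_h2` overwrites it, and returned alongside the index, so callers like `insert_no_grow` see the slot's true prior state while the bulk-copy paths above and below just destructure it away as `(index, _)`. A compressed model of why the ordering matters (made-up byte values; 0xFF stands in for EMPTY):

fn main() {
    // 0xFF models EMPTY; 0x11 models the new hash tag (h2).
    let (index, h2) = (2usize, 0x11u8);

    // Old call-site order (the bug): stamp first, read afterwards.
    let mut ctrl = [0xFFu8; 4];
    ctrl[index] = h2;           // set_ctrl_h2 has already run...
    let old_ctrl = ctrl[index]; // ...so this reads the NEW byte
    assert_eq!(old_ctrl, h2);   // EMPTY can never be observed

    // New order inside prepare_insert_slot: read, then stamp.
    let mut ctrl = [0xFFu8; 4];
    let old_ctrl = ctrl[index]; // the slot's true previous state
    ctrl[index] = h2;
    assert_eq!(old_ctrl, 0xFF);
}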

@@ -1501,7 +1501,7 @@ impl<T: Clone> RawTable<T> {
                 // - there are no DELETED entries.
                 // - we know there is enough space in the table.
                 // - all elements are unique.
-                let index = guard_self.table.prepare_insert_slot(hash);
+                let (index, _) = guard_self.table.prepare_insert_slot(hash);
                 guard_self.bucket(index).write(item);
             }
         }

@@ -1707,9 +1707,7 @@ impl<T> Iterator for RawIterRange<T> {
                 // than the group size where the trailing control bytes are all
                 // EMPTY. On larger tables self.end is guaranteed to be aligned
                 // to the group size (since tables are power-of-two sized).
-                if let None = self.inner.next_group() {
-                    return None;
-                }
+                self.inner.next_group()?;
                 self.data = self.data.next_n(Group::WIDTH);
             }
         }
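
Finally, a tidy-up rather than a fix: `?` applied to an `Option` inside a function that itself returns `Option` early-returns `None`, which is exactly what the removed `if let None ... return None` block did. A self-contained sketch with a hypothetical iterator (not hashbrown's `RawIterRange`):

struct Pairs<I: Iterator> {
    inner: I,
}

impl<I: Iterator> Iterator for Pairs<I> {
    type Item = (I::Item, I::Item);

    fn next(&mut self) -> Option<Self::Item> {
        // Early-returns `None` as soon as the inner iterator is exhausted,
        // the same control flow as `if let None = ... { return None; }`.
        let a = self.inner.next()?;
        let b = self.inner.next()?;
        Some((a, b))
    }
}

fn main() {
    let mut pairs = Pairs { inner: 1..=5 };
    assert_eq!(pairs.next(), Some((1, 2)));
    assert_eq!(pairs.next(), Some((3, 4)));
    assert_eq!(pairs.next(), None); // the trailing 5 has no partner
}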
