@@ -730,7 +730,7 @@ impl<T> RawTable<T> {
                 // - there are no DELETED entries.
                 // - we know there is enough space in the table.
                 // - all elements are unique.
-                let index = new_table.prepare_insert_slot(hash);
+                let (index, _) = new_table.prepare_insert_slot(hash);
                 new_table.bucket(index).copy_from_nonoverlapping(&item);
             }
 
@@ -787,12 +787,11 @@ impl<T> RawTable<T> {
     #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
     pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
         unsafe {
-            let index = self.table.prepare_insert_slot(hash);
+            let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
             let bucket = self.table.bucket(index);
 
             // If we are replacing a DELETED entry then we don't need to update
             // the load counter.
-            let old_ctrl = *self.table.ctrl(index);
             self.table.growth_left -= special_is_empty(old_ctrl) as usize;
 
             bucket.write(value);
@@ -1041,10 +1040,11 @@ impl RawTableInner {
     ///
     /// There must be at least 1 empty bucket in the table.
     #[inline]
-    unsafe fn prepare_insert_slot(&self, hash: u64) -> usize {
+    unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
         let index = self.find_insert_slot(hash);
+        let old_ctrl = *self.ctrl(index);
         self.set_ctrl_h2(index, hash);
-        index
+        (index, old_ctrl)
     }
 
     /// Searches for an empty or deleted bucket which is suitable for inserting
@@ -1501,7 +1501,7 @@ impl<T: Clone> RawTable<T> {
                 // - there are no DELETED entries.
                 // - we know there is enough space in the table.
                 // - all elements are unique.
-                let index = guard_self.table.prepare_insert_slot(hash);
+                let (index, _) = guard_self.table.prepare_insert_slot(hash);
                 guard_self.bucket(index).write(item);
             }
         }
@@ -1707,9 +1707,7 @@ impl<T> Iterator for RawIterRange<T> {
                 // than the group size where the trailing control bytes are all
                 // EMPTY. On larger tables self.end is guaranteed to be aligned
                 // to the group size (since tables are power-of-two sized).
-                if let None = self.inner.next_group() {
-                    return None;
-                }
+                self.inner.next_group()?;
                 self.data = self.data.next_n(Group::WIDTH);
             }
         }
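
The net effect of the change: `prepare_insert_slot` now returns the control byte it is about to overwrite alongside the slot index, so `insert_no_grow` can do its `growth_left` bookkeeping without re-reading `ctrl(index)` after the slot has already been claimed. Below is a minimal standalone sketch of the same idea; the toy table, sentinel values, and simplified h2 encoding are assumptions of the sketch, not hashbrown's real SIMD-probing internals.

```rust
// Toy illustration only: mirrors the shape of the change, not the real RawTable.
const EMPTY: u8 = 0xFF;
const DELETED: u8 = 0x80;

fn special_is_empty(ctrl: u8) -> bool {
    ctrl == EMPTY
}

struct ToyTable {
    ctrl: Vec<u8>,
    growth_left: usize,
}

impl ToyTable {
    fn new(buckets: usize) -> Self {
        ToyTable { ctrl: vec![EMPTY; buckets], growth_left: buckets }
    }

    // Stand-in for find_insert_slot: linear probe for an EMPTY or DELETED slot.
    fn find_insert_slot(&self, hash: u64) -> usize {
        let start = hash as usize % self.ctrl.len();
        (0..self.ctrl.len())
            .map(|i| (start + i) % self.ctrl.len())
            .find(|&i| self.ctrl[i] == EMPTY || self.ctrl[i] == DELETED)
            .expect("there must be at least 1 empty bucket in the table")
    }

    // After the patch: read the control byte once, overwrite it with the h2
    // value, and hand both the index and the old byte back to the caller.
    fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) {
        let index = self.find_insert_slot(hash);
        let old_ctrl = self.ctrl[index];
        self.ctrl[index] = (hash & 0x7F) as u8; // simplified set_ctrl_h2
        (index, old_ctrl)
    }

    fn insert_no_grow(&mut self, hash: u64) -> usize {
        let (index, old_ctrl) = self.prepare_insert_slot(hash);
        // Replacing a DELETED entry does not consume any growth budget.
        self.growth_left -= special_is_empty(old_ctrl) as usize;
        index
    }
}

fn main() {
    let mut table = ToyTable::new(8);
    let index = table.insert_no_grow(42);
    println!("inserted at {index}, growth_left = {}", table.growth_left);
}
```

Callers that don't care about the old byte, such as the resize and clone paths above, simply destructure it away with `(index, _)`.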